repo_name (string, lengths 5-100) | path (string, lengths 4-299) | copies (string, 990 classes) | size (string, lengths 4-7) | content (string, lengths 666-1.03M) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
spr/album-sound-check | mutagen/musepack.py | 11 | 4118 | # A Musepack reader/tagger
#
# Copyright 2006 Lukas Lalinsky <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# $Id: musepack.py 4013 2007-04-23 09:18:22Z luks $
"""Musepack audio streams with APEv2 tags.
Musepack is an audio format originally based on the MPEG-1 Layer-2
algorithms. Stream versions 4 through 7 are supported.
For more information, see http://www.musepack.net/.
"""
__all__ = ["Musepack", "Open", "delete"]
import struct
from mutagen.apev2 import APEv2File, error, delete
from mutagen.id3 import BitPaddedInt
from mutagen._util import cdata
class MusepackHeaderError(error): pass
RATES = [44100, 48000, 37800, 32000]
class MusepackInfo(object):
"""Musepack stream information.
Attributes:
channels -- number of audio channels
length -- file length in seconds, as a float
sample_rate -- audio sampling rate in Hz
bitrate -- audio bitrate, in bits per second
version -- Musepack stream version
Optional Attributes:
title_gain, title_peak -- Replay Gain and peak data for this song
album_gain, album_peak -- Replay Gain and peak data for this album
These attributes are only available in stream version 7. The
gains are a float, +/- some dB. The peaks are a percentage [0..1] of
the maximum amplitude. This means to get a number comparable to
VorbisGain, you must multiply the peak by 2.
"""
def __init__(self, fileobj):
header = fileobj.read(32)
if len(header) != 32:
raise MusepackHeaderError("not a Musepack file")
# Skip ID3v2 tags
if header[:3] == "ID3":
size = 10 + BitPaddedInt(header[6:10])
fileobj.seek(size)
header = fileobj.read(32)
if len(header) != 32:
raise MusepackHeaderError("not a Musepack file")
# SV7
if header.startswith("MP+"):
self.version = ord(header[3]) & 0xF
if self.version < 7:
raise MusepackHeaderError("not a Musepack file")
frames = cdata.uint_le(header[4:8])
flags = cdata.uint_le(header[8:12])
self.title_peak, self.title_gain = struct.unpack(
"<Hh", header[12:16])
self.album_peak, self.album_gain = struct.unpack(
"<Hh", header[16:20])
self.title_gain /= 100.0
self.album_gain /= 100.0
self.title_peak /= 65535.0
self.album_peak /= 65535.0
self.sample_rate = RATES[(flags >> 16) & 0x0003]
self.bitrate = 0
# SV4-SV6
else:
header_dword = cdata.uint_le(header[0:4])
            self.version = (header_dword >> 11) & 0x03FF
            if self.version < 4 or self.version > 6:
                raise MusepackHeaderError("not a Musepack file")
            self.bitrate = (header_dword >> 23) & 0x01FF
self.sample_rate = 44100
if self.version >= 5:
frames = cdata.uint_le(header[4:8])
else:
frames = cdata.ushort_le(header[6:8])
if self.version < 6:
frames -= 1
self.channels = 2
self.length = float(frames * 1152 - 576) / self.sample_rate
if not self.bitrate and self.length != 0:
fileobj.seek(0, 2)
self.bitrate = int(fileobj.tell() * 8 / (self.length * 1000) + 0.5)
def pprint(self):
if self.version >= 7:
rg_data = ", Gain: %+0.2f (title), %+0.2f (album)" %(
self.title_gain, self.album_gain)
else:
rg_data = ""
return "Musepack, %.2f seconds, %d Hz%s" % (
self.length, self.sample_rate, rg_data)
class Musepack(APEv2File):
_Info = MusepackInfo
_mimes = ["audio/x-musepack", "audio/x-mpc"]
def score(filename, fileobj, header):
return header.startswith("MP+") + filename.endswith(".mpc")
score = staticmethod(score)
Open = Musepack
| gpl-2.0 | -556,819,875,958,927,100 | 33.898305 | 79 | 0.595435 | false |
barachka/odoo | addons/website/controllers/main.py | 8 | 18654 | # -*- coding: utf-8 -*-
import cStringIO
import datetime
from itertools import islice
import json
import logging
import re
from sys import maxint
import werkzeug.utils
import werkzeug.wrappers
from PIL import Image
import openerp
from openerp.addons.web import http
from openerp.http import request, Response
logger = logging.getLogger(__name__)
# Completely arbitrary limits
MAX_IMAGE_WIDTH, MAX_IMAGE_HEIGHT = IMAGE_LIMITS = (1024, 768)
LOC_PER_SITEMAP = 45000
SITEMAP_CACHE_TIME = datetime.timedelta(hours=12)
class Website(openerp.addons.web.controllers.main.Home):
#------------------------------------------------------
# View
#------------------------------------------------------
@http.route('/', type='http', auth="public", website=True)
def index(self, **kw):
page = 'homepage'
try:
main_menu = request.registry['ir.model.data'].get_object(request.cr, request.uid, 'website', 'main_menu')
except Exception:
pass
else:
first_menu = main_menu.child_id and main_menu.child_id[0]
if first_menu:
if not (first_menu.url.startswith(('/page/', '/?', '/#')) or (first_menu.url=='/')):
return request.redirect(first_menu.url)
if first_menu.url.startswith('/page/'):
return request.registry['ir.http'].reroute(first_menu.url)
return self.page(page)
@http.route(website=True, auth="public")
def web_login(self, *args, **kw):
# TODO: can't we just put auth=public, ... in web client ?
return super(Website, self).web_login(*args, **kw)
@http.route('/page/<page:page>', type='http', auth="public", website=True)
def page(self, page, **opt):
values = {
'path': page,
}
# allow shortcut for /page/<website_xml_id>
if '.' not in page:
page = 'website.%s' % page
try:
request.website.get_template(page)
except ValueError, e:
# page not found
if request.website.is_publisher():
page = 'website.page_404'
else:
return request.registry['ir.http']._handle_exception(e, 404)
return request.render(page, values)
@http.route(['/robots.txt'], type='http', auth="public")
def robots(self):
return request.render('website.robots', {'url_root': request.httprequest.url_root}, mimetype='text/plain')
@http.route('/sitemap.xml', type='http', auth="public", website=True)
def sitemap_xml_index(self):
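        # Serves a cached sitemap stored as an ir.attachment when it is younger
        # than SITEMAP_CACHE_TIME; otherwise regenerates one sitemap file per
        # LOC_PER_SITEMAP locations, plus an index document when several files
        # are needed.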
cr, uid, context = request.cr, openerp.SUPERUSER_ID, request.context
ira = request.registry['ir.attachment']
iuv = request.registry['ir.ui.view']
        mimetype = 'application/xml;charset=utf-8'
content = None
def create_sitemap(url, content):
ira.create(cr, uid, dict(
datas=content.encode('base64'),
mimetype=mimetype,
type='binary',
name=url,
url=url,
), context=context)
sitemap = ira.search_read(cr, uid, [('url', '=' , '/sitemap.xml'), ('type', '=', 'binary')], ('datas', 'create_date'), context=context)
if sitemap:
# Check if stored version is still valid
server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
create_date = datetime.datetime.strptime(sitemap[0]['create_date'], server_format)
delta = datetime.datetime.now() - create_date
if delta < SITEMAP_CACHE_TIME:
content = sitemap[0]['datas'].decode('base64')
if not content:
            # Remove all sitemaps in ir.attachments as we're going to regenerate them
sitemap_ids = ira.search(cr, uid, [('url', '=like' , '/sitemap%.xml'), ('type', '=', 'binary')], context=context)
if sitemap_ids:
ira.unlink(cr, uid, sitemap_ids, context=context)
pages = 0
first_page = None
locs = request.website.enumerate_pages()
while True:
start = pages * LOC_PER_SITEMAP
values = {
'locs': islice(locs, start, start + LOC_PER_SITEMAP),
'url_root': request.httprequest.url_root[:-1],
}
urls = iuv.render(cr, uid, 'website.sitemap_locs', values, context=context)
if urls.strip():
page = iuv.render(cr, uid, 'website.sitemap_xml', dict(content=urls), context=context)
if not first_page:
first_page = page
pages += 1
create_sitemap('/sitemap-%d.xml' % pages, page)
else:
break
if not pages:
return request.not_found()
elif pages == 1:
content = first_page
else:
# Sitemaps must be split in several smaller files with a sitemap index
content = iuv.render(cr, uid, 'website.sitemap_index_xml', dict(
pages=range(1, pages + 1),
url_root=request.httprequest.url_root,
), context=context)
create_sitemap('/sitemap.xml', content)
return request.make_response(content, [('Content-Type', mimetype)])
#------------------------------------------------------
# Edit
#------------------------------------------------------
@http.route('/website/add/<path:path>', type='http', auth="user", website=True)
def pagenew(self, path, noredirect=False, add_menu=None):
xml_id = request.registry['website'].new_page(request.cr, request.uid, path, context=request.context)
if add_menu:
model, id = request.registry["ir.model.data"].get_object_reference(request.cr, request.uid, 'website', 'main_menu')
request.registry['website.menu'].create(request.cr, request.uid, {
'name': path,
'url': "/page/" + xml_id,
'parent_id': id,
}, context=request.context)
# Reverse action in order to allow shortcut for /page/<website_xml_id>
url = "/page/" + re.sub(r"^website\.", '', xml_id)
if noredirect:
return werkzeug.wrappers.Response(url, mimetype='text/plain')
return werkzeug.utils.redirect(url)
@http.route('/website/theme_change', type='http', auth="user", website=True)
def theme_change(self, theme_id=False, **kwargs):
imd = request.registry['ir.model.data']
Views = request.registry['ir.ui.view']
_, theme_template_id = imd.get_object_reference(
request.cr, request.uid, 'website', 'theme')
views = Views.search(request.cr, request.uid, [
('inherit_id', '=', theme_template_id),
('application', '=', 'enabled'),
], context=request.context)
Views.write(request.cr, request.uid, views, {
'application': 'disabled',
}, context=request.context)
if theme_id:
module, xml_id = theme_id.split('.')
_, view_id = imd.get_object_reference(
request.cr, request.uid, module, xml_id)
Views.write(request.cr, request.uid, [view_id], {
'application': 'enabled'
}, context=request.context)
return request.render('website.themes', {'theme_changed': True})
@http.route(['/website/snippets'], type='json', auth="public", website=True)
def snippets(self):
return request.website._render('website.snippets')
@http.route('/website/reset_templates', type='http', auth='user', methods=['POST'], website=True)
def reset_template(self, templates, redirect='/'):
templates = request.httprequest.form.getlist('templates')
modules_to_update = []
for temp_id in templates:
view = request.registry['ir.ui.view'].browse(request.cr, request.uid, int(temp_id), context=request.context)
view.model_data_id.write({
'noupdate': False
})
if view.model_data_id.module not in modules_to_update:
modules_to_update.append(view.model_data_id.module)
module_obj = request.registry['ir.module.module']
module_ids = module_obj.search(request.cr, request.uid, [('name', 'in', modules_to_update)], context=request.context)
module_obj.button_immediate_upgrade(request.cr, request.uid, module_ids, context=request.context)
return request.redirect(redirect)
@http.route('/website/customize_template_get', type='json', auth='user', website=True)
def customize_template_get(self, xml_id, full=False, bundles=False):
""" Lists the templates customizing ``xml_id``. By default, only
returns optional templates (which can be toggled on and off), if
``full=True`` returns all templates customizing ``xml_id``
``bundles=True`` returns also the asset bundles
"""
imd = request.registry['ir.model.data']
view_model, view_theme_id = imd.get_object_reference(
request.cr, request.uid, 'website', 'theme')
user = request.registry['res.users']\
.browse(request.cr, request.uid, request.uid, request.context)
user_groups = set(user.groups_id)
views = request.registry["ir.ui.view"]\
._views_get(request.cr, request.uid, xml_id, bundles=bundles, context=request.context)
done = set()
result = []
for v in views:
if not user_groups.issuperset(v.groups_id):
continue
if full or (v.application != 'always' and v.inherit_id.id != view_theme_id):
if v.inherit_id not in done:
result.append({
'name': v.inherit_id.name,
'id': v.id,
'xml_id': v.xml_id,
'inherit_id': v.inherit_id.id,
'header': True,
'active': False
})
done.add(v.inherit_id)
result.append({
'name': v.name,
'id': v.id,
'xml_id': v.xml_id,
'inherit_id': v.inherit_id.id,
'header': False,
'active': v.application in ('always', 'enabled'),
})
return result
@http.route('/website/get_view_translations', type='json', auth='public', website=True)
def get_view_translations(self, xml_id, lang=None):
lang = lang or request.context.get('lang')
views = self.customize_template_get(xml_id, full=True)
views_ids = [view.get('id') for view in views if view.get('active')]
domain = [('type', '=', 'view'), ('res_id', 'in', views_ids), ('lang', '=', lang)]
irt = request.registry.get('ir.translation')
return irt.search_read(request.cr, request.uid, domain, ['id', 'res_id', 'value','state','gengo_translation'], context=request.context)
@http.route('/website/set_translations', type='json', auth='public', website=True)
def set_translations(self, data, lang):
irt = request.registry.get('ir.translation')
for view_id, trans in data.items():
view_id = int(view_id)
for t in trans:
initial_content = t['initial_content'].strip()
new_content = t['new_content'].strip()
tid = t['translation_id']
if not tid:
old_trans = irt.search_read(
request.cr, request.uid,
[
('type', '=', 'view'),
('res_id', '=', view_id),
('lang', '=', lang),
('src', '=', initial_content),
])
if old_trans:
tid = old_trans[0]['id']
if tid:
vals = {'value': new_content}
irt.write(request.cr, request.uid, [tid], vals)
else:
new_trans = {
'name': 'website',
'res_id': view_id,
'lang': lang,
'type': 'view',
'source': initial_content,
'value': new_content,
}
if t.get('gengo_translation'):
new_trans['gengo_translation'] = t.get('gengo_translation')
new_trans['gengo_comment'] = t.get('gengo_comment')
irt.create(request.cr, request.uid, new_trans)
return True
@http.route('/website/attach', type='http', auth='user', methods=['POST'], website=True)
def attach(self, func, upload=None, url=None):
Attachments = request.registry['ir.attachment']
website_url = message = None
if not upload:
website_url = url
name = url.split("/").pop()
attachment_id = Attachments.create(request.cr, request.uid, {
'name':name,
'type': 'url',
'url': url,
'res_model': 'ir.ui.view',
}, request.context)
else:
try:
image_data = upload.read()
image = Image.open(cStringIO.StringIO(image_data))
w, h = image.size
if w*h > 42e6: # Nokia Lumia 1020 photo resolution
raise ValueError(
u"Image size excessive, uploaded images must be smaller "
u"than 42 million pixel")
attachment_id = Attachments.create(request.cr, request.uid, {
'name': upload.filename,
'datas': image_data.encode('base64'),
'datas_fname': upload.filename,
'res_model': 'ir.ui.view',
}, request.context)
[attachment] = Attachments.read(
request.cr, request.uid, [attachment_id], ['website_url'],
context=request.context)
website_url = attachment['website_url']
except Exception, e:
logger.exception("Failed to upload image to attachment")
message = unicode(e)
return """<script type='text/javascript'>
window.parent['%s'](%s, %s);
</script>""" % (func, json.dumps(website_url), json.dumps(message))
@http.route(['/website/publish'], type='json', auth="public", website=True)
def publish(self, id, object):
_id = int(id)
_object = request.registry[object]
obj = _object.browse(request.cr, request.uid, _id)
values = {}
if 'website_published' in _object._all_columns:
values['website_published'] = not obj.website_published
_object.write(request.cr, request.uid, [_id],
values, context=request.context)
obj = _object.browse(request.cr, request.uid, _id)
return bool(obj.website_published)
#------------------------------------------------------
# Helpers
#------------------------------------------------------
@http.route(['/website/kanban'], type='http', auth="public", methods=['POST'], website=True)
def kanban(self, **post):
return request.website.kanban_col(**post)
def placeholder(self, response):
return request.registry['website']._image_placeholder(response)
@http.route([
'/website/image',
'/website/image/<model>/<id>/<field>'
], auth="public", website=True)
def website_image(self, model, id, field, max_width=None, max_height=None):
""" Fetches the requested field and ensures it does not go above
(max_width, max_height), resizing it if necessary.
If the record is not found or does not have the requested field,
returns a placeholder image via :meth:`~.placeholder`.
Sets and checks conditional response parameters:
* :mailheader:`ETag` is always set (and checked)
        * :mailheader:`Last-Modified` is set iff the record has a concurrency
          field (``__last_update``)
The requested field is assumed to be base64-encoded image data in
all cases.
"""
response = werkzeug.wrappers.Response()
return request.registry['website']._image(
request.cr, request.uid, model, id, field, response, max_width, max_height)
#------------------------------------------------------
# Server actions
#------------------------------------------------------
@http.route('/website/action/<path_or_xml_id_or_id>', type='http', auth="public", website=True)
def actions_server(self, path_or_xml_id_or_id, **post):
cr, uid, context = request.cr, request.uid, request.context
res, action_id, action = None, None, None
ServerActions = request.registry['ir.actions.server']
# find the action_id: either an xml_id, the path, or an ID
if isinstance(path_or_xml_id_or_id, basestring) and '.' in path_or_xml_id_or_id:
action_id = request.registry['ir.model.data'].xmlid_to_res_id(request.cr, request.uid, path_or_xml_id_or_id, raise_if_not_found=False)
if not action_id:
action_ids = ServerActions.search(cr, uid, [('website_path', '=', path_or_xml_id_or_id), ('website_published', '=', True)], context=context)
action_id = action_ids and action_ids[0] or None
if not action_id:
try:
action_id = int(path_or_xml_id_or_id)
except ValueError:
pass
# check it effectively exists
if action_id:
action_ids = ServerActions.exists(cr, uid, [action_id], context=context)
action_id = action_ids and action_ids[0] or None
# run it, return only if we got a Response object
if action_id:
action = ServerActions.browse(cr, uid, action_id, context=context)
if action.state == 'code' and action.website_published:
action_res = ServerActions.run(cr, uid, [action_id], context=context)
if isinstance(action_res, werkzeug.wrappers.Response):
res = action_res
if res:
return res
return request.redirect('/')
| agpl-3.0 | -8,984,082,276,017,371,000 | 43.308789 | 152 | 0.534738 | false |
aminert/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause | 4,867,020,870,719,933,000 | 36.704142 | 80 | 0.639046 | false |
CS-SI/QGIS | python/plugins/processing/algs/gdal/aspect.py | 1 | 5659 | # -*- coding: utf-8 -*-
"""
***************************************************************************
aspect.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.core import (QgsRasterFileWriter,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterBoolean,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class aspect(GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
COMPUTE_EDGES = 'COMPUTE_EDGES'
ZEVENBERGEN = 'ZEVENBERGEN'
TRIG_ANGLE = 'TRIG_ANGLE'
ZERO_FLAT = 'ZERO_FLAT'
OPTIONS = 'OPTIONS'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterBand(self.BAND,
self.tr('Band number'),
parentLayerParameterName=self.INPUT))
self.addParameter(QgsProcessingParameterBoolean(self.TRIG_ANGLE,
self.tr('Return trigonometric angle instead of azimuth'),
defaultValue=False))
self.addParameter(QgsProcessingParameterBoolean(self.ZERO_FLAT,
self.tr('Return 0 for flat instead of -9999'),
defaultValue=False))
self.addParameter(QgsProcessingParameterBoolean(self.COMPUTE_EDGES,
self.tr('Compute edges'),
defaultValue=False))
self.addParameter(QgsProcessingParameterBoolean(self.ZEVENBERGEN,
self.tr("Use Zevenbergen&Thorne formula instead of the Horn's one"),
defaultValue=False))
options_param = QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation parameters'),
defaultValue='',
optional=True)
options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
options_param.setMetadata({
'widget_wrapper': {
'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
self.addParameter(options_param)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('Aspect')))
def name(self):
return 'aspect'
def displayName(self):
return self.tr('Aspect')
def group(self):
return self.tr('Raster analysis')
def groupId(self):
return 'rasteranalysis'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
arguments = ['aspect']
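        # Assembles a command line of the form:
        #   gdaldem aspect <input> <output> -of <driver> -b <band>
        #   [-trigonometric] [-zero_for_flat] [-compute_edges]
        #   [-alg ZevenbergenThorne] [creation options]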
inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
arguments.append(inLayer.source())
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
arguments.append(out)
arguments.append('-of')
arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
arguments.append('-b')
arguments.append(str(self.parameterAsInt(parameters, self.BAND, context)))
if self.parameterAsBool(parameters, self.TRIG_ANGLE, context):
arguments.append('-trigonometric')
if self.parameterAsBool(parameters, self.ZERO_FLAT, context):
arguments.append('-zero_for_flat')
if self.parameterAsBool(parameters, self.COMPUTE_EDGES, context):
arguments.append('-compute_edges')
if self.parameterAsBool(parameters, self.ZEVENBERGEN, context):
arguments.append('-alg')
arguments.append('ZevenbergenThorne')
options = self.parameterAsString(parameters, self.OPTIONS, context)
if options:
arguments.extend(GdalUtils.parseCreationOptions(options))
return ['gdaldem', GdalUtils.escapeAndJoin(arguments)]
| gpl-2.0 | -4,678,324,985,353,725,000 | 42.530769 | 124 | 0.540201 | false |
LSS-USP/kiskadee | kiskadee/queue.py | 1 | 2457 | """Provide kiskadee queues and operations on them."""
import time
from multiprocessing import Queue
import kiskadee
analysis = Queue()
results = Queue()
packages = Queue()
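# Three shared queues back the pipeline: fetchers enqueue monitored packages,
# the monitor forwards them as analyses, and runners push results back to the
# monitor.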
class Queues():
"""Provide kiskadee queues objects."""
@staticmethod
def enqueue_analysis(package_to_analysis):
"""Put a analysis on the analysis queue."""
log_msg = "MONITOR STATE: Sending package {}-{} for analysis"\
.format(package_to_analysis['name'],
package_to_analysis['version'])
kiskadee.logger.debug(log_msg)
analysis.put(package_to_analysis)
@staticmethod
def dequeue_analysis():
"""Get a analysis from the analysis queue."""
package_to_analysis = analysis.get()
        fetcher = package_to_analysis['fetcher'].split('.')[-1]
kiskadee.logger.debug(
'RUNNER STATE: dequeued {}-{} from {}'
.format(package_to_analysis['name'],
package_to_analysis['version'],
fetcher)
)
return package_to_analysis
@staticmethod
def enqueue_result(package):
"""Put a result on the results queue."""
kiskadee.logger.debug(
"RUNNER STATE: Sending {}-{} to Monitor"
.format(package["name"],
package["version"])
)
results.put(package)
@staticmethod
def dequeue_result():
"""Get a result from the results queue."""
result = results.get()
kiskadee.logger.debug(
"MONITOR STATE: Pick Up analyzed package"
.format(result["name"],
result["version"])
)
return result
@staticmethod
def enqueue_package(package, fetcher=None):
"""Put a result on the results queue."""
if fetcher:
kiskadee.logger.debug(
"FETCHER {}: sending package {}-{} for monitor"
.format(fetcher, package['name'], package['version'])
)
packages.put(package)
@staticmethod
def dequeue_package():
"""Get a result from the results queue."""
package = packages.get()
kiskadee.logger.debug(
"MONITOR STATE: Pick Up monitored package."
.format(package["name"],
package["version"])
)
return package
| agpl-3.0 | 4,248,233,573,297,684,000 | 31.328947 | 73 | 0.540904 | false |
crosswalk-project/blink-crosswalk-efl | Tools/Scripts/webkitpy/layout_tests/print_layout_test_times.py | 44 | 5791 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import optparse
from webkitpy.layout_tests.port import Port
def main(host, argv):
parser = optparse.OptionParser(usage='%prog [times_ms.json]')
parser.add_option('-f', '--forward', action='store', type='int',
help='group times by first N directories of test')
parser.add_option('-b', '--backward', action='store', type='int',
help='group times by last N directories of test')
parser.add_option('--fastest', action='store', type='float',
help='print a list of tests that will take N % of the time')
epilog = """
You can print out aggregate times per directory using the -f and -b
flags. The value passed to each flag indicates the "depth" of the flag,
similar to positive and negative arguments to python arrays.
For example, given fast/forms/week/week-input-type.html, -f 1
truncates to 'fast', -f 2 and -b 2 truncates to 'fast/forms', and -b 1
truncates to fast/forms/week . -f 0 truncates to '', which can be used
to produce a single total time for the run."""
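    # Example invocations (hypothetical paths and values):
    #   print_layout_test_times.py -f 2 times_ms.json   -> aggregate time per two-level directory prefix
    #   print_layout_test_times.py --fastest 10         -> list of tests covering ~10% of the total time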
parser.epilog = '\n'.join(s.lstrip() for s in epilog.splitlines())
options, args = parser.parse_args(argv)
port = host.port_factory.get()
if args and args[0]:
times_ms_path = args[0]
else:
times_ms_path = host.filesystem.join(port.results_directory(), 'times_ms.json')
times_trie = json.loads(host.filesystem.read_text_file(times_ms_path))
times = convert_trie_to_flat_paths(times_trie)
if options.fastest:
if options.forward is None and options.backward is None:
options.forward = 0
print_fastest(host, port, options, times)
else:
print_times(host, options, times)
def print_times(host, options, times):
by_key = times_by_key(times, options.forward, options.backward)
for key in sorted(by_key):
if key:
host.print_("%s %d" % (key, by_key[key]))
else:
host.print_("%d" % by_key[key])
def print_fastest(host, port, options, times):
total = times_by_key(times, 0, None)['']
by_key = times_by_key(times, options.forward, options.backward)
keys_by_time = sorted(by_key, key=lambda k: (by_key[k], k))
tests_by_key = {}
for test_name in sorted(times):
key = key_for(test_name, options.forward, options.backward)
if key in sorted(tests_by_key):
tests_by_key[key].append(test_name)
else:
tests_by_key[key] = [test_name]
fast_tests_by_key = {}
total_so_far = 0
per_key = total * options.fastest / (len(keys_by_time) * 100.0)
budget = 0
while keys_by_time:
budget += per_key
key = keys_by_time.pop(0)
tests_by_time = sorted(tests_by_key[key], key=lambda t: (times[t], t))
fast_tests_by_key[key] = []
while tests_by_time and total_so_far <= budget:
test = tests_by_time.pop(0)
test_time = times[test]
# Make sure test time > 0 so we don't include tests that are skipped.
if test_time and total_so_far + test_time <= budget:
fast_tests_by_key[key].append(test)
total_so_far += test_time
for k in sorted(fast_tests_by_key):
for t in fast_tests_by_key[k]:
host.print_("%s %d" % (t, times[t]))
return
def key_for(path, forward, backward):
sep = Port.TEST_PATH_SEPARATOR
if forward is not None:
return sep.join(path.split(sep)[:-1][:forward])
if backward is not None:
return sep.join(path.split(sep)[:-backward])
return path
def times_by_key(times, forward, backward):
by_key = {}
for test_name in times:
key = key_for(test_name, forward, backward)
if key in by_key:
by_key[key] += times[test_name]
else:
by_key[key] = times[test_name]
return by_key
def convert_trie_to_flat_paths(trie, prefix=None):
result = {}
for name, data in trie.iteritems():
if prefix:
name = prefix + "/" + name
if isinstance(data, int):
result[name] = data
else:
result.update(convert_trie_to_flat_paths(data, name))
return result
| bsd-3-clause | -106,250,072,559,601,300 | 37.606667 | 87 | 0.646002 | false |
Didacti/elixir | tests/test_o2m.py | 1 | 7242 | """
test one to many relationships
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
from elixir import *
from sqlalchemy import and_
from sqlalchemy.ext.orderinglist import ordering_list
def setup():
metadata.bind = 'sqlite://'
class TestOneToMany(object):
def teardown(self):
cleanup_all(True)
def test_simple(self):
class A(Entity):
name = Field(String(60))
bs = OneToMany('B')
class B(Entity):
name = Field(String(60))
a = ManyToOne('A')
setup_all(True)
a1 = A(name='a1')
b1 = B(name='b1', a=a1)
# does it work before a commit? (does the backref work?)
assert b1 in a1.bs
session.commit()
session.close()
b = B.query.one()
a = b.a
assert b in a.bs
def test_selfref(self):
class Person(Entity):
name = Field(String(30))
father = ManyToOne('Person', inverse='children')
children = OneToMany('Person', inverse='father')
setup_all(True)
grampa = Person(name="Abe")
homer = Person(name="Homer")
bart = Person(name="Bart")
lisa = Person(name="Lisa")
grampa.children.append(homer)
homer.children.append(bart)
lisa.father = homer
session.commit()
session.close()
p = Person.get_by(name="Homer")
assert p in p.father.children
assert p.father is Person.get_by(name="Abe")
assert p is Person.get_by(name="Lisa").father
def test_multiple_selfref(self):
# define a self-referential table with several relations
class TreeNode(Entity):
using_options(order_by='name')
name = Field(String(50), required=True)
parent = ManyToOne('TreeNode')
children = OneToMany('TreeNode', inverse='parent')
root = ManyToOne('TreeNode')
setup_all(True)
root = TreeNode(name='rootnode')
root.children.append(TreeNode(name='node1', root=root))
node2 = TreeNode(name='node2', root=root)
node2.children.append(TreeNode(name='subnode1', root=root))
node2.children.append(TreeNode(name='subnode2', root=root))
root.children.append(node2)
root.children.append(TreeNode(name='node3', root=root))
session.commit()
session.close()
root = TreeNode.get_by(name='rootnode')
sub2 = TreeNode.get_by(name='subnode2')
assert sub2 in root.children[1].children
assert sub2.root == root
def test_viewonly(self):
class User(Entity):
name = Field(String(50))
boston_addresses = OneToMany('Address', primaryjoin=lambda:
and_(Address.user_id == User.id, Address.city == 'Boston'),
viewonly=True
)
addresses = OneToMany('Address')
class Address(Entity):
user = ManyToOne('User')
street = Field(Unicode(255))
city = Field(Unicode(255))
setup_all(True)
user = User(name="u1",
addresses=[Address(street="Queen Astrid Avenue, 32",
city="Brussels"),
Address(street="Cambridge Street, 5",
city="Boston")])
session.commit()
session.close()
user = User.get(1)
assert len(user.addresses) == 2
assert len(user.boston_addresses) == 1
assert "Cambridge" in user.boston_addresses[0].street
def test_filter_func(self):
class User(Entity):
name = Field(String(50))
boston_addresses = OneToMany('Address', filter=lambda c:
c.city == 'Boston')
addresses = OneToMany('Address')
class Address(Entity):
user = ManyToOne('User')
street = Field(Unicode(255))
city = Field(Unicode(255))
setup_all(True)
user = User(name="u1",
addresses=[Address(street="Queen Astrid Avenue, 32",
city="Brussels"),
Address(street="Cambridge Street, 5",
city="Boston")])
session.commit()
session.close()
user = User.get(1)
assert len(user.addresses) == 2
assert len(user.boston_addresses) == 1
assert "Cambridge" in user.boston_addresses[0].street
def test_ordering_list(self):
class User(Entity):
name = Field(String(50))
blurbs = OneToMany('Blurb',
collection_class=ordering_list('position'),
order_by='position')
class Blurb(Entity):
user = ManyToOne('User')
position = Field(Integer)
text = Field(Unicode(255))
setup_all(True)
user = User(name="u1",
blurbs=[Blurb(text='zero'),
Blurb(text='one'),
Blurb(text='two')])
session.commit()
session.close()
user = User.get(1)
assert len(user.blurbs) == 3
user.blurbs.insert(1, Blurb(text='new one'))
assert user.blurbs[2].text == "one"
assert user.blurbs[2].position == 2
assert user.blurbs[3].text == "two"
assert user.blurbs[3].position == 3
# def test_manual_join_no_inverse(self):
# class A(Entity):
# name = Field(String(60))
# bs = OneToMany('B')
#
# class B(Entity):
# name = Field(String(60))
# a_id = Field(Integer, ForeignKey('a.id'))
#
# setup_all(True)
#
# a1 = A(name='a1', bs=[B(name='b1')])
#
# session.commit()
# session.close()
#
# b = B.query.one()
#
# assert b.a_id == 1
#
def test_inverse_has_non_pk_target(self):
class A(Entity):
name = Field(String(60), unique=True)
bs = OneToMany('B')
class B(Entity):
name = Field(String(60))
a = ManyToOne('A', target_column='name')
setup_all(True)
a1 = A(name='a1')
b1 = B(name='b1', a=a1)
# does it work before a commit? (does the backref work?)
assert b1 in a1.bs
session.commit()
session.close()
b = B.query.one()
a = b.a
assert b.a.name == 'a1'
assert b in a.bs
def test_has_many_syntax(self):
class Person(Entity):
has_field('name', String(30))
has_many('pets', of_kind='Animal')
class Animal(Entity):
has_field('name', String(30))
belongs_to('owner', of_kind='Person')
setup_all(True)
santa = Person(name="Santa Claus")
rudolph = Animal(name="Rudolph", owner=santa)
session.commit()
session.close()
santa = Person.get_by(name="Santa Claus")
assert Animal.get_by(name="Rudolph") in santa.pets
| mit | 694,852,872,440,373,000 | 27.4 | 75 | 0.526098 | false |
LegoStormtroopr/mallard-questionnaire-registry | mallard_qr/models.py | 1 | 2839 | from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext as _
from model_utils import Choices
import aristotle_mdr as aristotle
"""
These models are based on the DDI3.2 and the SQBL XML formats.
"""
class AdministrationMode(aristotle.models.unmanagedObject):
pass
class Question(aristotle.models.concept):
template = "mallard_qr/question.html"
collected_data_element = models.ForeignKey(aristotle.models.DataElement,blank=True,null=True,related_name="questions")
question_text = aristotle.models.RichTextField(blank=True)
instruction_text = aristotle.models.RichTextField(blank=True)
# administration_modes = models.ManyToManyField(AdministrationMode,blank=True,null=True)
estimated_seconds_response_time = models.PositiveIntegerField(
null=True, blank=True,
help_text=_("he estimated amount of time required to answer a question expressed in seconds.")
)
class ResponseDomain(aristotle.models.aristotleComponent):
class Meta:
ordering = ['order']
@property
def parentItem(self):
return self.question
question = models.ForeignKey(Question, related_name="response_domains")
value_domain = models.ForeignKey(aristotle.models.ValueDomain)
maximum_occurances = models.PositiveIntegerField(
default=1,
help_text=_("The maximum number of times a response can be included in a question")
)
minimum_occurances = models.PositiveIntegerField(
default=1,
help_text=_("The minimum number of times a response can be included in a question")
)
blank_is_missing_value = models.BooleanField(default=False, help_text=_("When value is true a blank or empty variable content should be treated as a missing value."))
order = models.PositiveSmallIntegerField(
"Position",
null=True,
blank=True,
help_text=_("If a dataset is ordered, this indicates which position this item is in a dataset.")
)
"""
class QuestionModule(aristotle.models.concept):
template = "mallard-qr/questionmodule.html"
questions = models.ManyToManyField(Question,blank=True,null=True)
submodules = models.ManyToManyField('QuestionModule',blank=True,null=True)
instruction_text = aristotle.models.RichTextField(blank=True,null=True)
sqbl_definition = TextField(blank=True,null=True)
administration_modes = models.ManyToManyField(AdministrationMode,blank=True,null=True)
class Questionnaire(aristotle.models.concept):
template = "mallard-qr/questionnaire.html"
submodules = models.ManyToManyField(QuestionModule,blank=True,null=True)
instructionText = aristotle.models.RichTextField(blank=True)
administration_modes = models.ManyToManyField(AdministrationMode,blank=True,null=True)
""" | gpl-2.0 | -5,326,762,414,326,911,000 | 40.15942 | 170 | 0.738288 | false |
Revanth47/addons-server | src/olympia/tags/tests/test_helpers.py | 4 | 1150 | from jingo import get_env
from mock import Mock
from pyquery import PyQuery as pq
from olympia import amo
from olympia.addons.models import Addon
def render(s, context=None):
"""Taken from jingo.tests.utils, previously jingo.tests.test_helpers."""
if context is None:
context = {}
t = get_env().from_string(s)
return t.render(context)
class TestHelpers(amo.tests.BaseTestCase):
fixtures = ('base/addon_3615', 'base/user_2519', 'base/user_4043307',
'tags/tags')
def test_tag_list(self):
addon = Addon.objects.get(id=3615)
request = Mock()
request.user = addon.authors.all()[0]
tags = addon.tags.not_denied()
ctx = {
'APP': amo.FIREFOX,
'LANG': 'en-us',
'request': request,
'addon': addon,
'tags': tags}
# no tags, no list
s = render('{{ tag_list(addon) }}', ctx)
assert s.strip() == ""
s = render('{{ tag_list(addon, tags=tags) }}', ctx)
assert s, "Non-empty tags must return tag list."
doc = pq(s)
assert doc('li').length == len(tags)
| bsd-3-clause | 5,751,300,487,915,684,000 | 25.744186 | 76 | 0.566087 | false |
Epirex/android_external_chromium_org | third_party/tlslite/tlslite/integration/SMTP_TLS.py | 87 | 4726 | """TLS Lite + smtplib."""
from smtplib import SMTP
from tlslite.TLSConnection import TLSConnection
from tlslite.integration.ClientHelper import ClientHelper
class SMTP_TLS(SMTP):
"""This class extends L{smtplib.SMTP} with TLS support."""
def starttls(self,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings=None):
"""Puts the connection to the SMTP server into TLS mode.
If the server supports TLS, this will encrypt the rest of the SMTP
session.
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The caller should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
(resp, reply) = self.docmd("STARTTLS")
if resp == 220:
helper = ClientHelper(
username, password, sharedKey,
certChain, privateKey,
cryptoID, protocol,
x509Fingerprint,
x509TrustList, x509CommonName,
settings)
conn = TLSConnection(self.sock)
conn.closeSocket = True
helper._handshake(conn)
self.sock = conn
self.file = conn.makefile('rb')
return (resp, reply) | bsd-3-clause | 5,859,670,067,391,380,000 | 40.464912 | 74 | 0.650444 | false |
vladzur/radiotray | data/plugins/MateMediaKeysPlugin.py | 1 | 2312 | ##########################################################################
# Copyright 2009 Carlos Ribeiro
#
# This file is part of Radio Tray
#
# Radio Tray is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 1 of the License, or
# (at your option) any later version.
#
# Radio Tray is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radio Tray. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################
from Plugin import Plugin
import dbus
class MateMediaKeysPlugin(Plugin):
def __init__(self):
super(MateMediaKeysPlugin, self).__init__()
def initialize(self, name, eventManagerWrapper, eventSubscriber, provider, cfgProvider, mediator, tooltip):
self.name = name
self.eventManagerWrapper = eventManagerWrapper
self.eventSubscriber = eventSubscriber
self.provider = provider
self.cfgProvider = cfgProvider
self.mediator = mediator
self.tooltip = tooltip
def getName(self):
return self.name
def activate(self):
try:
self.bus = dbus.SessionBus()
self.bus_object = self.bus.get_object('org.mate.SettingsDaemon', '/org/mate/SettingsDaemon/MediaKeys')
self.bus_object.GrabMediaPlayerKeys("RadioTray", 0, dbus_interface='org.mate.SettingsDaemon.MediaKeys')
self.bus_object.connect_to_signal('MediaPlayerKeyPressed', self.handle_mediakey)
except:
print "Could not bind to mate for Media Keys"
def handle_mediakey(self, *mmkeys):
for key in mmkeys:
if key == "Play":
if (self.mediator.isPlaying()):
self.mediator.stop()
else:
self.mediator.playLast()
elif key == "Stop":
if (self.mediator.isPlaying()):
self.mediator.stop()
| gpl-2.0 | 4,130,183,598,858,673,000 | 35.125 | 115 | 0.605536 | false |
frodrigo/osmose-backend | analysers/analyser_merge_public_equipment_FR_rennes_toilets.py | 4 | 2991 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Adrien Pavie 2017 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from modules.OsmoseTranslation import T_
from .Analyser_Merge import Analyser_Merge, SourceOpenDataSoft, CSV, Load, Conflate, Select, Mapping
class Analyser_Merge_Public_Equipment_FR_Rennes_Toilets(Analyser_Merge):
def __init__(self, config, logger = None):
Analyser_Merge.__init__(self, config, logger)
        self.def_class_missing_official(item = 8180, id = 2, level = 3, tags = ['merge', 'public equipment', 'fix:survey', 'fix:picture'],
title = T_('{0} toilets not integrated', 'Rennes'))
self.init(
"https://data.rennesmetropole.fr/explore/dataset/toilettes_publiques_vdr/",
"Toilettes publiques",
CSV(SourceOpenDataSoft(
attribution="Ville de Rennes",
url="https://data.rennesmetropole.fr/explore/dataset/toilettes_publiques_vdr/")),
Load("Geo Point", "Geo Point",
xFunction = lambda x: x and x.split(',')[1],
yFunction = lambda y: y and y.split(',')[0]),
Conflate(
select = Select(
types = ["nodes", "ways"],
tags = {"amenity": "toilets"}),
conflationDistance = 100,
mapping = Mapping(
static1 = {
"amenity": "toilets",
"access": "yes"},
static2 = {"source": self.source},
mapping1 = {
"wheelchair": lambda res: "yes" if res["pmr"] == "OUI" else "no" if res["pmr"] == "NON" else None} )))
| gpl-3.0 | 130,431,469,709,637,580 | 55.433962 | 137 | 0.458041 | false |
ethanrowe/python-data-packager | setup.py | 1 | 1089 | from setuptools import setup
import os
readme = open(os.path.join(os.path.dirname(__file__), 'README'), 'r').read()
license = open(os.path.join(os.path.dirname(__file__), 'LICENSE'), 'r').read()
setup(
name = "data_packager",
version = "0.0.1",
author = "Ethan Rowe",
author_email = "[email protected]",
description = ("Provides dirt-simple tool for releasing datasets as packages"),
license = "MIT",
keywords = "",
url = "https://github.com/ethanrowe/python-data-packager",
packages=['data_packager',
'data_packager.test',
],
long_description="%s\n\n# License #\n\n%s" % (readme, license),
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Utilities",
],
tests_require=[
'virtualenv',
'nose',
],
test_suite='nose.collector',
)
| mit | 5,411,192,722,517,334,000 | 30.114286 | 83 | 0.585859 | false |
floresconlimon/qutebrowser | tests/unit/browser/network/test_schemehandler.py | 8 | 1174 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for browser.network.schemehandler."""
import pytest
from qutebrowser.browser.network import schemehandler
def test_init():
handler = schemehandler.SchemeHandler(0)
assert handler._win_id == 0
def test_create_request():
handler = schemehandler.SchemeHandler(0)
with pytest.raises(NotImplementedError):
handler.createRequest(None, None, None)
| gpl-3.0 | -69,623,217,615,093,930 | 32.542857 | 70 | 0.752981 | false |
joel-airspring/Diamond | src/collectors/gridengine/gridengine.py | 29 | 4886 | # coding=utf-8
"""
The GridEngineCollector parses qstat statistics from Sun Grid Engine,
Univa Grid Engine and Open Grid Scheduler.
#### Dependencies
* Grid Engine qstat
"""
import os
import re
import subprocess
import sys
import xml.dom.minidom
import diamond.collector
class GridEngineCollector(diamond.collector.Collector):
"""Diamond collector for Grid Engine performance data
"""
class QueueStatsEntry:
def __init__(self, name=None, load=None, used=None, resv=None,
available=None, total=None, temp_disabled=None,
manual_intervention=None):
self.name = name
self.load = load
self.used = used
self.resv = resv
self.available = available
self.total = total
self.temp_disabled = temp_disabled
self.manual_intervention = manual_intervention
class StatsParser(object):
def __init__(self, document):
self.dom = xml.dom.minidom.parseString(document.strip())
def get_tag_text(self, node, tag_name):
el = node.getElementsByTagName(tag_name)[0]
return self.get_text(el)
def get_text(self, node):
rc = []
for node in node.childNodes:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
class QueueStatsParser(StatsParser):
def __init__(self, document):
self.dom = xml.dom.minidom.parseString(document.strip())
def parse(self):
cluster_queue_summaries = self.dom.getElementsByTagName(
"cluster_queue_summary")
return [
self._parse_cluster_stats_entry(node)
for node in cluster_queue_summaries]
def _parse_cluster_stats_entry(self, node):
name = self.get_tag_text(node, "name")
load = float(self.get_tag_text(node, "load"))
used = int(self.get_tag_text(node, "used"))
resv = int(self.get_tag_text(node, "resv"))
available = int(self.get_tag_text(node, "available"))
total = int(self.get_tag_text(node, "total"))
temp_disabled = int(self.get_tag_text(node, "temp_disabled"))
manual_intervention = int(self.get_tag_text(
node,
"manual_intervention"))
return GridEngineCollector.QueueStatsEntry(
name=name,
load=load,
used=used,
resv=resv,
available=available,
total=total,
temp_disabled=temp_disabled,
manual_intervention=manual_intervention)
def process_config(self):
super(GridEngineCollector, self).process_config()
os.environ['SGE_ROOT'] = self.config['sge_root']
def get_default_config_help(self):
config_help = super(GridEngineCollector,
self).get_default_config_help()
config_help.update({
'bin_path': "The path to Grid Engine's qstat",
'sge_root': "The SGE_ROOT value to provide to qstat"
})
return config_help
def get_default_config(self):
config = super(GridEngineCollector, self).get_default_config()
config.update({
'bin_path': '/opt/gridengine/bin/lx-amd64/qstat',
'path': 'gridengine',
'sge_root': self._sge_root(),
})
return config
def collect(self):
"""Collect statistics from Grid Engine via qstat.
"""
self._collect_queue_stats()
def _capture_output(self, cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
bytestr = p.communicate()[0]
output = bytestr.decode(sys.getdefaultencoding())
return output
def _collect_queue_stats(self):
output = self._queue_stats_xml()
parser = self.QueueStatsParser(output)
for cq in parser.parse():
name = self._sanitize(cq.name)
prefix = 'queues.%s' % (name)
metrics = ['load', 'used', 'resv', 'available', 'total',
'temp_disabled', 'manual_intervention']
for metric in metrics:
path = '%s.%s' % (prefix, metric)
value = getattr(cq, metric)
self.publish(path, value)
def _queue_stats_xml(self):
bin_path = self.config['bin_path']
return self._capture_output([bin_path, '-g', 'c', '-xml'])
def _sanitize(self, s):
"""Sanitize the name of a metric to remove unwanted chars
"""
return re.sub("[^\w-]", "_", s)
def _sge_root(self):
sge_root = os.environ.get('SGE_ROOT')
if sge_root:
return sge_root
else:
return '/opt/gridengine'
| mit | -1,557,603,442,933,474,300 | 32.238095 | 73 | 0.558944 | false |
ljgabc/lfs | usr/lib/python2.7/pdb.py | 119 | 46016 | #! /usr/bin/env python
"""A Python debugger."""
# (See pdb.doc for documentation.)
import sys
import linecache
import cmd
import bdb
from repr import Repr
import os
import re
import pprint
import traceback
class Restart(Exception):
"""Causes a debugger to be restarted for the debugged python program."""
pass
# Create a custom safe Repr instance and increase its maxstring.
# The default of 30 truncates error messages too easily.
_repr = Repr()
_repr.maxstring = 200
_saferepr = _repr.repr
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
"post_mortem", "help"]
def find_function(funcname, filename):
cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname))
try:
fp = open(filename)
except IOError:
return None
# consumer of this info expects the first line to be 1
lineno = 1
answer = None
while 1:
line = fp.readline()
if line == '':
break
if cre.match(line):
answer = funcname, filename, lineno
break
lineno = lineno + 1
fp.close()
return answer
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pdb is imported using the
# command "pdb.line_prefix = '\n% '".
# line_prefix = ': ' # Use this to get the old situation back
line_prefix = '\n-> ' # Probably a better default
class Pdb(bdb.Bdb, cmd.Cmd):
def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None):
bdb.Bdb.__init__(self, skip=skip)
cmd.Cmd.__init__(self, completekey, stdin, stdout)
if stdout:
self.use_rawinput = 0
self.prompt = '(Pdb) '
self.aliases = {}
self.mainpyfile = ''
self._wait_for_mainpyfile = 0
# Try to load readline if it exists
try:
import readline
except ImportError:
pass
# Read $HOME/.pdbrc and ./.pdbrc
self.rcLines = []
if 'HOME' in os.environ:
envHome = os.environ['HOME']
try:
rcFile = open(os.path.join(envHome, ".pdbrc"))
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
try:
rcFile = open(".pdbrc")
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
self.commands = {} # associates a command list to breakpoint numbers
self.commands_doprompt = {} # for each bp num, tells if the prompt
# must be disp. after execing the cmd list
self.commands_silent = {} # for each bp num, tells if the stack trace
# must be disp. after execing the cmd list
self.commands_defining = False # True while in the process of defining
# a command list
self.commands_bnum = None # The breakpoint number for which we are
# defining a list
def reset(self):
bdb.Bdb.reset(self)
self.forget()
def forget(self):
self.lineno = None
self.stack = []
self.curindex = 0
self.curframe = None
def setup(self, f, t):
self.forget()
self.stack, self.curindex = self.get_stack(f, t)
self.curframe = self.stack[self.curindex][0]
# The f_locals dictionary is updated from the actual frame
# locals whenever the .f_locals accessor is called, so we
# cache it here to ensure that modifications are not overwritten.
self.curframe_locals = self.curframe.f_locals
self.execRcLines()
# Can be executed earlier than 'setup' if desired
def execRcLines(self):
if self.rcLines:
# Make local copy because of recursion
rcLines = self.rcLines
# executed only once
self.rcLines = []
for line in rcLines:
line = line[:-1]
if len(line) > 0 and line[0] != '#':
self.onecmd(line)
# Override Bdb methods
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
print >>self.stdout, '--Call--'
self.interaction(frame, None)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
                or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = 0
if self.bp_commands(frame):
self.interaction(frame, None)
def bp_commands(self,frame):
"""Call every command that was set for the current active breakpoint
(if there is one).
Returns True if the normal interaction function must be called,
False otherwise."""
# self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit
if getattr(self, "currentbp", False) and \
self.currentbp in self.commands:
currentbp = self.currentbp
self.currentbp = 0
lastcmd_back = self.lastcmd
self.setup(frame, None)
for line in self.commands[currentbp]:
self.onecmd(line)
self.lastcmd = lastcmd_back
if not self.commands_silent[currentbp]:
self.print_stack_entry(self.stack[self.curindex])
if self.commands_doprompt[currentbp]:
self.cmdloop()
self.forget()
return
return 1
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
if self._wait_for_mainpyfile:
return
frame.f_locals['__return__'] = return_value
print >>self.stdout, '--Return--'
self.interaction(frame, None)
def user_exception(self, frame, exc_info):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
if self._wait_for_mainpyfile:
return
exc_type, exc_value, exc_traceback = exc_info
frame.f_locals['__exception__'] = exc_type, exc_value
if type(exc_type) == type(''):
exc_type_name = exc_type
else: exc_type_name = exc_type.__name__
print >>self.stdout, exc_type_name + ':', _saferepr(exc_value)
self.interaction(frame, exc_traceback)
# General interaction function
def interaction(self, frame, traceback):
self.setup(frame, traceback)
self.print_stack_entry(self.stack[self.curindex])
self.cmdloop()
self.forget()
def displayhook(self, obj):
"""Custom displayhook for the exec in default(), which prevents
assignment of the _ variable in the builtins.
"""
# reproduce the behavior of the standard displayhook, not printing None
if obj is not None:
print repr(obj)
def default(self, line):
if line[:1] == '!': line = line[1:]
locals = self.curframe_locals
globals = self.curframe.f_globals
try:
code = compile(line + '\n', '<stdin>', 'single')
save_stdout = sys.stdout
save_stdin = sys.stdin
save_displayhook = sys.displayhook
try:
sys.stdin = self.stdin
sys.stdout = self.stdout
sys.displayhook = self.displayhook
exec code in globals, locals
finally:
sys.stdout = save_stdout
sys.stdin = save_stdin
sys.displayhook = save_displayhook
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', v
def precmd(self, line):
"""Handle alias expansion and ';;' separator."""
if not line.strip():
return line
args = line.split()
while args[0] in self.aliases:
line = self.aliases[args[0]]
ii = 1
for tmpArg in args[1:]:
line = line.replace("%" + str(ii),
tmpArg)
ii = ii + 1
line = line.replace("%*", ' '.join(args[1:]))
args = line.split()
# split into ';;' separated commands
# unless it's an alias command
if args[0] != 'alias':
marker = line.find(';;')
if marker >= 0:
# queue up everything after marker
next = line[marker+2:].lstrip()
self.cmdqueue.append(next)
line = line[:marker].rstrip()
return line
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
"""
if not self.commands_defining:
return cmd.Cmd.onecmd(self, line)
else:
return self.handle_command_def(line)
def handle_command_def(self,line):
"""Handles one command line during command list definition."""
cmd, arg, line = self.parseline(line)
if not cmd:
return
if cmd == 'silent':
self.commands_silent[self.commands_bnum] = True
return # continue to handle other cmd def in the cmd list
elif cmd == 'end':
self.cmdqueue = []
return 1 # end of cmd list
cmdlist = self.commands[self.commands_bnum]
if arg:
cmdlist.append(cmd+' '+arg)
else:
cmdlist.append(cmd)
# Determine if we must stop
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
func = self.default
# one of the resuming commands
if func.func_name in self.commands_resuming:
self.commands_doprompt[self.commands_bnum] = False
self.cmdqueue = []
return 1
return
# Command definitions, called by cmdloop()
# The argument is the remaining string on the command line
# Return true to exit from the command loop
do_h = cmd.Cmd.do_help
def do_commands(self, arg):
"""Defines a list of commands associated to a breakpoint.
Those commands will be executed whenever the breakpoint causes
the program to stop execution."""
if not arg:
bnum = len(bdb.Breakpoint.bpbynumber)-1
else:
try:
bnum = int(arg)
except:
print >>self.stdout, "Usage : commands [bnum]\n ..." \
"\n end"
return
self.commands_bnum = bnum
self.commands[bnum] = []
self.commands_doprompt[bnum] = True
self.commands_silent[bnum] = False
prompt_back = self.prompt
self.prompt = '(com) '
self.commands_defining = True
try:
self.cmdloop()
finally:
self.commands_defining = False
self.prompt = prompt_back
def do_break(self, arg, temporary = 0):
# break [ ([filename:]lineno | function) [, "condition"] ]
if not arg:
if self.breaks: # There's at least one
print >>self.stdout, "Num Type Disp Enb Where"
for bp in bdb.Breakpoint.bpbynumber:
if bp:
bp.bpprint(self.stdout)
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
funcname = None
if colon >= 0:
filename = arg[:colon].rstrip()
f = self.lookupmodule(filename)
if not f:
print >>self.stdout, '*** ', repr(filename),
print >>self.stdout, 'not found from sys.path'
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError, msg:
print >>self.stdout, '*** Bad lineno:', arg
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except ValueError:
try:
func = eval(arg,
self.curframe.f_globals,
self.curframe_locals)
except:
func = arg
try:
if hasattr(func, 'im_func'):
func = func.im_func
code = func.func_code
#use co_name to identify the bkpt (function names
#could be aliased, but co_name is invariant)
funcname = code.co_name
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self.lineinfo(arg)
if not ok:
print >>self.stdout, '*** The specified object',
print >>self.stdout, repr(arg),
print >>self.stdout, 'is not a function'
print >>self.stdout, 'or was not found along sys.path.'
return
funcname = ok # ok contains a function name
lineno = int(ln)
if not filename:
filename = self.defaultFile()
# Check for reasonable breakpoint
line = self.checkline(filename, lineno)
if line:
# now set the break point
err = self.set_break(filename, line, temporary, cond, funcname)
if err: print >>self.stdout, '***', err
else:
bp = self.get_breaks(filename, line)[-1]
print >>self.stdout, "Breakpoint %d at %s:%d" % (bp.number,
bp.file,
bp.line)
# To be overridden in derived debuggers
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
if filename == '<string>' and self.mainpyfile:
filename = self.mainpyfile
return filename
do_b = do_break
def do_tbreak(self, arg):
self.do_break(arg, 1)
def lineinfo(self, identifier):
failed = (None, None, None)
# Input is identifier, may be in single quotes
idstring = identifier.split("'")
if len(idstring) == 1:
# not in single quotes
id = idstring[0].strip()
elif len(idstring) == 3:
# quoted
id = idstring[1].strip()
else:
return failed
if id == '': return failed
parts = id.split('.')
# Protection for derived debuggers
if parts[0] == 'self':
del parts[0]
if len(parts) == 0:
return failed
# Best first guess at file to look at
fname = self.defaultFile()
if len(parts) == 1:
item = parts[0]
else:
# More than one part.
# First is module, second is method/class
f = self.lookupmodule(parts[0])
if f:
fname = f
item = parts[1]
answer = find_function(item, fname)
return answer or failed
def checkline(self, filename, lineno):
"""Check whether specified line seems to be executable.
Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
line or EOF). Warning: testing is not comprehensive.
"""
# this method should be callable before starting debugging, so default
# to "no globals" if there is no current frame
globs = self.curframe.f_globals if hasattr(self, 'curframe') else None
line = linecache.getline(filename, lineno, globs)
if not line:
print >>self.stdout, 'End of file'
return 0
line = line.strip()
# Don't allow setting breakpoint at a blank line
if (not line or (line[0] == '#') or
(line[:3] == '"""') or line[:3] == "'''"):
print >>self.stdout, '*** Blank or comment'
return 0
return lineno
def do_enable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.enable()
def do_disable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.disable()
def do_condition(self, arg):
# arg is breakpoint number and condition
args = arg.split(' ', 1)
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
cond = args[1]
except:
cond = None
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.cond = cond
if not cond:
print >>self.stdout, 'Breakpoint', bpnum,
print >>self.stdout, 'is now unconditional.'
def do_ignore(self,arg):
"""arg is bp number followed by ignore count."""
args = arg.split()
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
count = int(args[1].strip())
except:
count = 0
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.ignore = count
if count > 0:
reply = 'Will ignore next '
if count > 1:
reply = reply + '%d crossings' % count
else:
reply = reply + '1 crossing'
print >>self.stdout, reply + ' of breakpoint %d.' % bpnum
else:
print >>self.stdout, 'Will stop next time breakpoint',
print >>self.stdout, bpnum, 'is reached.'
def do_clear(self, arg):
"""Three possibilities, tried in this order:
clear -> clear all breaks, ask for confirmation
clear file:lineno -> clear all breaks at file:lineno
clear bpno bpno ... -> clear breakpoints by number"""
if not arg:
try:
reply = raw_input('Clear all breaks? ')
except EOFError:
reply = 'no'
reply = reply.strip().lower()
if reply in ('y', 'yes'):
self.clear_all_breaks()
return
if ':' in arg:
# Make sure it works for "clear C:\foo\bar.py:12"
i = arg.rfind(':')
filename = arg[:i]
arg = arg[i+1:]
try:
lineno = int(arg)
except ValueError:
err = "Invalid line number (%s)" % arg
else:
err = self.clear_break(filename, lineno)
if err: print >>self.stdout, '***', err
return
numberlist = arg.split()
for i in numberlist:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
err = self.clear_bpbynumber(i)
if err:
print >>self.stdout, '***', err
else:
print >>self.stdout, 'Deleted breakpoint', i
do_cl = do_clear # 'c' is already an abbreviation for 'continue'
def do_where(self, arg):
self.print_stack_trace()
do_w = do_where
do_bt = do_where
def do_up(self, arg):
if self.curindex == 0:
print >>self.stdout, '*** Oldest frame'
else:
self.curindex = self.curindex - 1
self.curframe = self.stack[self.curindex][0]
self.curframe_locals = self.curframe.f_locals
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_u = do_up
def do_down(self, arg):
if self.curindex + 1 == len(self.stack):
print >>self.stdout, '*** Newest frame'
else:
self.curindex = self.curindex + 1
self.curframe = self.stack[self.curindex][0]
self.curframe_locals = self.curframe.f_locals
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_d = do_down
def do_until(self, arg):
self.set_until(self.curframe)
return 1
do_unt = do_until
def do_step(self, arg):
self.set_step()
return 1
do_s = do_step
def do_next(self, arg):
self.set_next(self.curframe)
return 1
do_n = do_next
def do_run(self, arg):
"""Restart program by raising an exception to be caught in the main
debugger loop. If arguments were given, set them in sys.argv."""
if arg:
import shlex
argv0 = sys.argv[0:1]
sys.argv = shlex.split(arg)
sys.argv[:0] = argv0
raise Restart
do_restart = do_run
def do_return(self, arg):
self.set_return(self.curframe)
return 1
do_r = do_return
def do_continue(self, arg):
self.set_continue()
return 1
do_c = do_cont = do_continue
def do_jump(self, arg):
if self.curindex + 1 != len(self.stack):
print >>self.stdout, "*** You can only jump within the bottom frame"
return
try:
arg = int(arg)
except ValueError:
print >>self.stdout, "*** The 'jump' command requires a line number."
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self.curframe.f_lineno = arg
self.stack[self.curindex] = self.stack[self.curindex][0], arg
self.print_stack_entry(self.stack[self.curindex])
except ValueError, e:
print >>self.stdout, '*** Jump failed:', e
do_j = do_jump
def do_debug(self, arg):
sys.settrace(None)
globals = self.curframe.f_globals
locals = self.curframe_locals
p = Pdb(self.completekey, self.stdin, self.stdout)
p.prompt = "(%s) " % self.prompt.strip()
print >>self.stdout, "ENTERING RECURSIVE DEBUGGER"
sys.call_tracing(p.run, (arg, globals, locals))
print >>self.stdout, "LEAVING RECURSIVE DEBUGGER"
sys.settrace(self.trace_dispatch)
self.lastcmd = p.lastcmd
def do_quit(self, arg):
self._user_requested_quit = 1
self.set_quit()
return 1
do_q = do_quit
do_exit = do_quit
def do_EOF(self, arg):
print >>self.stdout
self._user_requested_quit = 1
self.set_quit()
return 1
def do_args(self, arg):
co = self.curframe.f_code
dict = self.curframe_locals
n = co.co_argcount
if co.co_flags & 4: n = n+1
if co.co_flags & 8: n = n+1
for i in range(n):
name = co.co_varnames[i]
print >>self.stdout, name, '=',
if name in dict: print >>self.stdout, dict[name]
else: print >>self.stdout, "*** undefined ***"
do_a = do_args
def do_retval(self, arg):
if '__return__' in self.curframe_locals:
print >>self.stdout, self.curframe_locals['__return__']
else:
print >>self.stdout, '*** Not yet returned!'
do_rv = do_retval
def _getval(self, arg):
try:
return eval(arg, self.curframe.f_globals,
self.curframe_locals)
except:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
raise
def do_p(self, arg):
try:
print >>self.stdout, repr(self._getval(arg))
except:
pass
def do_pp(self, arg):
try:
pprint.pprint(self._getval(arg), self.stdout)
except:
pass
def do_list(self, arg):
self.lastcmd = 'list'
last = None
if arg:
try:
x = eval(arg, {}, {})
if type(x) == type(()):
first, last = x
first = int(first)
last = int(last)
if last < first:
# Assume it's a count
last = first + last
else:
first = max(1, int(x) - 5)
except:
print >>self.stdout, '*** Error in argument:', repr(arg)
return
elif self.lineno is None:
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
for lineno in range(first, last+1):
line = linecache.getline(filename, lineno,
self.curframe.f_globals)
if not line:
print >>self.stdout, '[EOF]'
break
else:
s = repr(lineno).rjust(3)
if len(s) < 4: s = s + ' '
if lineno in breaklist: s = s + 'B'
else: s = s + ' '
if lineno == self.curframe.f_lineno:
s = s + '->'
print >>self.stdout, s + '\t' + line,
self.lineno = lineno
except KeyboardInterrupt:
pass
do_l = do_list
def do_whatis(self, arg):
try:
value = eval(arg, self.curframe.f_globals,
self.curframe_locals)
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
return
code = None
# Is it a function?
try: code = value.func_code
except: pass
if code:
print >>self.stdout, 'Function', code.co_name
return
# Is it an instance method?
try: code = value.im_func.func_code
except: pass
if code:
print >>self.stdout, 'Method', code.co_name
return
# None of the above...
print >>self.stdout, type(value)
def do_alias(self, arg):
args = arg.split()
if len(args) == 0:
keys = self.aliases.keys()
keys.sort()
for alias in keys:
print >>self.stdout, "%s = %s" % (alias, self.aliases[alias])
return
if args[0] in self.aliases and len(args) == 1:
print >>self.stdout, "%s = %s" % (args[0], self.aliases[args[0]])
else:
self.aliases[args[0]] = ' '.join(args[1:])
def do_unalias(self, arg):
args = arg.split()
if len(args) == 0: return
if args[0] in self.aliases:
del self.aliases[args[0]]
#list of all the commands making the program resume execution.
commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return',
'do_quit', 'do_jump']
# Print a traceback starting at the top stack frame.
# The most recently entered frame is printed last;
# this is different from dbx and gdb, but consistent with
# the Python interpreter's stack trace.
# It is also consistent with the up/down commands (which are
# compatible with dbx and gdb: up moves towards 'main()'
# and down moves towards the most recent stack frame).
def print_stack_trace(self):
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno)
except KeyboardInterrupt:
pass
def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
frame, lineno = frame_lineno
if frame is self.curframe:
print >>self.stdout, '>',
else:
print >>self.stdout, ' ',
print >>self.stdout, self.format_stack_entry(frame_lineno,
prompt_prefix)
# Help methods (derived from pdb.doc)
def help_help(self):
self.help_h()
def help_h(self):
print >>self.stdout, """h(elp)
Without argument, print the list of available commands.
With a command name as argument, print help about that command
"help pdb" pipes the full documentation file to the $PAGER
"help exec" gives help on the ! command"""
def help_where(self):
self.help_w()
def help_w(self):
print >>self.stdout, """w(here)
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command."""
help_bt = help_w
def help_down(self):
self.help_d()
def help_d(self):
print >>self.stdout, """d(own)
Move the current frame one level down in the stack trace
(to a newer frame)."""
def help_up(self):
self.help_u()
def help_u(self):
print >>self.stdout, """u(p)
Move the current frame one level up in the stack trace
(to an older frame)."""
def help_break(self):
self.help_b()
def help_b(self):
print >>self.stdout, """b(reak) ([file:]lineno | function) [, condition]
With a line number argument, set a break there in the current
file. With a function name, set a break at first executable line
of that function. Without argument, list all breaks. If a second
argument is present, it is a string specifying an expression
which must evaluate to true before the breakpoint is honored.
The line number may be prefixed with a filename and a colon,
to specify a breakpoint in another file (probably one that
hasn't been loaded yet). The file is searched for on sys.path;
the .py suffix may be omitted."""
def help_clear(self):
self.help_cl()
def help_cl(self):
print >>self.stdout, "cl(ear) filename:lineno"
print >>self.stdout, """cl(ear) [bpnumber [bpnumber...]]
With a space separated list of breakpoint numbers, clear
those breakpoints. Without argument, clear all breaks (but
first ask confirmation). With a filename:lineno argument,
clear all breaks at that line in that file.
Note that the argument is different from previous versions of
the debugger (in python distributions 1.5.1 and before) where
a linenumber was used instead of either filename:lineno or
breakpoint numbers."""
def help_tbreak(self):
print >>self.stdout, """tbreak same arguments as break, but breakpoint
is removed when first hit."""
def help_enable(self):
print >>self.stdout, """enable bpnumber [bpnumber ...]
Enables the breakpoints given as a space separated list of
bp numbers."""
def help_disable(self):
print >>self.stdout, """disable bpnumber [bpnumber ...]
Disables the breakpoints given as a space separated list of
bp numbers."""
def help_ignore(self):
print >>self.stdout, """ignore bpnumber count
Sets the ignore count for the given breakpoint number. A breakpoint
becomes active when the ignore count is zero. When non-zero, the
count is decremented each time the breakpoint is reached and the
breakpoint is not disabled and any associated condition evaluates
to true."""
def help_condition(self):
print >>self.stdout, """condition bpnumber str_condition
str_condition is a string specifying an expression which
must evaluate to true before the breakpoint is honored.
If str_condition is absent, any existing condition is removed;
i.e., the breakpoint is made unconditional."""
def help_step(self):
self.help_s()
def help_s(self):
print >>self.stdout, """s(tep)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current function)."""
def help_until(self):
self.help_unt()
def help_unt(self):
print """unt(il)
Continue execution until the line with a number greater than the current
one is reached or until the current frame returns"""
def help_next(self):
self.help_n()
def help_n(self):
print >>self.stdout, """n(ext)
Continue execution until the next line in the current function
is reached or it returns."""
def help_return(self):
self.help_r()
def help_r(self):
print >>self.stdout, """r(eturn)
Continue execution until the current function returns."""
def help_continue(self):
self.help_c()
def help_cont(self):
self.help_c()
def help_c(self):
print >>self.stdout, """c(ont(inue))
Continue execution, only stop when a breakpoint is encountered."""
def help_jump(self):
self.help_j()
def help_j(self):
print >>self.stdout, """j(ump) lineno
Set the next line that will be executed."""
def help_debug(self):
print >>self.stdout, """debug code
Enter a recursive debugger that steps through the code argument
(which is an arbitrary expression or statement to be executed
in the current environment)."""
def help_list(self):
self.help_l()
def help_l(self):
print >>self.stdout, """l(ist) [first [,last]]
List source code for the current file.
Without arguments, list 11 lines around the current line
or continue the previous listing.
With one argument, list 11 lines starting at that line.
With two arguments, list the given range;
if the second argument is less than the first, it is a count."""
def help_args(self):
self.help_a()
def help_a(self):
print >>self.stdout, """a(rgs)
Print the arguments of the current function."""
def help_p(self):
print >>self.stdout, """p expression
Print the value of the expression."""
def help_pp(self):
print >>self.stdout, """pp expression
Pretty-print the value of the expression."""
def help_exec(self):
print >>self.stdout, """(!) statement
Execute the (one-line) statement in the context of
the current stack frame.
The exclamation point can be omitted unless the first word
of the statement resembles a debugger command.
To assign to a global variable you must always prefix the
command with a 'global' command, e.g.:
(Pdb) global list_options; list_options = ['-l']
(Pdb)"""
def help_run(self):
print """run [args...]
Restart the debugged python program. If a string is supplied, it is
split with "shlex" and the result is used as the new sys.argv.
History, breakpoints, actions and debugger options are preserved.
"restart" is an alias for "run"."""
help_restart = help_run
def help_quit(self):
self.help_q()
def help_q(self):
print >>self.stdout, """q(uit) or exit - Quit from the debugger.
The program being executed is aborted."""
help_exit = help_q
def help_whatis(self):
print >>self.stdout, """whatis arg
Prints the type of the argument."""
def help_EOF(self):
print >>self.stdout, """EOF
Handles the receipt of EOF as a command."""
def help_alias(self):
print >>self.stdout, """alias [name [command [parameter parameter ...]]]
Creates an alias called 'name' that executes 'command'. The command
must *not* be enclosed in quotes. Replaceable parameters are
indicated by %1, %2, and so on, while %* is replaced by all the
parameters. If no command is given, the current alias for name
is shown. If no name is given, all aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is recursively
applied to the first word of the command line; all other words
in the line are left alone.
Some useful aliases (especially when placed in the .pdbrc file) are:
#Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
#Print instance variables in self
alias ps pi self
"""
def help_unalias(self):
print >>self.stdout, """unalias name
Deletes the specified alias."""
def help_commands(self):
print >>self.stdout, """commands [bpnumber]
(com) ...
(com) end
(Pdb)
Specify a list of commands for breakpoint number bpnumber. The
commands themselves appear on the following lines. Type a line
containing just 'end' to terminate the commands.
To remove all commands from a breakpoint, type commands and
follow it immediately with end; that is, give no commands.
With no bpnumber argument, commands refers to the last
breakpoint set.
You can use breakpoint commands to start your program up again.
Simply use the continue command, or step, or any other
command that resumes execution.
Specifying any command resuming execution (currently continue,
step, next, return, jump, quit and their abbreviations) terminates
the command list (as if that command was immediately followed by end).
This is because any time you resume execution
(even with a simple next or step), you may encounter
another breakpoint--which could have its own command list, leading to
ambiguities about which list to execute.
If you use the 'silent' command in the command list, the
usual message about stopping at a breakpoint is not printed. This may
be desirable for breakpoints that are to print a specific message and
then continue. If none of the other commands print anything, you
see no sign that the breakpoint was reached.
"""
def help_pdb(self):
help()
def lookupmodule(self, filename):
"""Helper function for break/clear parsing -- may be overridden.
lookupmodule() translates (possibly incomplete) file or module name
into an absolute file name.
"""
if os.path.isabs(filename) and os.path.exists(filename):
return filename
f = os.path.join(sys.path[0], filename)
if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
return f
root, ext = os.path.splitext(filename)
if ext == '':
filename = filename + '.py'
if os.path.isabs(filename):
return filename
for dirname in sys.path:
while os.path.islink(dirname):
dirname = os.readlink(dirname)
fullname = os.path.join(dirname, filename)
if os.path.exists(fullname):
return fullname
return None
def _runscript(self, filename):
# The script has to run in __main__ namespace (or imports from
# __main__ will break).
#
# So we clear up the __main__ and set several special variables
# (this gets rid of pdb's globals and cleans old variables on restarts).
import __main__
__main__.__dict__.clear()
__main__.__dict__.update({"__name__" : "__main__",
"__file__" : filename,
"__builtins__": __builtins__,
})
# When bdb sets tracing, a number of call and line events happens
# BEFORE debugger even reaches user's code (and the exact sequence of
# events depends on python version). So we take special measures to
# avoid stopping before we reach the main script (see user_line and
# user_call for details).
self._wait_for_mainpyfile = 1
self.mainpyfile = self.canonic(filename)
self._user_requested_quit = 0
statement = 'execfile(%r)' % filename
self.run(statement)
# Simplified interface
def run(statement, globals=None, locals=None):
Pdb().run(statement, globals, locals)
def runeval(expression, globals=None, locals=None):
return Pdb().runeval(expression, globals, locals)
def runctx(statement, globals, locals):
# B/W compatibility
run(statement, globals, locals)
def runcall(*args, **kwds):
return Pdb().runcall(*args, **kwds)
def set_trace():
Pdb().set_trace(sys._getframe().f_back)
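# Illustrative sketch (added note, not part of the original module): the usual
# way to use this simplified interface is to drop into the debugger from user
# code with
#
#   import pdb; pdb.set_trace()
#
# Execution then stops at the next statement and the (Pdb) prompt appears.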
# Post-Mortem interface
def post_mortem(t=None):
# handling the default
if t is None:
# sys.exc_info() returns (type, value, traceback) if an exception is
# being handled, otherwise it returns None
t = sys.exc_info()[2]
if t is None:
raise ValueError("A valid traceback must be passed if no "
"exception is being handled")
p = Pdb()
p.reset()
p.interaction(None, t)
def pm():
post_mortem(sys.last_traceback)
# Main program for testing
TESTCMD = 'import x; x.main()'
def test():
run(TESTCMD)
# print help
def help():
for dirname in sys.path:
fullname = os.path.join(dirname, 'pdb.doc')
if os.path.exists(fullname):
sts = os.system('${PAGER-more} '+fullname)
if sts: print '*** Pager exit status:', sts
break
else:
print 'Sorry, can\'t find the help file "pdb.doc"',
print 'along the Python search path'
def main():
if not sys.argv[1:] or sys.argv[1] in ("--help", "-h"):
print "usage: pdb.py scriptfile [arg] ..."
sys.exit(2)
mainpyfile = sys.argv[1] # Get script filename
if not os.path.exists(mainpyfile):
print 'Error:', mainpyfile, 'does not exist'
sys.exit(1)
del sys.argv[0] # Hide "pdb.py" from argument list
# Replace pdb's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(mainpyfile)
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. There is a "restart" command
# which allows explicit specification of command line arguments.
pdb = Pdb()
while True:
try:
pdb._runscript(mainpyfile)
if pdb._user_requested_quit:
break
print "The program finished and will be restarted"
except Restart:
print "Restarting", mainpyfile, "with arguments:"
print "\t" + " ".join(sys.argv[1:])
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print "The program exited via sys.exit(). Exit status: ",
print sys.exc_info()[1]
except:
traceback.print_exc()
print "Uncaught exception. Entering post mortem debugging"
print "Running 'cont' or 'step' will restart the program"
t = sys.exc_info()[2]
pdb.interaction(None, t)
print "Post mortem debugger finished. The " + mainpyfile + \
" will be restarted"
# When invoked as main program, invoke the debugger on a script
if __name__ == '__main__':
import pdb
pdb.main()
| gpl-2.0 | 4,847,551,637,663,001,000 | 33.391629 | 81 | 0.557415 | false |
ClearCorp/server-tools | base_import_match/models/base_import.py | 2 | 10305 | # -*- coding: utf-8 -*-
# Copyright 2016 Grupo ESOC Ingeniería de Servicios, S.L.U. - Jairo Llopis
# Copyright 2016 Tecnativa - Vicent Cubells
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, fields, models
from openerp import SUPERUSER_ID # TODO remove in v10
class BaseImportMatch(models.Model):
_name = "base_import.match"
_description = "Deduplicate settings prior to CSV imports."
_order = "sequence, name"
name = fields.Char(
compute="_compute_name",
store=True,
index=True)
sequence = fields.Integer(index=True)
model_id = fields.Many2one(
"ir.model",
"Model",
required=True,
ondelete="cascade",
domain=[("transient ", "=", False)],
help="In this model you will apply the match.")
model_name = fields.Char(
related="model_id.model",
store=True,
index=True)
field_ids = fields.One2many(
comodel_name="base_import.match.field",
inverse_name="match_id",
string="Fields",
required=True,
help="Fields that will define an unique key.")
@api.multi
@api.onchange("model_id")
def _onchange_model_id(self):
self.field_ids.unlink()
@api.model
def create(self, vals):
"""Wrap the model after creation."""
result = super(BaseImportMatch, self).create(vals)
self._load_autopatch(result.model_name)
return result
@api.multi
def unlink(self):
"""Unwrap the model after deletion."""
models = set(self.mapped("model_name"))
result = super(BaseImportMatch, self).unlink()
for model in models:
self._load_autopatch(model)
return result
@api.multi
def write(self, vals):
"""Wrap the model after writing."""
result = super(BaseImportMatch, self).write(vals)
if "model_id" in vals or "model_name" in vals:
for s in self:
self._load_autopatch(s.model_name)
return result
# TODO convert to @api.model_cr in v10
def _register_hook(self, cr):
"""Autopatch on init."""
models = set(
self.browse(
cr,
SUPERUSER_ID,
self.search(cr, SUPERUSER_ID, list()))
.mapped("model_name"))
for model in models:
self._load_autopatch(cr, SUPERUSER_ID, model)
@api.multi
@api.depends("model_id", "field_ids")
def _compute_name(self):
"""Automatic self-descriptive name for the setting records."""
for s in self:
s.name = u"{}: {}".format(
s.model_id.display_name,
" + ".join(
s.field_ids.mapped(
lambda r: (
(u"{} ({})" if r.conditional else u"{}").format(
r.field_id.name,
r.imported_value)))))
@api.model
def _match_find(self, model, converted_row, imported_row):
"""Find a update target for the given row.
        This traverses, in order, all the match rules that are usable with the
        imported data, and returns a match for the first rule that yields a
        single result.
:param openerp.models.Model model:
Model object that is being imported.
:param dict converted_row:
Row converted to Odoo api format, like the 3rd value that
:meth:`openerp.models.Model._convert_records` returns.
:param dict imported_row:
Row as it is being imported, in format::
{
"field_name": "string value",
"other_field": "True",
...
}
:return openerp.models.Model:
Return a dataset with one single match if it was found, or an
empty dataset if none or multiple matches were found.
"""
# Get usable rules to perform matches
usable = self._usable_for_load(model._name, converted_row.keys())
# Traverse usable combinations
for combination in usable:
combination_valid = True
domain = list()
for field in combination.field_ids:
# Check imported value if it is a conditional field
if field.conditional:
# Invalid combinations are skipped
if imported_row[field.name] != field.imported_value:
combination_valid = False
break
domain.append((field.name, "=", converted_row[field.name]))
if not combination_valid:
continue
match = model.search(domain)
# When a single match is found, stop searching
if len(match) == 1:
return match
# Return an empty match if none or multiple was found
return model
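    # Illustrative sketch (hypothetical example, not part of the original
    # module): with a rule on res.partner whose fields are `vat` plus a
    # conditional `is_company` with imported value "True", an imported row
    # such as {"vat": "US0123456", "is_company": "True"} yields the domain
    # [("vat", "=", <converted vat>), ("is_company", "=", <converted flag>)]
    # and the row is treated as an update only if exactly one partner matches.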
@api.model
def _load_wrapper(self):
"""Create a new load patch method."""
@api.model
def wrapper(self, fields, data):
"""Try to identify rows by other pseudo-unique keys.
It searches for rows that have no XMLID specified, and gives them
one if any :attr:`~.field_ids` combination is found. With a valid
XMLID in place, Odoo will understand that it must *update* the
record instead of *creating* a new one.
"""
newdata = list()
# Data conversion to ORM format
import_fields = map(models.fix_import_export_id_paths, fields)
converted_data = self._convert_records(
self._extract_records(import_fields, data))
# Mock Odoo to believe the user is importing the ID field
if "id" not in fields:
fields.append("id")
import_fields.append(["id"])
# Needed to match with converted data field names
clean_fields = [f[0] for f in import_fields]
for dbid, xmlid, record, info in converted_data:
row = dict(zip(clean_fields, data[info["record"]]))
match = self
if xmlid:
# Skip rows with ID, they do not need all this
row["id"] = xmlid
elif dbid:
# Find the xmlid for this dbid
match = self.browse(dbid)
else:
# Store records that match a combination
match = self.env["base_import.match"]._match_find(
self, record, row)
# Give a valid XMLID to this row if a match was found
row["id"] = (match._BaseModel__export_xml_id()
if match else row.get("id", u""))
# Store the modified row, in the same order as fields
newdata.append(tuple(row[f] for f in clean_fields))
# Leave the rest to Odoo itself
del data
return wrapper.origin(self, fields, newdata)
# Flag to avoid confusions with other possible wrappers
wrapper.__base_import_match = True
return wrapper
@api.model
def _load_autopatch(self, model_name):
"""[Un]apply patch automatically."""
self._load_unpatch(model_name)
if self.search([("model_name", "=", model_name)]):
self._load_patch(model_name)
@api.model
def _load_patch(self, model_name):
"""Apply patch for :param:`model_name`'s load method.
:param str model_name:
Model technical name, such as ``res.partner``.
"""
self.env[model_name]._patch_method(
"load", self._load_wrapper())
@api.model
def _load_unpatch(self, model_name):
"""Apply patch for :param:`model_name`'s load method.
:param str model_name:
Model technical name, such as ``res.partner``.
"""
model = self.env[model_name]
# Unapply patch only if there is one
try:
if model.load.__base_import_match:
model._revert_method("load")
except AttributeError:
pass
@api.model
def _usable_for_load(self, model_name, fields):
"""Return a set of elements usable for calling ``load()``.
:param str model_name:
Technical name of the model where you are loading data.
E.g. ``res.partner``.
:param list(str|bool) fields:
List of field names being imported.
"""
result = self
available = self.search([("model_name", "=", model_name)])
# Use only criteria with all required fields to match
for record in available:
if all(f.name in fields for f in record.field_ids):
result += record
return result
class BaseImportMatchField(models.Model):
_name = "base_import.match.field"
_description = "Field import match definition"
name = fields.Char(
related="field_id.name")
field_id = fields.Many2one(
comodel_name="ir.model.fields",
string="Field",
required=True,
ondelete="cascade",
domain="[('model_id', '=', model_id)]",
help="Field that will be part of an unique key.")
match_id = fields.Many2one(
comodel_name="base_import.match",
string="Match",
ondelete="cascade",
required=True)
model_id = fields.Many2one(
related="match_id.model_id")
conditional = fields.Boolean(
help="Enable if you want to use this field only in some conditions.")
imported_value = fields.Char(
help="If the imported value is not this, the whole matching rule will "
"be discarded. Be careful, this data is always treated as a "
"string, and comparison is case-sensitive so if you set 'True', "
"it will NOT match '1' nor 'true', only EXACTLY 'True'.")
@api.multi
@api.onchange("field_id", "match_id", "conditional", "imported_value")
def _onchange_match_id_name(self):
"""Update match name."""
self.mapped("match_id")._compute_name()
| agpl-3.0 | -3,822,570,387,524,895,000 | 33.577181 | 79 | 0.555318 | false |
BTCfork/hardfork_prototype_1_mvf-bu | contrib/devtools/optimize-pngs.py | 126 | 3201 | #!/usr/bin/env python
'''
Run this script every time you change one of the png files. Using pngcrush, it will optimize the png files, remove various color profiles, remove ancillary chunks (alla) and text chunks (text).
#pngcrush -brute -ow -rem gAMA -rem cHRM -rem iCCP -rem sRGB -rem alla -rem text
'''
import os
import sys
import subprocess
import hashlib
from PIL import Image
def file_hash(filename):
'''Return hash of raw file contents'''
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def content_hash(filename):
'''Return hash of RGBA contents of image'''
i = Image.open(filename)
i = i.convert('RGBA')
data = i.tobytes()
return hashlib.sha256(data).hexdigest()
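# Added descriptive note: file_hash() tracks whether the bytes on disk changed,
# while content_hash() is compared before and after crushing to verify that the
# decoded RGBA pixels are untouched by the optimization.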
pngcrush = 'pngcrush'
git = 'git'
folders = ["src/qt/res/movies", "src/qt/res/icons", "share/pixmaps"]
basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel']).rstrip('\n')
totalSaveBytes = 0
noHashChange = True
outputArray = []
for folder in folders:
absFolder=os.path.join(basePath, folder)
for file in os.listdir(absFolder):
extension = os.path.splitext(file)[1]
if extension.lower() == '.png':
print("optimizing "+file+"..."),
file_path = os.path.join(absFolder, file)
fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)};
fileMetaMap['contentHashPre'] = content_hash(file_path)
pngCrushOutput = ""
try:
pngCrushOutput = subprocess.check_output(
[pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path],
stderr=subprocess.STDOUT).rstrip('\n')
except:
print "pngcrush is not installed, aborting..."
sys.exit(0)
#verify
if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT):
print "PNG file "+file+" is corrupted after crushing, check out pngcursh version"
sys.exit(1)
fileMetaMap['sha256New'] = file_hash(file_path)
fileMetaMap['contentHashPost'] = content_hash(file_path)
if fileMetaMap['contentHashPre'] != fileMetaMap['contentHashPost']:
print "Image contents of PNG file "+file+" before and after crushing don't match"
sys.exit(1)
fileMetaMap['psize'] = os.path.getsize(file_path)
outputArray.append(fileMetaMap)
print("done\n"),
print "summary:\n+++++++++++++++++"
for fileDict in outputArray:
oldHash = fileDict['sha256Old']
newHash = fileDict['sha256New']
totalSaveBytes += fileDict['osize'] - fileDict['psize']
noHashChange = noHashChange and (oldHash == newHash)
print fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n"
print "completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes"
| mit | -8,090,174,829,810,936,000 | 41.68 | 193 | 0.607935 | false |
wiltonlazary/arangodb | 3rdParty/V8/V8-5.0.71.39/build/gyp/test/variables/commands/gyptest-commands.py | 311 | 1208 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<!()' syntax commands.
"""
import os
import TestGyp
test = TestGyp.TestGyp(format='gypd')
expect = test.read('commands.gyp.stdout').replace('\r', '')
test.run_gyp('commands.gyp',
'--debug', 'variables',
stdout=expect, ignore_line_numbers=True)
# Verify the commands.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('commands.gypd').replace('\r', '')
expect = test.read('commands.gypd.golden').replace('\r', '')
if not test.match(contents, expect):
print "Unexpected contents of `commands.gypd'"
test.diff(expect, contents, 'commands.gypd ')
test.fail_test()
test.pass_test()
| apache-2.0 | 8,222,151,072,918,191,000 | 29.974359 | 72 | 0.707781 | false |
nburn42/tensorflow | tensorflow/python/summary/writer/writer.py | 13 | 16422 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an API for generating Event protocol buffers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import plugin_asset
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2
from tensorflow.python.util.tf_export import tf_export
_PLUGINS_DIR = "plugins"
class SummaryToEventTransformer(object):
"""Abstractly implements the SummaryWriter API.
This API basically implements a number of endpoints (add_summary,
add_session_log, etc). The endpoints all generate an event protobuf, which is
passed to the contained event_writer.
"""
def __init__(self, event_writer, graph=None, graph_def=None):
"""Creates a `SummaryWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.summary.FileWriter(<some-directory>, sess.graph)
```
Args:
event_writer: An EventWriter. Implements add_event and get_logdir.
graph: A `Graph` object, such as `sess.graph`.
graph_def: DEPRECATED: Use the `graph` argument instead.
"""
self.event_writer = event_writer
# For storing used tags for session.run() outputs.
self._session_run_tags = {}
if graph is not None or graph_def is not None:
# Calling it with both graph and graph_def for backward compatibility.
self.add_graph(graph=graph, graph_def=graph_def)
# Also export the meta_graph_def in this case.
# graph may itself be a graph_def due to positional arguments
maybe_graph_as_def = (graph.as_graph_def(add_shapes=True)
if isinstance(graph, ops.Graph) else graph)
self.add_meta_graph(
meta_graph.create_meta_graph_def(graph_def=graph_def or
maybe_graph_as_def))
# This set contains tags of Summary Values that have been encountered
# already. The motivation here is that the SummaryWriter only keeps the
# metadata property (which is a SummaryMetadata proto) of the first Summary
# Value encountered for each tag. The SummaryWriter strips away the
# SummaryMetadata for all subsequent Summary Values with tags seen
# previously. This saves space.
self._seen_summary_tags = set()
def add_summary(self, summary, global_step=None):
"""Adds a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer
and adds it to the event file.
You can pass the result of evaluating any summary op, using
@{tf.Session.run} or
@{tf.Tensor.eval}, to this
function. Alternatively, you can pass a `tf.Summary` protocol
buffer that you populate with your own data. The latter is
commonly done to report evaluation results in event files.
Args:
summary: A `Summary` protocol buffer, optionally serialized as a string.
global_step: Number. Optional global step value to record with the
summary.
"""
if isinstance(summary, bytes):
summ = summary_pb2.Summary()
summ.ParseFromString(summary)
summary = summ
# We strip metadata from values with tags that we have seen before in order
# to save space - we just store the metadata on the first value with a
# specific tag.
for value in summary.value:
if not value.metadata:
continue
if value.tag in self._seen_summary_tags:
# This tag has been encountered before. Strip the metadata.
value.ClearField("metadata")
continue
# We encounter a value with a tag we have not encountered previously. And
# it has metadata. Remember to strip metadata from future values with this
# tag string.
self._seen_summary_tags.add(value.tag)
event = event_pb2.Event(summary=summary)
self._add_event(event, global_step)
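  # Illustrative sketch (added note, not part of the original file): besides
  # passing the serialized output of a summary op, a caller can build the
  # proto directly, e.g. (`writer` and `step` are hypothetical names):
  #
  #   value = summary_pb2.Summary.Value(tag="loss", simple_value=0.25)
  #   writer.add_summary(summary_pb2.Summary(value=[value]), global_step=step)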
def add_session_log(self, session_log, global_step=None):
"""Adds a `SessionLog` protocol buffer to the event file.
This method wraps the provided session in an `Event` protocol buffer
and adds it to the event file.
Args:
session_log: A `SessionLog` protocol buffer.
global_step: Number. Optional global step value to record with the
summary.
"""
event = event_pb2.Event(session_log=session_log)
self._add_event(event, global_step)
def _add_graph_def(self, graph_def, global_step=None):
graph_bytes = graph_def.SerializeToString()
event = event_pb2.Event(graph_def=graph_bytes)
self._add_event(event, global_step)
def add_graph(self, graph, global_step=None, graph_def=None):
"""Adds a `Graph` to the event file.
The graph described by the protocol buffer will be displayed by
TensorBoard. Most users pass a graph in the constructor instead.
Args:
graph: A `Graph` object, such as `sess.graph`.
global_step: Number. Optional global step counter to record with the
graph.
graph_def: DEPRECATED. Use the `graph` parameter instead.
Raises:
ValueError: If both graph and graph_def are passed to the method.
"""
if graph is not None and graph_def is not None:
raise ValueError("Please pass only graph, or graph_def (deprecated), "
"but not both.")
if isinstance(graph, ops.Graph) or isinstance(graph_def, ops.Graph):
# The user passed a `Graph`.
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if not isinstance(graph, ops.Graph):
logging.warning("When passing a `Graph` object, please use the `graph`"
" named argument instead of `graph_def`.")
graph = graph_def
# Serialize the graph with additional info.
true_graph_def = graph.as_graph_def(add_shapes=True)
self._write_plugin_assets(graph)
elif (isinstance(graph, graph_pb2.GraphDef) or
isinstance(graph_def, graph_pb2.GraphDef)):
# The user passed a `GraphDef`.
logging.warning("Passing a `GraphDef` to the SummaryWriter is deprecated."
" Pass a `Graph` object instead, such as `sess.graph`.")
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if isinstance(graph, graph_pb2.GraphDef):
true_graph_def = graph
else:
true_graph_def = graph_def
else:
# The user passed neither `Graph`, nor `GraphDef`.
raise TypeError("The passed graph must be an instance of `Graph` "
"or the deprecated `GraphDef`")
# Finally, add the graph_def to the summary writer.
self._add_graph_def(true_graph_def, global_step)
def _write_plugin_assets(self, graph):
plugin_assets = plugin_asset.get_all_plugin_assets(graph)
logdir = self.event_writer.get_logdir()
for asset_container in plugin_assets:
plugin_name = asset_container.plugin_name
plugin_dir = os.path.join(logdir, _PLUGINS_DIR, plugin_name)
gfile.MakeDirs(plugin_dir)
assets = asset_container.assets()
for (asset_name, content) in assets.items():
asset_path = os.path.join(plugin_dir, asset_name)
with gfile.Open(asset_path, "w") as f:
f.write(content)
def add_meta_graph(self, meta_graph_def, global_step=None):
"""Adds a `MetaGraphDef` to the event file.
The `MetaGraphDef` allows running the given graph via
`saver.import_meta_graph()`.
Args:
meta_graph_def: A `MetaGraphDef` object, often as returned by
`saver.export_meta_graph()`.
global_step: Number. Optional global step counter to record with the
graph.
Raises:
      TypeError: If `meta_graph_def` is not an instance of `MetaGraphDef`.
"""
if not isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):
raise TypeError("meta_graph_def must be type MetaGraphDef, saw type: %s" %
type(meta_graph_def))
meta_graph_bytes = meta_graph_def.SerializeToString()
event = event_pb2.Event(meta_graph_def=meta_graph_bytes)
self._add_event(event, global_step)
  def add_run_metadata(self, run_metadata, tag, global_step=None):
    """Adds metadata information for a single session.run() call.
Args:
run_metadata: A `RunMetadata` protobuf object.
tag: The tag name for this metadata.
global_step: Number. Optional global step counter to record with the
StepStats.
Raises:
ValueError: If the provided tag was already used for this type of event.
"""
if tag in self._session_run_tags:
raise ValueError("The provided tag was already used for this event type")
self._session_run_tags[tag] = True
tagged_metadata = event_pb2.TaggedRunMetadata()
tagged_metadata.tag = tag
# Store the `RunMetadata` object as bytes in order to have postponed
# (lazy) deserialization when used later.
tagged_metadata.run_metadata = run_metadata.SerializeToString()
event = event_pb2.Event(tagged_run_metadata=tagged_metadata)
self._add_event(event, global_step)
def _add_event(self, event, step):
event.wall_time = time.time()
if step is not None:
event.step = int(step)
self.event_writer.add_event(event)
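# Illustrative sketch (added for exposition, not part of the original module):
# how the methods above are typically driven from user code.  Assumes a
# TensorFlow 1.x environment where `tf.summary.FileWriter`, `tf.Summary` and
# `tf.RunMetadata` are available; the log directory, tags and values are
# placeholders.
def _example_write_summary_and_run_metadata(logdir="/tmp/example-logdir"):
  """Sketch only: write a summary and tagged run metadata to an event file."""
  import tensorflow as tf
  writer = tf.summary.FileWriter(logdir)
  summary = tf.Summary(value=[tf.Summary.Value(tag="loss", simple_value=0.5)])
  writer.add_summary(summary, global_step=1)
  writer.add_run_metadata(tf.RunMetadata(), tag="step-1", global_step=1)
  writer.flush()
  writer.close()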
@tf_export("summary.FileWriter")
class FileWriter(SummaryToEventTransformer):
"""Writes `Summary` protocol buffers to event files.
The `FileWriter` class provides a mechanism to create an event file in a
given directory and add summaries and events to it. The class updates the
file contents asynchronously. This allows a training program to call methods
to add data to the file directly from the training loop, without slowing down
training.
When constructed with a `tf.Session` parameter, a `FileWriter` instead forms
a compatibility layer over new graph-based summaries (`tf.contrib.summary`)
to facilitate the use of new summary writing with pre-existing code that
expects a `FileWriter` instance.
"""
def __init__(self,
logdir,
graph=None,
max_queue=10,
flush_secs=120,
graph_def=None,
filename_suffix=None,
session=None):
"""Creates a `FileWriter`, optionally shared within the given session.
Typically, constructing a file writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.summary.FileWriter(<some-directory>, sess.graph)
```
    The `session` argument to the constructor makes the returned `FileWriter` a
    compatibility layer over new graph-based summaries (`tf.contrib.summary`).
Crucially, this means the underlying writer resource and events file will
be shared with any other `FileWriter` using the same `session` and `logdir`,
    and with any `tf.contrib.summary.SummaryWriter` in this session using the
    same shared resource name (which by default is scoped to the logdir). If
no such resource exists, one will be created using the remaining arguments
to this constructor, but if one already exists those arguments are ignored.
In either case, ops will be added to `session.graph` to control the
underlying file writer resource. See `tf.contrib.summary` for more details.
Args:
logdir: A string. Directory where event file will be written.
graph: A `Graph` object, such as `sess.graph`.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
graph_def: DEPRECATED: Use the `graph` argument instead.
filename_suffix: A string. Every event file's name is suffixed with
`suffix`.
session: A `tf.Session` object. See details above.
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
`FileWriter` is not compatible with eager execution. To write TensorBoard
summaries under eager execution, use `tf.contrib.summary` instead.
    @end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError(
"tf.summary.FileWriter is not compatible with eager execution. "
"Use tf.contrib.summary instead.")
if session is not None:
event_writer = EventFileWriterV2(
session, logdir, max_queue, flush_secs, filename_suffix)
else:
event_writer = EventFileWriter(logdir, max_queue, flush_secs,
filename_suffix)
super(FileWriter, self).__init__(event_writer, graph, graph_def)
def __enter__(self):
"""Make usable with "with" statement."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make usable with "with" statement."""
self.close()
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self.event_writer.get_logdir()
def add_event(self, event):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
"""
self.event_writer.add_event(event)
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
self.event_writer.flush()
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
self.event_writer.close()
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
self.event_writer.reopen()
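# Illustrative sketch (added for exposition, not part of the original module):
# FileWriter also works as a context manager through the __enter__/__exit__
# methods above, so close() runs automatically.  Assumes TensorFlow 1.x graph
# mode; the log directory and tag are placeholders.
def _example_filewriter_as_context_manager(logdir="/tmp/example-logdir"):
  """Sketch only: write the session graph and one summary, then auto-close."""
  import tensorflow as tf
  with tf.Session() as sess:
    with tf.summary.FileWriter(logdir, sess.graph) as writer:
      value = tf.Summary.Value(tag="accuracy", simple_value=1.0)
      writer.add_summary(tf.Summary(value=[value]), global_step=0)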
| apache-2.0 | -2,051,896,926,892,857,600 | 38.859223 | 83 | 0.684935 | false |
peterfpeterson/mantid | Framework/PythonInterface/test/python/mantid/geometry/CrystalStructureTest.py | 3 | 3758 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=no-init,invalid-name,too-many-public-methods,broad-except
import unittest
from mantid.geometry import CrystalStructure
class CrystalStructureTest(unittest.TestCase):
def test_creation(self):
# Some valid constructions
self.assertTrue(self.createCrystalStructureOrRaise("5.43 5.43 5.43", "F d -3 m", "Al 1/3 0.454 1/12 1.0 0.01"))
self.assertTrue(self.createCrystalStructureOrRaise("5.43 5.43 5.43", "C m m m", "Al 1/3 0.454 1/12 1.0 0.01;\n"
"Si 2/3 0.121 1/8"))
self.assertTrue(
self.createCrystalStructureOrRaise("5.43 5.43 5.43 90 90 120", "R -3 c", "Al 1/3 0.454 1/12 1.0 0.01;\n"
"Si 2/3 0.121 1/8"))
# Invalid unit cell specification
self.assertFalse(
self.createCrystalStructureOrRaise("5.43 5.43 5.43 90.0", "C m m m", "Al 1/3 0.454 1/12 1.0 0.01"))
# Invalid space group
self.assertFalse(
self.createCrystalStructureOrRaise("5.43 5.43 5.43", "INVALID", "Al 1/3 0.454 1/12 1.0 0.01"))
# Invalid atom specification
self.assertFalse(
self.createCrystalStructureOrRaise("5.43 5.43 5.43", "C m c e", "Al 1/3 0"))
def createCrystalStructureOrRaise(self, unitCell, spaceGroup, atomStrings):
try:
CrystalStructure(unitCell, spaceGroup, atomStrings)
return True
except Exception:
return False
def test_UnitCell(self):
structure = CrystalStructure("5.43 5.42 5.41", "F d -3 m", "Al 1/3 0.454 1/12 1.0 0.01")
cell = structure.getUnitCell()
self.assertEqual(cell.a(), 5.43)
self.assertEqual(cell.b(), 5.42)
self.assertEqual(cell.c(), 5.41)
def test_SpaceGroup(self):
structure = CrystalStructure("5.43 5.42 5.41", "F d -3 m", "Al 1/3 0.454 1/12 1.0 0.01")
spaceGroup = structure.getSpaceGroup()
self.assertEqual(spaceGroup.getHMSymbol(), "F d -3 m")
def test_scatterers(self):
initialString = "Al 1/3 0.454 1/12 1 0.01;Si 0.1 0.2 0.3 0.99 0.1"
structure = CrystalStructure("5.43 5.42 5.41", "F d -3 m", initialString)
scatterers = structure.getScatterers()
self.assertEqual(';'.join(scatterers), initialString)
def test_to_string(self):
initialString = "Al 1/3 0.454 1/12 1 0.01;Si 0.1 0.2 0.3 0.99 0.1"
structure = CrystalStructure("5.43 5.42 5.41", "F d -3 m", initialString)
expected_str = "Crystal structure with:\nUnit cell: a = 5.43 b = 5.42 "\
"c = 5.41 alpha = 90 beta = 90 gamma = 90\n"\
"Centering: All-face centred\nSpace Group: F d -3 m\n"\
"Scatterers: Al 1/3 0.454 1/12 1 0.01, "\
"Si 0.1 0.2 0.3 0.99 0.1"
expected_repr = "CrystalStructure(\"5.43 5.42 5.41 90 90 90\", "\
"\"F d -3 m\", \"Al 1/3 0.454 1/12 1 0.01; "\
"Si 0.1 0.2 0.3 0.99 0.1\")"
self.assertEqual(expected_str, str(structure))
self.assertEqual(expected_repr, structure.__repr__())
newStructure = eval(structure.__repr__())
self.assertEqual(structure.getUnitCell().a(), newStructure.getUnitCell().a())
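# Illustrative sketch (added for exposition, not part of the original tests):
# constructing a CrystalStructure directly, mirroring the strings used above.
# The silicon atom string is an assumption chosen only for demonstration.
def _example_crystal_structure():
    structure = CrystalStructure("5.43 5.43 5.43", "F d -3 m", "Si 0 0 0 1.0 0.05")
    # The space group symbol round-trips and the unit cell is cubic (a = 5.43).
    return structure.getSpaceGroup().getHMSymbol(), structure.getUnitCell().a()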
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,261,383,491,313,230,300 | 43.211765 | 119 | 0.574508 | false |
marklee77/fail2ban | fail2ban/client/filterreader.py | 3 | 3047 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Cyril Jaquier
#
__author__ = "Cyril Jaquier"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import os, shlex
from .configreader import DefinitionInitConfigReader
from ..server.action import CommandAction
from ..helpers import getLogger
# Gets the instance of the logger.
logSys = getLogger(__name__)
class FilterReader(DefinitionInitConfigReader):
_configOpts = [
["string", "ignoreregex", None],
["string", "failregex", ""],
]
def setFile(self, fileName):
self.__file = fileName
DefinitionInitConfigReader.setFile(self, os.path.join("filter.d", fileName))
def getFile(self):
return self.__file
def getCombined(self):
combinedopts = dict(list(self._opts.items()) + list(self._initOpts.items()))
if not len(combinedopts):
return {};
opts = CommandAction.substituteRecursiveTags(combinedopts)
if not opts:
raise ValueError('recursive tag definitions unable to be resolved')
return opts;
def convert(self):
stream = list()
opts = self.getCombined()
if not len(opts):
return stream;
for opt, value in opts.iteritems():
if opt == "failregex":
for regex in value.split('\n'):
# Do not send a command if the rule is empty.
if regex != '':
stream.append(["set", self._jailName, "addfailregex", regex])
elif opt == "ignoreregex":
for regex in value.split('\n'):
# Do not send a command if the rule is empty.
if regex != '':
stream.append(["set", self._jailName, "addignoreregex", regex])
if self._initOpts:
if 'maxlines' in self._initOpts:
# We warn when multiline regex is used without maxlines > 1
# therefore keep sure we set this option first.
stream.insert(0, ["set", self._jailName, "maxlines", self._initOpts["maxlines"]])
if 'datepattern' in self._initOpts:
stream.append(["set", self._jailName, "datepattern", self._initOpts["datepattern"]])
# Do not send a command if the match is empty.
if self._initOpts.get("journalmatch", '') != '':
for match in self._initOpts["journalmatch"].split("\n"):
stream.append(
["set", self._jailName, "addjournalmatch"] +
shlex.split(match))
return stream
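# Illustrative sketch (added for exposition, not part of the original module):
# how a FilterReader is typically driven by the fail2ban client.  The filter
# name, jail name and base directory are placeholders, and the constructor
# arguments shown are an assumption based on DefinitionInitConfigReader.
def _exampleFilterReaderUsage(basedir='/etc/fail2ban'):
	reader = FilterReader('sshd', 'sshd', {})
	reader.setBaseDir(basedir)
	reader.read()
	# convert() yields command lists such as ["set", "sshd", "addfailregex", ...].
	return reader.convert()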
| gpl-2.0 | 9,120,545,110,934,668,000 | 33.235955 | 88 | 0.687233 | false |
patrickod/stem | stem/response/mapaddress.py | 3 | 1326 | # Copyright 2012-2017, Damian Johnson and The Tor Project
# See LICENSE for licensing information
import stem.response
import stem.socket
class MapAddressResponse(stem.response.ControlMessage):
"""
Reply for a MAPADDRESS query.
Doesn't raise an exception unless no addresses were mapped successfully.
:var dict entries: mapping between the original and replacement addresses
:raises:
* :class:`stem.OperationFailed` if Tor was unable to satisfy the request
* :class:`stem.InvalidRequest` if the addresses provided were invalid
"""
def _parse_message(self):
# Example:
# 250-127.192.10.10=torproject.org
# 250 1.2.3.4=tor.freehaven.net
if not self.is_ok():
for code, _, message in self.content():
if code == '512':
raise stem.InvalidRequest(code, message)
elif code == '451':
raise stem.OperationFailed(code, message)
else:
          raise stem.ProtocolError('MAPADDRESS returned unexpected response code: %s' % code)
self.entries = {}
for code, _, message in self.content():
if code == '250':
try:
key, value = message.split('=', 1)
self.entries[key] = value
except ValueError:
raise stem.ProtocolError(None, "MAPADDRESS returned '%s', which isn't a mapping" % message)
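# Illustrative sketch (added for exposition, not part of the original module):
# the example reply shown in _parse_message above would populate the 'entries'
# attribute as follows.
_EXAMPLE_PARSED_ENTRIES = {
  '127.192.10.10': 'torproject.org',
  '1.2.3.4': 'tor.freehaven.net',
}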
| lgpl-3.0 | 5,902,658,498,861,537,000 | 30.571429 | 101 | 0.661388 | false |
jpaalasm/pyglet | contrib/scene2d/tests/scene2d/VIEW_SUBWINDOW.py | 29 | 1420 | #!/usr/bin/env python
'''Testing flat map allow_oob enforcement.
Press 0-9 to set the size of the view in the window (1=10%, 0=100%)
Press arrow keys to move view focal point (little ball) around map.
Press "o" to turn allow_oob on and off.
You should see no black border with allow_oob=False.
Press escape or close the window to finish the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import unittest
from render_base import RenderBase
import scene2d
from pyglet.event import *
from pyglet.window.event import *
from pyglet.window import key
from scene2d.debug import gen_rect_map
class OOBTest(RenderBase):
def test_main(self):
self.init_window(256, 256)
self.set_map(gen_rect_map([[{}]*10]*10, 32, 32))
@event(self.w)
def on_text(text):
if text == 'o':
self.view.allow_oob = not self.view.allow_oob
print 'NOTE: allow_oob =', self.view.allow_oob
return
try:
size = int(25.6 * float(text))
if size == 0: size = 256
c = self.view.camera
c.width = c.height = size
c.x = c.y = (256-size)/2
except:
return EVENT_UNHANDLED
print 'NOTE: allow_oob =', self.view.allow_oob
self.show_focus()
self.run_test()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -8,695,129,160,914,347,000 | 26.843137 | 67 | 0.583803 | false |
loop1024/pymo-global | android/pgs4a-0.9.6/python-install/lib/python2.7/test/test_bisect.py | 41 | 13043 | import sys
import unittest
from test import test_support
from UserList import UserList
# We do a bit of trickery here to be able to test both the C implementation
# and the Python implementation of the module.
# Make it impossible to import the C implementation anymore.
sys.modules['_bisect'] = 0
# We must also handle the case that bisect was imported before.
if 'bisect' in sys.modules:
del sys.modules['bisect']
# Now we can import the module and get the pure Python implementation.
import bisect as py_bisect
# Restore everything to normal.
del sys.modules['_bisect']
del sys.modules['bisect']
# This is now the module with the C implementation.
import bisect as c_bisect
class TestBisect(unittest.TestCase):
module = None
def setUp(self):
self.precomputedCases = [
(self.module.bisect_right, [], 1, 0),
(self.module.bisect_right, [1], 0, 0),
(self.module.bisect_right, [1], 1, 1),
(self.module.bisect_right, [1], 2, 1),
(self.module.bisect_right, [1, 1], 0, 0),
(self.module.bisect_right, [1, 1], 1, 2),
(self.module.bisect_right, [1, 1], 2, 2),
(self.module.bisect_right, [1, 1, 1], 0, 0),
(self.module.bisect_right, [1, 1, 1], 1, 3),
(self.module.bisect_right, [1, 1, 1], 2, 3),
(self.module.bisect_right, [1, 1, 1, 1], 0, 0),
(self.module.bisect_right, [1, 1, 1, 1], 1, 4),
(self.module.bisect_right, [1, 1, 1, 1], 2, 4),
(self.module.bisect_right, [1, 2], 0, 0),
(self.module.bisect_right, [1, 2], 1, 1),
(self.module.bisect_right, [1, 2], 1.5, 1),
(self.module.bisect_right, [1, 2], 2, 2),
(self.module.bisect_right, [1, 2], 3, 2),
(self.module.bisect_right, [1, 1, 2, 2], 0, 0),
(self.module.bisect_right, [1, 1, 2, 2], 1, 2),
(self.module.bisect_right, [1, 1, 2, 2], 1.5, 2),
(self.module.bisect_right, [1, 1, 2, 2], 2, 4),
(self.module.bisect_right, [1, 1, 2, 2], 3, 4),
(self.module.bisect_right, [1, 2, 3], 0, 0),
(self.module.bisect_right, [1, 2, 3], 1, 1),
(self.module.bisect_right, [1, 2, 3], 1.5, 1),
(self.module.bisect_right, [1, 2, 3], 2, 2),
(self.module.bisect_right, [1, 2, 3], 2.5, 2),
(self.module.bisect_right, [1, 2, 3], 3, 3),
(self.module.bisect_right, [1, 2, 3], 4, 3),
(self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 0, 0),
(self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1, 1),
(self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1.5, 1),
(self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2, 3),
(self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2.5, 3),
(self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3, 6),
(self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3.5, 6),
(self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 4, 10),
(self.module.bisect_right, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 5, 10),
(self.module.bisect_left, [], 1, 0),
(self.module.bisect_left, [1], 0, 0),
(self.module.bisect_left, [1], 1, 0),
(self.module.bisect_left, [1], 2, 1),
(self.module.bisect_left, [1, 1], 0, 0),
(self.module.bisect_left, [1, 1], 1, 0),
(self.module.bisect_left, [1, 1], 2, 2),
(self.module.bisect_left, [1, 1, 1], 0, 0),
(self.module.bisect_left, [1, 1, 1], 1, 0),
(self.module.bisect_left, [1, 1, 1], 2, 3),
(self.module.bisect_left, [1, 1, 1, 1], 0, 0),
(self.module.bisect_left, [1, 1, 1, 1], 1, 0),
(self.module.bisect_left, [1, 1, 1, 1], 2, 4),
(self.module.bisect_left, [1, 2], 0, 0),
(self.module.bisect_left, [1, 2], 1, 0),
(self.module.bisect_left, [1, 2], 1.5, 1),
(self.module.bisect_left, [1, 2], 2, 1),
(self.module.bisect_left, [1, 2], 3, 2),
(self.module.bisect_left, [1, 1, 2, 2], 0, 0),
(self.module.bisect_left, [1, 1, 2, 2], 1, 0),
(self.module.bisect_left, [1, 1, 2, 2], 1.5, 2),
(self.module.bisect_left, [1, 1, 2, 2], 2, 2),
(self.module.bisect_left, [1, 1, 2, 2], 3, 4),
(self.module.bisect_left, [1, 2, 3], 0, 0),
(self.module.bisect_left, [1, 2, 3], 1, 0),
(self.module.bisect_left, [1, 2, 3], 1.5, 1),
(self.module.bisect_left, [1, 2, 3], 2, 1),
(self.module.bisect_left, [1, 2, 3], 2.5, 2),
(self.module.bisect_left, [1, 2, 3], 3, 2),
(self.module.bisect_left, [1, 2, 3], 4, 3),
(self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 0, 0),
(self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1, 0),
(self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1.5, 1),
(self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2, 1),
(self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2.5, 3),
(self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3, 3),
(self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3.5, 6),
(self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 4, 6),
(self.module.bisect_left, [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 5, 10)
]
def test_precomputed(self):
for func, data, elem, expected in self.precomputedCases:
self.assertEqual(func(data, elem), expected)
self.assertEqual(func(UserList(data), elem), expected)
def test_negative_lo(self):
# Issue 3301
mod = self.module
self.assertRaises(ValueError, mod.bisect_left, [1, 2, 3], 5, -1, 3),
self.assertRaises(ValueError, mod.bisect_right, [1, 2, 3], 5, -1, 3),
self.assertRaises(ValueError, mod.insort_left, [1, 2, 3], 5, -1, 3),
self.assertRaises(ValueError, mod.insort_right, [1, 2, 3], 5, -1, 3),
def test_random(self, n=25):
from random import randrange
for i in xrange(n):
data = [randrange(0, n, 2) for j in xrange(i)]
data.sort()
elem = randrange(-1, n+1)
ip = self.module.bisect_left(data, elem)
if ip < len(data):
self.assertTrue(elem <= data[ip])
if ip > 0:
self.assertTrue(data[ip-1] < elem)
ip = self.module.bisect_right(data, elem)
if ip < len(data):
self.assertTrue(elem < data[ip])
if ip > 0:
self.assertTrue(data[ip-1] <= elem)
def test_optionalSlicing(self):
for func, data, elem, expected in self.precomputedCases:
for lo in xrange(4):
lo = min(len(data), lo)
for hi in xrange(3,8):
hi = min(len(data), hi)
ip = func(data, elem, lo, hi)
self.assertTrue(lo <= ip <= hi)
if func is self.module.bisect_left and ip < hi:
self.assertTrue(elem <= data[ip])
if func is self.module.bisect_left and ip > lo:
self.assertTrue(data[ip-1] < elem)
if func is self.module.bisect_right and ip < hi:
self.assertTrue(elem < data[ip])
if func is self.module.bisect_right and ip > lo:
self.assertTrue(data[ip-1] <= elem)
self.assertEqual(ip, max(lo, min(hi, expected)))
def test_backcompatibility(self):
self.assertEqual(self.module.bisect, self.module.bisect_right)
def test_keyword_args(self):
data = [10, 20, 30, 40, 50]
self.assertEqual(self.module.bisect_left(a=data, x=25, lo=1, hi=3), 2)
self.assertEqual(self.module.bisect_right(a=data, x=25, lo=1, hi=3), 2)
self.assertEqual(self.module.bisect(a=data, x=25, lo=1, hi=3), 2)
self.module.insort_left(a=data, x=25, lo=1, hi=3)
self.module.insort_right(a=data, x=25, lo=1, hi=3)
self.module.insort(a=data, x=25, lo=1, hi=3)
self.assertEqual(data, [10, 20, 25, 25, 25, 30, 40, 50])
class TestBisectPython(TestBisect):
module = py_bisect
class TestBisectC(TestBisect):
module = c_bisect
#==============================================================================
class TestInsort(unittest.TestCase):
module = None
def test_vsBuiltinSort(self, n=500):
from random import choice
for insorted in (list(), UserList()):
for i in xrange(n):
digit = choice("0123456789")
if digit in "02468":
f = self.module.insort_left
else:
f = self.module.insort_right
f(insorted, digit)
self.assertEqual(sorted(insorted), insorted)
def test_backcompatibility(self):
self.assertEqual(self.module.insort, self.module.insort_right)
def test_listDerived(self):
class List(list):
data = []
def insert(self, index, item):
self.data.insert(index, item)
lst = List()
self.module.insort_left(lst, 10)
self.module.insort_right(lst, 5)
self.assertEqual([5, 10], lst.data)
class TestInsortPython(TestInsort):
module = py_bisect
class TestInsortC(TestInsort):
module = c_bisect
#==============================================================================
class LenOnly:
"Dummy sequence class defining __len__ but not __getitem__."
def __len__(self):
return 10
class GetOnly:
"Dummy sequence class defining __getitem__ but not __len__."
def __getitem__(self, ndx):
return 10
class CmpErr:
"Dummy element that always raises an error during comparison"
def __cmp__(self, other):
raise ZeroDivisionError
class TestErrorHandling(unittest.TestCase):
module = None
def test_non_sequence(self):
for f in (self.module.bisect_left, self.module.bisect_right,
self.module.insort_left, self.module.insort_right):
self.assertRaises(TypeError, f, 10, 10)
def test_len_only(self):
for f in (self.module.bisect_left, self.module.bisect_right,
self.module.insort_left, self.module.insort_right):
self.assertRaises(AttributeError, f, LenOnly(), 10)
def test_get_only(self):
for f in (self.module.bisect_left, self.module.bisect_right,
self.module.insort_left, self.module.insort_right):
self.assertRaises(AttributeError, f, GetOnly(), 10)
def test_cmp_err(self):
seq = [CmpErr(), CmpErr(), CmpErr()]
for f in (self.module.bisect_left, self.module.bisect_right,
self.module.insort_left, self.module.insort_right):
self.assertRaises(ZeroDivisionError, f, seq, 10)
def test_arg_parsing(self):
for f in (self.module.bisect_left, self.module.bisect_right,
self.module.insort_left, self.module.insort_right):
self.assertRaises(TypeError, f, 10)
class TestErrorHandlingPython(TestErrorHandling):
module = py_bisect
class TestErrorHandlingC(TestErrorHandling):
module = c_bisect
#==============================================================================
libreftest = """
Example from the Library Reference: Doc/library/bisect.rst
The bisect() function is generally useful for categorizing numeric data.
This example uses bisect() to look up a letter grade for an exam total
(say) based on a set of ordered numeric breakpoints: 85 and up is an `A',
75..84 is a `B', etc.
>>> grades = "FEDCBA"
>>> breakpoints = [30, 44, 66, 75, 85]
>>> from bisect import bisect
>>> def grade(total):
... return grades[bisect(breakpoints, total)]
...
>>> grade(66)
'C'
>>> map(grade, [33, 99, 77, 44, 12, 88])
['E', 'A', 'B', 'D', 'F', 'A']
"""
#------------------------------------------------------------------------------
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
from test import test_bisect
test_classes = [TestBisectPython, TestBisectC,
TestInsortPython, TestInsortC,
TestErrorHandlingPython, TestErrorHandlingC]
test_support.run_unittest(*test_classes)
test_support.run_doctest(test_bisect, verbose)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True)
| mit | -8,524,976,102,129,539,000 | 40.14511 | 79 | 0.526949 | false |
40223151/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/http/cookies.py | 735 | 20810 | #!/usr/bin/env python3
#
####
# Copyright 2000 by Timothy O'Malley <[email protected]>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <[email protected]>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell ([email protected]) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy...
>>> from http import cookies
Most of the time you start by creating a cookie.
>>> C = cookies.SimpleCookie()
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = cookies.SimpleCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = cookies.SimpleCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print(C.output(header="Cookie:"))
Cookie: rocky=road; Path=/cookie
>>> print(C.output(attrs=[], header="Cookie:"))
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = cookies.SimpleCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = cookies.SimpleCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print(C)
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = cookies.SimpleCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print(C)
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = cookies.SimpleCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = cookies.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
Finis.
"""
#
# Import our required modules
#
import re
import string
__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a backslash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
def _quote(str, LegalChars=_LegalChars):
r"""Quote a string for use in a cookie header.
If the string does not need to be double-quoted, then just return the
string. Otherwise, surround the string in doublequotes and quote
(with a \) special characters.
"""
if all(c in LegalChars for c in str):
return str
else:
return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"'
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
o_match = _OctalPatt.search(str, i)
q_match = _QuotePatt.search(str, i)
if not o_match and not q_match: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k + 2
else: # OctalPatt matched
res.append(str[i:j])
res.append(chr(int(str[j+1:j+4], 8)))
i = j + 4
return _nulljoin(res)
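# Illustrative sketch (added for exposition, not part of the original module):
# the two-way quoting described above, shown as a round trip.
def _example_quote_roundtrip():
    quoted = _quote('fudge=\012;')  # ';' and '\n' become octal escapes in a quoted string
    return _unquote(quoted)         # round-trips back to 'fudge=\n;'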
# The _getdate() routine is used to set the expiration time in the cookie's HTTP
# header. By default, _getdate() returns the current time in the appropriate
# "expires" format for a Set-Cookie header. The one optional argument is an
# offset from now, in seconds. For example, an offset of -3600 means "one hour
# ago". The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
class Morsel(dict):
"""A class to hold ONE (key, value) pair.
In a cookie, each such pair may have several attributes, so this class is
used to keep the attributes associated with the appropriate key,value pair.
This class also includes a coded_value attribute, which is used to hold
the network representation of the value. This is most useful when Python
objects are pickled for network transit.
"""
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This is an extension from Microsoft:
# httponly
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = {
"expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"httponly" : "httponly",
"version" : "Version",
}
_flags = {'secure', 'httponly'}
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
# Set default attributes
for key in self._reserved:
dict.__setitem__(self, key, "")
def __setitem__(self, K, V):
K = K.lower()
if not K in self._reserved:
raise CookieError("Invalid Attribute %s" % K)
dict.__setitem__(self, K, V)
def isReservedKey(self, K):
return K.lower() in self._reserved
def set(self, key, val, coded_val, LegalChars=_LegalChars):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if key.lower() in self._reserved:
raise CookieError("Attempt to set a reserved key: %s" % key)
if any(c not in LegalChars for c in key):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
def output(self, attrs=None, header="Set-Cookie:"):
return "%s %s" % (header, self.OutputString(attrs))
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value))
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % (self.OutputString(attrs).replace('"', r'\"'))
def OutputString(self, attrs=None):
# Build up our result
#
result = []
append = result.append
# First, the key=value pair
append("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = sorted(self.items())
for key, value in items:
if value == "":
continue
if key not in attrs:
continue
if key == "expires" and isinstance(value, int):
append("%s=%s" % (self._reserved[key], _getdate(value)))
elif key == "max-age" and isinstance(value, int):
append("%s=%d" % (self._reserved[key], value))
elif key == "secure":
append(str(self._reserved[key]))
elif key == "httponly":
append(str(self._reserved[key]))
else:
append("%s=%s" % (self._reserved[key], value))
# Return the result
return _semispacejoin(result)
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(r"""
(?x) # This is a verbose pattern
(?P<key> # Start of group 'key'
""" + _LegalCharsPatt + r"""+? # Any word of at least one letter
) # End of group 'key'
( # Optional group: there may not be a value.
\s*=\s* # Equal Sign
(?P<val> # Start of group 'val'
"(?:[^\\"]|\\.)*" # Any doublequoted string
| # or
\w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
| # or
""" + _LegalCharsPatt + r"""* # Any word or empty string
) # End of group 'val'
)? # End of optional value group
\s* # Any number of spaces.
(\s+|;|$) # Ending either at space, semicolon, or EOS.
""", re.ASCII) # May be removed if safe.
# At long last, here is the cookie class. Using this class is almost just like
# using a dictionary. See this module's docstring for example usage.
#
class BaseCookie(dict):
"""A container class for a set of Morsels."""
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
def __init__(self, input=None):
if input:
self.load(input)
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.output(attrs, header))
return sep.join(result)
__str__ = output
def __repr__(self):
l = []
items = sorted(self.items())
for key, value in items:
l.append('%s=%s' % (key, repr(value.value)))
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.js_output(attrs))
return _nulljoin(result)
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if isinstance(rawdata, str):
self.__parse_string(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for key, value in rawdata.items():
self[key] = value
return
def __parse_string(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match:
# No more cookies
break
key, value = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if key[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[key[1:]] = value
elif key.lower() in Morsel._reserved:
if M:
if value is None:
if key.lower() in Morsel._flags:
M[key] = True
else:
M[key] = _unquote(value)
elif value is not None:
rval, cval = self.value_decode(value)
self.__set(key, rval, cval)
M = self[key]
class SimpleCookie(BaseCookie):
"""
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote(val), val
def value_encode(self, val):
strval = str(val)
return strval, _quote(strval)
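# Illustrative sketch (added for exposition, not part of the original module):
# setting the reserved Morsel attributes documented above on a SimpleCookie.
def _example_simple_cookie_attributes():
    cookie = SimpleCookie()
    cookie['session'] = 'abc123'
    cookie['session']['path'] = '/'
    cookie['session']['max-age'] = 3600
    cookie['session']['httponly'] = True
    # -> 'Set-Cookie: session=abc123; httponly; Max-Age=3600; Path=/'
    return cookie.output()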
| gpl-3.0 | -3,975,010,980,582,204,400 | 34.87931 | 80 | 0.533782 | false |
capoe/espressopp.soap | src/storage/DomainDecompositionNonBlocking.py | 2 | 2753 | # Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*****************************************************
**espressopp.storage.DomainDecompositionNonBlocking**
*****************************************************
.. function:: espressopp.storage.DomainDecompositionNonBlocking(system, nodeGrid, cellGrid)
:param system:
:param nodeGrid:
:param cellGrid:
:type system:
:type nodeGrid:
:type cellGrid:
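        Example (illustrative sketch, not from the original docs; the grid sizes are
        placeholder assumptions and the usual System setup is assumed already done)::
            nodeGrid = espressopp.Int3D(1, 1, 1)
            cellGrid = espressopp.Int3D(2, 2, 2)
            system.storage = espressopp.storage.DomainDecompositionNonBlocking(system, nodeGrid, cellGrid)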
"""
from espressopp import pmi
from espressopp.esutil import cxxinit
from _espressopp import storage_DomainDecomposition
from _espressopp import storage_DomainDecompositionNonBlocking
from espressopp import Int3D, toInt3DFromVector
import mpi4py.MPI as MPI
#from espressopp.storage.Storage import *
from espressopp.storage.DomainDecomposition import *
class DomainDecompositionNonBlockingLocal(DomainDecompositionLocal, storage_DomainDecompositionNonBlocking):
def __init__(self, system, nodeGrid, cellGrid):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, storage_DomainDecompositionNonBlocking, system, nodeGrid, cellGrid)
if pmi.isController:
class DomainDecompositionNonBlocking(DomainDecomposition):
pmiproxydefs = dict(
cls = 'espressopp.storage.DomainDecompositionNonBlockingLocal'
)
def __init__(self, system,
nodeGrid='auto',
cellGrid='auto'):
if nodeGrid == 'auto':
nodeGrid = Int3D(system.comm.rank, 1, 1)
else:
nodeGrid = toInt3DFromVector(nodeGrid)
if cellGrid == 'auto':
# TODO: Implement
                raise NotImplementedError('Automatic cell size calculation is not yet implemented')
else:
cellGrid = toInt3DFromVector(cellGrid)
self.next_id = 0
self.pmiinit(system, nodeGrid, cellGrid)
| gpl-3.0 | 6,599,087,145,958,169,000 | 36.712329 | 112 | 0.666182 | false |
pothosware/gnuradio | gr-vocoder/examples/g723_40_audio_loopback.py | 58 | 1477 | #!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio import blocks
from gnuradio import vocoder
def build_graph():
tb = gr.top_block()
src = audio.source(8000)
src_scale = blocks.multiply_const_ff(32767)
f2s = blocks.float_to_short()
enc = vocoder.g723_40_encode_sb()
dec = vocoder.g723_40_decode_bs()
s2f = blocks.short_to_float()
sink_scale = blocks.multiply_const_ff(1.0/32767.)
sink = audio.sink(8000)
tb.connect(src, src_scale, f2s, enc, dec, s2f, sink_scale, sink)
return tb
if __name__ == '__main__':
tb = build_graph()
tb.start()
raw_input ('Press Enter to exit: ')
tb.stop()
tb.wait()
| gpl-3.0 | -694,967,656,096,583,600 | 31.108696 | 70 | 0.702099 | false |
Lineberty/kubernetes | cluster/juju/charms/trusty/kubernetes/unit_tests/lib/test_registrator.py | 232 | 2215 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from mock import MagicMock, patch, call
from path import Path
import pytest
import sys
d = Path('__file__').parent.abspath() / 'hooks'
sys.path.insert(0, d.abspath())
from lib.registrator import Registrator
class TestRegistrator():
def setup_method(self, method):
self.r = Registrator()
def test_data_type(self):
if type(self.r.data) is not dict:
pytest.fail("Invalid type")
@patch('json.loads')
@patch('httplib.HTTPConnection')
def test_register(self, httplibmock, jsonmock):
result = self.r.register('foo', 80, '/v1/test')
httplibmock.assert_called_with('foo', 80)
requestmock = httplibmock().request
requestmock.assert_called_with(
"POST", "/v1/test",
json.dumps(self.r.data),
{"Content-type": "application/json",
"Accept": "application/json"})
def test_command_succeeded(self):
response = MagicMock()
result = json.loads('{"status": "Failure", "kind": "Status", "code": 409, "apiVersion": "v1", "reason": "AlreadyExists", "details": {"kind": "node", "name": "10.200.147.200"}, "message": "node \\"10.200.147.200\\" already exists", "creationTimestamp": null}')
response.status = 200
self.r.command_succeeded(response, result)
response.status = 500
with pytest.raises(RuntimeError):
self.r.command_succeeded(response, result)
response.status = 409
with pytest.raises(ValueError):
self.r.command_succeeded(response, result)
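# Illustrative sketch (added for exposition, not part of the original tests):
# driving Registrator directly.  The API server address, path and payload key
# are assumptions based only on the calls exercised above.
def _example_register_node(api_server='10.200.147.1', port=8080):
    r = Registrator()
    r.data['id'] = '10.200.147.200'  # hypothetical node address
    return r.register(api_server, port, '/api/v1beta1/minions')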
| apache-2.0 | -5,280,946,304,947,545,000 | 35.311475 | 267 | 0.657788 | false |
aps-sids/ansible-modules-extras | system/svc.py | 83 | 9627 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Brian Coca <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
DOCUMENTATION = '''
---
module: svc
author: "Brian Coca (@bcoca)"
version_added:
short_description: Manage daemontools services.
description:
- Controls daemontools services on remote hosts using the svc utility.
options:
name:
required: true
description:
- Name of the service to manage.
state:
required: false
    choices: [ started, stopped, restarted, killed, reloaded, once ]
description:
- C(Started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
svc (svc -t) and C(killed) will always bounce the svc (svc -k).
C(reloaded) will send a sigusr1 (svc -u).
C(once) will run a normally downed svc once (svc -o), not really
an idempotent operation.
downed:
required: false
choices: [ "yes", "no" ]
default: no
description:
      - Should a 'down' file exist or not; if it exists it disables auto startup.
        Defaults to no. Downed does not imply stopped.
enabled:
required: false
choices: [ "yes", "no" ]
description:
      - Whether the service is enabled or not; if disabled it also implies stopped.
Make note that a service can be enabled and downed (no auto restart).
service_dir:
required: false
default: /service
description:
- directory svscan watches for services
service_src:
required: false
description:
- directory where services are defined, the source of symlinks to service_dir.
'''
EXAMPLES = '''
# Example action to start svc dnscache, if not running
- svc: name=dnscache state=started
# Example action to stop svc dnscache, if running
- svc: name=dnscache state=stopped
# Example action to kill svc dnscache, in all cases
- svc : name=dnscache state=killed
# Example action to restart svc dnscache, in all cases
- svc : name=dnscache state=restarted
# Example action to reload svc dnscache, in all cases
- svc: name=dnscache state=reloaded
# Example using alt svc directory location
- svc: name=dnscache state=reloaded service_dir=/var/service
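# Illustrative addition: keep svc dnscache enabled but downed (no automatic start)
- svc: name=dnscache enabled=yes downed=yes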
'''
import platform
import shlex
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Svc(object):
"""
    Main class that handles daemontools, can be subclassed and overridden in case
we want to use a 'derivative' like encore, s6, etc
"""
#def __new__(cls, *args, **kwargs):
# return _load_dist_subclass(cls, args, kwargs)
def __init__(self, module):
self.extra_paths = [ '/command', '/usr/local/bin' ]
self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
self.module = module
self.name = module.params['name']
self.service_dir = module.params['service_dir']
self.service_src = module.params['service_src']
self.enabled = None
self.downed = None
self.full_state = None
self.state = None
self.pid = None
self.duration = None
self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths)
self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths)
self.svc_full = '/'.join([ self.service_dir, self.name ])
self.src_full = '/'.join([ self.service_src, self.name ])
self.enabled = os.path.lexists(self.svc_full)
if self.enabled:
self.downed = os.path.lexists('%s/down' % self.svc_full)
self.get_status()
else:
self.downed = os.path.lexists('%s/down' % self.src_full)
self.state = 'stopped'
def enable(self):
if os.path.exists(self.src_full):
try:
os.symlink(self.src_full, self.svc_full)
except OSError, e:
self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e))
else:
self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
def disable(self):
try:
os.unlink(self.svc_full)
except OSError, e:
self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e))
self.execute_command([self.svc_cmd,'-dx',self.src_full])
src_log = '%s/log' % self.src_full
if os.path.exists(src_log):
self.execute_command([self.svc_cmd,'-dx',src_log])
def get_status(self):
(rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])
if err is not None and err:
self.full_state = self.state = err
else:
self.full_state = out
m = re.search('\(pid (\d+)\)', out)
if m:
self.pid = m.group(1)
m = re.search('(\d+) seconds', out)
if m:
self.duration = m.group(1)
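            # Derive the state verb from svstat output: base 'start'/'stopp' gets an
            # 'ing' suffix while a transition is pending (' want '), otherwise 'ed'.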
if re.search(' up ', out):
self.state = 'start'
elif re.search(' down ', out):
self.state = 'stopp'
else:
self.state = 'unknown'
return
if re.search(' want ', out):
self.state += 'ing'
else:
self.state += 'ed'
def start(self):
return self.execute_command([self.svc_cmd, '-u', self.svc_full])
def stopp(self):
return self.stop()
def stop(self):
return self.execute_command([self.svc_cmd, '-d', self.svc_full])
def once(self):
return self.execute_command([self.svc_cmd, '-o', self.svc_full])
def reload(self):
return self.execute_command([self.svc_cmd, '-1', self.svc_full])
def restart(self):
return self.execute_command([self.svc_cmd, '-t', self.svc_full])
def kill(self):
return self.execute_command([self.svc_cmd, '-k', self.svc_full])
def execute_command(self, cmd):
try:
(rc, out, err) = self.module.run_command(' '.join(cmd))
except Exception, e:
self.module.fail_json(msg="failed to execute: %s" % str(e))
return (rc, out, err)
def report(self):
self.get_status()
states = {}
for k in self.report_vars:
states[k] = self.__dict__[k]
return states
# ===========================================
# Main control flow
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']),
enabled = dict(required=False, type='bool', choices=BOOLEANS),
downed = dict(required=False, type='bool', choices=BOOLEANS),
dist = dict(required=False, default='daemontools'),
service_dir = dict(required=False, default='/service'),
service_src = dict(required=False, default='/etc/service'),
),
supports_check_mode=True,
)
state = module.params['state']
enabled = module.params['enabled']
downed = module.params['downed']
svc = Svc(module)
changed = False
orig_state = svc.report()
if enabled is not None and enabled != svc.enabled:
changed = True
if not module.check_mode:
try:
if enabled:
svc.enable()
else:
svc.disable()
except (OSError, IOError), e:
                module.fail_json(msg="Could not change service link: %s" % str(e))
if state is not None and state != svc.state:
changed = True
if not module.check_mode:
            # 'once' has no 'ed' suffix; other states map to Svc methods by dropping 'ed'.
            getattr(svc, 'once' if state == 'once' else state[:-2])()
if downed is not None and downed != svc.downed:
changed = True
if not module.check_mode:
d_file = "%s/down" % svc.svc_full
try:
if downed:
open(d_file, "a").close()
else:
os.unlink(d_file)
except (OSError, IOError), e:
                module.fail_json(msg="Could not change downed file: %s" % (str(e)))
module.exit_json(changed=changed, svc=svc.report())
# this is magic, not normal python include
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 5,526,848,003,930,311,000 | 31.744898 | 114 | 0.575776 | false |
ArtsiomCh/tensorflow | tensorflow/contrib/data/python/kernel_tests/list_files_dataset_op_test.py | 51 | 5617 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import shutil
import tempfile
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class ListFilesDatasetOpTest(test.TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _touchTempFiles(self, filenames):
for filename in filenames:
open(path.join(self.tmp_dir, filename), 'a').close()
def testEmptyDirectory(self):
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
with self.test_session() as sess:
itr = dataset.make_one_shot_iterator()
with self.assertRaises(errors.OutOfRangeError):
sess.run(itr.get_next())
def testSimpleDirectory(self):
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
with self.test_session() as sess:
itr = dataset.make_one_shot_iterator()
full_filenames = []
produced_filenames = []
for filename in filenames:
full_filenames.append(
compat.as_bytes(path.join(self.tmp_dir, filename)))
produced_filenames.append(compat.as_bytes(sess.run(itr.get_next())))
self.assertItemsEqual(full_filenames, produced_filenames)
with self.assertRaises(errors.OutOfRangeError):
sess.run(itr.get_next())
def testEmptyDirectoryInitializer(self):
filename_placeholder = array_ops.placeholder(dtypes.string, shape=[])
dataset = dataset_ops.Dataset.list_files(filename_placeholder)
with self.test_session() as sess:
itr = dataset.make_initializable_iterator()
sess.run(
itr.initializer,
feed_dict={filename_placeholder: path.join(self.tmp_dir, '*')})
with self.assertRaises(errors.OutOfRangeError):
sess.run(itr.get_next())
def testSimpleDirectoryInitializer(self):
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
filename_placeholder = array_ops.placeholder(dtypes.string, shape=[])
dataset = dataset_ops.Dataset.list_files(filename_placeholder)
with self.test_session() as sess:
itr = dataset.make_initializable_iterator()
sess.run(
itr.initializer,
feed_dict={filename_placeholder: path.join(self.tmp_dir, '*')})
full_filenames = []
produced_filenames = []
for filename in filenames:
full_filenames.append(
compat.as_bytes(path.join(self.tmp_dir, filename)))
produced_filenames.append(compat.as_bytes(sess.run(itr.get_next())))
self.assertItemsEqual(full_filenames, produced_filenames)
with self.assertRaises(errors.OutOfRangeError):
sess.run(itr.get_next())
def testFileSuffixes(self):
filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
self._touchTempFiles(filenames)
filename_placeholder = array_ops.placeholder(dtypes.string, shape=[])
dataset = dataset_ops.Dataset.list_files(filename_placeholder)
with self.test_session() as sess:
itr = dataset.make_initializable_iterator()
sess.run(
itr.initializer,
feed_dict={filename_placeholder: path.join(self.tmp_dir, '*.py')})
full_filenames = []
produced_filenames = []
for filename in filenames[1:-1]:
full_filenames.append(
compat.as_bytes(path.join(self.tmp_dir, filename)))
produced_filenames.append(compat.as_bytes(sess.run(itr.get_next())))
self.assertItemsEqual(full_filenames, produced_filenames)
with self.assertRaises(errors.OutOfRangeError):
sess.run(itr.get_next())
def testFileMiddles(self):
filenames = ['a.txt', 'b.py', 'c.pyc']
self._touchTempFiles(filenames)
filename_placeholder = array_ops.placeholder(dtypes.string, shape=[])
dataset = dataset_ops.Dataset.list_files(filename_placeholder)
with self.test_session() as sess:
itr = dataset.make_initializable_iterator()
sess.run(
itr.initializer,
feed_dict={filename_placeholder: path.join(self.tmp_dir, '*.py*')})
full_filenames = []
produced_filenames = []
for filename in filenames[1:]:
full_filenames.append(
compat.as_bytes(path.join(self.tmp_dir, filename)))
produced_filenames.append(compat.as_bytes(sess.run(itr.get_next())))
self.assertItemsEqual(full_filenames, produced_filenames)
with self.assertRaises(errors.OutOfRangeError):
sess.run(itr.get_next())
if __name__ == '__main__':
test.main()
| apache-2.0 | -3,929,717,069,308,672,000 | 34.327044 | 80 | 0.676696 | false |
RasPlex/plex-home-theatre | plex/Third-Party/gtest/scripts/upload.py | 2511 | 51024 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
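# Illustrative invocations (hypothetical message/revision/server values, for orientation only):
#   upload.py -m "Fix parser crash" --rev=HEAD^      # Git: diff against the parent commit
#   upload.py -s codereview.example.com -- -x -b     # pass '-x -b' through to 'svn diff'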
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={},
save_cookies=False):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
"""
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("http://%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response and directs us to
authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401:
self._Authenticate()
## elif e.code >= 500 and e.code < 600:
## # Server Error - try again.
## continue
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
      # Don't save cookies across runs of upload.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
dest="assume_yes", default=False,
help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
default="codereview.appspot.com",
metavar="SERVER",
help=("The server to upload to. The format is host[:port]. "
"Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
metavar="DESCRIPTION", default=None,
help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
dest="description_file", metavar="DESCRIPTION_FILE",
default=None,
help="Optional path of a file that contains "
"the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
metavar="REVIEWERS", default=None,
help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
metavar="CC", default=None,
help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
metavar="MESSAGE", default=None,
help="A message to identify the patch. "
"Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
metavar="ISSUE", default=None,
help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
dest="download_base", default=False,
help="Base files will be downloaded by the server "
"(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
metavar="REV", default=None,
help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
dest="send_mail", default=False,
help="Send notification email to reviewers.")
def GetRpcServer(options):
"""Returns an instance of an AbstractRpcServer.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
def GetUserCredentials():
"""Prompts the user for a username and password."""
email = options.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % options.server)
password = getpass.getpass("Password for %s: " % email)
return (email, password)
# If this is the dev_appserver, use fake authentication.
host = (options.host or options.server).lower()
if host == "localhost" or host.startswith("localhost:"):
email = options.email
if email is None:
email = "[email protected]"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
options.server,
lambda: (email, "password"),
host_override=options.host,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=options.save_cookies)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
return rpc_server_class(options.server, GetUserCredentials,
host_override=options.host,
save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
for (key, filename, value) in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
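# Illustrative call (hypothetical field/file values), showing the shape of the result:
#   ctype, body = EncodeMultipartFormData([("subject", "demo")],
#                                         [("data", "data.diff", "Index: foo.py\n...")])
#   ctype is then 'multipart/form-data; boundary=-M-A-G-I-C---B-O-U-N-D-A-R-Y-'.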
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True):
"""Executes a command and returns the output from stdout and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (output, return code)
"""
logging.info("Running %s", command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False):
data, retcode = RunShellWithReturnCode(command, print_output,
universal_newlines)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
      prompt = "Are you sure you want to continue? (y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5.new(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
UploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
self.svnls_cache = {}
# SVN base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns the SVN base URL.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
info = RunShell(["svn", "info"])
for line in info.splitlines():
words = line.split()
if len(words) == 2 and words[0] == "URL:":
url = words[1]
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
username, netloc = urllib.splituser(netloc)
if username:
logging.info("Removed username from base URL")
if netloc.endswith("svn.python.org"):
if netloc == "svn.python.org":
if path.startswith("/projects/"):
path = path[9:]
elif netloc != "[email protected]":
ErrorExit("Unrecognized Python URL: %s" % url)
base = "http://svn.python.org/view/*checkout*%s/" % path
logging.info("Guessed Python base = %s", base)
elif netloc.endswith("svn.collab.net"):
if path.startswith("/repos/"):
path = path[6:]
base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
logging.info("Guessed CollabNet base = %s", base)
elif netloc.endswith(".googlecode.com"):
path = path + "/"
base = urlparse.urlunparse(("http", netloc, path, params,
query, fragment))
logging.info("Guessed Google Code base = %s", base)
else:
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
logging.info("Guessed base = %s", base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
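  # Illustrative behavior (hypothetical input): with keyword_str "Id", an expanded
  # "$Id: foo.py 123 2009-01-01 author $" collapses back to "$Id$", so the base file
  # fetched via "svn cat" lines up with the unexpanded keywords in "svn diff" output.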
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision:
status = RunShell(["svn", "status", "--ignore-externals", filename])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to get status for %s." % filename)
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
cmd = args + [dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
if relfilename in old_files and relfilename not in new_files:
status = "D "
elif relfilename in old_files and relfilename in new_files:
status = "M "
else:
status = "A "
return status
def GetBaseFile(self, filename):
status = self.GetStatus(filename)
base_content = None
new_content = None
# If a file is copied its status will be "A +", which signifies
# "addition-with-history". See "svn st" for more information. We need to
# upload the original file or else diff parsing will fail if the file was
# edited.
if status[0] == "A" and status[3] != "+":
# We'll need to upload the new content if we're adding a binary file
# since diff's output won't contain it.
mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
silent_ok=True)
base_content = ""
is_binary = mimetype and not mimetype.startswith("text/")
if is_binary and self.IsImage(filename):
new_content = self.ReadFile(filename)
elif (status[0] in ("M", "D", "R") or
(status[0] == "A" and status[3] == "+") or # Copied file.
(status[0] == " " and status[1] == "M")): # Property change.
args = []
if self.options.revision:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
# Don't change filename, it's needed later.
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
mimetype, returncode = RunShellWithReturnCode(cmd)
if returncode:
# File does not exist in the requested revision.
# Reset mimetype, it contains an error message.
mimetype = ""
get_base = False
is_binary = mimetype and not mimetype.startswith("text/")
if status[0] == " ":
# Empty base content just to force an upload.
base_content = ""
elif is_binary:
if self.IsImage(filename):
get_base = True
if status[0] == "M":
if not self.rev_end:
new_content = self.ReadFile(filename)
else:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
new_content = RunShell(["svn", "cat", url],
universal_newlines=True, silent_ok=True)
else:
base_content = ""
else:
get_base = True
if get_base:
if is_binary:
universal_newlines = False
else:
universal_newlines = True
if self.rev_start:
# "svn cat -r REV delete_file.txt" doesn't work. cat requires
# the full URL with "@REV" appended instead of using "-r" option.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
else:
base_content = RunShell(["svn", "cat", filename],
universal_newlines=universal_newlines,
silent_ok=True)
if not is_binary:
args = []
if self.rev_start:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:keywords", url]
keywords, returncode = RunShellWithReturnCode(cmd)
if keywords and not returncode:
base_content = self._CollapseKeywords(base_content, keywords)
else:
StatusUpdate("svn status returned unexpected output: %s" % status)
sys.exit(1)
return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Git."""
def __init__(self, options):
super(GitVCS, self).__init__(options)
# Map of filename -> hash of base file.
self.base_hashes = {}
def GenerateDiff(self, extra_args):
# This is more complicated than svn's GenerateDiff because we must convert
# the diff output to include an svn-style "Index:" line as well as record
# the hashes of the base files, so we can upload them along with our diff.
if self.options.revision:
extra_args = [self.options.revision] + extra_args
gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
svndiff = []
filecount = 0
filename = None
for line in gitdiff.splitlines():
match = re.match(r"diff --git a/(.*) b/.*$", line)
if match:
filecount += 1
filename = match.group(1)
svndiff.append("Index: %s\n" % filename)
else:
# The "index" line in a git diff looks like this (long hashes elided):
# index 82c0d44..b2cee3f 100755
# We want to save the left hash, as that identifies the base file.
match = re.match(r"index (\w+)\.\.", line)
if match:
self.base_hashes[filename] = match.group(1)
svndiff.append(line + "\n")
if not filecount:
ErrorExit("No valid patches found in output from git diff")
return "".join(svndiff)
def GetUnknownFiles(self):
status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
silent_ok=True)
return status.splitlines()
def GetBaseFile(self, filename):
hash = self.base_hashes[filename]
base_content = None
new_content = None
is_binary = False
if hash == "0" * 40: # All-zero hash indicates no base file.
status = "A"
base_content = ""
else:
status = "M"
base_content, returncode = RunShellWithReturnCode(["git", "show", hash])
if returncode:
ErrorExit("Got error status from 'git show %s'" % hash)
return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, repo_dir):
super(MercurialVCS, self).__init__(options)
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo_dir)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
assert filename.startswith(self.subdir), filename
return filename[len(self.subdir):].lstrip(r"\/")
def GenerateDiff(self, extra_args):
# If no file specified, restrict to the current subdir
extra_args = extra_args or ["."]
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
# Modify line to make it look like as it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def GetBaseFile(self, filename):
# "hg status" and "hg cat" both take a path relative to the current subdir
# rather than to the repo root, but "hg diff" has given us the full path
# to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
# "hg status -C" returns two lines for moved/copied files, one otherwise
out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
out = out.splitlines()
# HACK: strip error message about missing file/directory if it isn't in
# the working copy
if out[0].startswith('%s: ' % relpath):
out = out[1:]
if len(out) > 1:
# Moved/copied => considered as modified, use old filename to
# retrieve base contents
oldrelpath = out[1].strip()
status = "M"
else:
status, _ = out[0].split(' ', 1)
if status != "A":
base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
silent_ok=True)
is_binary = "\0" in base_content # Mercurial's heuristic
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or "\0" in new_content
if is_binary and base_content:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary or not self.IsImage(relpath):
new_content = None
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
# When a file is modified, paths use '/' between directories, however
# when a property is modified '\' is used on Windows. Make them the same
# otherwise the file shows up twice.
temp_filename = temp_filename.strip().replace('\\', '/')
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
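# Illustrative example (hypothetical diff text): for input beginning with
# "Index: a.py\n--- a.py\n+++ a.py\n...", SplitPatch returns [("a.py", <that text>)],
# i.e. one (filename, diff) tuple per "Index:"/"Property changes on:" section.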
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
def GuessVCS(options):
"""Helper to guess the version control system.
This examines the current directory, guesses which VersionControlSystem
we're using, and returns an instance of the appropriate class. Exit with an
error if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
try:
out, returncode = RunShellWithReturnCode(["hg", "root"])
if returncode == 0:
return MercurialVCS(options, out.strip())
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have hg installed.
raise
# Subversion has a .svn in all working directories.
if os.path.isdir('.svn'):
logging.info("Guessed VCS = Subversion")
return SubversionVCS(options)
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
try:
out, returncode = RunShellWithReturnCode(["git", "rev-parse",
"--is-inside-work-tree"])
if returncode == 0:
return GitVCS(options)
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have git installed.
raise
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
options, args = parser.parse_args(argv[1:])
global verbosity
verbosity = options.verbose
if verbosity >= 3:
logging.getLogger().setLevel(logging.DEBUG)
elif verbosity >= 2:
logging.getLogger().setLevel(logging.INFO)
vcs = GuessVCS(options)
if isinstance(vcs, SubversionVCS):
# base field is only allowed for Subversion.
# Note: Fetching base files may become deprecated in future releases.
base = vcs.GuessBase(options.download_base)
else:
base = None
if not base and options.download_base:
options.download_base = True
logging.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
files = vcs.GetBaseFiles(data)
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.issue:
prompt = "Message describing this patch set: "
else:
prompt = "New issue subject: "
message = options.message or raw_input(prompt).strip()
if not message:
ErrorExit("A non-empty message is required")
rpc_server = GetRpcServer(options)
form_fields = [("subject", message)]
if base:
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
ErrorExit("Invalid email address: %s" % reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
if "@" in cc and not cc.split("@")[1].count(".") == 1:
ErrorExit("Invalid email address: %s" % cc)
form_fields.append(("cc", options.cc))
description = options.description
if options.description_file:
if options.description:
ErrorExit("Can't specify description and description_file")
file = open(options.description_file, 'r')
description = file.read()
file.close()
if description:
form_fields.append(("description", description))
  # Send a hash of all the base files so the server can determine if a copy
  # already exists in an earlier patchset.
base_hashes = ""
for file, info in files.iteritems():
if not info[0] is None:
checksum = md5.new(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + file
form_fields.append(("base_hashes", base_hashes))
# If we're uploading base files, don't send the email before the uploads, so
# that it contains the file status.
if options.send_mail and options.download_base:
form_fields.append(("send_mail", "1"))
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
if options.send_mail:
rpc_server.Send("/" + issue + "/mail", payload="")
return issue, patchset
def main():
try:
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main()
| gpl-2.0 | 3,042,281,864,596,984,000 | 35.787311 | 80 | 0.619728 | false |
MattDevo/edk2 | AppPkg/Applications/Python/Python-2.7.2/Tools/bgen/bgen/macsupport.py | 6 | 7828 | """\
Augment the "bgen" package with definitions that are useful on the Apple Macintosh.
Intended usage is "from macsupport import *" -- this implies all bgen's goodies.
"""
# Import everything from bgen (for ourselves as well as for re-export)
from bgen import *
# Simple types
Boolean = Type("Boolean", "b")
SignedByte = Type("SignedByte", "b")
Size = Type("Size", "l")
Style = Type("Style", "b")
StyleParameter = Type("StyleParameter", "h")
CharParameter = Type("CharParameter", "h")
TextEncoding = Type("TextEncoding", "l")
ByteCount = Type("ByteCount", "l")
Duration = Type("Duration", "l")
ByteOffset = Type("ByteOffset", "l")
OptionBits = Type("OptionBits", "l")
ItemCount = Type("ItemCount", "l")
PBVersion = Type("PBVersion", "l")
ScriptCode = Type("ScriptCode", "h")
LangCode = Type("LangCode", "h")
RegionCode = Type("RegionCode", "h")
UInt8 = Type("UInt8", "b")
SInt8 = Type("SInt8", "b")
UInt16 = Type("UInt16", "H")
SInt16 = Type("SInt16", "h")
UInt32 = Type("UInt32", "l")
SInt32 = Type("SInt32", "l")
Float32 = Type("Float32", "f")
wide = OpaqueByValueType("wide", "PyMac_Buildwide", "PyMac_Getwide")
wide_ptr = OpaqueType("wide", "PyMac_Buildwide", "PyMac_Getwide")
# Pascal strings
ConstStr255Param = OpaqueArrayType("Str255", "PyMac_BuildStr255", "PyMac_GetStr255")
Str255 = OpaqueArrayType("Str255", "PyMac_BuildStr255", "PyMac_GetStr255")
StringPtr = OpaqueByValueType("StringPtr", "PyMac_BuildStr255", "PyMac_GetStr255")
ConstStringPtr = StringPtr
# File System Specifications
FSSpec_ptr = OpaqueType("FSSpec", "PyMac_BuildFSSpec", "PyMac_GetFSSpec")
FSSpec = OpaqueByValueStructType("FSSpec", "PyMac_BuildFSSpec", "PyMac_GetFSSpec")
FSRef_ptr = OpaqueType("FSRef", "PyMac_BuildFSRef", "PyMac_GetFSRef")
FSRef = OpaqueByValueStructType("FSRef", "PyMac_BuildFSRef", "PyMac_GetFSRef")
# OSType and ResType: 4-byte character strings
def OSTypeType(typename):
return OpaqueByValueType(typename, "PyMac_BuildOSType", "PyMac_GetOSType")
OSType = OSTypeType("OSType")
ResType = OSTypeType("ResType")
FourCharCode = OSTypeType("FourCharCode")
# Version numbers
NumVersion = OpaqueByValueType("NumVersion", "PyMac_BuildNumVersion", "BUG")
# Handles (always resources in our case)
Handle = OpaqueByValueType("Handle", "ResObj")
MenuHandle = OpaqueByValueType("MenuHandle", "MenuObj")
MenuRef = MenuHandle
ControlHandle = OpaqueByValueType("ControlHandle", "CtlObj")
ControlRef = ControlHandle
# Windows and Dialogs
WindowPtr = OpaqueByValueType("WindowPtr", "WinObj")
WindowRef = WindowPtr
DialogPtr = OpaqueByValueType("DialogPtr", "DlgObj")
DialogRef = DialogPtr
ExistingWindowPtr = OpaqueByValueType("WindowPtr", "WinObj_WhichWindow", "BUG")
ExistingDialogPtr = OpaqueByValueType("DialogPtr", "DlgObj_WhichDialog", "BUG")
# NULL pointer passed in as optional storage -- not present in Python version
NullStorage = FakeType("(void *)0")
# More standard datatypes
Fixed = OpaqueByValueType("Fixed", "PyMac_BuildFixed", "PyMac_GetFixed")
# Quickdraw data types
Rect = Rect_ptr = OpaqueType("Rect", "PyMac_BuildRect", "PyMac_GetRect")
Point = OpaqueByValueType("Point", "PyMac_BuildPoint", "PyMac_GetPoint")
Point_ptr = OpaqueType("Point", "PyMac_BuildPoint", "PyMac_GetPoint")
# Event records
EventRecord = OpaqueType("EventRecord", "PyMac_BuildEventRecord", "PyMac_GetEventRecord")
EventRecord_ptr = EventRecord
# CoreFoundation datatypes
CFTypeRef = OpaqueByValueType("CFTypeRef", "CFTypeRefObj")
CFStringRef = OpaqueByValueType("CFStringRef", "CFStringRefObj")
CFMutableStringRef = OpaqueByValueType("CFMutableStringRef", "CFMutableStringRefObj")
CFArrayRef = OpaqueByValueType("CFArrayRef", "CFArrayRefObj")
CFMutableArrayRef = OpaqueByValueType("CFMutableArrayRef", "CFMutableArrayRefObj")
CFDictionaryRef = OpaqueByValueType("CFDictionaryRef", "CFDictionaryRefObj")
CFMutableDictionaryRef = OpaqueByValueType("CFMutableDictionaryRef", "CFMutableDictionaryRefObj")
CFURLRef = OpaqueByValueType("CFURLRef", "CFURLRefObj")
OptionalCFURLRef = OpaqueByValueType("CFURLRef", "OptionalCFURLRefObj")
# OSErr is special because it is turned into an exception
# (Could do this with less code using a variant of mkvalue("O&")?)
class OSErrType(Type):
def errorCheck(self, name):
Output("if (%s != noErr) return PyMac_Error(%s);", name, name)
self.used = 1
OSErr = OSErrType("OSErr", 'h')
OSStatus = OSErrType("OSStatus", 'l')
# Various buffer types
InBuffer = VarInputBufferType('char', 'long', 'l') # (buf, len)
UcharInBuffer = VarInputBufferType('unsigned char', 'long', 'l') # (buf, len)
OptionalInBuffer = OptionalVarInputBufferType('char', 'long', 'l') # (buf, len)
InOutBuffer = HeapInputOutputBufferType('char', 'long', 'l') # (inbuf, outbuf, len)
VarInOutBuffer = VarHeapInputOutputBufferType('char', 'long', 'l') # (inbuf, outbuf, &len)
OutBuffer = HeapOutputBufferType('char', 'long', 'l') # (buf, len)
VarOutBuffer = VarHeapOutputBufferType('char', 'long', 'l') # (buf, &len)
VarVarOutBuffer = VarVarHeapOutputBufferType('char', 'long', 'l') # (buf, len, &len)
# Unicode arguments sometimes have reversed len, buffer (don't understand why Apple did this...)
class VarUnicodeInputBufferType(VarInputBufferType):
def getargsFormat(self):
return "u#"
class VarUnicodeReverseInputBufferType(ReverseInputBufferMixin, VarUnicodeInputBufferType):
pass
UnicodeInBuffer = VarUnicodeInputBufferType('UniChar', 'UniCharCount', 'l')
UnicodeReverseInBuffer = VarUnicodeReverseInputBufferType('UniChar', 'UniCharCount', 'l')
UniChar_ptr = InputOnlyType("UniCharPtr", "u")
# Predefine various pieces of program text to be passed to Module() later:
# Stuff added immediately after the system include files
includestuff = """
#include "pymactoolbox.h"
/* Macro to test whether a weak-loaded CFM function exists */
#define PyMac_PRECHECK(rtn) do { if ( &rtn == NULL ) {\\
PyErr_SetString(PyExc_NotImplementedError, \\
"Not available in this shared library/OS version"); \\
return NULL; \\
}} while(0)
"""
# Stuff added just before the module's init function
finalstuff = """
"""
# Stuff added inside the module's init function
initstuff = """
"""
# Generator classes with a twist -- if the function returns OSErr,
# its mode is manipulated so that it turns into an exception or disappears
# (and its name is changed to _err, for documentation purposes).
# This requires that the OSErr type (defined above) has a non-trivial
# errorCheck method.
class OSErrMixIn:
"Mix-in class to treat OSErr/OSStatus return values special"
def makereturnvar(self):
if self.returntype.__class__ == OSErrType:
return Variable(self.returntype, "_err", ErrorMode)
else:
return Variable(self.returntype, "_rv", OutMode)
class OSErrFunctionGenerator(OSErrMixIn, FunctionGenerator): pass
class OSErrMethodGenerator(OSErrMixIn, MethodGenerator): pass
class WeakLinkMixIn:
"Mix-in to test the function actually exists (!= NULL) before calling"
def precheck(self):
Output('#ifndef %s', self.name)
Output('PyMac_PRECHECK(%s);', self.name)
Output('#endif')
class WeakLinkFunctionGenerator(WeakLinkMixIn, FunctionGenerator): pass
class WeakLinkMethodGenerator(WeakLinkMixIn, MethodGenerator): pass
class OSErrWeakLinkFunctionGenerator(OSErrMixIn, WeakLinkMixIn, FunctionGenerator): pass
class OSErrWeakLinkMethodGenerator(OSErrMixIn, WeakLinkMixIn, MethodGenerator): pass
class MacModule(Module):
"Subclass which gets the exception initializer from macglue.c"
def exceptionInitializer(self):
return "PyMac_GetOSErrException()"
| bsd-2-clause | 9,070,596,239,462,261,000 | 37.736041 | 97 | 0.716275 | false |
BernhardDenner/libelektra | src/bindings/swig/python/tests/test_kdb.py | 2 | 1614 | import kdb, unittest
TEST_NS = "user/tests/swig_py3"
class Constants(unittest.TestCase):
def setUp(self):
pass
def test_kdbconfig_h(self):
self.assertIsInstance(kdb.DB_SYSTEM, str)
self.assertIsInstance(kdb.DB_USER, str)
self.assertIsInstance(kdb.DB_HOME, str)
self.assertIsInstance(kdb.DEBUG, int)
def test_kdb_h(self):
self.assertIsInstance(kdb.VERSION, str)
self.assertIsInstance(kdb.VERSION_MAJOR, int)
self.assertIsInstance(kdb.VERSION_MINOR, int)
self.assertIsInstance(kdb.VERSION_MICRO, int)
self.assertIsNone(kdb.KS_END)
class KDB(unittest.TestCase):
def test_ctor(self):
self.assertIsInstance(kdb.KDB(), kdb.KDB)
error = kdb.Key()
self.assertIsInstance(kdb.KDB(error), kdb.KDB)
def test_get(self):
with kdb.KDB() as db:
ks = kdb.KeySet()
db.get(ks, "system/elektra")
import os
if os.getenv("CHECK_VERSION") is None:
key = ks["system/elektra/version/constants/KDB_VERSION"]
self.assertEqual(key.value, kdb.VERSION)
def test_set(self):
with kdb.KDB() as db:
ks = kdb.KeySet(100)
db.get(ks, TEST_NS)
try:
key = ks[TEST_NS + "/mykey"]
except KeyError:
key = kdb.Key(TEST_NS + "/mykey")
ks.append(key)
key.value = "new_value"
db.set(ks, TEST_NS)
with kdb.KDB() as db:
ks = kdb.KeySet(100)
db.get(ks, TEST_NS)
self.assertEqual(ks[TEST_NS + "/mykey"].value, "new_value")
@classmethod
def tearDownClass(cls):
# cleanup
with kdb.KDB() as db:
ks = kdb.KeySet(100)
db.get(ks, TEST_NS)
ks.cut(kdb.Key(TEST_NS))
db.set(ks, TEST_NS)
if __name__ == '__main__':
unittest.main()
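# --- Added note (not part of the original test module) ---
# A minimal sketch of how this suite is typically invoked, assuming the
# Elektra Python bindings are installed and a writable key database exists:
#   python test_kdb.py
# Setting the CHECK_VERSION environment variable skips the strict comparison
# against kdb.VERSION in KDB.test_get.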
| bsd-3-clause | 3,109,756,360,589,687,000 | 23.089552 | 62 | 0.664188 | false |
xodus7/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/inline.py | 32 | 6367 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inline bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Inline",
]
class Inline(bijector.Bijector):
"""Bijector constructed from custom callables.
Example Use:
```python
exp = Inline(
forward_fn=tf.exp,
inverse_fn=tf.log,
inverse_log_det_jacobian_fn=(
lambda y: -tf.reduce_sum(tf.log(y), axis=-1)),
name="exp")
```
The above example is equivalent to the `Bijector` `Exp()`.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
forward_fn=None,
inverse_fn=None,
inverse_log_det_jacobian_fn=None,
forward_log_det_jacobian_fn=None,
forward_event_shape_fn=None,
forward_event_shape_tensor_fn=None,
inverse_event_shape_fn=None,
inverse_event_shape_tensor_fn=None,
is_constant_jacobian=False,
validate_args=False,
forward_min_event_ndims=None,
inverse_min_event_ndims=None,
name="inline"):
"""Creates a `Bijector` from callables.
Args:
forward_fn: Python callable implementing the forward transformation.
inverse_fn: Python callable implementing the inverse transformation.
inverse_log_det_jacobian_fn: Python callable implementing the
log o det o jacobian of the inverse transformation.
forward_log_det_jacobian_fn: Python callable implementing the
log o det o jacobian of the forward transformation.
forward_event_shape_fn: Python callable implementing non-identical
static event shape changes. Default: shape is assumed unchanged.
forward_event_shape_tensor_fn: Python callable implementing non-identical
event shape changes. Default: shape is assumed unchanged.
inverse_event_shape_fn: Python callable implementing non-identical
static event shape changes. Default: shape is assumed unchanged.
inverse_event_shape_tensor_fn: Python callable implementing non-identical
event shape changes. Default: shape is assumed unchanged.
is_constant_jacobian: Python `bool` indicating that the Jacobian is
constant for all input arguments.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
forward_min_event_ndims: Python `int` indicating the minimal
dimensionality this bijector acts on.
inverse_min_event_ndims: Python `int` indicating the minimal
dimensionality this bijector acts on.
name: Python `str`, name given to ops managed by this object.
"""
super(Inline, self).__init__(
forward_min_event_ndims=forward_min_event_ndims,
inverse_min_event_ndims=inverse_min_event_ndims,
is_constant_jacobian=is_constant_jacobian,
validate_args=validate_args,
name=name)
self._forward_fn = forward_fn
self._inverse_fn = inverse_fn
self._inverse_log_det_jacobian_fn = inverse_log_det_jacobian_fn
self._forward_log_det_jacobian_fn = forward_log_det_jacobian_fn
self._forward_event_shape_fn = forward_event_shape_fn
self._forward_event_shape_tensor_fn = forward_event_shape_tensor_fn
self._inverse_event_shape_fn = inverse_event_shape_fn
self._inverse_event_shape_tensor_fn = inverse_event_shape_tensor_fn
def _forward_event_shape(self, input_shape):
if self._forward_event_shape_fn is None:
# By default assume shape doesn't change.
return input_shape
return self._forward_event_shape_fn(input_shape)
def _forward_event_shape_tensor(self, input_shape):
if self._forward_event_shape_tensor_fn is None:
# By default assume shape doesn't change.
return input_shape
return self._forward_event_shape_tensor_fn(input_shape)
def _inverse_event_shape(self, output_shape):
if self._inverse_event_shape_fn is None:
# By default assume shape doesn't change.
return output_shape
return self._inverse_event_shape_fn(output_shape)
def _inverse_event_shape_tensor(self, output_shape):
if self._inverse_event_shape_tensor_fn is None:
# By default assume shape doesn't change.
return output_shape
return self._inverse_event_shape_tensor_fn(output_shape)
def _forward(self, x, **kwargs):
if not callable(self._forward_fn):
raise NotImplementedError(
"forward_fn is not a callable function.")
return self._forward_fn(x, **kwargs)
def _inverse(self, y, **kwargs):
if not callable(self._inverse_fn):
raise NotImplementedError(
"inverse_fn is not a callable function.")
return self._inverse_fn(y, **kwargs)
def _inverse_log_det_jacobian(self, y, **kwargs):
if not callable(self._inverse_log_det_jacobian_fn):
raise NotImplementedError(
"inverse_log_det_jacobian_fn is not a callable function.")
return self._inverse_log_det_jacobian_fn(y, **kwargs)
def _forward_log_det_jacobian(self, x, **kwargs):
if not callable(self._forward_log_det_jacobian_fn):
raise NotImplementedError(
"forward_log_det_jacobian_fn is not a callable function.")
return self._forward_log_det_jacobian_fn(x, **kwargs)
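# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal, hedged example of wiring Inline into an elementwise exp/log
# bijector, assuming a TensorFlow 1.x runtime where tf.contrib is available.
# forward_min_event_ndims=1 is an assumption chosen to match the axis=-1
# reduction in the inverse log-det-Jacobian below.
if __name__ == "__main__":
  import tensorflow as tf

  exp_bijector = Inline(
      forward_fn=tf.exp,
      inverse_fn=tf.log,
      inverse_log_det_jacobian_fn=(
          lambda y: -tf.reduce_sum(tf.log(y), axis=-1)),
      forward_min_event_ndims=1,
      name="exp")
  x = tf.constant([[0.0, 1.0, 2.0]])
  with tf.Session() as sess:
    # forward() simply dispatches to forward_fn, i.e. tf.exp here.
    print(sess.run(exp_bijector.forward(x)))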
| apache-2.0 | -2,422,619,218,310,176,000 | 39.55414 | 80 | 0.678499 | false |
Elandril/SickRage | lib/unidecode/x0b4.py | 253 | 4762 | data = (
'dwaen', # 0x00
'dwaenj', # 0x01
'dwaenh', # 0x02
'dwaed', # 0x03
'dwael', # 0x04
'dwaelg', # 0x05
'dwaelm', # 0x06
'dwaelb', # 0x07
'dwaels', # 0x08
'dwaelt', # 0x09
'dwaelp', # 0x0a
'dwaelh', # 0x0b
'dwaem', # 0x0c
'dwaeb', # 0x0d
'dwaebs', # 0x0e
'dwaes', # 0x0f
'dwaess', # 0x10
'dwaeng', # 0x11
'dwaej', # 0x12
'dwaec', # 0x13
'dwaek', # 0x14
'dwaet', # 0x15
'dwaep', # 0x16
'dwaeh', # 0x17
'doe', # 0x18
'doeg', # 0x19
'doegg', # 0x1a
'doegs', # 0x1b
'doen', # 0x1c
'doenj', # 0x1d
'doenh', # 0x1e
'doed', # 0x1f
'doel', # 0x20
'doelg', # 0x21
'doelm', # 0x22
'doelb', # 0x23
'doels', # 0x24
'doelt', # 0x25
'doelp', # 0x26
'doelh', # 0x27
'doem', # 0x28
'doeb', # 0x29
'doebs', # 0x2a
'does', # 0x2b
'doess', # 0x2c
'doeng', # 0x2d
'doej', # 0x2e
'doec', # 0x2f
'doek', # 0x30
'doet', # 0x31
'doep', # 0x32
'doeh', # 0x33
'dyo', # 0x34
'dyog', # 0x35
'dyogg', # 0x36
'dyogs', # 0x37
'dyon', # 0x38
'dyonj', # 0x39
'dyonh', # 0x3a
'dyod', # 0x3b
'dyol', # 0x3c
'dyolg', # 0x3d
'dyolm', # 0x3e
'dyolb', # 0x3f
'dyols', # 0x40
'dyolt', # 0x41
'dyolp', # 0x42
'dyolh', # 0x43
'dyom', # 0x44
'dyob', # 0x45
'dyobs', # 0x46
'dyos', # 0x47
'dyoss', # 0x48
'dyong', # 0x49
'dyoj', # 0x4a
'dyoc', # 0x4b
'dyok', # 0x4c
'dyot', # 0x4d
'dyop', # 0x4e
'dyoh', # 0x4f
'du', # 0x50
'dug', # 0x51
'dugg', # 0x52
'dugs', # 0x53
'dun', # 0x54
'dunj', # 0x55
'dunh', # 0x56
'dud', # 0x57
'dul', # 0x58
'dulg', # 0x59
'dulm', # 0x5a
'dulb', # 0x5b
'duls', # 0x5c
'dult', # 0x5d
'dulp', # 0x5e
'dulh', # 0x5f
'dum', # 0x60
'dub', # 0x61
'dubs', # 0x62
'dus', # 0x63
'duss', # 0x64
'dung', # 0x65
'duj', # 0x66
'duc', # 0x67
'duk', # 0x68
'dut', # 0x69
'dup', # 0x6a
'duh', # 0x6b
'dweo', # 0x6c
'dweog', # 0x6d
'dweogg', # 0x6e
'dweogs', # 0x6f
'dweon', # 0x70
'dweonj', # 0x71
'dweonh', # 0x72
'dweod', # 0x73
'dweol', # 0x74
'dweolg', # 0x75
'dweolm', # 0x76
'dweolb', # 0x77
'dweols', # 0x78
'dweolt', # 0x79
'dweolp', # 0x7a
'dweolh', # 0x7b
'dweom', # 0x7c
'dweob', # 0x7d
'dweobs', # 0x7e
'dweos', # 0x7f
'dweoss', # 0x80
'dweong', # 0x81
'dweoj', # 0x82
'dweoc', # 0x83
'dweok', # 0x84
'dweot', # 0x85
'dweop', # 0x86
'dweoh', # 0x87
'dwe', # 0x88
'dweg', # 0x89
'dwegg', # 0x8a
'dwegs', # 0x8b
'dwen', # 0x8c
'dwenj', # 0x8d
'dwenh', # 0x8e
'dwed', # 0x8f
'dwel', # 0x90
'dwelg', # 0x91
'dwelm', # 0x92
'dwelb', # 0x93
'dwels', # 0x94
'dwelt', # 0x95
'dwelp', # 0x96
'dwelh', # 0x97
'dwem', # 0x98
'dweb', # 0x99
'dwebs', # 0x9a
'dwes', # 0x9b
'dwess', # 0x9c
'dweng', # 0x9d
'dwej', # 0x9e
'dwec', # 0x9f
'dwek', # 0xa0
'dwet', # 0xa1
'dwep', # 0xa2
'dweh', # 0xa3
'dwi', # 0xa4
'dwig', # 0xa5
'dwigg', # 0xa6
'dwigs', # 0xa7
'dwin', # 0xa8
'dwinj', # 0xa9
'dwinh', # 0xaa
'dwid', # 0xab
'dwil', # 0xac
'dwilg', # 0xad
'dwilm', # 0xae
'dwilb', # 0xaf
'dwils', # 0xb0
'dwilt', # 0xb1
'dwilp', # 0xb2
'dwilh', # 0xb3
'dwim', # 0xb4
'dwib', # 0xb5
'dwibs', # 0xb6
'dwis', # 0xb7
'dwiss', # 0xb8
'dwing', # 0xb9
'dwij', # 0xba
'dwic', # 0xbb
'dwik', # 0xbc
'dwit', # 0xbd
'dwip', # 0xbe
'dwih', # 0xbf
'dyu', # 0xc0
'dyug', # 0xc1
'dyugg', # 0xc2
'dyugs', # 0xc3
'dyun', # 0xc4
'dyunj', # 0xc5
'dyunh', # 0xc6
'dyud', # 0xc7
'dyul', # 0xc8
'dyulg', # 0xc9
'dyulm', # 0xca
'dyulb', # 0xcb
'dyuls', # 0xcc
'dyult', # 0xcd
'dyulp', # 0xce
'dyulh', # 0xcf
'dyum', # 0xd0
'dyub', # 0xd1
'dyubs', # 0xd2
'dyus', # 0xd3
'dyuss', # 0xd4
'dyung', # 0xd5
'dyuj', # 0xd6
'dyuc', # 0xd7
'dyuk', # 0xd8
'dyut', # 0xd9
'dyup', # 0xda
'dyuh', # 0xdb
'deu', # 0xdc
'deug', # 0xdd
'deugg', # 0xde
'deugs', # 0xdf
'deun', # 0xe0
'deunj', # 0xe1
'deunh', # 0xe2
'deud', # 0xe3
'deul', # 0xe4
'deulg', # 0xe5
'deulm', # 0xe6
'deulb', # 0xe7
'deuls', # 0xe8
'deult', # 0xe9
'deulp', # 0xea
'deulh', # 0xeb
'deum', # 0xec
'deub', # 0xed
'deubs', # 0xee
'deus', # 0xef
'deuss', # 0xf0
'deung', # 0xf1
'deuj', # 0xf2
'deuc', # 0xf3
'deuk', # 0xf4
'deut', # 0xf5
'deup', # 0xf6
'deuh', # 0xf7
'dyi', # 0xf8
'dyig', # 0xf9
'dyigg', # 0xfa
'dyigs', # 0xfb
'dyin', # 0xfc
'dyinj', # 0xfd
'dyinh', # 0xfe
'dyid', # 0xff
)
| gpl-3.0 | 7,938,486,689,126,531,000 | 17.457364 | 19 | 0.460941 | false |
lexxito/monitoring | ceilometer/image/glance.py | 1 | 4868 | # -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Julien Danjou <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common code for working with images
"""
from __future__ import absolute_import
import itertools
import glanceclient
from oslo.config import cfg
from ceilometer.openstack.common import timeutils
from ceilometer import plugin
from ceilometer import sample
class _Base(plugin.PollsterBase):
@staticmethod
def get_glance_client(ksclient):
endpoint = ksclient.service_catalog.url_for(
service_type='image',
endpoint_type=cfg.CONF.service_credentials.os_endpoint_type)
# hard-code v1 glance API version selection while v2 API matures
return glanceclient.Client('1', endpoint,
token=ksclient.auth_token)
def _get_images(self, ksclient):
client = self.get_glance_client(ksclient)
#TODO(eglynn): use pagination to protect against unbounded
# memory usage
rawImageList = list(itertools.chain(
client.images.list(filters={"is_public": True}),
#TODO(eglynn): extend glance API with all_tenants logic to
# avoid second call to retrieve private images
client.images.list(filters={"is_public": False})))
# When retrieving images from glance, glance will check
# whether the user is of 'admin_role' which is
# configured in glance-api.conf. If the user is of
# admin_role, and is querying public images(which means
# that the 'is_public' param is set to be True),
# glance will ignore 'is_public' parameter and returns
# all the public images together with private images.
# As a result, if the user/tenant has an admin role
# for ceilometer to collect image list,
        # the _Base._iter_images method will return an image list
        # which contains duplicate images. Add the following
        # code to avoid recording duplicate image events.
imageIdSet = set(image.id for image in rawImageList)
for image in rawImageList:
if image.id in imageIdSet:
imageIdSet -= set([image.id])
yield image
def _iter_images(self, ksclient, cache):
"""Iterate over all images."""
if 'images' not in cache:
cache['images'] = list(self._get_images(ksclient))
return iter(cache['images'])
@staticmethod
def extract_image_metadata(image):
return dict((k, getattr(image, k))
for k in
[
"status",
"is_public",
"name",
"deleted",
"container_format",
"created_at",
"disk_format",
"updated_at",
"properties",
"min_disk",
"protected",
"checksum",
"deleted_at",
"min_ram",
"size",
])
class ImagePollster(_Base):
def get_samples(self, manager, cache, resources=[]):
for image in self._iter_images(manager.keystone, cache):
yield sample.Sample(
name='image',
type=sample.TYPE_GAUGE,
unit='image',
volume=1,
user_id=None,
project_id=image.owner,
resource_id=image.id,
timestamp=timeutils.isotime(),
resource_metadata=self.extract_image_metadata(image),
)
class ImageSizePollster(_Base):
def get_samples(self, manager, cache, resources=[]):
for image in self._iter_images(manager.keystone, cache):
yield sample.Sample(
name='image.size',
type=sample.TYPE_GAUGE,
unit='B',
volume=image.size,
user_id=None,
project_id=image.owner,
resource_id=image.id,
timestamp=timeutils.isotime(),
resource_metadata=self.extract_image_metadata(image),
)
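# --- Illustrative usage sketch (added; not part of the original pollsters) ---
# extract_image_metadata() only reads plain attributes, so it can be exercised
# without a running Glance service. The fake image below is a stand-in with
# placeholder values; the field names mirror the keys listed above.
if __name__ == '__main__':
    import collections
    _FakeImage = collections.namedtuple(
        '_FakeImage',
        ['status', 'is_public', 'name', 'deleted', 'container_format',
         'created_at', 'disk_format', 'updated_at', 'properties', 'min_disk',
         'protected', 'checksum', 'deleted_at', 'min_ram', 'size'])
    fake_image = _FakeImage(
        status='active', is_public=True, name='cirros', deleted=False,
        container_format='bare', created_at='2014-01-01T00:00:00',
        disk_format='qcow2', updated_at='2014-01-01T00:00:00', properties={},
        min_disk=0, protected=False, checksum='abc123', deleted_at=None,
        min_ram=0, size=13167616)
    print(_Base.extract_image_metadata(fake_image))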
| apache-2.0 | -4,854,512,705,900,788,000 | 35.593985 | 75 | 0.569961 | false |
Blake-R/pylijm | tests/test_list.py | 1 | 1069 | # -*- coding: utf8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from unittest2 import TestCase
from pylijm.list import List
class TestDict(TestCase):
@property
def fixture(self):
return List(int)
def test_init_good(self):
Fix = self.fixture
self.assertListEqual([], Fix())
self.assertListEqual([0], Fix([0]))
self.assertListEqual([0], Fix(['0']))
def test_init_bad(self):
Fix = self.fixture
self.assertRaises(TypeError, Fix, [None])
self.assertRaises(TypeError, Fix, [dict()])
def test_set(self):
fix = self.fixture([0])
fix[0] = 1
self.assertEqual(1, fix[0])
with self.assertRaises(TypeError):
fix[None] = 0
with self.assertRaises(TypeError):
fix['0'] = 0
def test_unset(self):
fix = self.fixture([0])
del fix[0]
with self.assertRaises(TypeError):
del fix[None]
with self.assertRaises(TypeError):
del fix['0']
| gpl-3.0 | 8,008,563,741,887,081,000 | 25.725 | 82 | 0.579981 | false |
ENCODE-DCC/encoded | src/encoded/tests/test_audit_characterization.py | 1 | 2460 | import pytest
def test_audit_biosample_characterization_review_lane_not_required(
testapp,
biosample_characterization,
review,
):
testapp.patch_json(
biosample_characterization['@id'],
{
'review': review,
'characterization_method': 'immunoprecipitation followed by mass spectrometry',
}
)
res = testapp.get(biosample_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert not any(error['category'] == 'missing review lane' for error in errors_list)
def test_audit_biosample_characterization_review_lane_required(
testapp,
biosample_characterization,
review,
):
testapp.patch_json(
biosample_characterization['@id'],
{
'review': review,
'characterization_method': 'immunoblot',
}
)
res = testapp.get(biosample_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'missing review lane' for error in errors_list)
def test_audit_genetic_modification_characterization_review_lane_not_required(
testapp,
gm_characterization,
review,
):
testapp.patch_json(
gm_characterization['@id'],
{
'review': review,
'characterization_method': 'Sanger sequencing',
}
)
res = testapp.get(gm_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert not any(error['category'] == 'missing review lane' for error in errors_list)
def test_audit_genetic_modification_characterization_review_lane_required(
testapp,
gm_characterization,
review,
):
testapp.patch_json(
gm_characterization['@id'],
{
'review': review,
'characterization_method': 'immunoblot',
}
)
res = testapp.get(gm_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'missing review lane' for error in errors_list)
| mit | 1,709,638,554,847,089,000 | 29.37037 | 91 | 0.614228 | false |
gnmiller/craig-bot | craig-bot/lib/python3.6/site-packages/pip/_vendor/packaging/specifiers.py | 62 | 27778 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (match.group("operator").strip(), match.group("version").strip())
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def operator(self):
return self._spec[0]
@property
def version(self):
return self._spec[1]
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion, this allows us to have
# a shortcut for ``"2.0" in Specifier(">=2")
item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not, if we do not support prereleases than we can short circuit
# logic if this version is a prereleases.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self.operator)(item, self.version)
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if parsed_version.is_prerelease and not (
prereleases or self.prereleases
):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class LegacySpecifier(_IndividualSpecifier):
_regex_str = r"""
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^,;\s)]* # Since this is a "legacy" specifier, and the version
# string can be just about anything, we match everything
# except for whitespace, a semi-colon for marker support,
# a closing paren since versions can be enclosed in
# them, and a comma since it's a version separator.
)
"""
_regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex_str = r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
_regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore post and dev releases and we want to treat the pre-release as
# it's own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post") and not x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
prospective, prefix
)
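    # Added illustrative note: the compatible-release operator therefore acts
    # like the conjunction built above, e.g. (arbitrary example values):
    #   Specifier("~=2.2").contains("2.5")  -> True   (>=2.2 and ==2.*)
    #   Specifier("~=2.2").contains("3.0")  -> False  (fails the ==2.* half)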
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
prospective = Version(prospective.public)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[: len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
# This special case is here so that, unless the specifier itself
# includes is a pre-release version, that we do not accept pre-release
# versions for the version mentioned in the specifier (e.g. <3.1 should
# not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
# This special case is here so that, unless the specifier itself
# includes is a post-release version, that we do not accept
# post-release versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
# in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
# The == specifier can include a trailing .*, if it does we
# want to remove before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release than this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]) :])
right_split.append(right[len(right_split[0]) :])
# Insert our padding
left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
        # Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
        # Parse each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
def __len__(self):
return len(self._specs)
def __iter__(self):
return iter(self._specs)
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
# like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(s.contains(item, prereleases=prereleases) for s in self._specs)
def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
                # Ensure that we have some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
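# --- Illustrative usage sketch (added; not part of the vendored module) ---
# A small, self-contained demonstration of SpecifierSet matching and
# filtering; the version strings are arbitrary examples.
if __name__ == "__main__":
    spec_set = SpecifierSet(">=1.0,!=1.3.4,<2.0")
    print("1.3.4" in spec_set)   # False: excluded by !=1.3.4
    print("1.5" in spec_set)     # True: satisfies every specifier
    print(list(spec_set.filter(["0.9", "1.2", "1.3.4", "1.9", "2.1"])))
    # -> ['1.2', '1.9']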
| mit | -2,955,790,614,907,081,700 | 36.086782 | 88 | 0.565987 | false |
incaser/odoo-odoo | addons/account_budget/report/__init__.py | 444 | 1139 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crossovered_budget_report
import analytic_account_budget_report
import budget_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,983,278,381,091,800,000 | 41.185185 | 78 | 0.631255 | false |
Nexenta/cinder | cinder/volume/drivers/coprhd/helpers/virtualpool.py | 7 | 2887 | # Copyright (c) 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.i18n import _
from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common
class VirtualPool(common.CoprHDResource):
URI_VPOOL = "/{0}/vpools"
URI_VPOOL_SHOW = URI_VPOOL + "/{1}"
URI_VPOOL_SEARCH = URI_VPOOL + "/search?name={1}"
def vpool_show_uri(self, vpooltype, uri):
"""Makes REST API call and retrieves vpool details based on UUID.
This function will take uri as input and returns with
all parameters of VPOOL like label, urn and type.
:param vpooltype : Type of virtual pool {'block'}
:param uri : unique resource identifier of the vpool
:returns: object containing all the details of vpool
"""
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"GET",
self.URI_VPOOL_SHOW.format(vpooltype, uri), None)
o = common.json_decode(s)
if o['inactive']:
return None
return o
def vpool_query(self, name, vpooltype):
"""Makes REST API call to query the vpool by name and type.
This function will take the VPOOL name and type of VPOOL
        as input and get the uri of the first occurrence of the given VPOOL.
:param name: Name of the VPOOL
:param vpooltype: Type of the VPOOL {'block'}
:returns: uri of the given vpool
"""
if common.is_uri(name):
return name
(s, h) = common.service_json_request(
self.ipaddr, self.port, "GET",
self.URI_VPOOL_SEARCH.format(vpooltype, name), None)
o = common.json_decode(s)
if len(o['resource']) > 0:
# Get the Active vpool ID.
for vpool in o['resource']:
if self.vpool_show_uri(vpooltype, vpool['id']) is not None:
return vpool['id']
        # Raise a not-found exception, as we did not find any active vpool.
raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR,
(_("VPool %(name)s ( %(vpooltype)s ) :"
" not found") %
{'name': name,
'vpooltype': vpooltype
}))
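# --- Illustrative usage sketch (added; not part of the original helper) ---
# A hedged example of resolving a virtual pool URI by name. It assumes the
# CoprHDResource base class is constructed with (ipaddr, port) and that a
# CoprHD endpoint is reachable; host, port and pool name are placeholders.
if __name__ == '__main__':
    vpool_helper = VirtualPool('coprhd.example.com', 4443)
    print(vpool_helper.vpool_query('example_block_vpool', 'block'))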
| apache-2.0 | -3,929,665,384,914,477,600 | 36.493506 | 78 | 0.593003 | false |
sencha/chromium-spacewalk | third_party/pexpect/pexpect.py | 173 | 77354 | """Pexpect is a Python module for spawning child applications and controlling
them automatically. Pexpect can be used for automating interactive applications
such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
scripts for duplicating software package installations on different servers. It
can be used for automated software testing. Pexpect is in the spirit of Don
Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
require TCL and Expect or require C extensions to be compiled. Pexpect does not
use C, Expect, or TCL extensions. It should work on any platform that supports
the standard Python pty module. The Pexpect interface focuses on ease of use so
that simple tasks are easy.
There are two main interfaces to the Pexpect system; these are the function,
run() and the class, spawn. The spawn class is more powerful. The run()
function is simpler than spawn, and is good for quickly calling a program. When
you call the run() function it executes a given program and then returns the
output. This is a handy replacement for os.system().
For example::
pexpect.run('ls -la')
The spawn class is the more powerful interface to the Pexpect system. You can
use this to spawn a child program then interact with it by sending input and
expecting responses (waiting for patterns in the child's output).
For example::
child = pexpect.spawn('scp foo [email protected]:.')
child.expect('Password:')
child.sendline(mypassword)
This works even for commands that ask for passwords or other input outside of
the normal stdio streams. For example, ssh reads input directly from the TTY
device which bypasses stdin.
Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
Spiegel, Jan Grant, and Shane Kerr. Let me know if I forgot anyone.
Pexpect is free, open source, and all that good stuff.
http://pexpect.sourceforge.net/
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
try:
import os
import sys
import time
import select
import string
import re
import struct
import resource
import types
import pty
import tty
import termios
import fcntl
import errno
import traceback
import signal
except ImportError as e:
raise ImportError(str(e) + """
A critical module was not found. Probably this operating system does not
support it. Pexpect is intended for UNIX-like operating systems.""")
__version__ = '2.6'
__revision__ = '1'
__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'run', 'which',
'split_command_line', '__version__', '__revision__']
# Exception classes used by this module.
class ExceptionPexpect(Exception):
"""Base class for all exceptions raised by this module.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def get_trace(self):
"""This returns an abbreviated stack trace with lines that only concern
the caller. In other words, the stack trace inside the Pexpect module
is not included. """
tblist = traceback.extract_tb(sys.exc_info()[2])
#tblist = filter(self.__filter_not_pexpect, tblist)
tblist = [item for item in tblist if self.__filter_not_pexpect(item)]
tblist = traceback.format_list(tblist)
return ''.join(tblist)
    def __filter_not_pexpect(self, trace_list_item):
        """This returns True if list item 0 does not contain the string
        'pexpect.py'. """
if trace_list_item[0].find('pexpect.py') == -1:
return True
else:
return False
class EOF(ExceptionPexpect):
"""Raised when EOF is read from a child.
This usually means the child has exited."""
class TIMEOUT(ExceptionPexpect):
"""Raised when a read time exceeds the timeout. """
##class TIMEOUT_PATTERN(TIMEOUT):
## """Raised when the pattern match time exceeds the timeout.
## This is different than a read TIMEOUT because the child process may
## give output, thus never give a TIMEOUT, but the output
## may never match a pattern.
## """
##class MAXBUFFER(ExceptionPexpect):
## """Raised when a buffer fills before matching an expected pattern."""
def run(command, timeout=-1, withexitstatus=False, events=None,
extra_args=None, logfile=None, cwd=None, env=None):
"""
This function runs the given command; waits for it to finish; then
returns all output as a string. STDERR is included in output. If the full
path to the command is not given then the path is searched.
    Note that lines are terminated by a CR/LF (\\r\\n) combination even on
UNIX-like systems because this is the standard for pseudottys. If you set
'withexitstatus' to true, then run will return a tuple of (command_output,
exitstatus). If 'withexitstatus' is false then this returns just
command_output.
The run() function can often be used instead of creating a spawn instance.
For example, the following code uses spawn::
from pexpect import *
child = spawn('scp foo [email protected]:.')
child.expect('(?i)password')
child.sendline(mypassword)
    The previous code can be replaced with the following::
from pexpect import *
run('scp foo [email protected]:.', events={'(?i)password': mypassword})
Examples
========
Start the apache daemon on the local machine::
from pexpect import *
run("/usr/local/apache/bin/apachectl start")
Check in a file using SVN::
from pexpect import *
run("svn ci -m 'automatic commit' my_file.py")
Run a command and capture exit status::
from pexpect import *
(command_output, exitstatus) = run('ls -l /bin', withexitstatus=1)
Tricky Examples
===============
The following will run SSH and execute 'ls -l' on the remote machine. The
password 'secret' will be sent if the '(?i)password' pattern is ever seen::
run("ssh [email protected] 'ls -l'",
events={'(?i)password':'secret\\n'})
This will start mencoder to rip a video from DVD. This will also display
progress ticks every 5 seconds as it runs. For example::
from pexpect import *
def print_ticks(d):
print d['event_count'],
run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
events={TIMEOUT:print_ticks}, timeout=5)
The 'events' argument should be a dictionary of patterns and responses.
    Whenever one of the patterns is seen in the command output, run() will send the
associated response string. Note that you should put newlines in your
string if Enter is necessary. The responses may also contain callback
    functions. Any callback is a function that takes a dictionary as an argument.
The dictionary contains all the locals from the run() function, so you can
access the child spawn object or any other variable defined in run()
(event_count, child, and extra_args are the most useful). A callback may
    return True to stop the current run process; otherwise run() continues until
the next event. A callback may also return a string which will be sent to
    the child. 'extra_args' is not used directly by run(). It provides a way to
pass data to a callback function through run() through the locals
dictionary passed to a callback. """
if timeout == -1:
child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env)
else:
child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile,
cwd=cwd, env=env)
if events is not None:
patterns = list(events.keys())
responses = list(events.values())
else:
# This assumes EOF or TIMEOUT will eventually cause run to terminate.
patterns = None
responses = None
child_result_list = []
event_count = 0
while True:
try:
index = child.expect(patterns)
if type(child.after) in types.StringTypes:
child_result_list.append(child.before + child.after)
else:
# child.after may have been a TIMEOUT or EOF,
# which we don't want appended to the list.
child_result_list.append(child.before)
if type(responses[index]) in types.StringTypes:
child.send(responses[index])
elif isinstance(responses[index], types.FunctionType):
callback_result = responses[index](locals())
sys.stdout.flush()
if type(callback_result) in types.StringTypes:
child.send(callback_result)
elif callback_result:
break
else:
raise TypeError('The callback must be a string or function.')
event_count = event_count + 1
except TIMEOUT as e:
child_result_list.append(child.before)
break
except EOF as e:
child_result_list.append(child.before)
break
child_result = ''.join(child_result_list)
if withexitstatus:
child.close()
return (child_result, child.exitstatus)
else:
return child_result
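# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal, hedged example of driving run() with an events dictionary, as
# described in the docstring above; the host, user and password are
# placeholders. withexitstatus=True makes run() return an (output, status)
# tuple.
if __name__ == '__main__':
    output, status = run(
        "ssh [email protected] 'ls -l'",
        events={'(?i)password': 'secret\n'},
        withexitstatus=True,
        timeout=30)
    print(status)
    print(output)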
class spawn(object):
"""This is the main class interface for Pexpect. Use this class to start
and control child applications. """
def __init__(self, command, args=[], timeout=30, maxread=2000,
searchwindowsize=None, logfile=None, cwd=None, env=None):
"""This is the constructor. The command parameter may be a string that
includes a command and any arguments to the command. For example::
child = pexpect.spawn('/usr/bin/ftp')
child = pexpect.spawn('/usr/bin/ssh [email protected]')
child = pexpect.spawn('ls -latr /tmp')
You may also construct it with a list of arguments like so::
child = pexpect.spawn('/usr/bin/ftp', [])
child = pexpect.spawn('/usr/bin/ssh', ['[email protected]'])
child = pexpect.spawn('ls', ['-latr', '/tmp'])
After this the child application will be created and will be ready to
talk to. For normal use, see expect() and send() and sendline().
Remember that Pexpect does NOT interpret shell meta characters such as
redirect, pipe, or wild cards (>, |, or *). This is a common mistake.
If you want to run a command and pipe it through another command then
you must also start a shell. For example::
child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > logs.txt"')
child.expect(pexpect.EOF)
The second form of spawn (where you pass a list of arguments) is useful
in situations where you wish to spawn a command and pass it its own
argument list. This can make syntax more clear. For example, the
following is equivalent to the previous example::
shell_cmd = 'ls -l | grep LOG > logs.txt'
child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
child.expect(pexpect.EOF)
The maxread attribute sets the read buffer size. This is maximum number
of bytes that Pexpect will try to read from a TTY at one time. Setting
the maxread size to 1 will turn off buffering. Setting the maxread
value higher may help performance in cases where large amounts of
output are read back from the child. This feature is useful in
conjunction with searchwindowsize.
        The searchwindowsize attribute sets how far back in the incoming
        search buffer Pexpect will search for pattern matches. Every time
        Pexpect reads some data from the child it will append the data to
        the incoming buffer. The default is to search from the beginning of
        the incoming buffer each time new data is read from the child. But
        this is very inefficient if you are running a command that generates
        a large amount of output and you only want to match near the end of
        it. The searchwindowsize does not affect the size of the incoming
        data buffer. You will still have access to the full buffer after
        expect() returns.
The logfile member turns on or off logging. All input and output will
be copied to the given file object. Set logfile to None to stop
logging. This is the default. Set logfile to sys.stdout to echo
everything to standard output. The logfile is flushed after each write.
Example log input and output to a file::
child = pexpect.spawn('some_command')
fout = file('mylog.txt','w')
child.logfile = fout
Example log to stdout::
child = pexpect.spawn('some_command')
child.logfile = sys.stdout
The logfile_read and logfile_send members can be used to separately log
the input from the child and output sent to the child. Sometimes you
don't want to see everything you write to the child. You only want to
log what the child sends back. For example::
child = pexpect.spawn('some_command')
child.logfile_read = sys.stdout
To separately log output sent to the child use logfile_send::
self.logfile_send = fout
The delaybeforesend helps overcome a weird behavior that many users
were experiencing. The typical problem was that a user would expect() a
"Password:" prompt and then immediately call sendline() to send the
password. The user would then see that their password was echoed back
to them. Passwords don't normally echo. The problem is caused by the
fact that most applications print out the "Password" prompt and then
turn off stdin echo, but if you send your password before the
application turned off echo, then you get your password echoed.
Normally this wouldn't be a problem when interacting with a human at a
real keyboard. If you introduce a slight delay just before writing then
this seems to clear up the problem. This was such a common problem for
many users that I decided that the default pexpect behavior should be
to sleep just before writing to the child application. 1/20th of a
second (50 ms) seems to be enough to clear up the problem. You can set
delaybeforesend to 0 to return to the old behavior. Most Linux machines
don't like this to be below 0.03. I don't know why.
Note that spawn is clever about finding commands on your path.
It uses the same logic that "which" uses to find executables.
If you wish to get the exit status of the child you must call the
close() method. The exit or signal status of the child will be stored
in self.exitstatus or self.signalstatus. If the child exited normally
then exitstatus will store the exit return code and signalstatus will
be None. If the child was terminated abnormally with a signal then
signalstatus will store the signal value and exitstatus will be None.
If you need more detail you can also read the self.status member which
stores the status returned by os.waitpid. You can interpret this using
os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.TERMSIG. """
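        # Illustrative sketch (the command is an assumption, not part of the
        # original docstring): collecting the exit status once the child has
        # finished:
        #
        #     child = pexpect.spawn('ls -l /tmp')
        #     child.expect(pexpect.EOF)
        #     child.close()
        #     print(child.exitstatus, child.signalstatus)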
self.STDIN_FILENO = pty.STDIN_FILENO
self.STDOUT_FILENO = pty.STDOUT_FILENO
self.STDERR_FILENO = pty.STDERR_FILENO
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
self.searcher = None
self.ignorecase = False
self.before = None
self.after = None
self.match = None
self.match_index = None
self.terminated = True
self.exitstatus = None
self.signalstatus = None
# status returned by os.waitpid
self.status = None
self.flag_eof = False
self.pid = None
        # the child file descriptor is initially closed
self.child_fd = -1
self.timeout = timeout
self.delimiter = EOF
self.logfile = logfile
# input from child (read_nonblocking)
self.logfile_read = None
# output to send (send, sendline)
self.logfile_send = None
# max bytes to read at one time into buffer
self.maxread = maxread
# This is the read buffer. See maxread.
self.buffer = ''
# Data before searchwindowsize point is preserved, but not searched.
self.searchwindowsize = searchwindowsize
# Delay used before sending data to child. Time in seconds.
# Most Linux machines don't like this to be below 0.03 (30 ms).
self.delaybeforesend = 0.05
# Used by close() to give kernel time to update process status.
# Time in seconds.
self.delayafterclose = 0.1
# Used by terminate() to give kernel time to update process status.
# Time in seconds.
self.delayafterterminate = 0.1
self.softspace = False
self.name = '<' + repr(self) + '>'
self.encoding = None
self.closed = True
self.cwd = cwd
self.env = env
# This flags if we are running on irix
self.__irix_hack = (sys.platform.lower().find('irix') >= 0)
# Solaris uses internal __fork_pty(). All others use pty.fork().
if ((sys.platform.lower().find('solaris') >= 0)
or (sys.platform.lower().find('sunos5') >= 0)):
self.use_native_pty_fork = False
else:
self.use_native_pty_fork = True
# Support subclasses that do not use command or args.
if command is None:
self.command = None
self.args = None
self.name = '<pexpect factory incomplete>'
else:
self._spawn(command, args)
def __del__(self):
"""This makes sure that no system resources are left open. Python only
garbage collects Python objects. OS file descriptors are not Python
objects, so they must be handled explicitly. If the child file
descriptor was opened outside of this class (passed to the constructor)
then this does not close it. """
if not self.closed:
# It is possible for __del__ methods to execute during the
# teardown of the Python VM itself. Thus self.close() may
# trigger an exception because os.close may be None.
# -- Fernando Perez
try:
self.close()
except:
pass
def __str__(self):
"""This returns a human-readable string that represents the state of
the object. """
s = []
s.append(repr(self))
s.append('version: ' + __version__ + ' (' + __revision__ + ')')
s.append('command: ' + str(self.command))
s.append('args: ' + str(self.args))
s.append('searcher: ' + str(self.searcher))
s.append('buffer (last 100 chars): ' + str(self.buffer)[-100:])
s.append('before (last 100 chars): ' + str(self.before)[-100:])
s.append('after: ' + str(self.after))
s.append('match: ' + str(self.match))
s.append('match_index: ' + str(self.match_index))
s.append('exitstatus: ' + str(self.exitstatus))
s.append('flag_eof: ' + str(self.flag_eof))
s.append('pid: ' + str(self.pid))
s.append('child_fd: ' + str(self.child_fd))
s.append('closed: ' + str(self.closed))
s.append('timeout: ' + str(self.timeout))
s.append('delimiter: ' + str(self.delimiter))
s.append('logfile: ' + str(self.logfile))
s.append('logfile_read: ' + str(self.logfile_read))
s.append('logfile_send: ' + str(self.logfile_send))
s.append('maxread: ' + str(self.maxread))
s.append('ignorecase: ' + str(self.ignorecase))
s.append('searchwindowsize: ' + str(self.searchwindowsize))
s.append('delaybeforesend: ' + str(self.delaybeforesend))
s.append('delayafterclose: ' + str(self.delayafterclose))
s.append('delayafterterminate: ' + str(self.delayafterterminate))
return '\n'.join(s)
def _spawn(self, command, args=[]):
"""This starts the given command in a child process. This does all the
fork/exec type of stuff for a pty. This is called by __init__. If args
is empty then command will be parsed (split on spaces) and args will be
set to parsed arguments. """
# The pid and child_fd of this object get set by this method.
# Note that it is difficult for this method to fail.
# You cannot detect if the child process cannot start.
# So the only way you can tell if the child process started
# or not is to try to read from the file descriptor. If you get
# EOF immediately then it means that the child is already dead.
# That may not necessarily be bad because you may have spawned a child
# that performs some task; creates no stdout output; and then dies.
# If command is an int type then it may represent a file descriptor.
if isinstance(command, type(0)):
raise ExceptionPexpect('Command is an int type. ' +
'If this is a file descriptor then maybe you want to ' +
'use fdpexpect.fdspawn which takes an existing ' +
'file descriptor instead of a command string.')
if not isinstance(args, type([])):
raise TypeError('The argument, args, must be a list.')
if args == []:
self.args = split_command_line(command)
self.command = self.args[0]
else:
# Make a shallow copy of the args list.
self.args = args[:]
self.args.insert(0, command)
self.command = command
command_with_path = which(self.command)
if command_with_path is None:
raise ExceptionPexpect('The command was not found or was not ' +
'executable: %s.' % self.command)
self.command = command_with_path
self.args[0] = self.command
self.name = '<' + ' '.join(self.args) + '>'
assert self.pid is None, 'The pid member must be None.'
assert self.command is not None, 'The command member must not be None.'
if self.use_native_pty_fork:
try:
self.pid, self.child_fd = pty.fork()
except OSError as e:
raise ExceptionPexpect('pty.fork() failed: ' + str(e))
else:
# Use internal __fork_pty
self.pid, self.child_fd = self.__fork_pty()
if self.pid == 0:
# Child
try:
# used by setwinsize()
self.child_fd = sys.stdout.fileno()
self.setwinsize(24, 80)
except:
# Some platforms do not like setwinsize (Cygwin).
# This will cause problem when running applications that
# are very picky about window size.
# This is a serious limitation, but not a show stopper.
pass
# Do not allow child to inherit open file descriptors from parent.
max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
for i in range(3, max_fd):
try:
os.close(i)
except OSError:
pass
# I don't know why this works, but ignoring SIGHUP fixes a
# problem when trying to start a Java daemon with sudo
# (specifically, Tomcat).
signal.signal(signal.SIGHUP, signal.SIG_IGN)
if self.cwd is not None:
os.chdir(self.cwd)
if self.env is None:
os.execv(self.command, self.args)
else:
os.execvpe(self.command, self.args, self.env)
# Parent
self.terminated = False
self.closed = False
def __fork_pty(self):
"""This implements a substitute for the forkpty system call. This
should be more portable than the pty.fork() function. Specifically,
this should work on Solaris.
Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
resolve the issue with Python's pty.fork() not supporting Solaris,
particularly ssh. Based on patch to posixmodule.c authored by Noah
Spurrier::
http://mail.python.org/pipermail/python-dev/2003-May/035281.html
"""
parent_fd, child_fd = os.openpty()
if parent_fd < 0 or child_fd < 0:
raise ExceptionPexpect("Could not open with os.openpty().")
pid = os.fork()
if pid < 0:
raise ExceptionPexpect("Failed os.fork().")
elif pid == 0:
# Child.
os.close(parent_fd)
self.__pty_make_controlling_tty(child_fd)
os.dup2(child_fd, 0)
os.dup2(child_fd, 1)
os.dup2(child_fd, 2)
if child_fd > 2:
os.close(child_fd)
else:
# Parent.
os.close(child_fd)
return pid, parent_fd
def __pty_make_controlling_tty(self, tty_fd):
"""This makes the pseudo-terminal the controlling tty. This should be
more portable than the pty.fork() function. Specifically, this should
work on Solaris. """
child_name = os.ttyname(tty_fd)
# Disconnect from controlling tty. Harmless if not already connected.
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
if fd >= 0:
os.close(fd)
except:
# Already disconnected. This happens if running inside cron.
pass
os.setsid()
# Verify we are disconnected from controlling tty
# by attempting to open it again.
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
if fd >= 0:
os.close(fd)
raise ExceptionPexpect('Failed to disconnect from ' +
'controlling tty. It is still possible to open /dev/tty.')
except:
# Good! We are disconnected from a controlling tty.
pass
# Verify we can open child pty.
fd = os.open(child_name, os.O_RDWR)
if fd < 0:
raise ExceptionPexpect("Could not open child pty, " + child_name)
else:
os.close(fd)
# Verify we now have a controlling tty.
fd = os.open("/dev/tty", os.O_WRONLY)
if fd < 0:
raise ExceptionPexpect("Could not open controlling tty, /dev/tty")
else:
os.close(fd)
def fileno(self):
"""This returns the file descriptor of the pty for the child.
"""
return self.child_fd
def close(self, force=True):
"""This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
and SIGINT). """
if not self.closed:
self.flush()
os.close(self.child_fd)
# Give kernel time to update process status.
time.sleep(self.delayafterclose)
if self.isalive():
if not self.terminate(force):
raise ExceptionPexpect('Could not terminate the child.')
self.child_fd = -1
self.closed = True
#self.pid = None
def flush(self):
"""This does nothing. It is here to support the interface for a
File-like object. """
pass
def isatty(self):
"""This returns True if the file descriptor is open and connected to a
tty(-like) device, else False. """
return os.isatty(self.child_fd)
def waitnoecho(self, timeout=-1):
"""This waits until the terminal ECHO flag is set False. This returns
True if the echo mode is off. This returns False if the ECHO flag was
not set False before the timeout. This can be used to detect when the
child is waiting for a password. Usually a child application will turn
off echo mode when it is waiting for the user to enter a password. For
example, instead of expecting the "password:" prompt you can wait for
the child to set ECHO off::
p = pexpect.spawn('ssh [email protected]')
p.waitnoecho()
p.sendline(mypassword)
If timeout==-1 then this method will use the value in self.timeout.
        If timeout==None then this method will block until the ECHO flag is False.
"""
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
while True:
if not self.getecho():
return True
if timeout < 0 and timeout is not None:
return False
if timeout is not None:
timeout = end_time - time.time()
time.sleep(0.1)
def getecho(self):
"""This returns the terminal echo mode. This returns True if echo is
on or False if echo is off. Child applications that are expecting you
to enter a password often set ECHO False. See waitnoecho(). """
attr = termios.tcgetattr(self.child_fd)
if attr[3] & termios.ECHO:
return True
return False
def setecho(self, state):
"""This sets the terminal echo mode on or off. Note that anything the
child sent before the echo will be lost, so you should be sure that
your input buffer is empty before you call setecho(). For example, the
following will work as expected::
p = pexpect.spawn('cat') # Echo is on by default.
            p.sendline('1234') # We expect to see this twice from the child...
p.expect(['1234']) # ... once from the tty echo...
p.expect(['1234']) # ... and again from cat itself.
p.setecho(False) # Turn off tty echo
            p.sendline('abcd') # We will see this only once (echoed by cat).
            p.sendline('wxyz') # We will see this only once (echoed by cat)
p.expect(['abcd'])
p.expect(['wxyz'])
The following WILL NOT WORK because the lines sent before the setecho
will be lost::
p = pexpect.spawn('cat')
p.sendline('1234')
p.setecho(False) # Turn off tty echo
            p.sendline('abcd') # We will see this only once (echoed by cat).
            p.sendline('wxyz') # We will see this only once (echoed by cat)
p.expect(['1234'])
p.expect(['1234'])
p.expect(['abcd'])
p.expect(['wxyz'])
"""
attr = termios.tcgetattr(self.child_fd)
if state:
attr[3] = attr[3] | termios.ECHO
else:
attr[3] = attr[3] & ~termios.ECHO
# I tried TCSADRAIN and TCSAFLUSH, but
# these were inconsistent and blocked on some platforms.
# TCSADRAIN would probably be ideal if it worked.
termios.tcsetattr(self.child_fd, termios.TCSANOW, attr)
def read_nonblocking(self, size=1, timeout=-1):
"""This reads at most size characters from the child application. It
includes a timeout. If the read does not complete within the timeout
period then a TIMEOUT exception is raised. If the end of file is read
then an EOF exception will be raised. If a log file was set using
setlog() then all data will also be written to the log file.
If timeout is None then the read may block indefinitely.
If timeout is -1 then the self.timeout value is used. If timeout is 0
then the child is polled and if there is no data immediately ready
then this will raise a TIMEOUT exception.
The timeout refers only to the amount of time to read at least one
        character. This is not affected by the 'size' parameter, so if you call
read_nonblocking(size=100, timeout=30) and only one character is
available right away then one character will be returned immediately.
It will not wait for 30 seconds for another 99 characters to come in.
This is a wrapper around os.read(). It uses select.select() to
implement the timeout. """
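        # Rough usage sketch (the size and timeout values are arbitrary):
        #
        #     try:
        #         data = child.read_nonblocking(size=1024, timeout=0)  # poll
        #     except pexpect.TIMEOUT:
        #         data = ''  # nothing was ready right now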
if self.closed:
raise ValueError('I/O operation on closed file.')
if timeout == -1:
timeout = self.timeout
# Note that some systems such as Solaris do not give an EOF when
# the child dies. In fact, you can still try to read
# from the child_fd -- it will block forever or until TIMEOUT.
# For this case, I test isalive() before doing any reading.
# If isalive() is false, then I pretend that this is the same as EOF.
if not self.isalive():
# timeout of 0 means "poll"
r, w, e = self.__select([self.child_fd], [], [], 0)
if not r:
self.flag_eof = True
raise EOF('End Of File (EOF). Braindead platform.')
elif self.__irix_hack:
# Irix takes a long time before it realizes a child was terminated.
# FIXME So does this mean Irix systems are forced to always have
# FIXME a 2 second delay when calling read_nonblocking? That sucks.
r, w, e = self.__select([self.child_fd], [], [], 2)
if not r and not self.isalive():
self.flag_eof = True
raise EOF('End Of File (EOF). Slow platform.')
r, w, e = self.__select([self.child_fd], [], [], timeout)
if not r:
if not self.isalive():
# Some platforms, such as Irix, will claim that their
# processes are alive; timeout on the select; and
# then finally admit that they are not alive.
self.flag_eof = True
raise EOF('End of File (EOF). Very slow platform.')
else:
raise TIMEOUT('Timeout exceeded.')
if self.child_fd in r:
try:
s = os.read(self.child_fd, size)
except OSError as e:
# Linux does this
self.flag_eof = True
raise EOF('End Of File (EOF). Exception style platform.')
if s == '':
# BSD style
self.flag_eof = True
raise EOF('End Of File (EOF). Empty string style platform.')
if self.logfile is not None:
self.logfile.write(s)
self.logfile.flush()
if self.logfile_read is not None:
self.logfile_read.write(s)
self.logfile_read.flush()
return s
raise ExceptionPexpect('Reached an unexpected state.')
def read(self, size=-1):
"""This reads at most "size" bytes from the file (less if the read hits
EOF before obtaining size bytes). If the size argument is negative or
omitted, read all data until EOF is reached. The bytes are returned as
a string object. An empty string is returned when EOF is encountered
immediately. """
if size == 0:
return ''
if size < 0:
# delimiter default is EOF
self.expect(self.delimiter)
return self.before
# I could have done this more directly by not using expect(), but
# I deliberately decided to couple read() to expect() so that
        # I would catch any bugs early and ensure consistent behavior.
# It's a little less efficient, but there is less for me to
# worry about if I have to later modify read() or expect().
# Note, it's OK if size==-1 in the regex. That just means it
# will never match anything in which case we stop only on EOF.
cre = re.compile('.{%d}' % size, re.DOTALL)
# delimiter default is EOF
index = self.expect([cre, self.delimiter])
if index == 0:
### FIXME self.before should be ''. Should I assert this?
return self.after
return self.before
def readline(self, size=-1):
"""This reads and returns one entire line. The newline at the end of
line is returned as part of the string, unless the file ends without a
newline. An empty string is returned if EOF is encountered immediately.
This looks for a newline as a CR/LF pair (\\r\\n) even on UNIX because
this is what the pseudotty device returns. So contrary to what you may
expect you will receive newlines as \\r\\n.
If the size argument is 0 then an empty string is returned. In all
other cases the size argument is ignored, which is not standard
behavior for a file-like object. """
if size == 0:
return ''
# delimiter default is EOF
index = self.expect(['\r\n', self.delimiter])
if index == 0:
return self.before + '\r\n'
else:
return self.before
def __iter__(self):
"""This is to support iterators over a file-like object.
"""
return self
def __next__(self):
"""This is to support iterators over a file-like object.
"""
result = self.readline()
if result == "":
raise StopIteration
return result
def readlines(self, sizehint=-1):
"""This reads until EOF using readline() and returns a list containing
the lines thus read. The optional 'sizehint' argument is ignored. """
lines = []
while True:
line = self.readline()
if not line:
break
lines.append(line)
return lines
def write(self, s):
"""This is similar to send() except that there is no return value.
"""
self.send(s)
def writelines(self, sequence):
"""This calls write() for each element in the sequence. The sequence
can be any iterable object producing strings, typically a list of
        strings. This does not add line separators. There is no return value.
"""
for s in sequence:
self.write(s)
def send(self, s):
"""This sends a string to the child process. This returns the number of
bytes written. If a log file was set then the data is also written to
the log. """
time.sleep(self.delaybeforesend)
if self.logfile is not None:
self.logfile.write(s)
self.logfile.flush()
if self.logfile_send is not None:
self.logfile_send.write(s)
self.logfile_send.flush()
c = os.write(self.child_fd, s.encode("utf-8"))
return c
def sendline(self, s=''):
"""This is like send(), but it adds a linefeed (os.linesep). This
returns the number of bytes written. """
n = self.send(s)
n = n + self.send(os.linesep)
return n
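    # A short, hedged comparison of the two calls (the command text is made
    # up):
    #
    #     child.send('ls -l')        # raw text only, no newline appended
    #     child.sendline('ls -l')    # same text followed by os.linesep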
def sendcontrol(self, char):
"""This sends a control character to the child such as Ctrl-C or
Ctrl-D. For example, to send a Ctrl-G (ASCII 7)::
child.sendcontrol('g')
See also, sendintr() and sendeof().
"""
char = char.lower()
a = ord(char)
if a >= 97 and a <= 122:
a = a - ord('a') + 1
return self.send(chr(a))
d = {'@': 0, '`': 0,
'[': 27, '{': 27,
'\\': 28, '|': 28,
']': 29, '}': 29,
'^': 30, '~': 30,
'_': 31,
'?': 127}
if char not in d:
return 0
return self.send(chr(d[char]))
def sendeof(self):
"""This sends an EOF to the child. This sends a character which causes
the pending parent output buffer to be sent to the waiting child
program without waiting for end-of-line. If it is the first character
of the line, the read() in the user program returns 0, which signifies
        end-of-file. This means that, to work as expected, sendeof() has to
        be called at the beginning of a line. This method does not send a
        newline. It is the responsibility of the caller to ensure the EOF is
        sent at the beginning of a line. """
### Hmmm... how do I send an EOF?
###C if ((m = write(pty, *buf, p - *buf)) < 0)
###C return (errno == EWOULDBLOCK) ? n : -1;
#fd = sys.stdin.fileno()
#old = termios.tcgetattr(fd) # remember current state
#attr = termios.tcgetattr(fd)
#attr[3] = attr[3] | termios.ICANON # ICANON must be set to see EOF
#try: # use try/finally to ensure state gets restored
# termios.tcsetattr(fd, termios.TCSADRAIN, attr)
# if hasattr(termios, 'CEOF'):
# os.write(self.child_fd, '%c' % termios.CEOF)
# else:
# # Silly platform does not define CEOF so assume CTRL-D
# os.write(self.child_fd, '%c' % 4)
#finally: # restore state
# termios.tcsetattr(fd, termios.TCSADRAIN, old)
if hasattr(termios, 'VEOF'):
char = termios.tcgetattr(self.child_fd)[6][termios.VEOF]
else:
# platform does not define VEOF so assume CTRL-D
char = chr(4)
self.send(char)
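    # A hedged sketch of a typical sendeof() pattern (the child command is
    # only an example):
    #
    #     child = pexpect.spawn('cat')
    #     child.sendline('hello')
    #     child.sendeof()            # must be at the start of a line
    #     child.expect(pexpect.EOF)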
def sendintr(self):
"""This sends a SIGINT to the child. It does not require
the SIGINT to be the first character on a line. """
if hasattr(termios, 'VINTR'):
char = termios.tcgetattr(self.child_fd)[6][termios.VINTR]
else:
# platform does not define VINTR so assume CTRL-C
char = chr(3)
self.send(char)
def eof(self):
"""This returns True if the EOF exception was ever raised.
"""
return self.flag_eof
def terminate(self, force=False):
"""This forces a child process to terminate. It starts nicely with
        SIGHUP and SIGINT. If "force" is True then it moves on to SIGKILL. This
returns True if the child was terminated. This returns False if the
child could not be terminated. """
if not self.isalive():
return True
try:
self.kill(signal.SIGHUP)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGCONT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
return False
except OSError as e:
# I think there are kernel timing issues that sometimes cause
# this to happen. I think isalive() reports True, but the
# process is dead to the kernel.
# Make one last attempt to see if the kernel is up to date.
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
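    # Sketch of a forced shutdown (not from the original docs; adapt the
    # error handling to your own needs):
    #
    #     if not child.terminate(force=True):
    #         raise RuntimeError('could not terminate the child process')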
def wait(self):
"""This waits until the child exits. This is a blocking call. This will
not read any data from the child, so this will block forever if the
child has unread output and has terminated. In other words, the child
        may have printed output and then called exit(), but the child is
technically still alive until its output is read by the parent. """
if self.isalive():
pid, status = os.waitpid(self.pid, 0)
else:
raise ExceptionPexpect('Cannot wait for dead child process.')
self.exitstatus = os.WEXITSTATUS(status)
if os.WIFEXITED(status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED(status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED(status):
# You can't call wait() on a child process in the stopped state.
raise ExceptionPexpect('Called wait() on a stopped child ' +
'process. This is not supported. Is some other ' +
'process attempting job control with our child pid?')
return self.exitstatus
def isalive(self):
"""This tests if the child process is running or not. This is
non-blocking. If the child was terminated then this will read the
exitstatus or signalstatus of the child. This returns True if the child
process appears to be running or False if not. It can take literally
SECONDS for Solaris to return the right status. """
if self.terminated:
return False
if self.flag_eof:
# This is for Linux, which requires the blocking form
            # of waitpid to get status of a defunct process.
# This is super-lame. The flag_eof would have been set
# in read_nonblocking(), so this should be safe.
waitpid_options = 0
else:
waitpid_options = os.WNOHANG
try:
pid, status = os.waitpid(self.pid, waitpid_options)
except OSError as e:
# No child processes
            if e.errno == errno.ECHILD:
raise ExceptionPexpect('isalive() encountered condition ' +
'where "terminated" is 0, but there was no child ' +
'process. Did someone else call waitpid() ' +
'on our process?')
else:
raise e
# I have to do this twice for Solaris.
# I can't even believe that I figured this out...
# If waitpid() returns 0 it means that no child process
# wishes to report, and the value of status is undefined.
if pid == 0:
try:
### os.WNOHANG) # Solaris!
pid, status = os.waitpid(self.pid, waitpid_options)
except OSError as e:
# This should never happen...
                if e.errno == errno.ECHILD:
raise ExceptionPexpect('isalive() encountered condition ' +
'that should never happen. There was no child ' +
'process. Did someone else call waitpid() ' +
'on our process?')
else:
raise e
# If pid is still 0 after two calls to waitpid() then the process
# really is alive. This seems to work on all platforms, except for
# Irix which seems to require a blocking call on waitpid or select,
# so I let read_nonblocking take care of this situation
# (unfortunately, this requires waiting through the timeout).
if pid == 0:
return True
if os.WIFEXITED(status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED(status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED(status):
raise ExceptionPexpect('isalive() encountered condition ' +
'where child process is stopped. This is not ' +
'supported. Is some other process attempting ' +
'job control with our child pid?')
return False
def kill(self, sig):
"""This sends the given signal to the child application. In keeping
with UNIX tradition it has a misleading name. It does not necessarily
kill the child unless you send the right signal. """
# Same as os.kill, but the pid is given for you.
if self.isalive():
os.kill(self.pid, sig)
def compile_pattern_list(self, patterns):
"""This compiles a pattern-string or a list of pattern-strings.
Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
those. Patterns may also be None which results in an empty list (you
might do this if waiting for an EOF or TIMEOUT condition without
expecting any pattern).
This is used by expect() when calling expect_list(). Thus expect() is
nothing more than::
cpl = self.compile_pattern_list(pl)
return self.expect_list(cpl, timeout)
If you are using expect() within a loop it may be more
efficient to compile the patterns first and then call expect_list().
        This avoids calls in a loop to compile_pattern_list()::
cpl = self.compile_pattern_list(my_pattern)
while some_condition:
...
            i = self.expect_list(cpl, timeout)
...
"""
if patterns is None:
return []
if not isinstance(patterns, list):
patterns = [patterns]
# Allow dot to match \n
compile_flags = re.DOTALL
if self.ignorecase:
compile_flags = compile_flags | re.IGNORECASE
compiled_pattern_list = []
for p in patterns:
if type(p) in types.StringTypes:
compiled_pattern_list.append(re.compile(p, compile_flags))
elif p is EOF:
compiled_pattern_list.append(EOF)
elif p is TIMEOUT:
compiled_pattern_list.append(TIMEOUT)
elif isinstance(p, type(re.compile(''))):
compiled_pattern_list.append(p)
else:
raise TypeError('Argument must be one of StringTypes, ' +
'EOF, TIMEOUT, SRE_Pattern, or a list of those ' +
                    'types. %s' % str(type(p)))
return compiled_pattern_list
def expect(self, pattern, timeout=-1, searchwindowsize=-1):
"""This seeks through the stream until a pattern is matched. The
pattern is overloaded and may take several types. The pattern can be a
StringType, EOF, a compiled re, or a list of any of those types.
Strings will be compiled to re types. This returns the index into the
pattern list. If the pattern was not a list this returns index 0 on a
successful match. This may raise exceptions for EOF or TIMEOUT. To
avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
list. That will cause expect to match an EOF or TIMEOUT condition
instead of raising an exception.
If you pass a list of patterns and more than one matches, the first
match in the stream is chosen. If more than one pattern matches at that
point, the leftmost in the pattern list is chosen. For example::
# the input is 'foobar'
index = p.expect(['bar', 'foo', 'foobar'])
# returns 1('foo') even though 'foobar' is a "better" match
Please note, however, that buffering can affect this behavior, since
input arrives in unpredictable chunks. For example::
# the input is 'foobar'
index = p.expect(['foobar', 'foo'])
# returns 0('foobar') if all input is available at once,
            # but returns 1('foo') if parts of the final 'bar' arrive late
After a match is found the instance attributes 'before', 'after' and
'match' will be set. You can see all the data read before the match in
'before'. You can see the data that was matched in 'after'. The
re.MatchObject used in the re match will be in 'match'. If an error
occurred then 'before' will be set to all the data read so far and
'after' and 'match' will be None.
If timeout is -1 then timeout will be set to the self.timeout value.
A list entry may be EOF or TIMEOUT instead of a string. This will
catch these exceptions and return the index of the list entry instead
of raising the exception. The attribute 'after' will be set to the
exception type. The attribute 'match' will be None. This allows you to
write code like this::
index = p.expect(['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
if index == 0:
do_something()
elif index == 1:
do_something_else()
elif index == 2:
do_some_other_thing()
elif index == 3:
do_something_completely_different()
instead of code like this::
try:
index = p.expect(['good', 'bad'])
if index == 0:
do_something()
elif index == 1:
do_something_else()
except EOF:
do_some_other_thing()
except TIMEOUT:
do_something_completely_different()
These two forms are equivalent. It all depends on what you want. You
can also just expect the EOF if you are waiting for all output of a
child to finish. For example::
p = pexpect.spawn('/bin/ls')
p.expect(pexpect.EOF)
print p.before
If you are trying to optimize for speed then see expect_list().
"""
compiled_pattern_list = self.compile_pattern_list(pattern)
return self.expect_list(compiled_pattern_list,
timeout, searchwindowsize)
def expect_list(self, pattern_list, timeout=-1, searchwindowsize=-1):
"""This takes a list of compiled regular expressions and returns the
index into the pattern_list that matched the child output. The list may
        also contain EOF or TIMEOUT (which are not compiled regular
expressions). This method is similar to the expect() method except that
expect_list() does not recompile the pattern list on every call. This
may help if you are trying to optimize for speed, otherwise just use
the expect() method. This is called by expect(). If timeout==-1 then
the self.timeout value is used. If searchwindowsize==-1 then the
self.searchwindowsize value is used. """
return self.expect_loop(searcher_re(pattern_list),
timeout, searchwindowsize)
def expect_exact(self, pattern_list, timeout=-1, searchwindowsize=-1):
"""This is similar to expect(), but uses plain string matching instead
of compiled regular expressions in 'pattern_list'. The 'pattern_list'
may be a string; a list or other sequence of strings; or TIMEOUT and
EOF.
This call might be faster than expect() for two reasons: string
searching is faster than RE matching and it is possible to limit the
search to just the end of the input buffer.
This method is also useful when you don't want to have to worry about
escaping regular expression characters that you want to match."""
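        # For example (the prompt strings below are assumptions, not part of
        # the original documentation):
        #
        #     i = child.expect_exact(['Password:', '$ ', pexpect.EOF])
        #     # i is the index of whichever literal string (or EOF) matched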
if (type(pattern_list) in types.StringTypes or
pattern_list in (TIMEOUT, EOF)):
pattern_list = [pattern_list]
return self.expect_loop(searcher_string(pattern_list),
timeout, searchwindowsize)
def expect_loop(self, searcher, timeout=-1, searchwindowsize=-1):
"""This is the common loop used inside expect. The 'searcher' should be
an instance of searcher_re or searcher_string, which describes how and
what to search for in the input.
See expect() for other arguments, return value and exceptions. """
self.searcher = searcher
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
if searchwindowsize == -1:
searchwindowsize = self.searchwindowsize
try:
incoming = self.buffer
freshlen = len(incoming)
while True:
# Keep reading until exception or return.
index = searcher.search(incoming, freshlen, searchwindowsize)
if index >= 0:
self.buffer = incoming[searcher.end:]
self.before = incoming[: searcher.start]
self.after = incoming[searcher.start: searcher.end]
self.match = searcher.match
self.match_index = index
return self.match_index
# No match at this point
if timeout < 0 and timeout is not None:
raise TIMEOUT('Timeout exceeded in expect_any().')
# Still have time left, so read more data
c = self.read_nonblocking(self.maxread, timeout)
freshlen = len(c)
time.sleep(0.0001)
incoming = incoming + c
if timeout is not None:
timeout = end_time - time.time()
except EOF as e:
self.buffer = ''
self.before = incoming
self.after = EOF
index = searcher.eof_index
if index >= 0:
self.match = EOF
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise EOF(str(e) + '\n' + str(self))
except TIMEOUT as e:
self.buffer = incoming
self.before = incoming
self.after = TIMEOUT
index = searcher.timeout_index
if index >= 0:
self.match = TIMEOUT
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise TIMEOUT(str(e) + '\n' + str(self))
except:
self.before = incoming
self.after = None
self.match = None
self.match_index = None
raise
def getwinsize(self):
"""This returns the terminal window size of the child tty. The return
value is a tuple of (rows, cols). """
TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912)
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(self.fileno(), TIOCGWINSZ, s)
return struct.unpack('HHHH', x)[0:2]
def setwinsize(self, rows, cols):
"""This sets the terminal window size of the child tty. This will cause
a SIGWINCH signal to be sent to the child. This does not change the
physical window size. It changes the size reported to TTY-aware
applications like vi or curses -- applications that respond to the
SIGWINCH signal. """
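        # A small, hedged example (the 24x80 size is arbitrary):
        #
        #     rows, cols = child.getwinsize()
        #     child.setwinsize(24, 80)   # the child receives SIGWINCH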
# Check for buggy platforms. Some Python versions on some platforms
# (notably OSF1 Alpha and RedHat 7.1) truncate the value for
# termios.TIOCSWINSZ. It is not clear why this happens.
# These platforms don't seem to handle the signed int very well;
# yet other platforms like OpenBSD have a large negative value for
# TIOCSWINSZ and they don't have a truncate problem.
# Newer versions of Linux have totally different values for TIOCSWINSZ.
# Note that this fix is a hack.
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
if TIOCSWINSZ == 2148037735:
# Same bits, but with sign.
TIOCSWINSZ = -2146929561
# Note, assume ws_xpixel and ws_ypixel are zero.
s = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(self.fileno(), TIOCSWINSZ, s)
def interact(self, escape_character=chr(29),
input_filter=None, output_filter=None):
"""This gives control of the child process to the interactive user (the
human at the keyboard). Keystrokes are sent to the child process, and
the stdout and stderr output of the child process is printed. This
        simply echoes the child stdout and child stderr to the real stdout and
        it echoes the real stdin to the child stdin. When the user types the
escape_character this method will stop. The default for
escape_character is ^]. This should not be confused with ASCII 27 --
the ESC character. ASCII 29 was chosen for historical merit because
this is the character used by 'telnet' as the escape character. The
escape_character will not be sent to the child process.
You may pass in optional input and output filter functions. These
functions should take a string and return a string. The output_filter
will be passed all the output from the child process. The input_filter
will be passed all the keyboard input from the user. The input_filter
is run BEFORE the check for the escape_character.
Note that if you change the window size of the parent the SIGWINCH
signal will not be passed through to the child. If you want the child
window size to change when the parent's window size changes then do
something like the following example::
import pexpect, struct, fcntl, termios, signal, sys
def sigwinch_passthrough (sig, data):
s = struct.pack("HHHH", 0, 0, 0, 0)
a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(),
termios.TIOCGWINSZ , s))
global p
p.setwinsize(a[0],a[1])
            # Note: this 'p' is global and is used in sigwinch_passthrough.
p = pexpect.spawn('/bin/bash')
signal.signal(signal.SIGWINCH, sigwinch_passthrough)
p.interact()
"""
# Flush the buffer.
self.stdout.write(self.buffer)
self.stdout.flush()
self.buffer = ''
mode = tty.tcgetattr(self.STDIN_FILENO)
tty.setraw(self.STDIN_FILENO)
try:
self.__interact_copy(escape_character, input_filter, output_filter)
finally:
tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
def __interact_writen(self, fd, data):
"""This is used by the interact() method.
"""
while data != '' and self.isalive():
n = os.write(fd, data)
data = data[n:]
def __interact_read(self, fd):
"""This is used by the interact() method.
"""
return os.read(fd, 1000)
def __interact_copy(self, escape_character=None,
input_filter=None, output_filter=None):
"""This is used by the interact() method.
"""
while self.isalive():
r, w, e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
if self.child_fd in r:
data = self.__interact_read(self.child_fd)
if output_filter:
data = output_filter(data)
if self.logfile is not None:
self.logfile.write(data)
self.logfile.flush()
os.write(self.STDOUT_FILENO, data)
if self.STDIN_FILENO in r:
data = self.__interact_read(self.STDIN_FILENO)
if input_filter:
data = input_filter(data)
i = data.rfind(escape_character)
if i != -1:
data = data[:i]
self.__interact_writen(self.child_fd, data)
break
self.__interact_writen(self.child_fd, data)
def __select(self, iwtd, owtd, ewtd, timeout=None):
"""This is a wrapper around select.select() that ignores signals. If
select.select raises a select.error exception and errno is an EINTR
error then it is ignored. Mainly this is used to ignore sigwinch
(terminal resize). """
# if select() is interrupted by a signal (errno==EINTR) then
# we loop back and enter the select() again.
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
return select.select(iwtd, owtd, ewtd, timeout)
except select.error as e:
                if e.args[0] == errno.EINTR:
# if we loop back we have to subtract the
# amount of time we already waited.
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return([], [], [])
else:
# something else caused the select.error, so
# this actually is an exception.
raise
##############################################################################
# The following methods are no longer supported or allowed.
def setmaxread(self, maxread):
"""This method is no longer supported or allowed. I don't like getters
and setters without a good reason. """
raise ExceptionPexpect('This method is no longer supported ' +
'or allowed. Just assign a value to the ' +
'maxread member variable.')
def setlog(self, fileobject):
"""This method is no longer supported or allowed.
"""
raise ExceptionPexpect('This method is no longer supported ' +
'or allowed. Just assign a value to the logfile ' +
'member variable.')
##############################################################################
# End of spawn class
##############################################################################
class searcher_string(object):
"""This is a plain string search helper for the spawn.expect_any() method.
This helper class is for speed. For more powerful regex patterns
see the helper class, searcher_re.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the matching string itself
"""
def __init__(self, strings):
"""This creates an instance of searcher_string. This argument 'strings'
may be a list; a sequence of strings; or the EOF or TIMEOUT types. """
self.eof_index = -1
self.timeout_index = -1
self._strings = []
for n, s in zip(list(range(len(strings))), strings):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._strings.append((n, s))
def __str__(self):
"""This returns a human-readable string that represents the state of
the object."""
ss = [(ns[0], ' %d: "%s"' % ns) for ns in self._strings]
ss.append((-1, 'searcher_string:'))
if self.eof_index >= 0:
ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append((self.timeout_index,
' %d: TIMEOUT' % self.timeout_index))
ss.sort()
        ss = list(zip(*ss))[1]
return '\n'.join(ss)
def search(self, buffer, freshlen, searchwindowsize=None):
"""This searches 'buffer' for the first occurence of one of the search
strings. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before. It helps to avoid
searching the same, possibly big, buffer over and over again.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, this returns -1. """
absurd_match = len(buffer)
first_match = absurd_match
# 'freshlen' helps a lot here. Further optimizations could
# possibly include:
#
# using something like the Boyer-Moore Fast String Searching
# Algorithm; pre-compiling the search through a list of
# strings into something that can scan the input once to
# search for all N strings; realize that if we search for
# ['bar', 'baz'] and the input is '...foo' we need not bother
# rescanning until we've read three more bytes.
#
# Sadly, I don't know enough about this interesting topic. /grahn
for index, s in self._strings:
if searchwindowsize is None:
# the match, if any, can only be in the fresh data,
# or at the very end of the old data
offset = -(freshlen + len(s))
else:
# better obey searchwindowsize
offset = -searchwindowsize
n = buffer.find(s, offset)
if n >= 0 and n < first_match:
first_match = n
best_index, best_match = index, s
if first_match == absurd_match:
return -1
self.match = best_match
self.start = first_match
self.end = self.start + len(self.match)
return best_index
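# A minimal sketch of how a searcher helper is driven (the buffer content is
# made up; expect_list() normally builds the searcher and hands it to
# expect_loop() for you):
#
#     s = searcher_string(['$ ', EOF])
#     if s.search('user@host:~$ ', freshlen=13) >= 0:
#         print(s.match, s.start, s.end)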
class searcher_re(object):
"""This is regular expression string search helper for the
spawn.expect_any() method. This helper class is for powerful
pattern matching. For speed, see the helper class, searcher_string.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
        match - the re.match object returned by a successful re.search
"""
def __init__(self, patterns):
"""This creates an instance that searches for 'patterns' Where
'patterns' may be a list or other sequence of compiled regular
expressions, or the EOF or TIMEOUT types."""
self.eof_index = -1
self.timeout_index = -1
self._searches = []
for n, s in zip(list(range(len(patterns))), patterns):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._searches.append((n, s))
def __str__(self):
"""This returns a human-readable string that represents the state of
the object."""
ss = [(n, ' %d: re.compile("%s")' %
(n, str(s.pattern))) for n, s in self._searches]
ss.append((-1, 'searcher_re:'))
if self.eof_index >= 0:
ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append((self.timeout_index, ' %d: TIMEOUT' %
self.timeout_index))
ss.sort()
        ss = list(zip(*ss))[1]
return '\n'.join(ss)
def search(self, buffer, freshlen, searchwindowsize=None):
"""This searches 'buffer' for the first occurence of one of the regular
expressions. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, returns -1."""
absurd_match = len(buffer)
first_match = absurd_match
# 'freshlen' doesn't help here -- we cannot predict the
# length of a match, and the re module provides no help.
if searchwindowsize is None:
searchstart = 0
else:
searchstart = max(0, len(buffer) - searchwindowsize)
for index, s in self._searches:
match = s.search(buffer, searchstart)
if match is None:
continue
n = match.start()
if n < first_match:
first_match = n
the_match = match
best_index = index
if first_match == absurd_match:
return -1
self.start = first_match
self.match = the_match
self.end = self.match.end()
return best_index
def which(filename):
"""This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None."""
# Special case where filename contains an explicit path.
if os.path.dirname(filename) != '':
if os.access(filename, os.X_OK):
return filename
if 'PATH' not in os.environ or os.environ['PATH'] == '':
p = os.defpath
else:
p = os.environ['PATH']
    pathlist = p.split(os.pathsep)
for path in pathlist:
ff = os.path.join(path, filename)
if os.access(ff, os.X_OK):
return ff
return None
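# For example (the resolved paths will differ between systems):
#
#     which('ls')            # -> '/bin/ls' on many Linux installs
#     which('no-such-cmd')   # -> None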
def split_command_line(command_line):
"""This splits a command line into a list of arguments. It splits arguments
on spaces, but handles embedded quotes, doublequotes, and escaped
characters. It's impossible to do this with a regular expression, so I
wrote a little state machine to parse the command line. """
arg_list = []
arg = ''
# Constants to name the states we can be in.
state_basic = 0
state_esc = 1
state_singlequote = 2
state_doublequote = 3
# The state when consuming whitespace between commands.
state_whitespace = 4
state = state_basic
for c in command_line:
if state == state_basic or state == state_whitespace:
if c == '\\':
# Escape the next character
state = state_esc
elif c == r"'":
# Handle single quote
state = state_singlequote
elif c == r'"':
# Handle double quote
state = state_doublequote
elif c.isspace():
# Add arg to arg_list if we aren't in the middle of whitespace.
if state == state_whitespace:
# Do nothing.
                    pass
else:
arg_list.append(arg)
arg = ''
state = state_whitespace
else:
arg = arg + c
state = state_basic
elif state == state_esc:
arg = arg + c
state = state_basic
elif state == state_singlequote:
if c == r"'":
state = state_basic
else:
arg = arg + c
elif state == state_doublequote:
if c == r'"':
state = state_basic
else:
arg = arg + c
if arg != '':
arg_list.append(arg)
return arg_list
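# A small illustration (the expected output is shown as a comment; quoting
# corner cases are not exhaustively covered):
#
#     split_command_line('grep -i "two words" file.txt')
#     # -> ['grep', '-i', 'two words', 'file.txt']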
# vi:set sr et ts=4 sw=4 ft=python :
| bsd-3-clause | 2,995,844,815,318,937,000 | 38.709446 | 79 | 0.591993 | false |
kyle0311/oliot-llrp | ThirdParty/libxml2/python/tests/build.py | 87 | 1542 | #!/usr/bin/python -u
import libxml2
import sys
# Memory debug specific
libxml2.debugMemory(1)
doc = libxml2.newDoc("1.0")
comment = doc.newDocComment("This is a generated document")
doc.addChild(comment)
pi = libxml2.newPI("test", "PI content")
doc.addChild(pi)
root = doc.newChild(None, "doc", None)
ns = root.newNs("http://example.com/doc", "my")
root.setNs(ns)
elem = root.newChild(None, "foo", "bar")
elem.setBase("http://example.com/imgs")
elem.setProp("img", "image.gif")
doc.saveFile("tmp.xml")
doc.freeDoc()
doc = libxml2.parseFile("tmp.xml")
comment = doc.children
if comment.type != "comment" or \
comment.content != "This is a generated document":
print "error rereading comment"
sys.exit(1)
pi = comment.next
if pi.type != "pi" or pi.name != "test" or pi.content != "PI content":
print "error rereading PI"
sys.exit(1)
root = pi.next
if root.name != "doc":
print "error rereading root"
sys.exit(1)
ns = root.ns()
if ns.name != "my" or ns.content != "http://example.com/doc":
print "error rereading namespace"
sys.exit(1)
elem = root.children
if elem.name != "foo":
print "error rereading elem"
sys.exit(1)
if elem.getBase(None) != "http://example.com/imgs":
print "error rereading base"
sys.exit(1)
if elem.prop("img") != "image.gif":
print "error rereading property"
sys.exit(1)
doc.freeDoc()
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
| lgpl-2.1 | -4,481,043,176,662,225,000 | 25.135593 | 70 | 0.677691 | false |
harry-7/addons-server | src/olympia/devhub/feeds.py | 2 | 1711 | import uuid
from django import http
from django.contrib.syndication.views import Feed
from django.shortcuts import get_object_or_404
from django.utils.feedgenerator import Rss201rev2Feed as RSS
from django.utils.translation import ugettext
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons.models import Addon
from olympia.amo.templatetags.jinja_helpers import absolutify, url
from olympia.devhub.models import RssKey
from olympia.translations.templatetags.jinja_helpers import clean as clean_html
class ActivityFeedRSS(Feed):
feed_type = RSS
def get_object(self, request):
try:
rsskey = request.GET.get('privaterss')
rsskey = uuid.UUID(rsskey)
except ValueError:
raise http.Http404
key = get_object_or_404(RssKey, key=rsskey.hex)
return key
def items(self, key):
if key.addon:
addons = key.addon
else: # We are showing all the add-ons
addons = Addon.objects.filter(authors=key.user)
return (ActivityLog.objects.for_addons(addons)
.exclude(action__in=amo.LOG_HIDE_DEVELOPER))[:20]
def item_title(self, item):
return clean_html(item.to_string(), True)
def title(self, key):
"""Title for the feed as a whole"""
if key.addon:
return ugettext(u'Recent Changes for %s') % key.addon
else:
return ugettext(u'Recent Changes for My Add-ons')
def link(self):
"""Link for the feed as a whole"""
return absolutify(url('devhub.feed_all'))
def item_link(self):
return self.link()
def item_guid(self):
pass
| bsd-3-clause | 5,334,442,605,886,305,000 | 29.017544 | 79 | 0.656341 | false |
blckshrk/Weboob | modules/parolesmania/test.py | 4 | 1700 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
from weboob.capabilities.base import NotLoaded
class ParolesmaniaTest(BackendTest):
BACKEND = 'parolesmania'
def test_search_song_n_get(self):
l_lyrics = list(self.backend.iter_lyrics('song', 'chien'))
for songlyrics in l_lyrics:
assert songlyrics.id
assert songlyrics.title
assert songlyrics.artist
assert songlyrics.content is NotLoaded
full_lyr = self.backend.get_lyrics(songlyrics.id)
assert full_lyr.id
assert full_lyr.title
assert full_lyr.artist
assert full_lyr.content is not NotLoaded
def test_search_artist(self):
l_lyrics = list(self.backend.iter_lyrics('artist', 'boris'))
for songlyrics in l_lyrics:
assert songlyrics.id
assert songlyrics.title
assert songlyrics.artist
assert songlyrics.content is NotLoaded
| agpl-3.0 | 2,955,897,075,949,236,000 | 35.956522 | 77 | 0.687059 | false |
cwu2011/seaborn | doc/sphinxext/ipython_directive.py | 37 | 37557 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
from __future__ import print_function
from __future__ import unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import tempfile
import ast
from pandas.compat import zip, range, map, lmap, u, cStringIO as StringIO
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
text_type = str
else:
from StringIO import StringIO
text_type = unicode
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
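# Hedged illustration of the token stream block_parser produces for a tiny
# block using the default In [%d]: / Out[%d]: prompts (the text is made up
# for the example):
#
#   part = "# a comment\nIn [1]: x = 1\nOut[1]: 1"
#   # block_parser(part, rgxin, rgxout, fmtin, fmtout) ->
#   #   [(COMMENT, '# a comment'),
#   #    (INPUT, (None, 'x = 1', '')),
#   #    (OUTPUT, '1')]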
class DecodingStringIO(StringIO, object):
def __init__(self,buf='',encodings=('utf8',), *args, **kwds):
super(DecodingStringIO, self).__init__(buf, *args, **kwds)
self.set_encodings(encodings)
def set_encodings(self, encodings):
self.encodings = encodings
def write(self,data):
if isinstance(data, text_type):
return super(DecodingStringIO, self).write(data)
else:
for enc in self.encodings:
try:
data = data.decode(enc)
return super(DecodingStringIO, self).write(data)
except :
pass
            # default to brute utf8 if no encoding succeeded
return super(DecodingStringIO, self).write(data.decode('utf8', 'replace'))
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None,state=None):
self.cout = DecodingStringIO(u'')
if exec_lines is None:
exec_lines = []
self.state = state
# Create config object for IPython
config = Config()
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbededSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
try:
source_raw = splitter.source_raw_reset()[1]
except:
# recent ipython #4504
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
# set the encodings to be used by DecodingStringIO
# to convert the execution output into unicode if
# needed. this attrib is set by IpythonDirective.run()
        # based on the specified block options, defaulting to ['utf8']
self.cout.set_encodings(self.output_encoding)
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
# context information
filename = self.state.document.current_source
lineno = self.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write('-' * 76 + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
TAB = ' ' * 4
if is_doctest and output is not None:
found = output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest, decorator,
image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
decorator, image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings. it is unedited directive content
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag,
'output_encoding': directives.unchanged_required
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines, self.state)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if not self.state.document.current_source in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.shell.IP.prompt_manager.width = 0
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
self.shell.output_encoding = [options.get('output_encoding', 'utf8')]
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines)>2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
       .....: &d=9&e=22&f=2009&g=d&a=1&b=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| bsd-3-clause | 8,006,001,777,438,705,000 | 33.612903 | 124 | 0.569724 | false |
prajjwal1/prajjwal1.github.io | markdown_generator/talks.py | 199 | 4000 |
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
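# A hedged example of one talks.tsv row (the values are invented purely for
# illustration): title "My Example Talk", type "Talk", url_slug
# "example-talk", venue "Example Conference", date "2014-03-01", location
# "Berlin, Germany", talk_url "" and description "A short summary.", all
# separated by tabs on a single line under the header row.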
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
    if len(str(item.date)) > 3:
md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
| mit | -6,477,765,706,653,022,000 | 35.036036 | 420 | 0.638 | false |
mhnatiuk/phd_sociology_of_religion | scrapper/lib/python2.7/site-packages/twisted/mail/bounce.py | 26 | 2292 | # -*- test-case-name: twisted.mail.test.test_bounce -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Support for bounce message generation.
"""
import StringIO
import rfc822
import time
import os
from twisted.mail import smtp
BOUNCE_FORMAT = """\
From: postmaster@%(failedDomain)s
To: %(failedFrom)s
Subject: Returned Mail: see transcript for details
Message-ID: %(messageID)s
Content-Type: multipart/report; report-type=delivery-status;
boundary="%(boundary)s"
--%(boundary)s
%(transcript)s
--%(boundary)s
Content-Type: message/delivery-status
Arrival-Date: %(ctime)s
Final-Recipient: RFC822; %(failedTo)s
"""
def generateBounce(message, failedFrom, failedTo, transcript=''):
"""
Generate a bounce message for an undeliverable email message.
@type message: L{bytes}
@param message: The undeliverable message.
@type failedFrom: L{bytes}
@param failedFrom: The originator of the undeliverable message.
@type failedTo: L{bytes}
@param failedTo: The destination of the undeliverable message.
@type transcript: L{bytes}
@param transcript: An error message to include in the bounce message.
@rtype: 3-L{tuple} of (E{1}) L{bytes}, (E{2}) L{bytes}, (E{3}) L{bytes}
@return: The originator, the destination and the contents of the bounce
message. The destination of the bounce message is the originator of
the undeliverable message.
"""
if not transcript:
transcript = '''\
I'm sorry, the following address has permanent errors: %(failedTo)s.
I've given up, and I will not retry the message again.
''' % vars()
boundary = "%s_%s_%s" % (time.time(), os.getpid(), 'XXXXX')
failedAddress = rfc822.AddressList(failedTo)[0][1]
failedDomain = failedAddress.split('@', 1)[1]
messageID = smtp.messageid(uniq='bounce')
ctime = time.ctime(time.time())
fp = StringIO.StringIO()
fp.write(BOUNCE_FORMAT % vars())
orig = message.tell()
    message.seek(0, 2)  # seek to the end to measure the message size
    sz = message.tell()
    message.seek(orig, 0)  # restore the original position
if sz > 10000:
while 1:
line = message.readline()
if len(line)<=1:
break
fp.write(line)
else:
fp.write(message.read())
return '', failedFrom, fp.getvalue()
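# Hedged usage sketch (addresses and message body are illustrative):
#   import StringIO
#   msg = StringIO.StringIO("From: [email protected]\r\nTo: [email protected]\r\n\r\nhi")
#   origin, dest, body = generateBounce(msg, "[email protected]", "[email protected]")
#   # origin is '', dest is the original sender, body is the full bounce text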
| gpl-2.0 | 4,059,714,975,690,690,000 | 25.651163 | 76 | 0.662304 | false |
taohungyang/cloud-custodian | c7n/filters/missing.py | 1 | 2009 | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .core import Filter
from c7n.exceptions import PolicyValidationError
from c7n.utils import type_schema
from c7n.policy import Policy
class Missing(Filter):
"""Assert the absence of a particular resource.
Intended for use at a logical account/subscription/project level
This works as an effectively an embedded policy thats evaluated.
"""
schema = type_schema(
'missing', policy={'type': 'object'}, required=['policy'])
def __init__(self, data, manager):
super(Missing, self).__init__(data, manager)
self.data['policy']['name'] = self.manager.ctx.policy.name
self.embedded_policy = Policy(self.data['policy'], self.manager.config)
def validate(self):
if 'mode' in self.data['policy']:
raise PolicyValidationError(
"Execution mode can't be specified in "
"embedded policy %s" % self.data)
if 'actions' in self.data['policy']:
raise PolicyValidationError(
"Actions can't be specified in "
"embedded policy %s" % self.data)
self.embedded_policy.validate()
return self
def get_permissions(self):
return self.embedded_policy.get_permissions()
def process(self, resources, event=None):
check_resources = self.embedded_policy.poll()
if not check_resources:
return resources
return []
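# Hedged usage sketch of how this filter is typically embedded in a policy
# file (resource names are illustrative):
#
#   policies:
#     - name: account-without-cloudtrail
#       resource: account
#       filters:
#         - type: missing
#           policy:
#             resource: cloudtrail
#
# Resources pass the filter only when the embedded policy matches nothing.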
| apache-2.0 | 374,986,875,597,269,800 | 34.245614 | 79 | 0.672474 | false |
bjtrost/TCAG-WGS-CNV-workflow | convert_CNV_calls_to_common_format.py | 1 | 1095 | #!/usr/bin/env python
# Convert calls from Canvas, cn.MOPS, CNVnator, ERDS, Genome STRiP, or RDXplorer to a common format
# Usage example:
# convert_CNV_calls_to_common_format.py input_filename name_of_caller
# name_of_caller must be one of "Canvas", "cn.MOPS", "CNVnator", "ERDS", "Genome_STRiP" (note underscore), or "RDXplorer"
import os
import re
import sys
import argparse
####################################
### Parse command-line arguments ###
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("input_filename", type=str)
parser.add_argument("caller", type=str)
args = parser.parse_args()
#####################################
args.caller = args.caller.replace(".", "").replace(" ", "_") # Convert cn.MOPS to cnMOPS
import_str = "import {}".format(args.caller)
exec(import_str)
run_str = "converter={}.{} (\"{}\")".format(args.caller, args.caller, args.input_filename)
print("Chr\tStart\tEnd\tSize\tType\tAlgorithm-specific filtering data\tAlgorithm\tOther information provided by algorithm")
exec(run_str)
converter.run()
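# Note (hedged): the exec calls above can be avoided with importlib, assuming
# each converter module defines a class of the same name that accepts the
# input filename, as the format strings above imply:
#   import importlib
#   mod = importlib.import_module(args.caller)
#   converter = getattr(mod, args.caller)(args.input_filename)
#   converter.run()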
| mit | 62,324,985,761,163,590 | 38.107143 | 123 | 0.690411 | false |
AnasGhrab/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess wether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
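    # Hedged sketch of one way to complete the TASKs above (parameter values
    # are illustrative, not the tutorial's reference solution):
    #   pipeline = Pipeline([
    #       ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
    #       ('clf', LinearSVC(C=1000)),
    #   ])
    #   parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    #   grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    #   grid_search.fit(docs_train, y_train)
    #   for params, mean_score, scores in grid_search.grid_scores_:
    #       print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() * 2, params))
    #   y_predicted = grid_search.predict(docs_test)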
| bsd-3-clause | 7,371,653,782,344,567,000 | 37.190476 | 79 | 0.720698 | false |
kevint2u/audio-collector | server/node_modules/binaryjs/node_modules/binarypack/node_modules/buffercursor/node_modules/verror/node_modules/extsprintf/deps/javascriptlint/javascriptlint/htmlparse.py | 28 | 1348 | # vim: ts=4 sw=4 expandtab
import HTMLParser
import unittest
class _Parser(HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self._tags = []
def handle_starttag(self, tag, attributes):
if tag.lower() == 'script':
attr = dict(attributes)
self._tags.append({
'type': 'start',
'lineno': self.lineno,
'offset': self.offset,
'len': len(self.get_starttag_text()),
'attr': attr
})
def handle_endtag(self, tag):
if tag.lower() == 'script':
self._tags.append({
'type': 'end',
'lineno': self.lineno,
'offset': self.offset,
})
def unknown_decl(self, data):
# Ignore unknown declarations instead of raising an exception.
pass
def gettags(self):
return self._tags
def findscripttags(s):
""" Note that the lineno is 1-based.
"""
parser = _Parser()
parser.feed(s)
parser.close()
return parser.gettags()
class TestHTMLParse(unittest.TestCase):
def testConditionalComments(self):
html = """
<!--[if IE]>This is Internet Explorer.<![endif]-->
<![if !IE]>This is not Internet Explorer<![endif]>
"""
findscripttags(html)
| mit | 812,280,903,600,477,700 | 25.431373 | 70 | 0.538576 | false |
pakpoomton/CellmodellerShadow | CellModeller/Integration/CrankNicIntegrator.py | 1 | 6405 | import numpy
import scipy.integrate.odepack
from scipy.sparse.linalg import LinearOperator
from scipy.ndimage.filters import convolve
from scipy.sparse.linalg import gmres
import pyopencl as cl
import pyopencl.array as cl_array
from pyopencl.array import vec
import math
class CrankNicIntegrator:
def __init__(self, sim, nSignals, nSpecies, maxCells, sig, greensThreshold=1e-12, regul=None):
self.sim = sim
self.dt = self.sim.dt
self.greensThreshold = greensThreshold
self.regul = regul
self.cellStates = sim.cellStates
self.nCells = len(self.cellStates)
self.nSpecies = nSpecies
self.nSignals = nSignals
self.maxCells = maxCells
# The signalling model, must be a grid based thing
self.signalling = sig
self.gridDim = sig.gridDim
self.signalDataLen = self.signalling.dataLen()
self.maxSpecDataLen = self.maxCells*nSpecies
# no need to scale up signal storage
storageLen = self.maxSpecDataLen + self.signalDataLen
# These arrays store the level and rate of signals and species
# in a contiguous form. The first part is the signals,
# then the cell species
# To avoid reallocation, create enough space for maxCells
self.levels = numpy.zeros(storageLen)
self.rates = numpy.zeros(storageLen)
self.makeViews()
# Set initial distribution of signals
if self.signalling.initLevels:
for s in range(self.nSignals):
grid = self.signalLevel.reshape(self.gridDim)
grid[s,:] = self.signalling.initLevels[s]
self.computeGreensFunc()
# Initialise map of cell ids to index in arrays
# set the species for existing states to views of the levels array
cs = self.cellStates
        for (id, c) in cs.items():
            c.species = self.specLevel[c.idx, :]
def makeViews(self):
# Level views (references) to the data
self.signalLevel = self.levels[0:self.signalDataLen]
self.specLevel = self.levels[self.signalDataLen:self.signalDataLen+self.maxSpecDataLen].reshape(self.maxCells,self.nSpecies)
# Rate views (references) to the data
self.signalRate = self.rates[0:self.signalDataLen]
self.specRate = self.rates[self.signalDataLen:self.signalDataLen+self.maxSpecDataLen].reshape(self.maxCells,self.nSpecies)
def CNOperator(self, v):
# Transport operator
self.signalling.transportRates(self.signalRate, v)
# Return (I-hT/2)v, where T is transport operator, h=dt
return v - 0.5*self.dt*self.signalRate
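    # Restating the scheme the surrounding code implements: with transport
    # operator T and timestep h = dt, the Crank-Nicolson update solves
    #     (I - h*T/2) u_{t+1} = (I + h*T/2) u_t + h*f(u_t)
    # CNOperator supplies the left-hand side matrix-vector product for gmres
    # in computeGreensFunc, and step() assembles the right-hand side before
    # convolving with the truncated Green's function.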
def computeGreensFunc(self):
L = LinearOperator((self.signalDataLen,self.signalDataLen), matvec=self.CNOperator, dtype=numpy.float32)
rhs = numpy.zeros(self.gridDim, dtype=numpy.float32)
idx = ( math.floor(self.gridDim[1]*0.5), math.floor(self.gridDim[2]*0.5), math.floor(self.gridDim[3]*0.5) )
for s in xrange(self.nSignals):
rhs[(s,)+idx] = 1.0 # ~delta function in each signal
(self.greensFunc, info) = gmres(L,rhs.reshape(self.signalDataLen)) # Solve impulse response = greens func
# Take only bounding box of region where G > threshold
self.greensFunc.shape = self.gridDim
inds = numpy.transpose(numpy.nonzero(self.greensFunc.reshape(self.gridDim)>self.greensThreshold))
self.greensFunc = self.greensFunc[:, min(inds[:,1]):max(inds[:,1])+1, \
min(inds[:,2]):max(inds[:,2])+1, \
min(inds[:,3]):max(inds[:,3])+1]
print "Truncated Green's function size is " + str(self.greensFunc.shape)
def addCell(self, cellState):
idx = cellState.idx
self.nCells += 1
cellState.species = self.specLevel[idx,:]
def divide(self, pState, d1State, d2State):
# Simulator should have organised indexing:
# Set up slicing of levels for each daughter and copy parent levels
d1idx = d1State.idx
self.nCells += 1
self.specLevel[d1idx,:] = pState.species
d1State.species = self.specLevel[d1idx,:]
d2idx = d2State.idx
self.nCells += 1
self.specLevel[d2idx,:] = pState.species
d2State.species = self.specLevel[d2idx,:]
def setSignalling(self, sig):
self.sig = sig
def setRegulator(self, regul):
self.regul = regul
def dydt(self):
# compute cell species production rates into rates array
# Loop over cells to get rates
states = self.cellStates
for (id,c) in states.items():
idx = c.idx
cellSignals = self.signalling.signals(c, self.signalLevel)
self.specRate[idx,:] = self.regul.speciesRates(c, self.specLevel[idx,:], cellSignals)
cellRates = self.regul.signalRates(c, self.specLevel[idx,:],
cellSignals)
self.signalling.cellProdRates(self.signalRate, c, cellRates)
def step(self, dt):
if dt!=self.dt:
print "I can only integrate at fixed dt!"
return
self.nCells = len(self.cellStates)
# Check we have enough space allocated
try:
s = self.specLevel[self.nCells-1]
except IndexError:
# Could resize here, then would have to rebuild views
print "Number of cells exceeded " \
+ self.__class__.__name__ \
+ "::maxCells (" + self.maxCells + ")"
self.dataLen = self.signalDataLen + self.nCells*self.nSpecies
# Do u += h(T(u_t)/2 + hf(u_t)) where T=transport operator, f(u_t) is
# our regulation function dydt
self.signalling.transportRates(self.signalRate, self.signalLevel)
self.signalRate *= 0.5
self.dydt()
self.rates[0:self.dataLen] *= self.dt
self.levels[0:self.dataLen] += self.rates[0:self.dataLen]
# Convolve (I+hT/2)u_t + f(u_t) with the Greens func to get u_{t+1}
sigLvl = self.signalLevel.reshape(self.gridDim)
        sigLvl[:] = convolve(sigLvl, self.greensFunc, mode='nearest')  # write result back into the shared levels array
# Put the final signal levels into the cell states
states = self.cellStates
for (id,c) in states.items():
if self.signalling:
c.signals = self.signalling.signals(c, self.signalLevel)
| bsd-3-clause | -4,341,253,483,647,895,000 | 40.057692 | 132 | 0.628884 | false |
ykim362/mxnet | example/ssd/config/config.py | 31 | 3112 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from config.utils import DotDict, namedtuple_with_defaults, zip_namedtuple, config_as_dict
RandCropper = namedtuple_with_defaults('RandCropper',
'min_crop_scales, max_crop_scales, \
min_crop_aspect_ratios, max_crop_aspect_ratios, \
min_crop_overlaps, max_crop_overlaps, \
min_crop_sample_coverages, max_crop_sample_coverages, \
min_crop_object_coverages, max_crop_object_coverages, \
max_crop_trials',
[0.0, 1.0,
0.5, 2.0,
0.0, 1.0,
0.0, 1.0,
0.0, 1.0,
25])
RandPadder = namedtuple_with_defaults('RandPadder',
'rand_pad_prob, max_pad_scale, fill_value',
[0.0, 1.0, 127])
ColorJitter = namedtuple_with_defaults('ColorJitter',
'random_hue_prob, max_random_hue, \
random_saturation_prob, max_random_saturation, \
random_illumination_prob, max_random_illumination, \
random_contrast_prob, max_random_contrast',
[0.0, 18,
0.0, 32,
0.0, 32,
0.0, 0.5])
cfg = DotDict()
cfg.ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# training configs
cfg.train = DotDict()
# random cropping samplers
cfg.train.rand_crop_samplers = [
RandCropper(min_crop_scales=0.3, min_crop_overlaps=0.1),
RandCropper(min_crop_scales=0.3, min_crop_overlaps=0.3),
RandCropper(min_crop_scales=0.3, min_crop_overlaps=0.5),
RandCropper(min_crop_scales=0.3, min_crop_overlaps=0.7),
RandCropper(min_crop_scales=0.3, min_crop_overlaps=0.9),]
cfg.train.crop_emit_mode = 'center'
# cfg.train.emit_overlap_thresh = 0.4
# random padding
cfg.train.rand_pad = RandPadder(rand_pad_prob=0.5, max_pad_scale=4.0)
# random color jitter
cfg.train.color_jitter = ColorJitter(random_hue_prob=0.5, random_saturation_prob=0.5,
random_illumination_prob=0.5, random_contrast_prob=0.5)
cfg.train.inter_method = 10 # random interpolation
cfg.train.rand_mirror_prob = 0.5
cfg.train.shuffle = True
cfg.train.seed = 233
cfg.train.preprocess_threads = 48
cfg.train = config_as_dict(cfg.train) # convert to normal dict
# validation
cfg.valid = DotDict()
cfg.valid.rand_crop_samplers = []
cfg.valid.rand_pad = RandPadder()
cfg.valid.color_jitter = ColorJitter()
cfg.valid.rand_mirror_prob = 0
cfg.valid.shuffle = False
cfg.valid.seed = 0
cfg.valid.preprocess_threads = 32
cfg.valid = config_as_dict(cfg.valid) # convert to normal dict
| apache-2.0 | -3,705,892,600,971,301,000 | 35.611765 | 90 | 0.718188 | false |
Just-D/chromium-1 | tools/telemetry/third_party/gsutilz/gslib/gcs_json_media.py | 13 | 21674 | # -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Media helper functions and classes for Google Cloud Storage JSON API."""
from __future__ import absolute_import
import copy
import cStringIO
import httplib
import logging
import socket
import types
import urlparse
from apitools.base.py import exceptions as apitools_exceptions
import httplib2
from httplib2 import parse_uri
from gslib.cloud_api import BadRequestException
from gslib.progress_callback import ProgressCallbackWithBackoff
from gslib.util import SSL_TIMEOUT
from gslib.util import TRANSFER_BUFFER_SIZE
class BytesTransferredContainer(object):
"""Container class for passing number of bytes transferred to lower layers.
For resumed transfers or connection rebuilds in the middle of a transfer, we
need to rebuild the connection class with how much we've transferred so far.
For uploads, we don't know the total number of bytes uploaded until we've
queried the server, but we need to create the connection class to pass to
httplib2 before we can query the server. This container object allows us to
pass a reference into Upload/DownloadCallbackConnection.
"""
def __init__(self):
self.__bytes_transferred = 0
@property
def bytes_transferred(self):
return self.__bytes_transferred
@bytes_transferred.setter
def bytes_transferred(self, value):
self.__bytes_transferred = value
class UploadCallbackConnectionClassFactory(object):
"""Creates a class that can override an httplib2 connection.
This is used to provide progress callbacks and disable dumping the upload
payload during debug statements. It can later be used to provide on-the-fly
hash digestion during upload.
"""
def __init__(self, bytes_uploaded_container,
buffer_size=TRANSFER_BUFFER_SIZE,
total_size=0, progress_callback=None):
self.bytes_uploaded_container = bytes_uploaded_container
self.buffer_size = buffer_size
self.total_size = total_size
self.progress_callback = progress_callback
def GetConnectionClass(self):
"""Returns a connection class that overrides send."""
outer_bytes_uploaded_container = self.bytes_uploaded_container
outer_buffer_size = self.buffer_size
outer_total_size = self.total_size
outer_progress_callback = self.progress_callback
class UploadCallbackConnection(httplib2.HTTPSConnectionWithTimeout):
"""Connection class override for uploads."""
bytes_uploaded_container = outer_bytes_uploaded_container
# After we instantiate this class, apitools will check with the server
# to find out how many bytes remain for a resumable upload. This allows
# us to update our progress once based on that number.
processed_initial_bytes = False
GCS_JSON_BUFFER_SIZE = outer_buffer_size
callback_processor = None
size = outer_total_size
def __init__(self, *args, **kwargs):
kwargs['timeout'] = SSL_TIMEOUT
httplib2.HTTPSConnectionWithTimeout.__init__(self, *args, **kwargs)
def send(self, data):
"""Overrides HTTPConnection.send."""
if not self.processed_initial_bytes:
self.processed_initial_bytes = True
if outer_progress_callback:
self.callback_processor = ProgressCallbackWithBackoff(
outer_total_size, outer_progress_callback)
self.callback_processor.Progress(
self.bytes_uploaded_container.bytes_transferred)
# httplib.HTTPConnection.send accepts either a string or a file-like
# object (anything that implements read()).
if isinstance(data, basestring):
full_buffer = cStringIO.StringIO(data)
else:
full_buffer = data
partial_buffer = full_buffer.read(self.GCS_JSON_BUFFER_SIZE)
while partial_buffer:
httplib2.HTTPSConnectionWithTimeout.send(self, partial_buffer)
send_length = len(partial_buffer)
if self.callback_processor:
# This is the only place where gsutil has control over making a
# callback, but here we can't differentiate the metadata bytes
# (such as headers and OAuth2 refreshes) sent during an upload
# from the actual upload bytes, so we will actually report
# slightly more bytes than desired to the callback handler.
#
# One considered/rejected alternative is to move the callbacks
# into the HashingFileUploadWrapper which only processes reads on
# the bytes. This has the disadvantages of being removed from
# where we actually send the bytes and unnecessarily
# multi-purposing that class.
self.callback_processor.Progress(send_length)
partial_buffer = full_buffer.read(self.GCS_JSON_BUFFER_SIZE)
return UploadCallbackConnection
def WrapUploadHttpRequest(upload_http):
"""Wraps upload_http so we only use our custom connection_type on PUTs.
POSTs are used to refresh oauth tokens, and we don't want to process the
data sent in those requests.
Args:
upload_http: httplib2.Http instance to wrap
"""
request_orig = upload_http.request
def NewRequest(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
if method == 'PUT' or method == 'POST':
override_connection_type = connection_type
else:
override_connection_type = None
return request_orig(uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=override_connection_type)
# Replace the request method with our own closure.
upload_http.request = NewRequest
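# Hedged wiring sketch (variable names are illustrative):
#   bytes_container = BytesTransferredContainer()
#   factory = UploadCallbackConnectionClassFactory(
#       bytes_container, total_size=upload_size, progress_callback=cb)
#   upload_http = httplib2.Http()
#   WrapUploadHttpRequest(upload_http)
#   # requests issued with connection_type=factory.GetConnectionClass() then
#   # report progress; NewRequest drops the override for non-PUT/POST calls.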
class DownloadCallbackConnectionClassFactory(object):
"""Creates a class that can override an httplib2 connection.
This is used to provide progress callbacks, disable dumping the download
payload during debug statements, and provide on-the-fly hash digestion during
download. On-the-fly digestion is particularly important because httplib2
will decompress gzipped content on-the-fly, thus this class provides our
only opportunity to calculate the correct hash for an object that has a
gzip hash in the cloud.
"""
def __init__(self, bytes_downloaded_container,
buffer_size=TRANSFER_BUFFER_SIZE, total_size=0,
progress_callback=None, digesters=None):
self.buffer_size = buffer_size
self.total_size = total_size
self.progress_callback = progress_callback
self.digesters = digesters
self.bytes_downloaded_container = bytes_downloaded_container
def GetConnectionClass(self):
"""Returns a connection class that overrides getresponse."""
class DownloadCallbackConnection(httplib2.HTTPSConnectionWithTimeout):
"""Connection class override for downloads."""
outer_total_size = self.total_size
outer_digesters = self.digesters
outer_progress_callback = self.progress_callback
outer_bytes_downloaded_container = self.bytes_downloaded_container
processed_initial_bytes = False
callback_processor = None
def __init__(self, *args, **kwargs):
kwargs['timeout'] = SSL_TIMEOUT
httplib2.HTTPSConnectionWithTimeout.__init__(self, *args, **kwargs)
def getresponse(self, buffering=False):
"""Wraps an HTTPResponse to perform callbacks and hashing.
In this function, self is a DownloadCallbackConnection.
Args:
buffering: Unused. This function uses a local buffer.
Returns:
HTTPResponse object with wrapped read function.
"""
orig_response = httplib.HTTPConnection.getresponse(self)
if orig_response.status not in (httplib.OK, httplib.PARTIAL_CONTENT):
return orig_response
orig_read_func = orig_response.read
def read(amt=None): # pylint: disable=invalid-name
"""Overrides HTTPConnection.getresponse.read.
This function only supports reads of TRANSFER_BUFFER_SIZE or smaller.
Args:
amt: Integer n where 0 < n <= TRANSFER_BUFFER_SIZE. This is a
keyword argument to match the read function it overrides,
but it is required.
Returns:
Data read from HTTPConnection.
"""
if not amt or amt > TRANSFER_BUFFER_SIZE:
raise BadRequestException(
'Invalid HTTP read size %s during download, expected %s.' %
(amt, TRANSFER_BUFFER_SIZE))
else:
amt = amt or TRANSFER_BUFFER_SIZE
if not self.processed_initial_bytes:
self.processed_initial_bytes = True
if self.outer_progress_callback:
self.callback_processor = ProgressCallbackWithBackoff(
self.outer_total_size, self.outer_progress_callback)
self.callback_processor.Progress(
self.outer_bytes_downloaded_container.bytes_transferred)
data = orig_read_func(amt)
read_length = len(data)
if self.callback_processor:
self.callback_processor.Progress(read_length)
if self.outer_digesters:
for alg in self.outer_digesters:
self.outer_digesters[alg].update(data)
return data
orig_response.read = read
return orig_response
return DownloadCallbackConnection
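# Illustrative sketch, not part of gsutil: building a download connection class
# that digests bytes on the fly. The choice of an MD5 digester here is only an
# example; real callers derive the digesters from the object's stored hashes.
def _ExampleWireDownloadDigest(bytes_downloaded_container, total_size):
  """Builds (connection_class, digesters) for a hashed download (sketch)."""
  import hashlib  # local import; this module does not otherwise need it
  digesters = {'md5': hashlib.md5()}
  factory = DownloadCallbackConnectionClassFactory(
      bytes_downloaded_container, total_size=total_size, digesters=digesters)
  return factory.GetConnectionClass(), digesters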
def WrapDownloadHttpRequest(download_http):
"""Overrides download request functions for an httplib2.Http object.
Args:
    download_http: httplib2.Http object to wrap / override.
Returns:
Wrapped / overridden httplib2.Http object.
"""
# httplib2 has a bug https://code.google.com/p/httplib2/issues/detail?id=305
# where custom connection_type is not respected after redirects. This
# function is copied from httplib2 and overrides the request function so that
# the connection_type is properly passed through.
# pylint: disable=protected-access,g-inconsistent-quotes,unused-variable
# pylint: disable=g-equals-none,g-doc-return-or-yield
# pylint: disable=g-short-docstring-punctuation,g-doc-args
# pylint: disable=too-many-statements
def OverrideRequest(self, conn, host, absolute_uri, request_uri, method,
body, headers, redirections, cachekey):
"""Do the actual request using the connection object.
Also follow one level of redirects if necessary.
"""
auths = ([(auth.depth(request_uri), auth) for auth in self.authorizations
if auth.inscope(host, request_uri)])
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body,
headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method,
body, headers)
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(
host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method,
body, headers)
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"])
or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302,
303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if not response.has_key('location') and response.status != 300:
raise httplib2.RedirectMissingLocation(
"Redirected but the response is missing a Location: header.",
response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
httplib2._updateCache(headers, response, content, self.cache,
cachekey)
if headers.has_key('if-none-match'):
del headers['if-none-match']
if headers.has_key('if-modified-since'):
del headers['if-modified-since']
if ('authorization' in headers and
not self.forward_authorization_headers):
del headers['authorization']
if response.has_key('location'):
location = response['location']
old_response = copy.deepcopy(response)
if not old_response.has_key('content-location'):
old_response['content-location'] = absolute_uri
redirect_method = method
if response.status in [302, 303]:
redirect_method = "GET"
body = None
(response, content) = self.request(
location, redirect_method, body=body, headers=headers,
redirections=redirections-1,
connection_type=conn.__class__)
response.previous = old_response
else:
raise httplib2.RedirectLimit(
"Redirected more times than redirection_limit allows.",
response, content)
elif response.status in [200, 203] and method in ["GET", "HEAD"]:
# Don't cache 206's since we aren't going to handle byte range
# requests
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
httplib2._updateCache(headers, response, content, self.cache,
cachekey)
return (response, content)
# Wrap download_http so we do not use our custom connection_type
# on POSTS, which are used to refresh oauth tokens. We don't want to
# process the data received in those requests.
request_orig = download_http.request
def NewRequest(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
if method == 'POST':
return request_orig(uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=None)
else:
return request_orig(uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
# Replace the request methods with our own closures.
download_http._request = types.MethodType(OverrideRequest, download_http)
download_http.request = NewRequest
return download_http
class HttpWithNoRetries(httplib2.Http):
"""httplib2.Http variant that does not retry.
httplib2 automatically retries requests according to httplib2.RETRIES, but
in certain cases httplib2 ignores the RETRIES value and forces a retry.
Because httplib2 does not handle the case where the underlying request body
is a stream, a retry may cause a non-idempotent write as the stream is
partially consumed and not reset before the retry occurs.
Here we override _conn_request to disable retries unequivocally, so that
uploads may be retried at higher layers that properly handle stream request
bodies.
"""
def _conn_request(self, conn, request_uri, method, body, headers): # pylint: disable=too-many-statements
try:
if hasattr(conn, 'sock') and conn.sock is None:
conn.connect()
conn.request(method, request_uri, body, headers)
except socket.timeout:
raise
except socket.gaierror:
conn.close()
raise httplib2.ServerNotFoundError(
'Unable to find the server at %s' % conn.host)
except httplib2.ssl_SSLError:
conn.close()
raise
except socket.error, e:
err = 0
if hasattr(e, 'args'):
err = getattr(e, 'args')[0]
else:
err = e.errno
if err == httplib2.errno.ECONNREFUSED: # Connection refused
raise
except httplib.HTTPException:
conn.close()
raise
try:
response = conn.getresponse()
except (socket.error, httplib.HTTPException):
conn.close()
raise
else:
content = ''
if method == 'HEAD':
conn.close()
else:
content = response.read()
response = httplib2.Response(response)
if method != 'HEAD':
# pylint: disable=protected-access
content = httplib2._decompressContent(response, content)
return (response, content)
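# Illustrative sketch, not part of gsutil: HttpWithNoRetries is used exactly
# like httplib2.Http; the URI below is a placeholder. With retries disabled
# here, any retry policy has to live in the caller, which can rewind or
# rebuild a stream body before trying again.
def _ExampleNoRetryRequest(uri='https://example.invalid/resource'):
  http = HttpWithNoRetries()
  response, content = http.request(uri, method='GET')
  return response.status, content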
class HttpWithDownloadStream(httplib2.Http):
"""httplib2.Http variant that only pushes bytes through a stream.
httplib2 handles media by storing entire chunks of responses in memory, which
is undesirable particularly when multiple instances are used during
multi-threaded/multi-process copy. This class copies and then overrides some
httplib2 functions to use a streaming copy approach that uses small memory
buffers.
Also disables httplib2 retries (for reasons stated in the HttpWithNoRetries
class doc).
"""
def __init__(self, stream=None, *args, **kwds):
if stream is None:
raise apitools_exceptions.InvalidUserInputError(
'Cannot create HttpWithDownloadStream with no stream')
self._stream = stream
self._logger = logging.getLogger()
super(HttpWithDownloadStream, self).__init__(*args, **kwds)
@property
def stream(self):
return self._stream
def _conn_request(self, conn, request_uri, method, body, headers): # pylint: disable=too-many-statements
try:
if hasattr(conn, 'sock') and conn.sock is None:
conn.connect()
conn.request(method, request_uri, body, headers)
except socket.timeout:
raise
except socket.gaierror:
conn.close()
raise httplib2.ServerNotFoundError(
'Unable to find the server at %s' % conn.host)
except httplib2.ssl_SSLError:
conn.close()
raise
except socket.error, e:
err = 0
if hasattr(e, 'args'):
err = getattr(e, 'args')[0]
else:
err = e.errno
if err == httplib2.errno.ECONNREFUSED: # Connection refused
raise
except httplib.HTTPException:
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
conn.close()
raise
try:
response = conn.getresponse()
except (socket.error, httplib.HTTPException):
conn.close()
raise
else:
content = ''
if method == 'HEAD':
conn.close()
response = httplib2.Response(response)
else:
if response.status in (httplib.OK, httplib.PARTIAL_CONTENT):
content_length = None
if hasattr(response, 'msg'):
content_length = response.getheader('content-length')
http_stream = response
bytes_read = 0
while True:
new_data = http_stream.read(TRANSFER_BUFFER_SIZE)
if new_data:
self.stream.write(new_data)
bytes_read += len(new_data)
else:
break
if (content_length is not None and
long(bytes_read) != long(content_length)):
# The input stream terminated before we were able to read the
# entire contents, possibly due to a network condition. Set
# content-length to indicate how many bytes we actually read.
self._logger.log(
logging.DEBUG, 'Only got %s bytes out of content-length %s '
'for request URI %s. Resetting content-length to match '
'bytes read.', bytes_read, content_length, request_uri)
response.msg['content-length'] = str(bytes_read)
response = httplib2.Response(response)
else:
# We fall back to the current httplib2 behavior if we're
# not processing bytes (eg it's a redirect).
content = response.read()
response = httplib2.Response(response)
# pylint: disable=protected-access
content = httplib2._decompressContent(response, content)
return (response, content)
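# Illustrative sketch, not part of gsutil: HttpWithDownloadStream writes the
# payload of 200/206 responses directly into the supplied stream, so the body
# never sits fully in memory. The URI and path below are placeholders.
def _ExampleStreamedDownload(uri, out_path):
  with open(out_path, 'wb') as fp:
    http = HttpWithDownloadStream(stream=fp)
    response, _ = http.request(uri, method='GET')
  # On success the bytes are already on disk; the returned content is empty.
  return response.status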
| bsd-3-clause | -3,501,952,690,221,323,000 | 39.211503 | 107 | 0.656455 | false |
jaywreddy/django | django/contrib/gis/gdal/base.py | 654 | 1179 | from ctypes import c_void_p
from django.contrib.gis.gdal.error import GDALException
from django.utils import six
class GDALBase(object):
"""
Base object for GDAL objects that has a pointer access property
that controls access to the underlying C pointer.
"""
# Initially the pointer is NULL.
_ptr = None
# Default allowed pointer type.
ptr_type = c_void_p
# Pointer access property.
def _get_ptr(self):
        # Raise an exception if the pointer isn't valid; we don't
        # want to be passing NULL pointers to routines --
        # that's very bad.
if self._ptr:
return self._ptr
else:
raise GDALException('GDAL %s pointer no longer valid.' % self.__class__.__name__)
def _set_ptr(self, ptr):
# Only allow the pointer to be set with pointers of the
# compatible type or None (NULL).
if isinstance(ptr, six.integer_types):
self._ptr = self.ptr_type(ptr)
elif ptr is None or isinstance(ptr, self.ptr_type):
self._ptr = ptr
else:
raise TypeError('Incompatible pointer type')
ptr = property(_get_ptr, _set_ptr)
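# A minimal illustrative subclass (not part of Django): GDALBase only manages
# the `ptr` property, so concrete wrappers typically set `ptr_type` to the
# matching ctypes pointer type and assign `ptr` once the C object exists.
# The class and argument names below are hypothetical.
class ExamplePointerWrapper(GDALBase):
    "Example wrapper around an already-created C pointer."
    def __init__(self, raw_ptr):
        # Assignment goes through _set_ptr, which validates the pointer type.
        self.ptr = raw_ptr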
| bsd-3-clause | 3,358,892,005,888,274,000 | 30.026316 | 93 | 0.619169 | false |
konstruktoid/ansible-upstream | lib/ansible/plugins/lookup/aws_ssm.py | 20 | 10325 | # (c) 2016, Bill Wang <ozbillwang(at)gmail.com>
# (c) 2017, Marat Bakeev <hawara(at)gmail.com>
# (c) 2018, Michael De La Rue <siblemitcom.mddlr(at)spamgourmet.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
lookup: aws_ssm
author:
- Bill Wang <ozbillwang(at)gmail.com>
- Marat Bakeev <hawara(at)gmail.com>
- Michael De La Rue <[email protected]>
version_added: 2.5
requirements:
- boto3
- botocore
short_description: Get the value for a SSM parameter or all parameters under a path.
description:
- Get the value for an Amazon Simple Systems Manager parameter or a hierarchy of parameters.
    The first argument you pass to the lookup can either be a parameter name or a hierarchy of
parameters. Hierarchies start with a forward slash and end with the parameter name. Up to
5 layers may be specified.
- If looking up an explicitly listed parameter by name which does not exist then the lookup will
return a None value which will be interpreted by Jinja2 as an empty string. You can use the
```default``` filter to give a default value in this case but must set the second parameter to
true (see examples below)
  - When looking up a path for parameters under it, a dictionary will be returned for each path.
    If there is no parameter under that path then the return will be successful but the
    dictionary will be empty.
- If the lookup fails due to lack of permissions or due to an AWS client error then the aws_ssm
will generate an error, normally crashing the current ansible task. This is normally the right
thing since ignoring a value that IAM isn't giving access to could cause bigger problems and
    wrong behaviour or loss of data. If you want to continue in this case then you will have to set
    up two ansible tasks, one which sets a variable and ignores failures and one which uses the value
of that variable with a default. See the examples below.
options:
decrypt:
description: A boolean to indicate whether to decrypt the parameter.
default: false
type: boolean
bypath:
description: A boolean to indicate whether the parameter is provided as a hierarchy.
default: false
type: boolean
recursive:
description: A boolean to indicate whether to retrieve all parameters within a hierarchy.
default: false
type: boolean
shortnames:
description: Indicates whether to return the name only without path if using a parameter hierarchy.
default: false
type: boolean
'''
EXAMPLES = '''
# lookup sample:
- name: lookup ssm parameter store in the current region
debug: msg="{{ lookup('aws_ssm', 'Hello' ) }}"
- name: lookup ssm parameter store in nominated region
debug: msg="{{ lookup('aws_ssm', 'Hello', region='us-east-2' ) }}"
- name: lookup ssm parameter store without decrypted
debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=False ) }}"
- name: lookup ssm parameter store in nominated aws profile
debug: msg="{{ lookup('aws_ssm', 'Hello', aws_profile='myprofile' ) }}"
- name: lookup ssm parameter store with all options.
debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=false, region='us-east-2', aws_profile='myprofile') }}"
- name: lookup a key which doesn't exist, returns ""
debug: msg="{{ lookup('aws_ssm', 'NoKey') }}"
- name: lookup a key which doesn't exist, returning a default ('root')
debug: msg="{{ lookup('aws_ssm', 'AdminID') | default('root', true) }}"
- name: lookup a key which doesn't exist failing to store it in a fact
set_fact:
temp_secret: "{{ lookup('aws_ssm', '/NoAccess/hiddensecret') }}"
ignore_errors: true
- name: show fact default to "access failed" if we don't have access
  debug: msg="{{ 'the secret was: ' ~ temp_secret | default('could not access secret') }}"
- name: return a dictionary of ssm parameters from a hierarchy path
debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', bypath=true, recursive=true ) }}"
- name: return a dictionary of ssm parameters from a hierarchy path with shortened names (param instead of /PATH/to/param)
debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', shortnames=true, bypath=true, recursive=true ) }}"
- name: Iterate over a parameter hierarchy
debug: msg='key contains {{item.Name}} with value {{item.Value}} '
loop: '{{ query("aws_ssm", "/TEST/test-list", region="ap-southeast-2", bypath=true) }}'
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.ec2 import HAS_BOTO3, boto3_tag_list_to_ansible_dict
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
try:
from botocore.exceptions import ClientError
import botocore
import boto3
except ImportError:
pass # will be captured by imported HAS_BOTO3
def _boto3_conn(region, credentials):
if 'boto_profile' in credentials:
boto_profile = credentials.pop('boto_profile')
else:
boto_profile = None
try:
connection = boto3.session.Session(profile_name=boto_profile).client('ssm', region, **credentials)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
if boto_profile:
try:
connection = boto3.session.Session(profile_name=boto_profile).client('ssm', region)
# FIXME: we should probably do better passing on of the error information
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
raise AnsibleError("Insufficient credentials found.")
else:
raise AnsibleError("Insufficient credentials found.")
return connection
class LookupModule(LookupBase):
def run(self, terms, variables=None, boto_profile=None, aws_profile=None,
aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None,
bypath=False, shortnames=False, recursive=False, decrypt=True):
'''
:arg terms: a list of lookups to run.
e.g. ['parameter_name', 'parameter_name_too' ]
:kwarg variables: ansible variables active at the time of the lookup
:kwarg aws_secret_key: identity of the AWS key to use
:kwarg aws_access_key: AWS seret key (matching identity)
:kwarg aws_security_token: AWS session key if using STS
:kwarg decrypt: Set to True to get decrypted parameters
:kwarg region: AWS region in which to do the lookup
:kwarg bypath: Set to True to do a lookup of variables under a path
:kwarg recursive: Set to True to recurse below the path (requires bypath=True)
:returns: A list of parameter values or a list of dictionaries if bypath=True.
'''
if not HAS_BOTO3:
raise AnsibleError('botocore and boto3 are required for aws_ssm lookup.')
ret = []
response = {}
ssm_dict = {}
credentials = {}
if aws_profile:
credentials['boto_profile'] = aws_profile
else:
credentials['boto_profile'] = boto_profile
credentials['aws_secret_access_key'] = aws_secret_key
credentials['aws_access_key_id'] = aws_access_key
credentials['aws_session_token'] = aws_security_token
client = _boto3_conn(region, credentials)
ssm_dict['WithDecryption'] = decrypt
# Lookup by path
if bypath:
ssm_dict['Recursive'] = recursive
for term in terms:
ssm_dict["Path"] = term
display.vvv("AWS_ssm path lookup term: %s in region: %s" % (term, region))
try:
response = client.get_parameters_by_path(**ssm_dict)
except ClientError as e:
raise AnsibleError("SSM lookup exception: {0}".format(to_native(e)))
paramlist = list()
paramlist.extend(response['Parameters'])
# Manual pagination, since boto doesn't support it yet for get_parameters_by_path
while 'NextToken' in response:
response = client.get_parameters_by_path(NextToken=response['NextToken'], **ssm_dict)
paramlist.extend(response['Parameters'])
# shorten parameter names. yes, this will return duplicate names with different values.
if shortnames:
for x in paramlist:
x['Name'] = x['Name'][x['Name'].rfind('/') + 1:]
display.vvvv("AWS_ssm path lookup returned: %s" % str(paramlist))
if len(paramlist):
ret.append(boto3_tag_list_to_ansible_dict(paramlist,
tag_name_key_name="Name",
tag_value_key_name="Value"))
else:
ret.append({})
# Lookup by parameter name - always returns a list with one or no entry.
else:
display.vvv("AWS_ssm name lookup term: %s" % terms)
ssm_dict["Names"] = terms
try:
response = client.get_parameters(**ssm_dict)
except ClientError as e:
raise AnsibleError("SSM lookup exception: {0}".format(to_native(e)))
params = boto3_tag_list_to_ansible_dict(response['Parameters'], tag_name_key_name="Name",
tag_value_key_name="Value")
for i in terms:
if i in params:
ret.append(params[i])
elif i in response['InvalidParameters']:
ret.append(None)
else:
raise AnsibleError("Ansible internal error: aws_ssm lookup failed to understand boto3 return value: {0}".format(str(response)))
return ret
display.vvvv("AWS_ssm path lookup returning: %s " % str(ret))
return ret
| gpl-3.0 | -3,372,730,191,875,912,000 | 44.087336 | 147 | 0.642712 | false |
HiroIshikawa/21playground | voting/venv/lib/python3.5/site-packages/pip/_vendor/html5lib/treewalkers/lxmletree.py | 436 | 5992 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from . import _base
from .. import ihatexml
def ensure_str(s):
if s is None:
return None
elif isinstance(s, text_type):
return s
else:
return s.decode("utf-8", "strict")
class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
if et.docinfo.internalDTD:
self.children.append(Doctype(self,
ensure_str(et.docinfo.root_name),
ensure_str(et.docinfo.public_id),
ensure_str(et.docinfo.system_url)))
root = et.getroot()
node = root
while node.getprevious() is not None:
node = node.getprevious()
while node is not None:
self.children.append(node)
node = node.getnext()
self.text = None
self.tail = None
def __getitem__(self, key):
return self.children[key]
def getnext(self):
return None
def __len__(self):
return 1
class Doctype(object):
def __init__(self, root_node, name, public_id, system_id):
self.root_node = root_node
self.name = name
self.public_id = public_id
self.system_id = system_id
self.text = None
self.tail = None
def getnext(self):
return self.root_node.children[1]
class FragmentRoot(Root):
def __init__(self, children):
self.children = [FragmentWrapper(self, child) for child in children]
self.text = self.tail = None
def getnext(self):
return None
class FragmentWrapper(object):
def __init__(self, fragment_root, obj):
self.root_node = fragment_root
self.obj = obj
if hasattr(self.obj, 'text'):
self.text = ensure_str(self.obj.text)
else:
self.text = None
if hasattr(self.obj, 'tail'):
self.tail = ensure_str(self.obj.tail)
else:
self.tail = None
def __getattr__(self, name):
return getattr(self.obj, name)
def getnext(self):
siblings = self.root_node.children
idx = siblings.index(self)
if idx < len(siblings) - 1:
return siblings[idx + 1]
else:
return None
def __getitem__(self, key):
return self.obj[key]
def __bool__(self):
return bool(self.obj)
def getparent(self):
return None
def __str__(self):
return str(self.obj)
def __unicode__(self):
return str(self.obj)
def __len__(self):
return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
def __init__(self, tree):
if hasattr(tree, "getroot"):
tree = Root(tree)
elif isinstance(tree, list):
tree = FragmentRoot(tree)
_base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = ihatexml.InfosetFilter()
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
return _base.TEXT, ensure_str(getattr(node, key))
elif isinstance(node, Root):
return (_base.DOCUMENT,)
elif isinstance(node, Doctype):
return _base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
return _base.TEXT, node.obj
elif node.tag == etree.Comment:
return _base.COMMENT, ensure_str(node.text)
elif node.tag == etree.Entity:
return _base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
else:
# This is assumed to be an ordinary element
match = tag_regexp.match(ensure_str(node.tag))
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = ensure_str(node.tag)
attrs = {}
for name, value in list(node.attrib.items()):
name = ensure_str(name)
value = ensure_str(value)
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), "Text nodes have no children"
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return (node, "tail") if node.tail else node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
return node
# else: fallback to "normal" processing
return node.getparent()
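# Illustrative usage sketch, not part of html5lib: this walker is normally
# obtained through the treewalkers registry rather than instantiated directly.
# The sketch assumes the standalone html5lib package (this vendored copy lives
# under pip._vendor) and that lxml is installed.
def _example_walk(markup="<p>hi</p>"):
    import html5lib
    from html5lib import serializer, treewalkers
    doc = html5lib.parse(markup, treebuilder="lxml")
    walker = treewalkers.getTreeWalker("lxml")
    return serializer.HTMLSerializer().render(walker(doc))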
| mit | -3,224,730,919,782,621,000 | 28.810945 | 89 | 0.539052 | false |
xmission/d-note | venv/lib/python2.7/site-packages/werkzeug/contrib/securecookie.py | 254 | 12206 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.securecookie
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements a cookie that is not alterable from the client
    because it adds a checksum the server checks for. You can use it as a
    session replacement if all you have is a user id or something to mark
a logged in user.
Keep in mind that the data is still readable from the client as a
normal cookie is. However you don't have to store and flush the
sessions you have at the server.
Example usage:
>>> from werkzeug.contrib.securecookie import SecureCookie
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
Dumping into a string so that one can store it in a cookie:
>>> value = x.serialize()
Loading from that string again:
>>> x = SecureCookie.unserialize(value, "deadbeef")
>>> x["baz"]
(1, 2, 3)
If someone modifies the cookie and the checksum is wrong the unserialize
method will fail silently and return a new empty `SecureCookie` object.
Keep in mind that the values will be visible in the cookie so do not
store data in a cookie you don't want the user to see.
Application Integration
=======================
If you are using the werkzeug request objects you could integrate the
secure cookie into your application like this::
from werkzeug.utils import cached_property
from werkzeug.wrappers import BaseRequest
from werkzeug.contrib.securecookie import SecureCookie
# don't use this key but a different one; you could just use
# os.urandom(20) to get something random
SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
class Request(BaseRequest):
@cached_property
def client_session(self):
data = self.cookies.get('session_data')
if not data:
return SecureCookie(secret_key=SECRET_KEY)
return SecureCookie.unserialize(data, SECRET_KEY)
def application(environ, start_response):
            request = Request(environ)
# get a response object here
response = ...
if request.client_session.should_save:
session_data = request.client_session.serialize()
response.set_cookie('session_data', session_data,
httponly=True)
return response(environ, start_response)
A less verbose integration can be achieved by using shorthand methods::
class Request(BaseRequest):
@cached_property
def client_session(self):
return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
def application(environ, start_response):
            request = Request(environ)
# get a response object here
response = ...
request.client_session.save_cookie(response)
return response(environ, start_response)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import pickle
import base64
from hmac import new as hmac
from time import time
from hashlib import sha1 as _default_hash
from werkzeug._compat import iteritems, text_type
from werkzeug.urls import url_quote_plus, url_unquote_plus
from werkzeug._internal import _date_to_unix
from werkzeug.contrib.sessions import ModificationTrackingDict
from werkzeug.security import safe_str_cmp
from werkzeug._compat import to_native
class UnquoteError(Exception):
"""Internal exception used to signal failures on quoting."""
class SecureCookie(ModificationTrackingDict):
"""Represents a secure cookie. You can subclass this class and provide
    an alternative mac method. The important thing is that the mac method
    is a function with an interface similar to hashlib's hash objects. Required
methods are update() and digest().
Example usage:
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
>>> x["foo"]
42
>>> x["baz"]
(1, 2, 3)
>>> x["blafasel"] = 23
>>> x.should_save
True
:param data: the initial data. Either a dict, list of tuples or `None`.
:param secret_key: the secret key. If not set `None` or not specified
it has to be set before :meth:`serialize` is called.
:param new: The initial value of the `new` flag.
"""
#: The hash method to use. This has to be a module with a new function
#: or a function that creates a hashlib object. Such as `hashlib.md5`
#: Subclasses can override this attribute. The default hash is sha1.
#: Make sure to wrap this in staticmethod() if you store an arbitrary
#: function there such as hashlib.sha1 which might be implemented
#: as a function.
hash_method = staticmethod(_default_hash)
    #: the module used for serialization. Unless overridden by subclasses
#: the standard pickle module is used.
serialization_method = pickle
#: if the contents should be base64 quoted. This can be disabled if the
#: serialization process returns cookie safe strings only.
quote_base64 = True
def __init__(self, data=None, secret_key=None, new=True):
ModificationTrackingDict.__init__(self, data or ())
# explicitly convert it into a bytestring because python 2.6
# no longer performs an implicit string conversion on hmac
if secret_key is not None:
secret_key = bytes(secret_key)
self.secret_key = secret_key
self.new = new
def __repr__(self):
return '<%s %s%s>' % (
self.__class__.__name__,
dict.__repr__(self),
self.should_save and '*' or ''
)
@property
def should_save(self):
"""True if the session should be saved. By default this is only true
for :attr:`modified` cookies, not :attr:`new`.
"""
return self.modified
@classmethod
def quote(cls, value):
"""Quote the value for the cookie. This can be any object supported
by :attr:`serialization_method`.
:param value: the value to quote.
"""
if cls.serialization_method is not None:
value = cls.serialization_method.dumps(value)
if cls.quote_base64:
value = b''.join(base64.b64encode(value).splitlines()).strip()
return value
@classmethod
def unquote(cls, value):
"""Unquote the value for the cookie. If unquoting does not work a
:exc:`UnquoteError` is raised.
:param value: the value to unquote.
"""
try:
if cls.quote_base64:
value = base64.b64decode(value)
if cls.serialization_method is not None:
value = cls.serialization_method.loads(value)
return value
except Exception:
# unfortunately pickle and other serialization modules can
            # cause pretty much every error here. if we get one we catch it
# and convert it into an UnquoteError
raise UnquoteError()
def serialize(self, expires=None):
"""Serialize the secure cookie into a string.
If expires is provided, the session will be automatically invalidated
after expiration when you unseralize it. This provides better
protection against session cookie theft.
:param expires: an optional expiration date for the cookie (a
:class:`datetime.datetime` object)
"""
if self.secret_key is None:
raise RuntimeError('no secret key defined')
if expires:
self['_expires'] = _date_to_unix(expires)
result = []
mac = hmac(self.secret_key, None, self.hash_method)
for key, value in sorted(self.items()):
result.append(('%s=%s' % (
url_quote_plus(key),
self.quote(value).decode('ascii')
)).encode('ascii'))
mac.update(b'|' + result[-1])
return b'?'.join([
base64.b64encode(mac.digest()).strip(),
b'&'.join(result)
])
@classmethod
def unserialize(cls, string, secret_key):
"""Load the secure cookie from a serialized string.
:param string: the cookie value to unserialize.
:param secret_key: the secret key used to serialize the cookie.
:return: a new :class:`SecureCookie`.
"""
if isinstance(string, text_type):
string = string.encode('utf-8', 'replace')
if isinstance(secret_key, text_type):
secret_key = secret_key.encode('utf-8', 'replace')
try:
base64_hash, data = string.split(b'?', 1)
except (ValueError, IndexError):
items = ()
else:
items = {}
mac = hmac(secret_key, None, cls.hash_method)
for item in data.split(b'&'):
mac.update(b'|' + item)
if b'=' not in item:
items = None
break
key, value = item.split(b'=', 1)
# try to make the key a string
key = url_unquote_plus(key.decode('ascii'))
try:
key = to_native(key)
except UnicodeError:
pass
items[key] = value
# no parsing error and the mac looks okay, we can now
        # securely unpickle our cookie.
try:
client_hash = base64.b64decode(base64_hash)
except TypeError:
items = client_hash = None
if items is not None and safe_str_cmp(client_hash, mac.digest()):
try:
for key, value in iteritems(items):
items[key] = cls.unquote(value)
except UnquoteError:
items = ()
else:
if '_expires' in items:
if time() > items['_expires']:
items = ()
else:
del items['_expires']
else:
items = ()
return cls(items, secret_key, False)
@classmethod
def load_cookie(cls, request, key='session', secret_key=None):
"""Loads a :class:`SecureCookie` from a cookie in request. If the
cookie is not set, a new :class:`SecureCookie` instanced is
returned.
:param request: a request object that has a `cookies` attribute
which is a dict of all cookie values.
:param key: the name of the cookie.
:param secret_key: the secret key used to unquote the cookie.
Always provide the value even though it has
no default!
"""
data = request.cookies.get(key)
if not data:
return cls(secret_key=secret_key)
return cls.unserialize(data, secret_key)
def save_cookie(self, response, key='session', expires=None,
session_expires=None, max_age=None, path='/', domain=None,
secure=None, httponly=False, force=False):
"""Saves the SecureCookie in a cookie on response object. All
parameters that are not described here are forwarded directly
to :meth:`~BaseResponse.set_cookie`.
:param response: a response object that has a
:meth:`~BaseResponse.set_cookie` method.
:param key: the name of the cookie.
:param session_expires: the expiration date of the secure cookie
stored information. If this is not provided
the cookie `expires` date is used instead.
"""
if force or self.should_save:
data = self.serialize(session_expires or expires)
response.set_cookie(key, data, expires=expires, max_age=max_age,
path=path, domain=domain, secure=secure,
httponly=httponly)
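# A small illustrative round trip, not part of Werkzeug; the key below is a
# throwaway example value. serialize() followed by unserialize() restores the
# dict as long as the secret key matches and the optional expiry has not
# passed; a wrong key silently yields an empty cookie.
def _example_roundtrip():
    cookie = SecureCookie({'user_id': 42}, secret_key=b'not-a-real-key')
    blob = cookie.serialize()
    restored = SecureCookie.unserialize(blob, b'not-a-real-key')
    return restored.get('user_id')  # 42 on success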
| agpl-3.0 | 5,012,040,430,092,661,000 | 36.789474 | 79 | 0.588071 | false |
ros2/demos | demo_nodes_py/setup.py | 1 | 1716 | from setuptools import find_packages
from setuptools import setup
package_name = 'demo_nodes_py'
setup(
name=package_name,
version='0.15.0',
packages=find_packages(exclude=['test']),
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
author='Esteve Fernandez',
author_email='[email protected]',
maintainer='Mikael Arguedas',
maintainer_email='[email protected]',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description=(
'Python nodes which were previously in the ros2/examples repository '
'but are now just used for demo purposes.'
),
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'listener = demo_nodes_py.topics.listener:main',
'talker = demo_nodes_py.topics.talker:main',
'listener_qos = demo_nodes_py.topics.listener_qos:main',
'talker_qos = demo_nodes_py.topics.talker_qos:main',
'listener_serialized = demo_nodes_py.topics.listener_serialized:main',
'add_two_ints_client = demo_nodes_py.services.add_two_ints_client:main',
'add_two_ints_client_async = demo_nodes_py.services.add_two_ints_client_async:main',
'add_two_ints_server = demo_nodes_py.services.add_two_ints_server:main'
],
},
)
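# Note (illustration, not part of the package): each console_scripts entry
# above maps an executable name to a module:function, so after building and
# sourcing the workspace the talker demo can be started with, for example,
# `ros2 run demo_nodes_py talker`.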
| apache-2.0 | 6,057,231,378,288,691,000 | 36.304348 | 96 | 0.625874 | false |
nigeriacoin/p2pool | p2pool/web.py | 47 | 25848 | from __future__ import division
import errno
import json
import os
import sys
import time
import traceback
from twisted.internet import defer, reactor
from twisted.python import log
from twisted.web import resource, static
import p2pool
from bitcoin import data as bitcoin_data
from . import data as p2pool_data, p2p
from util import deferral, deferred_resource, graph, math, memory, pack, variable
def _atomic_read(filename):
try:
with open(filename, 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
try:
with open(filename + '.new', 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
return None
def _atomic_write(filename, data):
with open(filename + '.new', 'wb') as f:
f.write(data)
f.flush()
try:
os.fsync(f.fileno())
except:
pass
try:
os.rename(filename + '.new', filename)
except: # XXX windows can't overwrite
os.remove(filename)
os.rename(filename + '.new', filename)
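# Illustrative sketch, not part of p2pool: the two helpers above implement
# write-to-temp-then-rename, so a reader never observes a half-written file
# even if the process dies mid-write. The filename is a placeholder.
def _example_atomic_roundtrip(path='example_state.json'):
    _atomic_write(path, json.dumps(dict(height=123)))
    data = _atomic_read(path)
    return json.loads(data) if data is not None else None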
def get_web_root(wb, datadir_path, bitcoind_getinfo_var, stop_event=variable.Event()):
node = wb.node
start_time = time.time()
web_root = resource.Resource()
def get_users():
height, last = node.tracker.get_height_and_last(node.best_share_var.value)
weights, total_weight, donation_weight = node.tracker.get_cumulative_weights(node.best_share_var.value, min(height, 720), 65535*2**256)
res = {}
for script in sorted(weights, key=lambda s: weights[s]):
res[bitcoin_data.script2_to_address(script, node.net.PARENT)] = weights[script]/total_weight
return res
def get_current_scaled_txouts(scale, trunc=0):
txouts = node.get_current_txouts()
total = sum(txouts.itervalues())
results = dict((script, value*scale//total) for script, value in txouts.iteritems())
if trunc > 0:
total_random = 0
random_set = set()
for s in sorted(results, key=results.__getitem__):
if results[s] >= trunc:
break
total_random += results[s]
random_set.add(s)
if total_random:
winner = math.weighted_choice((script, results[script]) for script in random_set)
for script in random_set:
del results[script]
results[winner] = total_random
if sum(results.itervalues()) < int(scale):
results[math.weighted_choice(results.iteritems())] += int(scale) - sum(results.itervalues())
return results
def get_patron_sendmany(total=None, trunc='0.01'):
if total is None:
return 'need total argument. go to patron_sendmany/<TOTAL>'
total = int(float(total)*1e8)
trunc = int(float(trunc)*1e8)
return json.dumps(dict(
(bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8)
for script, value in get_current_scaled_txouts(total, trunc).iteritems()
if bitcoin_data.script2_to_address(script, node.net.PARENT) is not None
))
def get_global_stats():
# averaged over last hour
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)
nonstale_hash_rate = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)
stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
diff = bitcoin_data.target_to_difficulty(wb.current_work.value['bits'].target)
return dict(
pool_nonstale_hash_rate=nonstale_hash_rate,
pool_hash_rate=nonstale_hash_rate/(1 - stale_prop),
pool_stale_prop=stale_prop,
min_difficulty=bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target),
network_block_difficulty=diff,
network_hashrate=(diff * 2**32 // node.net.PARENT.BLOCK_PERIOD),
)
def get_local_stats():
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)
global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
my_unstale_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes)
my_orphan_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'orphan')
my_doa_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'doa')
my_share_count = my_unstale_count + my_orphan_count + my_doa_count
my_stale_count = my_orphan_count + my_doa_count
my_stale_prop = my_stale_count/my_share_count if my_share_count != 0 else None
my_work = sum(bitcoin_data.target_to_average_attempts(share.target)
for share in node.tracker.get_chain(node.best_share_var.value, lookbehind - 1)
if share.hash in wb.my_share_hashes)
actual_time = (node.tracker.items[node.best_share_var.value].timestamp -
node.tracker.items[node.tracker.get_nth_parent_hash(node.best_share_var.value, lookbehind - 1)].timestamp)
share_att_s = my_work / actual_time
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
(stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
miner_last_difficulties = {}
for addr in wb.last_work_shares.value:
miner_last_difficulties[addr] = bitcoin_data.target_to_difficulty(wb.last_work_shares.value[addr].target)
return dict(
my_hash_rates_in_last_hour=dict(
note="DEPRECATED",
nonstale=share_att_s,
rewarded=share_att_s/(1 - global_stale_prop),
actual=share_att_s/(1 - my_stale_prop) if my_stale_prop is not None else 0, # 0 because we don't have any shares anyway
),
my_share_counts_in_last_hour=dict(
shares=my_share_count,
unstale_shares=my_unstale_count,
stale_shares=my_stale_count,
orphan_stale_shares=my_orphan_count,
doa_stale_shares=my_doa_count,
),
my_stale_proportions_in_last_hour=dict(
stale=my_stale_prop,
orphan_stale=my_orphan_count/my_share_count if my_share_count != 0 else None,
dead_stale=my_doa_count/my_share_count if my_share_count != 0 else None,
),
miner_hash_rates=miner_hash_rates,
miner_dead_hash_rates=miner_dead_hash_rates,
miner_last_difficulties=miner_last_difficulties,
efficiency_if_miner_perfect=(1 - stale_orphan_shares/shares)/(1 - global_stale_prop) if shares else None, # ignores dead shares because those are miner's fault and indicated by pseudoshare rejection
efficiency=(1 - (stale_orphan_shares+stale_doa_shares)/shares)/(1 - global_stale_prop) if shares else None,
peers=dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
),
shares=dict(
total=shares,
orphan=stale_orphan_shares,
dead=stale_doa_shares,
),
uptime=time.time() - start_time,
attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
attempts_to_block=bitcoin_data.target_to_average_attempts(node.bitcoind_work.value['bits'].target),
block_value=node.bitcoind_work.value['subsidy']*1e-8,
warnings=p2pool_data.get_warnings(node.tracker, node.best_share_var.value, node.net, bitcoind_getinfo_var.value, node.bitcoind_work.value),
donation_proportion=wb.donation_percentage/100,
version=p2pool.__version__,
protocol_version=p2p.Protocol.VERSION,
fee=wb.worker_fee,
)
class WebInterface(deferred_resource.DeferredResource):
def __init__(self, func, mime_type='application/json', args=()):
deferred_resource.DeferredResource.__init__(self)
self.func, self.mime_type, self.args = func, mime_type, args
def getChild(self, child, request):
return WebInterface(self.func, self.mime_type, self.args + (child,))
@defer.inlineCallbacks
def render_GET(self, request):
request.setHeader('Content-Type', self.mime_type)
request.setHeader('Access-Control-Allow-Origin', '*')
res = yield self.func(*self.args)
defer.returnValue(json.dumps(res) if self.mime_type == 'application/json' else res)
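    # Illustration, not in upstream p2pool: getChild above turns every extra
    # URL path segment into a positional argument for func. For example, the
    # 'graph_data' resource registered on new_root near the end of this
    # function serves /web/graph_data/<source>/<view> by calling its lambda
    # with those two path segments as strings.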
def decent_height():
return min(node.tracker.get_height(node.best_share_var.value), 720)
web_root.putChild('rate', WebInterface(lambda: p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, decent_height())/(1-p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, decent_height()))))
web_root.putChild('difficulty', WebInterface(lambda: bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target)))
web_root.putChild('users', WebInterface(get_users))
web_root.putChild('user_stales', WebInterface(lambda: dict((bitcoin_data.pubkey_hash_to_address(ph, node.net.PARENT), prop) for ph, prop in
p2pool_data.get_user_stale_props(node.tracker, node.best_share_var.value, node.tracker.get_height(node.best_share_var.value)).iteritems())))
web_root.putChild('fee', WebInterface(lambda: wb.worker_fee))
web_root.putChild('current_payouts', WebInterface(lambda: dict((bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8) for script, value in node.get_current_txouts().iteritems())))
web_root.putChild('patron_sendmany', WebInterface(get_patron_sendmany, 'text/plain'))
web_root.putChild('global_stats', WebInterface(get_global_stats))
web_root.putChild('local_stats', WebInterface(get_local_stats))
web_root.putChild('peer_addresses', WebInterface(lambda: ' '.join('%s%s' % (peer.transport.getPeer().host, ':'+str(peer.transport.getPeer().port) if peer.transport.getPeer().port != node.net.P2P_PORT else '') for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('peer_txpool_sizes', WebInterface(lambda: dict(('%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port), peer.remembered_txs_size) for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('pings', WebInterface(defer.inlineCallbacks(lambda: defer.returnValue(
dict([(a, (yield b)) for a, b in
[(
'%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port),
defer.inlineCallbacks(lambda peer=peer: defer.returnValue(
min([(yield peer.do_ping().addCallback(lambda x: x/0.001).addErrback(lambda fail: None)) for i in xrange(3)])
))()
) for peer in list(node.p2p_node.peers.itervalues())]
])
))))
web_root.putChild('peer_versions', WebInterface(lambda: dict(('%s:%i' % peer.addr, peer.other_sub_version) for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('payout_addr', WebInterface(lambda: bitcoin_data.pubkey_hash_to_address(wb.my_pubkey_hash, node.net.PARENT)))
web_root.putChild('recent_blocks', WebInterface(lambda: [dict(
ts=s.timestamp,
hash='%064x' % s.header_hash,
number=pack.IntType(24).unpack(s.share_data['coinbase'][1:4]) if len(s.share_data['coinbase']) >= 4 else None,
share='%064x' % s.hash,
) for s in node.tracker.get_chain(node.best_share_var.value, min(node.tracker.get_height(node.best_share_var.value), 24*60*60//node.net.SHARE_PERIOD)) if s.pow_hash <= s.header['bits'].target]))
web_root.putChild('uptime', WebInterface(lambda: time.time() - start_time))
web_root.putChild('stale_rates', WebInterface(lambda: p2pool_data.get_stale_counts(node.tracker, node.best_share_var.value, decent_height(), rates=True)))
new_root = resource.Resource()
web_root.putChild('web', new_root)
stat_log = []
if os.path.exists(os.path.join(datadir_path, 'stats')):
try:
with open(os.path.join(datadir_path, 'stats'), 'rb') as f:
stat_log = json.loads(f.read())
except:
log.err(None, 'Error loading stats:')
def update_stat_log():
while stat_log and stat_log[0]['time'] < time.time() - 24*60*60:
stat_log.pop(0)
lookbehind = 3600//node.net.SHARE_PERIOD
if node.tracker.get_height(node.best_share_var.value) < lookbehind:
return None
global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
(stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
stat_log.append(dict(
time=time.time(),
pool_hash_rate=p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)/(1-global_stale_prop),
pool_stale_prop=global_stale_prop,
local_hash_rates=miner_hash_rates,
local_dead_hash_rates=miner_dead_hash_rates,
shares=shares,
stale_shares=stale_orphan_shares + stale_doa_shares,
stale_shares_breakdown=dict(orphan=stale_orphan_shares, doa=stale_doa_shares),
current_payout=node.get_current_txouts().get(bitcoin_data.pubkey_hash_to_script2(wb.my_pubkey_hash), 0)*1e-8,
peers=dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
),
attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
attempts_to_block=bitcoin_data.target_to_average_attempts(node.bitcoind_work.value['bits'].target),
block_value=node.bitcoind_work.value['subsidy']*1e-8,
))
with open(os.path.join(datadir_path, 'stats'), 'wb') as f:
f.write(json.dumps(stat_log))
x = deferral.RobustLoopingCall(update_stat_log)
x.start(5*60)
stop_event.watch(x.stop)
new_root.putChild('log', WebInterface(lambda: stat_log))
def get_share(share_hash_str):
if int(share_hash_str, 16) not in node.tracker.items:
return None
share = node.tracker.items[int(share_hash_str, 16)]
return dict(
parent='%064x' % share.previous_hash,
children=['%064x' % x for x in sorted(node.tracker.reverse.get(share.hash, set()), key=lambda sh: -len(node.tracker.reverse.get(sh, set())))], # sorted from most children to least children
type_name=type(share).__name__,
local=dict(
verified=share.hash in node.tracker.verified.items,
time_first_seen=start_time if share.time_seen == 0 else share.time_seen,
peer_first_received_from=share.peer_addr,
),
share_data=dict(
timestamp=share.timestamp,
target=share.target,
max_target=share.max_target,
payout_address=bitcoin_data.script2_to_address(share.new_script, node.net.PARENT),
donation=share.share_data['donation']/65535,
stale_info=share.share_data['stale_info'],
nonce=share.share_data['nonce'],
desired_version=share.share_data['desired_version'],
absheight=share.absheight,
abswork=share.abswork,
),
block=dict(
hash='%064x' % share.header_hash,
header=dict(
version=share.header['version'],
previous_block='%064x' % share.header['previous_block'],
merkle_root='%064x' % share.header['merkle_root'],
timestamp=share.header['timestamp'],
target=share.header['bits'].target,
nonce=share.header['nonce'],
),
gentx=dict(
hash='%064x' % share.gentx_hash,
coinbase=share.share_data['coinbase'].ljust(2, '\x00').encode('hex'),
value=share.share_data['subsidy']*1e-8,
last_txout_nonce='%016x' % share.contents['last_txout_nonce'],
),
other_transaction_hashes=['%064x' % x for x in share.get_other_tx_hashes(node.tracker)],
),
)
new_root.putChild('share', WebInterface(lambda share_hash_str: get_share(share_hash_str)))
new_root.putChild('heads', WebInterface(lambda: ['%064x' % x for x in node.tracker.heads]))
new_root.putChild('verified_heads', WebInterface(lambda: ['%064x' % x for x in node.tracker.verified.heads]))
new_root.putChild('tails', WebInterface(lambda: ['%064x' % x for t in node.tracker.tails for x in node.tracker.reverse.get(t, set())]))
new_root.putChild('verified_tails', WebInterface(lambda: ['%064x' % x for t in node.tracker.verified.tails for x in node.tracker.verified.reverse.get(t, set())]))
new_root.putChild('best_share_hash', WebInterface(lambda: '%064x' % node.best_share_var.value))
new_root.putChild('my_share_hashes', WebInterface(lambda: ['%064x' % my_share_hash for my_share_hash in wb.my_share_hashes]))
def get_share_data(share_hash_str):
if int(share_hash_str, 16) not in node.tracker.items:
return ''
share = node.tracker.items[int(share_hash_str, 16)]
return p2pool_data.share_type.pack(share.as_share1a())
new_root.putChild('share_data', WebInterface(lambda share_hash_str: get_share_data(share_hash_str), 'application/octet-stream'))
new_root.putChild('currency_info', WebInterface(lambda: dict(
symbol=node.net.PARENT.SYMBOL,
block_explorer_url_prefix=node.net.PARENT.BLOCK_EXPLORER_URL_PREFIX,
address_explorer_url_prefix=node.net.PARENT.ADDRESS_EXPLORER_URL_PREFIX,
tx_explorer_url_prefix=node.net.PARENT.TX_EXPLORER_URL_PREFIX,
)))
new_root.putChild('version', WebInterface(lambda: p2pool.__version__))
hd_path = os.path.join(datadir_path, 'graph_db')
hd_data = _atomic_read(hd_path)
hd_obj = {}
if hd_data is not None:
try:
hd_obj = json.loads(hd_data)
except Exception:
log.err(None, 'Error reading graph database:')
dataview_descriptions = {
'last_hour': graph.DataViewDescription(150, 60*60),
'last_day': graph.DataViewDescription(300, 60*60*24),
'last_week': graph.DataViewDescription(300, 60*60*24*7),
'last_month': graph.DataViewDescription(300, 60*60*24*30),
'last_year': graph.DataViewDescription(300, 60*60*24*365.25),
}
hd = graph.HistoryDatabase.from_obj({
'local_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_dead_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_share_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False,
multivalues=True, multivalue_undefined_means_0=True,
default_func=graph.make_multivalue_migrator(dict(good='local_share_hash_rate', dead='local_dead_share_hash_rate', orphan='local_orphan_share_hash_rate'),
post_func=lambda bins: [dict((k, (v[0] - (sum(bin.get(rem_k, (0, 0))[0] for rem_k in ['dead', 'orphan']) if k == 'good' else 0), v[1])) for k, v in bin.iteritems()) for bin in bins])),
'pool_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True),
'current_payout': graph.DataStreamDescription(dataview_descriptions),
'current_payouts': graph.DataStreamDescription(dataview_descriptions, multivalues=True),
'peers': graph.DataStreamDescription(dataview_descriptions, multivalues=True, default_func=graph.make_multivalue_migrator(dict(incoming='incoming_peers', outgoing='outgoing_peers'))),
'miner_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'miner_dead_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'desired_version_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True),
'traffic_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'getwork_latency': graph.DataStreamDescription(dataview_descriptions),
'memory_usage': graph.DataStreamDescription(dataview_descriptions),
}, hd_obj)
x = deferral.RobustLoopingCall(lambda: _atomic_write(hd_path, json.dumps(hd.to_obj())))
x.start(100)
stop_event.watch(x.stop)
@wb.pseudoshare_received.watch
def _(work, dead, user):
t = time.time()
hd.datastreams['local_hash_rate'].add_datum(t, work)
if dead:
hd.datastreams['local_dead_hash_rate'].add_datum(t, work)
if user is not None:
hd.datastreams['miner_hash_rates'].add_datum(t, {user: work})
if dead:
hd.datastreams['miner_dead_hash_rates'].add_datum(t, {user: work})
@wb.share_received.watch
def _(work, dead, share_hash):
t = time.time()
if not dead:
hd.datastreams['local_share_hash_rates'].add_datum(t, dict(good=work))
else:
hd.datastreams['local_share_hash_rates'].add_datum(t, dict(dead=work))
def later():
res = node.tracker.is_child_of(share_hash, node.best_share_var.value)
if res is None: res = False # share isn't connected to sharechain? assume orphaned
if res and dead: # share was DOA, but is now in sharechain
# move from dead to good
hd.datastreams['local_share_hash_rates'].add_datum(t, dict(dead=-work, good=work))
elif not res and not dead: # share wasn't DOA, and isn't in sharechain
# move from good to orphan
hd.datastreams['local_share_hash_rates'].add_datum(t, dict(good=-work, orphan=work))
reactor.callLater(200, later)
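        # Added note (not in the original source): the share is counted as good
        # or dead immediately; ~200 seconds later it is re-checked against the
        # share chain and moved to the orphan or good bucket accordingly.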
@node.p2p_node.traffic_happened.watch
def _(name, bytes):
hd.datastreams['traffic_rate'].add_datum(time.time(), {name: bytes})
def add_point():
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.net.CHAIN_LENGTH, 60*60//node.net.SHARE_PERIOD, node.tracker.get_height(node.best_share_var.value))
t = time.time()
pool_rates = p2pool_data.get_stale_counts(node.tracker, node.best_share_var.value, lookbehind, rates=True)
pool_total = sum(pool_rates.itervalues())
hd.datastreams['pool_rates'].add_datum(t, pool_rates)
current_txouts = node.get_current_txouts()
hd.datastreams['current_payout'].add_datum(t, current_txouts.get(bitcoin_data.pubkey_hash_to_script2(wb.my_pubkey_hash), 0)*1e-8)
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
current_txouts_by_address = dict((bitcoin_data.script2_to_address(script, node.net.PARENT), amount) for script, amount in current_txouts.iteritems())
hd.datastreams['current_payouts'].add_datum(t, dict((user, current_txouts_by_address[user]*1e-8) for user in miner_hash_rates if user in current_txouts_by_address))
hd.datastreams['peers'].add_datum(t, dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
))
vs = p2pool_data.get_desired_version_counts(node.tracker, node.best_share_var.value, lookbehind)
vs_total = sum(vs.itervalues())
hd.datastreams['desired_version_rates'].add_datum(t, dict((str(k), v/vs_total*pool_total) for k, v in vs.iteritems()))
try:
hd.datastreams['memory_usage'].add_datum(t, memory.resident())
except:
if p2pool.DEBUG:
traceback.print_exc()
x = deferral.RobustLoopingCall(add_point)
x.start(5)
stop_event.watch(x.stop)
@node.bitcoind_work.changed.watch
def _(new_work):
hd.datastreams['getwork_latency'].add_datum(time.time(), new_work['latency'])
new_root.putChild('graph_data', WebInterface(lambda source, view: hd.datastreams[source].dataviews[view].get_data(time.time())))
web_root.putChild('static', static.File(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'web-static')))
return web_root
| gpl-3.0 | -5,866,161,282,974,693,000 | 55.560175 | 260 | 0.633163 | false |
Noviat/account-financial-reporting-V3-intrastat | account_financial_report_webkit/wizard/balance_common.py | 7 | 18311 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2011 Camptocamp SA (http://www.camptocamp.com)
#
# Author: Guewen Baconnier (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
#   programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
#   guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import time
from lxml import etree
from datetime import datetime
from openerp.osv import fields, orm
from openerp.tools.translate import _
def previous_year_date(date, nb_prev=1):
if not date:
return False
parsed_date = datetime.strptime(date, '%Y-%m-%d')
previous_date = datetime(year=parsed_date.year - nb_prev,
month=parsed_date.month,
day=parsed_date.day)
return previous_date
class AccountBalanceCommonWizard(orm.TransientModel):
"""Will launch trial balance report and pass required args"""
_inherit = "account.common.account.report"
_name = "account.common.balance.report"
_description = "Common Balance Report"
    # if COMPARISON_LEVEL is changed, a module update must be performed
    # in order to (re)create the corresponding fields in the database
COMPARISON_LEVEL = 3
COMPARE_SELECTION = [('filter_no', 'No Comparison'),
('filter_year', 'Fiscal Year'),
('filter_date', 'Date'),
('filter_period', 'Periods'),
('filter_opening', 'Opening Only')]
M2O_DYNAMIC_FIELDS = [f % index for f in ["comp%s_fiscalyear_id",
"comp%s_period_from",
"comp%s_period_to"]
for index in range(COMPARISON_LEVEL)]
SIMPLE_DYNAMIC_FIELDS = [f % index for f in ["comp%s_filter",
"comp%s_date_from",
"comp%s_date_to"]
for index in range(COMPARISON_LEVEL)]
DYNAMIC_FIELDS = M2O_DYNAMIC_FIELDS + SIMPLE_DYNAMIC_FIELDS
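    # Added illustrative note: with COMPARISON_LEVEL = 3 the lists above expand
    # to names such as 'comp0_fiscalyear_id', 'comp0_period_from', 'comp0_filter',
    # ..., 'comp2_date_to'; the matching columns are created dynamically below.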
def _get_account_ids(self, cr, uid, context=None):
res = False
if context.get('active_model', False) == 'account.account' \
and context.get('active_ids', False):
res = context['active_ids']
return res
_columns = {
'account_ids': fields.many2many(
'account.account', string='Filter on accounts',
help="Only selected accounts will be printed. Leave empty to \
print all accounts."),
'filter': fields.selection(
[('filter_no', 'No Filters'),
('filter_date', 'Date'),
('filter_period', 'Periods'),
('filter_opening', 'Opening Only')],
"Filter by",
required=True,
help='Filter by date: no opening balance will be displayed. '
'(opening balance can only be computed based on period to be \
correct).'),
# Set statically because of the impossibility of changing the selection
# field when changing chart_account_id
'account_level': fields.selection(
[('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'),
('6', '6')], string="Account level"),
}
for index in range(COMPARISON_LEVEL):
_columns.update(
{"comp%s_filter" % index:
fields.selection(
COMPARE_SELECTION, string='Compare By', required=True),
"comp%s_fiscalyear_id" % index:
fields.many2one('account.fiscalyear', 'Fiscal Year'),
"comp%s_period_from" % index:
fields.many2one('account.period', 'Start Period'),
"comp%s_period_to" % index:
fields.many2one('account.period', 'End Period'),
"comp%s_date_from" % index:
fields.date("Start Date"),
"comp%s_date_to" % index:
fields.date("End Date")})
_defaults = {
'account_ids': _get_account_ids,
}
def _check_fiscalyear(self, cr, uid, ids, context=None):
obj = self.read(
cr, uid, ids[0], ['fiscalyear_id', 'filter'], context=context)
if not obj['fiscalyear_id'] and obj['filter'] == 'filter_no':
return False
return True
_constraints = [
(_check_fiscalyear,
'When no Fiscal year is selected, you must choose to filter by \
periods or by date.', ['filter']),
]
def default_get(self, cr, uid, fields, context=None):
"""
To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
@return: A dictionary which of fields with values.
"""
res = super(AccountBalanceCommonWizard, self).default_get(
cr, uid, fields, context=context)
for index in range(self.COMPARISON_LEVEL):
field = "comp%s_filter" % (index,)
if not res.get(field, False):
res[field] = 'filter_no'
return res
def fields_view_get(self, cr, uid, view_id=None, view_type='form',
context=None, toolbar=False, submenu=False):
res = super(AccountBalanceCommonWizard, self).fields_view_get(
cr, uid, view_id, view_type, context=context, toolbar=toolbar,
submenu=submenu)
res['fields'].update(self.fields_get(cr, uid,
allfields=self.DYNAMIC_FIELDS,
context=context, write_access=True))
eview = etree.fromstring(res['arch'])
placeholder = eview.xpath("//page[@name='placeholder']")
if placeholder:
placeholder = placeholder[0]
for index in range(self.COMPARISON_LEVEL):
page = etree.Element(
'page',
{'name': "comp%s" % index,
'string': _("Comparison %s") % (index + 1, )})
group = etree.Element('group')
page.append(group)
def modifiers_and_append(elem):
orm.setup_modifiers(elem)
group.append(elem)
modifiers_and_append(etree.Element(
'field',
{'name': "comp%s_filter" % index,
'on_change': "onchange_comp_filter(%(index)s, filter,\
comp%(index)s_filter, fiscalyear_id, date_from, date_to)"
% {'index': index}}))
modifiers_and_append(etree.Element(
'field',
{'name': "comp%s_fiscalyear_id" % index,
'attrs':
"{'required': [('comp%(index)s_filter','in',\
('filter_year','filter_opening'))],"
" 'invisible': [('comp%(index)s_filter','not in',\
('filter_year','filter_opening'))]}" % {'index': index}}))
dates_attrs = "{'required': [('comp%(index)s_filter','=',\
'filter_date')], " \
" 'invisible': [('comp%(index)s_filter','!=',\
'filter_date')]}" % {
'index': index}
modifiers_and_append(etree.Element(
'separator',
{'string': _('Dates'),
'colspan': '4',
'attrs': dates_attrs}))
modifiers_and_append(etree.Element(
'field',
{'name': "comp%s_date_from" % index,
'attrs': dates_attrs}))
modifiers_and_append(etree.Element(
'field',
{'name': "comp%s_date_to" % index,
'attrs': dates_attrs}))
periods_attrs = "{'required': [('comp%(index)s_filter','=',\
'filter_period')]," \
" 'invisible': [('comp%(index)s_filter','!=',\
'filter_period')]}" % {
'index': index}
periods_domain = "[('special', '=', False)]"
modifiers_and_append(etree.Element(
'separator',
{'string': _('Periods'),
'colspan': '4',
'attrs': periods_attrs}))
modifiers_and_append(etree.Element(
'field',
{'name': "comp%s_period_from" % index,
'attrs': periods_attrs,
'domain': periods_domain}))
modifiers_and_append(etree.Element(
'field',
{'name': "comp%s_period_to" % index,
'attrs': periods_attrs,
'domain': periods_domain}))
placeholder.addprevious(page)
placeholder.getparent().remove(placeholder)
res['arch'] = etree.tostring(eview)
return res
def onchange_filter(self, cr, uid, ids, filter='filter_no',
fiscalyear_id=False, context=None):
res = {}
if filter == 'filter_no':
res['value'] = {'period_from': False,
'period_to': False,
'date_from': False,
'date_to': False}
if filter == 'filter_date':
if fiscalyear_id:
fyear = self.pool.get('account.fiscalyear').browse(
cr, uid, fiscalyear_id, context=context)
date_from = fyear.date_start
date_to = fyear.date_stop > time.strftime(
'%Y-%m-%d') and time.strftime('%Y-%m-%d') \
or fyear.date_stop
else:
date_from, date_to = time.strftime(
'%Y-01-01'), time.strftime('%Y-%m-%d')
res['value'] = {'period_from': False, 'period_to':
False, 'date_from': date_from, 'date_to': date_to}
if filter == 'filter_period' and fiscalyear_id:
start_period = end_period = False
cr.execute('''
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f
ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND COALESCE(p.special, FALSE) = FALSE
ORDER BY p.date_start ASC
LIMIT 1) AS period_start
UNION ALL
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f
ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND p.date_start < NOW()
AND COALESCE(p.special, FALSE) = FALSE
ORDER BY p.date_stop DESC
LIMIT 1) AS period_stop''',
(fiscalyear_id, fiscalyear_id))
periods = [i[0] for i in cr.fetchall()]
if periods:
start_period = end_period = periods[0]
if len(periods) > 1:
end_period = periods[1]
res['value'] = {'period_from': start_period, 'period_to':
end_period, 'date_from': False, 'date_to': False}
return res
def onchange_comp_filter(self, cr, uid, ids, index,
main_filter='filter_no', comp_filter='filter_no',
fiscalyear_id=False, start_date=False,
stop_date=False, context=None):
res = {}
fy_obj = self.pool.get('account.fiscalyear')
last_fiscalyear_id = False
if fiscalyear_id:
fiscalyear = fy_obj.browse(cr, uid, fiscalyear_id, context=context)
last_fiscalyear_ids = fy_obj.search(
cr, uid, [('date_stop', '<', fiscalyear.date_start)],
limit=self.COMPARISON_LEVEL, order='date_start desc',
context=context)
if last_fiscalyear_ids:
if len(last_fiscalyear_ids) > index:
# first element for the comparison 1, second element for
# the comparison 2
last_fiscalyear_id = last_fiscalyear_ids[index]
fy_id_field = "comp%s_fiscalyear_id" % (index,)
period_from_field = "comp%s_period_from" % (index,)
period_to_field = "comp%s_period_to" % (index,)
date_from_field = "comp%s_date_from" % (index,)
date_to_field = "comp%s_date_to" % (index,)
if comp_filter == 'filter_no':
res['value'] = {
fy_id_field: False,
period_from_field: False,
period_to_field: False,
date_from_field: False,
date_to_field: False
}
if comp_filter in ('filter_year', 'filter_opening'):
res['value'] = {
fy_id_field: last_fiscalyear_id,
period_from_field: False,
period_to_field: False,
date_from_field: False,
date_to_field: False
}
if comp_filter == 'filter_date':
dates = {}
if main_filter == 'filter_date':
dates = {
'date_start': previous_year_date(start_date, index + 1).
strftime('%Y-%m-%d'),
'date_stop': previous_year_date(stop_date, index + 1).
strftime('%Y-%m-%d'),
}
elif last_fiscalyear_id:
dates = fy_obj.read(
cr, uid, last_fiscalyear_id, ['date_start', 'date_stop'],
context=context)
res['value'] = {fy_id_field: False,
period_from_field: False,
period_to_field: False,
date_from_field: dates.get('date_start', False),
date_to_field: dates.get('date_stop', False)}
if comp_filter == 'filter_period' and last_fiscalyear_id:
start_period = end_period = False
cr.execute('''
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f
ON (p.fiscalyear_id = f.id)
WHERE f.id = %(fiscalyear)s
AND COALESCE(p.special, FALSE) = FALSE
ORDER BY p.date_start ASC
LIMIT 1) AS period_start
UNION ALL
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f
ON (p.fiscalyear_id = f.id)
WHERE f.id = %(fiscalyear)s
AND p.date_start < NOW()
AND COALESCE(p.special, FALSE) = FALSE
ORDER BY p.date_stop DESC
LIMIT 1) AS period_stop''',
{'fiscalyear': last_fiscalyear_id})
periods = [i[0] for i in cr.fetchall()]
if periods and len(periods) > 1:
start_period = end_period = periods[0]
if len(periods) > 1:
end_period = periods[1]
res['value'] = {fy_id_field: False,
period_from_field: start_period,
period_to_field: end_period,
date_from_field: False,
date_to_field: False}
return res
def pre_print_report(self, cr, uid, ids, data, context=None):
data = super(AccountBalanceCommonWizard, self).pre_print_report(
cr, uid, ids, data, context=context)
if context is None:
context = {}
# will be used to attach the report on the main account
data['ids'] = [data['form']['chart_account_id']]
fields_to_read = ['account_ids', 'account_level']
fields_to_read += self.DYNAMIC_FIELDS
vals = self.read(cr, uid, ids, fields_to_read, context=context)[0]
# extract the id from the m2o tuple (id, name)
for field in self.M2O_DYNAMIC_FIELDS:
if isinstance(vals[field], tuple):
vals[field] = vals[field][0]
vals['max_comparison'] = self.COMPARISON_LEVEL
data['form'].update(vals)
return data
| agpl-3.0 | -4,487,714,554,082,221,600 | 43.336562 | 79 | 0.479985 | false |
dgasmith/psi4 | psi4/driver/qcdb/qcformat.py | 3 | 4080 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Parent classes for quantum chemistry program input and output file
formats.
"""
import re
class InputFormat(object):
def __init__(self, mem, mtd, bas, mol, sys, cast):
# total job memory in MB
self.memory = mem
# computational method
self.method = mtd.lower()
# qcdb.Molecule object
self.molecule = mol
# database member index
self.index = sys
# orbital basis set
self.basis = bas.lower()
# do cast up from sto-3g basis?
self.castup = cast
def corresponding_aux_basis(self):
"""For Dunning basis sets, returns strings from which auxiliary
basis sets and heavy-aug can be constructed. Note that
valence/core-valence/etc. is conserved and X-zeta/(X+d)zeta is
not, since this is the usual aux basis pattern.
        *augbasis* is rounded up to the nearest aug-cc-pVXZ
        *rootbasis* is rounded down to the nearest cc-pVXZ
        *auxbasis* is rounded up to the nearest cc-pVXZ or aug-cc-pVXZ
"""
Dunmatch = re.compile(r'^(.*cc-)(pv|pcv|pwcv).*?([dtq56]).*z$').match(self.basis)
if Dunmatch:
rootbas = 'cc-' + Dunmatch.group(2) + Dunmatch.group(3) + 'z'
augbas = 'aug-cc-' + Dunmatch.group(2) + Dunmatch.group(3) + 'z'
if Dunmatch.group(1) == 'cc-':
auxbas = rootbas
else:
auxbas = augbas
else:
rootbas = None
augbas = None
auxbas = None
return [rootbas, augbas, auxbas]
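    # Added illustrative example (not in the original source): for a basis of
    # 'aug-cc-pvtz' this returns ['cc-pvtz', 'aug-cc-pvtz', 'aug-cc-pvtz'].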
class InputFormat2(object):
def __init__(self, mem, mol, mtd, der, opt):
# total job memory in MB
self.memory = mem
# qcdb.Molecule object
self.molecule = mol
# computational method
self.method = mtd.lower()
# computational derivative level
self.dertype = der
# options dictionary
self.options = opt
# orbital basis set
self.basis = opt['GLOBALS']['BASIS']['value'].lower()
# do cast up from sto-3g basis?
self.castup = opt['SCF']['BASIS_GUESS']['value']
def corresponding_aux_basis(self):
"""For Dunning basis sets, returns strings from which auxiliary
basis sets and heavy-aug can be constructed. Note that
valence/core-valence/etc. is conserved and X-zeta/(X+d)zeta is
not, since this is the usual aux basis pattern.
        *augbasis* is rounded up to the nearest aug-cc-pVXZ
        *rootbasis* is rounded down to the nearest cc-pVXZ
        *auxbasis* is rounded up to the nearest cc-pVXZ or aug-cc-pVXZ
"""
Dunmatch = re.compile(r'^(.*cc-)(pv|pcv|pwcv).*?([dtq56]).*z$').match(self.basis)
if Dunmatch:
rootbas = 'cc-' + Dunmatch.group(2) + Dunmatch.group(3) + 'z'
augbas = 'aug-cc-' + Dunmatch.group(2) + Dunmatch.group(3) + 'z'
if Dunmatch.group(1) == 'cc-':
auxbas = rootbas
else:
auxbas = augbas
else:
rootbas = None
augbas = None
auxbas = None
return [rootbas, augbas, auxbas]
| lgpl-3.0 | 6,329,365,719,109,166,000 | 33 | 89 | 0.609804 | false |
shsingh/ansible | lib/ansible/modules/network/fortios/fortios_firewall_interface_policy.py | 7 | 19948 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_interface_policy
short_description: Configure IPv4 interface policies in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and interface_policy category.
      Examples include all parameters and values, which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_interface_policy:
description:
- Configure IPv4 interface policies.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
address_type:
description:
- Policy address type (IPv4 or IPv6).
type: str
choices:
- ipv4
- ipv6
application_list:
description:
- Application list name. Source application.list.name.
type: str
application_list_status:
description:
- Enable/disable application control.
type: str
choices:
- enable
- disable
av_profile:
description:
- Antivirus profile. Source antivirus.profile.name.
type: str
av_profile_status:
description:
- Enable/disable antivirus.
type: str
choices:
- enable
- disable
comments:
description:
- Comments.
type: str
dlp_sensor:
description:
- DLP sensor name. Source dlp.sensor.name.
type: str
dlp_sensor_status:
description:
- Enable/disable DLP.
type: str
choices:
- enable
- disable
dsri:
description:
- Enable/disable DSRI.
type: str
choices:
- enable
- disable
dstaddr:
description:
- Address object to limit traffic monitoring to network traffic sent to the specified address or range.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address.name firewall.addrgrp.name.
required: true
type: str
interface:
description:
- Monitored interface name from available interfaces. Source system.zone.name system.interface.name.
type: str
ips_sensor:
description:
- IPS sensor name. Source ips.sensor.name.
type: str
ips_sensor_status:
description:
- Enable/disable IPS.
type: str
choices:
- enable
- disable
label:
description:
- Label.
type: str
logtraffic:
description:
- "Logging type to be used in this policy (Options: all | utm | disable)."
type: str
choices:
- all
- utm
- disable
policyid:
description:
- Policy ID.
required: true
type: int
scan_botnet_connections:
description:
- Enable/disable scanning for connections to Botnet servers.
type: str
choices:
- disable
- block
- monitor
service:
description:
- Service object from available options.
type: list
suboptions:
name:
description:
- Service name. Source firewall.service.custom.name firewall.service.group.name.
required: true
type: str
spamfilter_profile:
description:
- Antispam profile. Source spamfilter.profile.name.
type: str
spamfilter_profile_status:
description:
- Enable/disable antispam.
type: str
choices:
- enable
- disable
srcaddr:
description:
- Address object to limit traffic monitoring to network traffic sent from the specified address or range.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address.name firewall.addrgrp.name.
required: true
type: str
status:
description:
- Enable/disable this policy.
type: str
choices:
- enable
- disable
webfilter_profile:
description:
- Web filter profile. Source webfilter.profile.name.
type: str
webfilter_profile_status:
description:
- Enable/disable web filtering.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IPv4 interface policies.
fortios_firewall_interface_policy:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_interface_policy:
address_type: "ipv4"
application_list: "<your_own_value> (source application.list.name)"
application_list_status: "enable"
av_profile: "<your_own_value> (source antivirus.profile.name)"
av_profile_status: "enable"
comments: "<your_own_value>"
dlp_sensor: "<your_own_value> (source dlp.sensor.name)"
dlp_sensor_status: "enable"
dsri: "enable"
dstaddr:
-
name: "default_name_13 (source firewall.address.name firewall.addrgrp.name)"
interface: "<your_own_value> (source system.zone.name system.interface.name)"
ips_sensor: "<your_own_value> (source ips.sensor.name)"
ips_sensor_status: "enable"
label: "<your_own_value>"
logtraffic: "all"
policyid: "19"
scan_botnet_connections: "disable"
service:
-
name: "default_name_22 (source firewall.service.custom.name firewall.service.group.name)"
spamfilter_profile: "<your_own_value> (source spamfilter.profile.name)"
spamfilter_profile_status: "enable"
srcaddr:
-
name: "default_name_26 (source firewall.address.name firewall.addrgrp.name)"
status: "enable"
webfilter_profile: "<your_own_value> (source webfilter.profile.name)"
webfilter_profile_status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_interface_policy_data(json):
option_list = ['address_type', 'application_list', 'application_list_status',
'av_profile', 'av_profile_status', 'comments',
'dlp_sensor', 'dlp_sensor_status', 'dsri',
'dstaddr', 'interface', 'ips_sensor',
'ips_sensor_status', 'label', 'logtraffic',
'policyid', 'scan_botnet_connections', 'service',
'spamfilter_profile', 'spamfilter_profile_status', 'srcaddr',
'status', 'webfilter_profile', 'webfilter_profile_status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
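# Added illustrative note (not in the original source):
# underscore_to_hyphen({'av_profile_status': 'enable'}) -> {'av-profile-status': 'enable'};
# keys are rewritten recursively, plain values are left untouched.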
def firewall_interface_policy(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['firewall_interface_policy'] and data['firewall_interface_policy']:
state = data['firewall_interface_policy']['state']
else:
state = True
firewall_interface_policy_data = data['firewall_interface_policy']
filtered_data = underscore_to_hyphen(filter_firewall_interface_policy_data(firewall_interface_policy_data))
if state == "present":
return fos.set('firewall',
'interface-policy',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'interface-policy',
mkey=filtered_data['policyid'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
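# Added note (not in the original source): a DELETE answered with HTTP 404 is
# treated as success, keeping removal of an already-absent policy idempotent.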
def fortios_firewall(data, fos):
if data['firewall_interface_policy']:
resp = firewall_interface_policy(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_interface_policy": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"address_type": {"required": False, "type": "str",
"choices": ["ipv4", "ipv6"]},
"application_list": {"required": False, "type": "str"},
"application_list_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"av_profile": {"required": False, "type": "str"},
"av_profile_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"comments": {"required": False, "type": "str"},
"dlp_sensor": {"required": False, "type": "str"},
"dlp_sensor_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dsri": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dstaddr": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"interface": {"required": False, "type": "str"},
"ips_sensor": {"required": False, "type": "str"},
"ips_sensor_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"label": {"required": False, "type": "str"},
"logtraffic": {"required": False, "type": "str",
"choices": ["all", "utm", "disable"]},
"policyid": {"required": True, "type": "int"},
"scan_botnet_connections": {"required": False, "type": "str",
"choices": ["disable", "block", "monitor"]},
"service": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"spamfilter_profile": {"required": False, "type": "str"},
"spamfilter_profile_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"srcaddr": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"webfilter_profile": {"required": False, "type": "str"},
"webfilter_profile_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,754,358,664,655,787,000 | 34.942342 | 125 | 0.518799 | false |
loopCM/chromium | native_client_sdk/src/build_tools/sdk_tools/command/info.py | 4 | 1216 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import command_common
import logging
import manifest_util
def Info(manifest, bundle_names):
valid_bundles, invalid_bundles = command_common.GetValidBundles(manifest,
bundle_names)
if invalid_bundles:
logging.warn('Unknown bundle(s): %s\n' % (', '.join(invalid_bundles)))
if not valid_bundles:
logging.warn('No valid bundles given.')
return
for bundle_name in valid_bundles:
bundle = manifest.GetBundle(bundle_name)
print bundle.name
for key in sorted(bundle.iterkeys()):
value = bundle[key]
if key == manifest_util.ARCHIVES_KEY:
archive = bundle.GetHostOSArchive()
print ' Archive:'
if archive:
for archive_key in sorted(archive.iterkeys()):
print ' %s: %s' % (archive_key, archive[archive_key])
else:
print ' No archives for this host.'
elif key not in (manifest_util.ARCHIVES_KEY, manifest_util.NAME_KEY):
print ' %s: %s' % (key, value)
print
| bsd-3-clause | 7,244,273,317,173,926,000 | 33.742857 | 79 | 0.623355 | false |
webmull/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py | 127 | 4570 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Starts a local HTTP server which displays layout test failures (given a test
results directory), provides comparisons of expected and actual results (both
images and text) and allows one-click rebaselining of tests."""
from webkitpy.common import system
from webkitpy.common.net.resultsjsonparser import for_each_test, JSONTestResult
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.tool.commands.abstractlocalservercommand import AbstractLocalServerCommand
from webkitpy.tool.servers.rebaselineserver import get_test_baselines, RebaselineHTTPServer, STATE_NEEDS_REBASELINE
class TestConfig(object):
def __init__(self, test_port, layout_tests_directory, results_directory, platforms, filesystem, scm):
self.test_port = test_port
self.layout_tests_directory = layout_tests_directory
self.results_directory = results_directory
self.platforms = platforms
self.filesystem = filesystem
self.scm = scm
class RebaselineServer(AbstractLocalServerCommand):
name = "rebaseline-server"
help_text = __doc__
argument_names = "/path/to/results/directory"
server = RebaselineHTTPServer
def _gather_baselines(self, results_json):
        # The rebaseline server and its associated JavaScript expect the tests subtree to
# be key-value pairs instead of hierarchical.
# FIXME: make the rebaseline server use the hierarchical tree.
new_tests_subtree = {}
def gather_baselines_for_test(test_name, result_dict):
result = JSONTestResult(test_name, result_dict)
if result.did_pass_or_run_as_expected():
return
result_dict['state'] = STATE_NEEDS_REBASELINE
result_dict['baselines'] = get_test_baselines(test_name, self._test_config)
new_tests_subtree[test_name] = result_dict
for_each_test(results_json['tests'], gather_baselines_for_test)
results_json['tests'] = new_tests_subtree
def _prepare_config(self, options, args, tool):
results_directory = args[0]
filesystem = system.filesystem.FileSystem()
scm = self._tool.scm()
print 'Parsing full_results.json...'
results_json_path = filesystem.join(results_directory, 'full_results.json')
results_json = json_results_generator.load_json(filesystem, results_json_path)
port = tool.port_factory.get()
layout_tests_directory = port.layout_tests_dir()
platforms = filesystem.listdir(filesystem.join(layout_tests_directory, 'platform'))
self._test_config = TestConfig(port, layout_tests_directory, results_directory, platforms, filesystem, scm)
print 'Gathering current baselines...'
self._gather_baselines(results_json)
return {
'test_config': self._test_config,
"results_json": results_json,
"platforms_json": {
'platforms': platforms,
'defaultPlatform': port.name(),
},
}
| bsd-3-clause | -8,580,828,464,751,876,000 | 45.632653 | 115 | 0.715755 | false |
philipgian/pre-commit | pre_commit/output.py | 1 | 2217 | from __future__ import unicode_literals
import sys
from pre_commit import color
from pre_commit import five
def get_hook_message(
start,
postfix='',
end_msg=None,
end_len=0,
end_color=None,
use_color=None,
cols=80,
):
"""Prints a message for running a hook.
This currently supports three approaches:
# Print `start` followed by dots, leaving 6 characters at the end
    >>> print(get_hook_message('start', end_len=6))
start...............................................................
# Print `start` followed by dots with the end message colored if coloring
# is specified and a newline afterwards
    >>> print(get_hook_message(
'start',
end_msg='end',
end_color=color.RED,
use_color=True,
        ))
start...................................................................end
# Print `start` followed by dots, followed by the `postfix` message
# uncolored, followed by the `end_msg` colored if specified and a newline
# afterwards
    >>> print(get_hook_message(
'start',
postfix='postfix ',
end_msg='end',
end_color=color.RED,
use_color=True,
        ))
start...........................................................postfix end
"""
if bool(end_msg) == bool(end_len):
raise ValueError('Expected one of (`end_msg`, `end_len`)')
if end_msg is not None and (end_color is None or use_color is None):
raise ValueError(
'`end_color` and `use_color` are required with `end_msg`'
)
if end_len:
return start + '.' * (cols - len(start) - end_len - 1)
else:
return '{}{}{}{}\n'.format(
start,
'.' * (cols - len(start) - len(postfix) - len(end_msg) - 1),
postfix,
color.format_color(end_msg, end_color, use_color),
)
stdout_byte_stream = getattr(sys.stdout, 'buffer', sys.stdout)
def write(s, stream=stdout_byte_stream):
stream.write(five.to_bytes(s))
stream.flush()
def write_line(s=None, stream=stdout_byte_stream):
if s is not None:
stream.write(five.to_bytes(s))
stream.write(b'\n')
stream.flush()
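# Illustrative usage sketch (added; not part of the original module, and it
# assumes the color module exposes a GREEN constant):
#     write(get_hook_message('Check hook', end_len=len('Passed') + 1))
#     write_line(color.format_color('Passed', color.GREEN, True))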
| mit | 5,790,725,397,560,274,000 | 27.423077 | 79 | 0.534957 | false |
Ryanglambert/pybrain | pybrain/structure/connections/fullnotself.py | 31 | 1276 | __author__ = 'Thomas Rueckstiess, [email protected]'
from scipy import reshape, dot, outer, eye
from pybrain.structure.connections import FullConnection
class FullNotSelfConnection(FullConnection):
"""Connection which connects every element from the first module's
output buffer to the second module's input buffer in a matrix multiplicative
manner, EXCEPT the corresponding elements with the same index of each buffer
(the diagonal of the parameter matrix is 0). Asserts that in and out dimensions
are equal. """
#:TODO: the values on the diagonal are counted as parameters but not used! FIX!
def __init__(self, *args, **kwargs):
FullConnection.__init__(self, *args, **kwargs)
assert self.indim == self.outdim, \
"Indim (%i) does not equal outdim (%i)" % (
self.indim, self.outdim)
def _forwardImplementation(self, inbuf, outbuf):
p = reshape(self.params, (self.outdim, self.indim)) * (1-eye(self.outdim))
outbuf += dot(p, inbuf)
def _backwardImplementation(self, outerr, inerr, inbuf):
p = reshape(self.params, (self.outdim, self.indim)) * (1-eye(self.outdim))
inerr += dot(p.T, outerr)
ds = self.derivs
ds += outer(inbuf, outerr).T.flatten()
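    # Added note (not in the original source): both passes mask the weights with
    # (1 - eye(outdim)), i.e. the forward step computes
    # outbuf += dot(W * (1 - I), inbuf) -- a full connection without self-loops.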
| bsd-3-clause | -2,442,303,445,124,675,600 | 43 | 83 | 0.668495 | false |
SNAPPETITE/backend | flask/lib/python2.7/site-packages/tempita/_looper.py | 140 | 4161 | """
Helper for looping over sequences, particular in templates.
Often in a loop in a template it's handy to know what's next up,
previously up, if this is the first or last item in the sequence, etc.
These can be awkward to manage in a normal Python loop, but using the
looper you can get a better sense of the context. Use like::
>>> for loop, item in looper(['a', 'b', 'c']):
... print loop.number, item
... if not loop.last:
... print '---'
1 a
---
2 b
---
3 c
"""
import sys
from tempita.compat3 import basestring_
__all__ = ['looper']
class looper(object):
"""
Helper for looping (particularly in templates)
Use this like::
for loop, item in looper(seq):
if loop.first:
...
"""
def __init__(self, seq):
self.seq = seq
def __iter__(self):
return looper_iter(self.seq)
def __repr__(self):
return '<%s for %r>' % (
self.__class__.__name__, self.seq)
class looper_iter(object):
def __init__(self, seq):
self.seq = list(seq)
self.pos = 0
def __iter__(self):
return self
def __next__(self):
if self.pos >= len(self.seq):
raise StopIteration
result = loop_pos(self.seq, self.pos), self.seq[self.pos]
self.pos += 1
return result
if sys.version < "3":
next = __next__
class loop_pos(object):
def __init__(self, seq, pos):
self.seq = seq
self.pos = pos
def __repr__(self):
return '<loop pos=%r at %r>' % (
self.seq[self.pos], self.pos)
def index(self):
return self.pos
index = property(index)
def number(self):
return self.pos + 1
number = property(number)
def item(self):
return self.seq[self.pos]
item = property(item)
def __next__(self):
try:
return self.seq[self.pos + 1]
except IndexError:
return None
__next__ = property(__next__)
if sys.version < "3":
next = __next__
def previous(self):
if self.pos == 0:
return None
return self.seq[self.pos - 1]
previous = property(previous)
def odd(self):
return not self.pos % 2
odd = property(odd)
def even(self):
return self.pos % 2
even = property(even)
def first(self):
return self.pos == 0
first = property(first)
def last(self):
return self.pos == len(self.seq) - 1
last = property(last)
def length(self):
return len(self.seq)
length = property(length)
def first_group(self, getter=None):
"""
Returns true if this item is the start of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.first:
return True
return self._compare_group(self.item, self.previous, getter)
def last_group(self, getter=None):
"""
Returns true if this item is the end of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.last:
return True
return self._compare_group(self.item, self.__next__, getter)
def _compare_group(self, item, other, getter):
if getter is None:
return item != other
elif (isinstance(getter, basestring_)
and getter.startswith('.')):
getter = getter[1:]
if getter.endswith('()'):
getter = getter[:-2]
return getattr(item, getter)() != getattr(other, getter)()
else:
return getattr(item, getter) != getattr(other, getter)
elif hasattr(getter, '__call__'):
return getter(item) != getter(other)
else:
return item[getter] != other[getter]
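# Illustrative grouping usage (added; not part of the original module):
#     for loop, item in looper(sorted(rows, key=lambda r: r['kind'])):
#         if loop.first_group('kind'):
#             print(item['kind'])
#         print(item['name'])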
| mit | -8,545,492,950,607,345,000 | 24.527607 | 74 | 0.546984 | false |
alexteodor/odoo | addons/sales_team/__openerp__.py | 51 | 1799 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sale Team',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'Sales Management',
'summary': 'Sales Team',
'description': """
Using this application you can manage Sales Team with CRM and/or Sales
=======================================================================
""",
'website': 'https://www.odoo.com/page/crm',
'depends': ['base','mail','web_kanban_sparkline',],
'data': ['security/sales_team_security.xml',
'security/ir.model.access.csv',
'res_config_view.xml',
'sales_team_data.xml',
'sales_team.xml',],
'demo': ['sales_team_demo.xml'],
'css': ['static/src/css/sales_team.css'],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,152,143,440,756,593,700 | 40.837209 | 78 | 0.568093 | false |
sxjscience/tvm | tests/python/unittest/test_te_schedule.py | 4 | 11434 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
import pickle as pkl
def test_schedule_create():
m = te.size_var("m")
n = te.size_var("n")
l = te.size_var("l")
A = te.placeholder((m, l), name="A")
B = te.placeholder((n, l), name="B")
AA = te.compute((m, l), lambda i, j: A[i, j])
T = te.compute((m, n, l), lambda i, j, k: AA(i, k) * B(j, k))
s = te.create_schedule(T.op)
s[AA].set_scope("shared")
xo, xi = s[T].split(T.op.axis[0], factor=10)
xi1, xi2 = s[T].split(xi, factor=2)
s[AA].compute_at(s[T], xi1)
xo, xi = s[AA].split(AA.op.axis[0], factor=10)
s[T].reorder(xi2, xi1)
assert T.op.axis[1] in s[T].leaf_iter_vars
# save load json
json_str = tvm.ir.save_json(s)
s_loaded = tvm.ir.load_json(json_str)
assert isinstance(s_loaded, tvm.te.schedule.Schedule)
assert str(s_loaded.outputs[0].body) == str(s.outputs[0].body)
# pickle unpickle
dump = pkl.dumps(s)
s_loaded = pkl.loads(dump)
assert isinstance(s_loaded, tvm.te.schedule.Schedule)
assert str(s_loaded.outputs[0].body) == str(s.outputs[0].body)
def test_reorder():
m = te.size_var("m")
A = te.placeholder((m,), name="A")
T = te.compute(m, lambda i: A[i + 1])
s = te.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
xi1, xi2 = s[T].split(xi, factor=2)
order = (xi2, xi1, xo)
assert tuple(s[T].leaf_iter_vars) != order
s[T].reorder(*order)
assert tuple(s[T].leaf_iter_vars) == order
try:
# pass duplicate IterVar
# must raise an error
s[T].reorder(xi2, xi1, xi2)
assert False
except tvm.error.TVMError:
pass
def test_split():
m = te.size_var("m")
A = te.placeholder((m,), name="A")
T = te.compute((m,), lambda i: A[i])
s = te.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
assert tuple(s[T].leaf_iter_vars) == (xo, xi)
def test_tile():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
assert tuple(s[T].leaf_iter_vars) == (xo, yo, xi, yi)
def test_fuse():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
fused = s[T].fuse(xo, yo)
assert any(isinstance(x, tvm.te.schedule.Fuse) for x in s[T].relations)
assert tuple(s[T].leaf_iter_vars) == (fused, xi, yi)
def test_fuse_with_split():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
fused = s[T].fuse(xi, y)
assert any(isinstance(x, tvm.te.schedule.Fuse) for x in s[T].relations)
assert tuple(s[T].leaf_iter_vars) == (xo, fused)
def test_fuse_with_out_of_order_axis():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
with pytest.raises(RuntimeError):
fused = s[T].fuse(xo, y) # should throw here
def test_fuse_with_out_of_order_axis_with_reorder():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
s[T].reorder(y, xo, xi)
fused = s[T].fuse(y, xo) # should be ok
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
s[T].reorder(y, xo, xi)
with pytest.raises(RuntimeError):
fused = s[T].fuse(y, xi) # should throw here
def test_singleton():
A = te.placeholder((), name="A")
T = te.compute((), lambda: A() + 1)
s = te.create_schedule(T.op)
fused = s[T].fuse()
assert any(isinstance(x, tvm.te.schedule.Singleton) for x in s[T].relations)
assert tuple(s[T].leaf_iter_vars) == (fused,)
dump = pkl.dumps(s)
s_loaded = pkl.loads(dump)
assert isinstance(s_loaded, tvm.te.schedule.Schedule)
def test_vectorize():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
s[T].vectorize(yi)
s[T].unroll(xi)
UNROLL = tvm.te.schedule.IterVar.Unrolled
VECTORIZE = tvm.te.schedule.IterVar.Vectorized
assert s[T].iter_var_attrs[xi].iter_type == UNROLL
assert s[T].iter_var_attrs[yi].iter_type == VECTORIZE
def test_vectorize_commreduce():
V = te.placeholder((128,), name="V")
ax = te.reduce_axis((0, 128), name="ax")
O = te.compute((1,), lambda _: te.sum(V[ax], axis=[ax]))
s = te.create_schedule(O.op)
with pytest.raises(RuntimeError):
s[O].vectorize(ax) # should throw here
def test_pragma():
m = 100
A = te.placeholder((m,), name="A")
T = te.compute((m,), lambda i: A[i])
s = te.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
s[T].pragma(xo, "pragma1")
s[T].pragma(xi, "vectorize")
VECTORIZE = tvm.te.schedule.IterVar.Vectorized
assert s[T].iter_var_attrs[xo].pragma_keys[0].value == "pragma1"
assert s[T].iter_var_attrs[xi].iter_type == VECTORIZE
def test_rfactor():
n = te.size_var("n")
k1 = te.reduce_axis((0, n), name="k1")
k2 = te.reduce_axis((0, n), name="k2")
A = te.placeholder((n, n, n), name="A")
B = te.compute((n,), lambda i: te.sum(A[i, k1, k2], axis=[k1, k2]))
# normal schedule
s = te.create_schedule(B.op)
BF = s.rfactor(B, k1)
assert tuple(BF.shape) == (n, n)
assert set(BF.op.body[0].axis) == set([k2])
assert s[B].op.body[0].axis[0].dom.extent == n
assert len(s[B].all_iter_vars) == 2
    # schedule with split
s = te.create_schedule(B.op)
ko, ki = s[B].split(k1, factor=4)
xo, xi = s[B].split(B.op.axis[0], factor=8)
BF = s.rfactor(B, ki)
assert BF.shape[0].value == 4
assert BF.shape[1] == n
assert BF.op.body[0].axis[0] == k2
assert BF.op.body[0].axis[1].var == ko.var
assert s[B].op.body[0].axis[0].dom.extent.value == 4
# schedule with factor_axis
s = te.create_schedule(B.op)
ko, ki = s[B].split(k1, factor=4)
xo, xi = s[B].split(B.op.axis[0], factor=8)
BF = s.rfactor(B, ki, 1)
assert n == BF.shape[0]
assert BF.shape[1].value == 4
assert BF.op.body[0].axis[0] == k2
assert BF.op.body[0].axis[1].var == ko.var
assert s[B].op.body[0].axis[0].dom.extent.value == 4
def test_tensor_intrin():
n = 16
x = te.placeholder((n,), name="x")
y = te.placeholder((n,), name="y")
z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
def intrin_func(ins, outs):
assert isinstance(ins[0], tvm.te.schedule.Buffer)
assert ins[0].shape[0].value == n
return tvm.tir.call_packed("vadd", ins[0].data, outs[0].data, ins[0].shape[0])
intrin = te.decl_tensor_intrin(z.op, intrin_func)
assert intrin.op == z.op
assert intrin.reduce_init is None
assert tuple(intrin.inputs) == tuple(z.op.input_tensors)
assert intrin.buffers[0].shape[0].value == n
m = 32
x = te.placeholder((m,), name="x")
y = te.placeholder((m,), name="y")
z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
s = te.create_schedule(z.op)
xo, xi = s[z].split(z.op.axis[0], factor=n)
s[z].tensorize(xi, intrin)
assert s[z].iter_var_attrs[xi].tensor_intrin == intrin
assert s[z].iter_var_attrs[xi].iter_type == tvm.te.schedule.IterVar.Tensorized
def test_tensor_intrin_scalar_params():
n = te.size_var("n")
x = te.placeholder((n,), name="x")
v = te.size_var("v")
w = te.size_var("w")
z = te.compute((n,), lambda i: x[i] * v + w, name="z")
def intrin_func(ins, outs, sp):
assert isinstance(ins[0], tvm.te.schedule.Buffer)
assert ins[0].shape[0] == n
assert sp[0] == v
assert sp[1] == w
return tvm.tir.call_packed("hw_func", ins[0].data, outs[0].data, sp[0], sp[1])
intrin = te.decl_tensor_intrin(
z.op, intrin_func, scalar_params=[v, w], default_buffer_params={"offset_factor": 1}
)
assert intrin.op == z.op
assert intrin.reduce_init is None
assert tuple(intrin.inputs) == tuple(z.op.input_tensors)
assert intrin.buffers[0].shape[0] == n
assert tuple(intrin.scalar_params) == tuple((v, w))
A = te.placeholder((10, 10), name="A")
# Pass scalar inputs to the TensorIntrin, interleaved with tensor inputs
C = te.compute((10, 10), lambda i, j: intrin(i * i, A[i, j], i + j), name="C")
s = te.create_schedule(C.op)
stmt = tvm.lower(s, [A, C])["main"].body
assert isinstance(stmt.body.body, tvm.tir.Evaluate)
assert len(stmt.body.body.value.args) == 5
assert str(stmt.body.body.value.args[3]) == "(i: int32*i)"
assert str(stmt.body.body.value.args[4]) == "(i: int32 + j: int32)"
def test_legalize_invalid_attach():
A = te.compute((10, 10), lambda i, j: 1.0, name="A")
B = te.compute((10, 10), lambda i, j: A[i][j], name="B")
# Case 1: Split an axis which is the target of a compute_at
s = te.create_schedule([B.op])
s[A].compute_at(s[B], B.op.axis[1])
s[B].split(B.op.axis[1], 2)
stmt = tvm.lower(s, [A, B], simple_mode=True)["main"].body
assert isinstance(stmt.body.body, tvm.tir.stmt.For)
# Case 2: Fuse an axis which is the target of a compute_at
s = te.create_schedule([B.op])
s[A].compute_at(s[B], B.op.axis[1])
s[B].fuse(B.op.axis[0], B.op.axis[1])
stmt = tvm.lower(s, [A, B], simple_mode=True)["main"].body
assert isinstance(stmt, tvm.tir.stmt.For)
if __name__ == "__main__":
test_singleton()
test_pragma()
test_tensor_intrin()
test_tensor_intrin_scalar_params()
test_rfactor()
test_schedule_create()
test_reorder()
test_tile()
test_split()
test_fuse()
test_fuse_with_split()
test_fuse_with_out_of_order_axis()
test_fuse_with_out_of_order_axis_with_reorder()
test_vectorize()
test_vectorize_commreduce()
test_legalize_invalid_attach()
| apache-2.0 | -7,170,016,014,588,445,000 | 32.629412 | 91 | 0.595767 | false |
awkspace/ansible | lib/ansible/plugins/filter/json_query.py | 197 | 1857 | # (c) 2015, Filipe Niero Felisbino <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError, AnsibleFilterError
try:
import jmespath
HAS_LIB = True
except ImportError:
HAS_LIB = False
def json_query(data, expr):
'''Query data using jmespath query language ( http://jmespath.org ). Example:
    - debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
'''
if not HAS_LIB:
raise AnsibleError('You need to install "jmespath" prior to running '
'json_query filter')
try:
return jmespath.search(expr, data)
except jmespath.exceptions.JMESPathError as e:
raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
except Exception as e:
# For older jmespath, we can get ValueError and TypeError without much info.
raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
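# Illustrative sketch (not part of the original plugin): the filter is a thin
# wrapper around jmespath.search, so on the Python side it behaves like:
#
#     jmespath.search('instances[*].id', {'instances': [{'id': 1}, {'id': 2}]})
#     # -> [1, 2]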
class FilterModule(object):
''' Query filter '''
def filters(self):
return {
'json_query': json_query
}
| gpl-3.0 | -3,595,491,509,937,588,000 | 34.037736 | 101 | 0.693592 | false |
AkademieOlympia/sympy | sympy/physics/unitsystems/prefixes.py | 91 | 4190 | # -*- coding: utf-8 -*-
"""
Module defining the unit prefix class and some constants.
Constant dicts for SI and binary prefixes are defined as PREFIXES and
BIN_PREFIXES.
"""
from sympy import sympify
class Prefix(object):
"""
This class represent prefixes, with their name, symbol and factor.
Prefixes are used to create derived units from a given unit. They should
always be encapsulated into units.
The factor is constructed from a base (default is 10) to some power, and
it gives the total multiple or fraction. For example the kilometer km
is constructed from the meter (factor 1) and the kilo (10 to the power 3,
i.e. 1000). The base can be changed to allow e.g. binary prefixes.
A prefix multiplied by something will always return the product of this
other object times the factor, except if the other object:
- is a prefix and they can be combined into a new prefix;
- defines multiplication with prefixes (which is the case for the Unit
class).
"""
def __init__(self, name, abbrev, exponent, base=sympify(10)):
self.name = name
self.abbrev = abbrev
self.factor = base**exponent
def __str__(self):
return self.name
__repr__ = __str__
def __mul__(self, other):
fact = self.factor * other.factor
if fact == 1:
return 1
elif isinstance(other, Prefix):
# simplify prefix
for p in PREFIXES:
if PREFIXES[p].factor == fact:
return PREFIXES[p]
return fact
return self.factor * other
def __div__(self, other):
fact = self.factor / other.factor
if fact == 1:
return 1
elif isinstance(other, Prefix):
for p in PREFIXES:
if PREFIXES[p].factor == fact:
return PREFIXES[p]
return fact
return self.factor / other
__truediv__ = __div__
def __rdiv__(self, other):
if other == 1:
for p in PREFIXES:
if PREFIXES[p].factor == 1 / self.factor:
return PREFIXES[p]
return other / self.factor
__rtruediv__ = __rdiv__
def prefix_unit(unit, prefixes):
"""
Return a list of all units formed by unit and the given prefixes.
You can use the predefined PREFIXES or BIN_PREFIXES, but you can also
pass as argument a subdict of them if you don't want all prefixed units.
>>> from sympy.physics.unitsystems.prefixes import (PREFIXES,
... prefix_unit)
>>> from sympy.physics.unitsystems.systems import mks
>>> m = mks["m"]
>>> pref = {"m": PREFIXES["m"], "c": PREFIXES["c"], "d": PREFIXES["d"]}
>>> prefix_unit(m, pref) #doctest: +SKIP
[cm, dm, mm]
"""
from sympy.physics.unitsystems.units import Unit
prefixed_units = []
for prefix in prefixes:
prefixed_units.append(Unit(unit, abbrev=unit.abbrev,
prefix=prefixes[prefix]))
return prefixed_units
# http://physics.nist.gov/cuu/Units/prefixes.html
PREFIXES = {
'Y': Prefix('yotta', 'Y', 24),
'Z': Prefix('zetta', 'Z', 21),
'E': Prefix('exa', 'E', 18),
'P': Prefix('peta', 'P', 15),
'T': Prefix('tera', 'T', 12),
'G': Prefix('giga', 'G', 9),
'M': Prefix('mega', 'M', 6),
'k': Prefix('kilo', 'k', 3),
'h': Prefix('hecto', 'h', 2),
'da': Prefix('deca', 'da', 1),
'd': Prefix('deci', 'd', -1),
'c': Prefix('centi', 'c', -2),
'm': Prefix('milli', 'm', -3),
'µ': Prefix('micro', 'µ', -6),
'n': Prefix('nano', 'n', -9),
'p': Prefix('pico', 'p', -12),
'f': Prefix('femto', 'f', -15),
'a': Prefix('atto', 'a', -18),
'z': Prefix('zepto', 'z', -21),
'y': Prefix('yocto', 'y', -24)
}
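# Illustrative examples (not part of the original module), using the PREFIXES
# dict defined above: multiplying two prefixes simplifies to a known prefix
# when the combined factor matches one, and collapses to 1 when they cancel.
#
#     >>> PREFIXES['k'] * PREFIXES['m']   # 10**3 * 10**-3 cancels out
#     1
#     >>> PREFIXES['k'] * PREFIXES['c']   # 10**3 * 10**-2 == 10 -> deca
#     deca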
# http://physics.nist.gov/cuu/Units/binary.html
BIN_PREFIXES = {
    'Ki': Prefix('kibi', 'Ki', 10, 2),
    'Mi': Prefix('mebi', 'Mi', 20, 2),
    'Gi': Prefix('gibi', 'Gi', 30, 2),
    'Ti': Prefix('tebi', 'Ti', 40, 2),
    'Pi': Prefix('pebi', 'Pi', 50, 2),
    'Ei': Prefix('exbi', 'Ei', 60, 2)
}
| bsd-3-clause | -7,314,212,655,000,997,000 | 28.286713 | 79 | 0.549666 | false |
krisys/django | django/core/serializers/xml_serializer.py | 17 | 15681 | """
XML serializer.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from xml.dom import pulldom
from xml.sax import handler
from xml.sax.expatreader import ExpatParser as _ExpatParser
from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils.encoding import smart_text
from django.utils.xmlutils import (
SimplerXMLGenerator, UnserializableContentError,
)
class Serializer(base.Serializer):
"""
Serializes a QuerySet to XML.
"""
def indent(self, level):
if self.options.get('indent') is not None:
self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent') * level)
def start_serialization(self):
"""
Start serialization -- open the XML document and the root element.
"""
self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
self.xml.startDocument()
self.xml.startElement("django-objects", {"version": "1.0"})
def end_serialization(self):
"""
End serialization -- end the document.
"""
self.indent(0)
self.xml.endElement("django-objects")
self.xml.endDocument()
def start_object(self, obj):
"""
Called as each object is handled.
"""
if not hasattr(obj, "_meta"):
raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
self.indent(1)
model = obj._meta.proxy_for_model if obj._deferred else obj.__class__
attrs = OrderedDict([("model", smart_text(model._meta))])
if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
obj_pk = obj._get_pk_val()
if obj_pk is not None:
attrs['pk'] = smart_text(obj_pk)
self.xml.startElement("object", attrs)
def end_object(self, obj):
"""
Called after handling all fields for an object.
"""
self.indent(1)
self.xml.endElement("object")
def handle_field(self, obj, field):
"""
Called to handle each field on an object (except for ForeignKeys and
ManyToManyFields)
"""
self.indent(2)
self.xml.startElement("field", OrderedDict([
("name", field.name),
("type", field.get_internal_type()),
]))
# Get a "string version" of the object's data.
if getattr(obj, field.name) is not None:
try:
self.xml.characters(field.value_to_string(obj))
except UnserializableContentError:
raise ValueError("%s.%s (pk:%s) contains unserializable characters" % (
obj.__class__.__name__, field.name, obj._get_pk_val()))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_fk_field(self, obj, field):
"""
Called to handle a ForeignKey (we need to treat them slightly
differently from regular fields).
"""
self._start_relational_field(field)
related_att = getattr(obj, field.get_attname())
if related_att is not None:
if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
related = getattr(obj, field.name)
# If related object has a natural key, use it
related = related.natural_key()
# Iterable natural keys are rolled out as subelements
for key_value in related:
self.xml.startElement("natural", {})
self.xml.characters(smart_text(key_value))
self.xml.endElement("natural")
else:
self.xml.characters(smart_text(related_att))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_m2m_field(self, obj, field):
"""
Called to handle a ManyToManyField. Related objects are only
serialized as references to the object's PK (i.e. the related *data*
is not dumped, just the relation).
"""
if field.remote_field.through._meta.auto_created:
self._start_relational_field(field)
if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
# If the objects in the m2m have a natural key, use it
def handle_m2m(value):
natural = value.natural_key()
# Iterable natural keys are rolled out as subelements
self.xml.startElement("object", {})
for key_value in natural:
self.xml.startElement("natural", {})
self.xml.characters(smart_text(key_value))
self.xml.endElement("natural")
self.xml.endElement("object")
else:
def handle_m2m(value):
self.xml.addQuickElement("object", attrs={
'pk': smart_text(value._get_pk_val())
})
for relobj in getattr(obj, field.name).iterator():
handle_m2m(relobj)
self.xml.endElement("field")
def _start_relational_field(self, field):
"""
Helper to output the <field> element for relational fields
"""
self.indent(2)
self.xml.startElement("field", OrderedDict([
("name", field.name),
("rel", field.remote_field.__class__.__name__),
("to", smart_text(field.remote_field.model._meta)),
]))
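# Illustrative usage sketch (not part of this module; the model name is
# hypothetical): the Serializer above is normally reached through the
# high-level serializers API rather than instantiated directly:
#
#     from django.core import serializers
#     xml_data = serializers.serialize("xml", SomeModel.objects.all())
#     for deserialized in serializers.deserialize("xml", xml_data):
#         deserialized.save()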
class Deserializer(base.Deserializer):
"""
Deserialize XML.
"""
def __init__(self, stream_or_string, **options):
super(Deserializer, self).__init__(stream_or_string, **options)
self.event_stream = pulldom.parse(self.stream, self._make_parser())
self.db = options.pop('using', DEFAULT_DB_ALIAS)
self.ignore = options.pop('ignorenonexistent', False)
def _make_parser(self):
"""Create a hardened XML parser (no custom/external entities)."""
return DefusedExpatParser()
def __next__(self):
for event, node in self.event_stream:
if event == "START_ELEMENT" and node.nodeName == "object":
self.event_stream.expandNode(node)
return self._handle_object(node)
raise StopIteration
def _handle_object(self, node):
"""
Convert an <object> node to a DeserializedObject.
"""
# Look up the model using the model loading mechanism. If this fails,
# bail.
Model = self._get_model_from_node(node, "model")
# Start building a data dictionary from the object.
data = {}
if node.hasAttribute('pk'):
data[Model._meta.pk.attname] = Model._meta.pk.to_python(
node.getAttribute('pk'))
# Also start building a dict of m2m data (this is saved as
# {m2m_accessor_attribute : [list_of_related_objects]})
m2m_data = {}
field_names = {f.name for f in Model._meta.get_fields()}
# Deserialize each field.
for field_node in node.getElementsByTagName("field"):
# If the field is missing the name attribute, bail (are you
# sensing a pattern here?)
field_name = field_node.getAttribute("name")
if not field_name:
raise base.DeserializationError("<field> node is missing the 'name' attribute")
# Get the field from the Model. This will raise a
# FieldDoesNotExist if, well, the field doesn't exist, which will
# be propagated correctly unless ignorenonexistent=True is used.
if self.ignore and field_name not in field_names:
continue
field = Model._meta.get_field(field_name)
# As is usually the case, relation fields get the special treatment.
if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):
m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel):
data[field.attname] = self._handle_fk_field_node(field_node, field)
else:
if field_node.getElementsByTagName('None'):
value = None
else:
value = field.to_python(getInnerText(field_node).strip())
data[field.name] = value
obj = base.build_instance(Model, data, self.db)
# Return a DeserializedObject so that the m2m data has a place to live.
return base.DeserializedObject(obj, m2m_data)
def _handle_fk_field_node(self, node, field):
"""
Handle a <field> node for a ForeignKey
"""
# Check if there is a child node named 'None', returning None if so.
if node.getElementsByTagName('None'):
return None
else:
model = field.remote_field.model
if hasattr(model._default_manager, 'get_by_natural_key'):
keys = node.getElementsByTagName('natural')
if keys:
# If there are 'natural' subelements, it must be a natural key
field_value = [getInnerText(k).strip() for k in keys]
obj = model._default_manager.db_manager(self.db).get_by_natural_key(*field_value)
obj_pk = getattr(obj, field.remote_field.field_name)
# If this is a natural foreign key to an object that
# has a FK/O2O as the foreign key, use the FK value
if field.remote_field.model._meta.pk.remote_field:
obj_pk = obj_pk.pk
else:
# Otherwise, treat like a normal PK
field_value = getInnerText(node).strip()
obj_pk = model._meta.get_field(field.remote_field.field_name).to_python(field_value)
return obj_pk
else:
field_value = getInnerText(node).strip()
return model._meta.get_field(field.remote_field.field_name).to_python(field_value)
def _handle_m2m_field_node(self, node, field):
"""
Handle a <field> node for a ManyToManyField.
"""
model = field.remote_field.model
default_manager = model._default_manager
if hasattr(default_manager, 'get_by_natural_key'):
def m2m_convert(n):
keys = n.getElementsByTagName('natural')
if keys:
# If there are 'natural' subelements, it must be a natural key
field_value = [getInnerText(k).strip() for k in keys]
obj_pk = default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk
else:
# Otherwise, treat like a normal PK value.
obj_pk = model._meta.pk.to_python(n.getAttribute('pk'))
return obj_pk
else:
def m2m_convert(n):
return model._meta.pk.to_python(n.getAttribute('pk'))
return [m2m_convert(c) for c in node.getElementsByTagName("object")]
def _get_model_from_node(self, node, attr):
"""
Helper to look up a model from a <object model=...> or a <field
rel=... to=...> node.
"""
model_identifier = node.getAttribute(attr)
if not model_identifier:
raise base.DeserializationError(
"<%s> node is missing the required '%s' attribute"
% (node.nodeName, attr))
try:
return apps.get_model(model_identifier)
except (LookupError, TypeError):
raise base.DeserializationError(
"<%s> node has invalid model identifier: '%s'"
% (node.nodeName, model_identifier))
def getInnerText(node):
"""
Get all the inner text of a DOM node (recursively).
"""
# inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html
inner_text = []
for child in node.childNodes:
if child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE:
inner_text.append(child.data)
elif child.nodeType == child.ELEMENT_NODE:
inner_text.extend(getInnerText(child))
else:
pass
return "".join(inner_text)
# Below code based on Christian Heimes' defusedxml
class DefusedExpatParser(_ExpatParser):
"""
An expat parser hardened against XML bomb attacks.
Forbids DTDs, external entity references
"""
def __init__(self, *args, **kwargs):
_ExpatParser.__init__(self, *args, **kwargs)
self.setFeature(handler.feature_external_ges, False)
self.setFeature(handler.feature_external_pes, False)
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise DTDForbidden(name, sysid, pubid)
def entity_decl(self, name, is_parameter_entity, value, base,
sysid, pubid, notation_name):
raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name)
def external_entity_ref_handler(self, context, base, sysid, pubid):
raise ExternalReferenceForbidden(context, base, sysid, pubid)
def reset(self):
_ExpatParser.reset(self)
parser = self._parser
parser.StartDoctypeDeclHandler = self.start_doctype_decl
parser.EntityDeclHandler = self.entity_decl
parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
parser.ExternalEntityRefHandler = self.external_entity_ref_handler
class DefusedXmlException(ValueError):
"""Base exception."""
def __repr__(self):
return str(self)
class DTDForbidden(DefusedXmlException):
"""Document type definition is forbidden."""
def __init__(self, name, sysid, pubid):
super(DTDForbidden, self).__init__()
self.name = name
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "DTDForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
class EntitiesForbidden(DefusedXmlException):
"""Entity definition is forbidden."""
def __init__(self, name, value, base, sysid, pubid, notation_name):
super(EntitiesForbidden, self).__init__()
self.name = name
self.value = value
self.base = base
self.sysid = sysid
self.pubid = pubid
self.notation_name = notation_name
def __str__(self):
tpl = "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
class ExternalReferenceForbidden(DefusedXmlException):
"""Resolving an external reference is forbidden."""
def __init__(self, context, base, sysid, pubid):
super(ExternalReferenceForbidden, self).__init__()
self.context = context
self.base = base
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})"
return tpl.format(self.sysid, self.pubid)
| bsd-3-clause | -3,009,759,731,297,462,300 | 38.300752 | 111 | 0.588866 | false |
trivoldus28/pulsarch-verilog | tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/hotshot/stats.py | 252 | 2582 | """Statistics analyzer for HotShot."""
import profile
import pstats
import hotshot.log
from hotshot.log import ENTER, EXIT
def load(filename):
return StatsLoader(filename).load()
class StatsLoader:
def __init__(self, logfn):
self._logfn = logfn
self._code = {}
self._stack = []
self.pop_frame = self._stack.pop
def load(self):
# The timer selected by the profiler should never be used, so make
# sure it doesn't work:
p = Profile()
p.get_time = _brokentimer
log = hotshot.log.LogReader(self._logfn)
taccum = 0
for event in log:
what, (filename, lineno, funcname), tdelta = event
if tdelta > 0:
taccum += tdelta
# We multiply taccum to convert from the microseconds we
            # have to the seconds that the profile/pstats modules work
# with; this allows the numbers to have some basis in
# reality (ignoring calibration issues for now).
if what == ENTER:
frame = self.new_frame(filename, lineno, funcname)
p.trace_dispatch_call(frame, taccum * .000001)
taccum = 0
elif what == EXIT:
frame = self.pop_frame()
p.trace_dispatch_return(frame, taccum * .000001)
taccum = 0
# no further work for line events
assert not self._stack
return pstats.Stats(p)
def new_frame(self, *args):
# args must be filename, firstlineno, funcname
        # our code objects are cached so that we don't need to create
        # new ones every time
try:
code = self._code[args]
except KeyError:
code = FakeCode(*args)
self._code[args] = code
        # frame objects are created fresh, since the back pointer will
# vary considerably
if self._stack:
back = self._stack[-1]
else:
back = None
frame = FakeFrame(code, back)
self._stack.append(frame)
return frame
class Profile(profile.Profile):
def simulate_cmd_complete(self):
pass
class FakeCode:
def __init__(self, filename, firstlineno, funcname):
self.co_filename = filename
self.co_firstlineno = firstlineno
self.co_name = self.__name__ = funcname
class FakeFrame:
def __init__(self, code, back):
self.f_back = back
self.f_code = code
def _brokentimer():
raise RuntimeError, "this timer should not be called"
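# Illustrative usage sketch (not part of the original module; the profile file
# name is hypothetical): load() returns a pstats.Stats object, so the usual
# pstats methods apply:
#
#     stats = load("app.prof")
#     stats.sort_stats("cumulative")
#     stats.print_stats(20)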
| gpl-2.0 | 2,576,195,772,669,107,700 | 26.763441 | 74 | 0.572812 | false |
ZhangXinNan/tensorflow | tensorflow/python/kernel_tests/distributions/uniform_test.py | 11 | 11353 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Uniform distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import uniform as uniform_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
class UniformTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testUniformRange(self):
with self.test_session():
a = 3.0
b = 10.0
uniform = uniform_lib.Uniform(low=a, high=b)
self.assertAllClose(a, self.evaluate(uniform.low))
self.assertAllClose(b, self.evaluate(uniform.high))
self.assertAllClose(b - a, self.evaluate(uniform.range()))
@test_util.run_in_graph_and_eager_modes
def testUniformPDF(self):
with self.test_session():
a = constant_op.constant([-3.0] * 5 + [15.0])
b = constant_op.constant([11.0] * 5 + [20.0])
uniform = uniform_lib.Uniform(low=a, high=b)
a_v = -3.0
b_v = 11.0
x = np.array([-10.5, 4.0, 0.0, 10.99, 11.3, 17.0], dtype=np.float32)
def _expected_pdf():
pdf = np.zeros_like(x) + 1.0 / (b_v - a_v)
pdf[x > b_v] = 0.0
pdf[x < a_v] = 0.0
pdf[5] = 1.0 / (20.0 - 15.0)
return pdf
expected_pdf = _expected_pdf()
pdf = uniform.prob(x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
log_pdf = uniform.log_prob(x)
self.assertAllClose(np.log(expected_pdf), self.evaluate(log_pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformShape(self):
with self.test_session():
a = constant_op.constant([-3.0] * 5)
b = constant_op.constant(11.0)
uniform = uniform_lib.Uniform(low=a, high=b)
self.assertEqual(self.evaluate(uniform.batch_shape_tensor()), (5,))
self.assertEqual(uniform.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(self.evaluate(uniform.event_shape_tensor()), [])
self.assertEqual(uniform.event_shape, tensor_shape.TensorShape([]))
@test_util.run_in_graph_and_eager_modes
def testUniformPDFWithScalarEndpoint(self):
with self.test_session():
a = constant_op.constant([0.0, 5.0])
b = constant_op.constant(10.0)
uniform = uniform_lib.Uniform(low=a, high=b)
x = np.array([0.0, 8.0], dtype=np.float32)
expected_pdf = np.array([1.0 / (10.0 - 0.0), 1.0 / (10.0 - 5.0)])
pdf = uniform.prob(x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformCDF(self):
with self.test_session():
batch_size = 6
a = constant_op.constant([1.0] * batch_size)
b = constant_op.constant([11.0] * batch_size)
a_v = 1.0
b_v = 11.0
x = np.array([-2.5, 2.5, 4.0, 0.0, 10.99, 12.0], dtype=np.float32)
uniform = uniform_lib.Uniform(low=a, high=b)
def _expected_cdf():
cdf = (x - a_v) / (b_v - a_v)
cdf[x >= b_v] = 1
cdf[x < a_v] = 0
return cdf
cdf = uniform.cdf(x)
self.assertAllClose(_expected_cdf(), self.evaluate(cdf))
log_cdf = uniform.log_cdf(x)
self.assertAllClose(np.log(_expected_cdf()), self.evaluate(log_cdf))
@test_util.run_in_graph_and_eager_modes
def testUniformEntropy(self):
with self.test_session():
a_v = np.array([1.0, 1.0, 1.0])
b_v = np.array([[1.5, 2.0, 3.0]])
uniform = uniform_lib.Uniform(low=a_v, high=b_v)
expected_entropy = np.log(b_v - a_v)
self.assertAllClose(expected_entropy, self.evaluate(uniform.entropy()))
@test_util.run_in_graph_and_eager_modes
def testUniformAssertMaxGtMin(self):
with self.test_session():
a_v = np.array([1.0, 1.0, 1.0], dtype=np.float32)
b_v = np.array([1.0, 2.0, 3.0], dtype=np.float32)
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"x < y"):
uniform = uniform_lib.Uniform(low=a_v, high=b_v, validate_args=True)
self.evaluate(uniform.low)
@test_util.run_in_graph_and_eager_modes
def testUniformSample(self):
with self.test_session():
a = constant_op.constant([3.0, 4.0])
b = constant_op.constant(13.0)
a1_v = 3.0
a2_v = 4.0
b_v = 13.0
n = constant_op.constant(100000)
uniform = uniform_lib.Uniform(low=a, high=b)
samples = uniform.sample(n, seed=137)
sample_values = self.evaluate(samples)
self.assertEqual(sample_values.shape, (100000, 2))
self.assertAllClose(
sample_values[::, 0].mean(), (b_v + a1_v) / 2, atol=1e-1, rtol=0.)
self.assertAllClose(
sample_values[::, 1].mean(), (b_v + a2_v) / 2, atol=1e-1, rtol=0.)
self.assertFalse(
np.any(sample_values[::, 0] < a1_v) or np.any(sample_values >= b_v))
self.assertFalse(
np.any(sample_values[::, 1] < a2_v) or np.any(sample_values >= b_v))
@test_util.run_in_graph_and_eager_modes
def _testUniformSampleMultiDimensional(self):
# DISABLED: Please enable this test once b/issues/30149644 is resolved.
with self.test_session():
batch_size = 2
a_v = [3.0, 22.0]
b_v = [13.0, 35.0]
a = constant_op.constant([a_v] * batch_size)
b = constant_op.constant([b_v] * batch_size)
uniform = uniform_lib.Uniform(low=a, high=b)
n_v = 100000
n = constant_op.constant(n_v)
samples = uniform.sample(n)
self.assertEqual(samples.get_shape(), (n_v, batch_size, 2))
sample_values = self.evaluate(samples)
self.assertFalse(
np.any(sample_values[:, 0, 0] < a_v[0]) or
np.any(sample_values[:, 0, 0] >= b_v[0]))
self.assertFalse(
np.any(sample_values[:, 0, 1] < a_v[1]) or
np.any(sample_values[:, 0, 1] >= b_v[1]))
self.assertAllClose(
sample_values[:, 0, 0].mean(), (a_v[0] + b_v[0]) / 2, atol=1e-2)
self.assertAllClose(
sample_values[:, 0, 1].mean(), (a_v[1] + b_v[1]) / 2, atol=1e-2)
@test_util.run_in_graph_and_eager_modes
def testUniformMean(self):
with self.test_session():
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.mean()), s_uniform.mean())
@test_util.run_in_graph_and_eager_modes
def testUniformVariance(self):
with self.test_session():
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.variance()), s_uniform.var())
@test_util.run_in_graph_and_eager_modes
def testUniformStd(self):
with self.test_session():
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.stddev()), s_uniform.std())
@test_util.run_in_graph_and_eager_modes
def testUniformNans(self):
with self.test_session():
a = 10.0
b = [11.0, 100.0]
uniform = uniform_lib.Uniform(low=a, high=b)
no_nans = constant_op.constant(1.0)
nans = constant_op.constant(0.0) / constant_op.constant(0.0)
self.assertTrue(self.evaluate(math_ops.is_nan(nans)))
with_nans = array_ops.stack([no_nans, nans])
pdf = uniform.prob(with_nans)
is_nan = self.evaluate(math_ops.is_nan(pdf))
self.assertFalse(is_nan[0])
self.assertTrue(is_nan[1])
@test_util.run_in_graph_and_eager_modes
def testUniformSamplePdf(self):
with self.test_session():
a = 10.0
b = [11.0, 100.0]
uniform = uniform_lib.Uniform(a, b)
self.assertTrue(
self.evaluate(
math_ops.reduce_all(uniform.prob(uniform.sample(10)) > 0)))
@test_util.run_in_graph_and_eager_modes
def testUniformBroadcasting(self):
with self.test_session():
a = 10.0
b = [11.0, 20.0]
uniform = uniform_lib.Uniform(a, b)
pdf = uniform.prob([[10.5, 11.5], [9.0, 19.0], [10.5, 21.0]])
expected_pdf = np.array([[1.0, 0.1], [0.0, 0.1], [1.0, 0.0]])
self.assertAllClose(expected_pdf, self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformSampleWithShape(self):
with self.test_session():
a = 10.0
b = [11.0, 20.0]
uniform = uniform_lib.Uniform(a, b)
pdf = uniform.prob(uniform.sample((2, 3)))
# pylint: disable=bad-continuation
expected_pdf = [
[[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
[[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
]
# pylint: enable=bad-continuation
self.assertAllClose(expected_pdf, self.evaluate(pdf))
pdf = uniform.prob(uniform.sample())
expected_pdf = [1.0, 0.1]
self.assertAllClose(expected_pdf, self.evaluate(pdf))
def testFullyReparameterized(self):
a = constant_op.constant(0.1)
b = constant_op.constant(0.8)
with backprop.GradientTape() as tape:
tape.watch(a)
tape.watch(b)
uniform = uniform_lib.Uniform(a, b)
samples = uniform.sample(100)
grad_a, grad_b = tape.gradient(samples, [a, b])
self.assertIsNotNone(grad_a)
self.assertIsNotNone(grad_b)
# Eager doesn't pass due to a type mismatch in one of the ops.
def testUniformFloat64(self):
uniform = uniform_lib.Uniform(
low=np.float64(0.), high=np.float64(1.))
self.assertAllClose(
[1., 1.],
self.evaluate(uniform.prob(np.array([0.5, 0.6], dtype=np.float64))))
self.assertAllClose(
[0.5, 0.6],
self.evaluate(uniform.cdf(np.array([0.5, 0.6], dtype=np.float64))))
self.assertAllClose(0.5, self.evaluate(uniform.mean()))
self.assertAllClose(1 / 12., self.evaluate(uniform.variance()))
self.assertAllClose(0., self.evaluate(uniform.entropy()))
if __name__ == "__main__":
test.main()
| apache-2.0 | -9,176,198,563,497,223,000 | 32.991018 | 80 | 0.618515 | false |
dturner-tw/pants | tests/python/pants_test/android/tasks/test_zipalign.py | 16 | 2041 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.android.tasks.zipalign import Zipalign
from pants_test.android.test_android_base import TestAndroidBase, distribution
class TestZipalign(TestAndroidBase):
"""Test class for the Zipalign task."""
@classmethod
def task_type(cls):
return Zipalign
def test_zipalign_smoke(self):
task = self.create_task(self.context())
task.execute()
def test_zipalign_binary(self):
with distribution() as dist:
with self.android_binary() as android_binary:
self.set_options(sdk_path=dist)
task = self.create_task(self.context())
target = android_binary
self.assertEqual(task.zipalign_binary(target),
os.path.join(dist, 'build-tools', target.build_tools_version, 'zipalign'))
def test_zipalign_out(self):
with distribution() as dist:
with self.android_binary() as android_binary:
self.set_options(sdk_path=dist)
task = self.create_task(self.context())
target = android_binary
self.assertEqual(task.zipalign_out(target), os.path.join(task._distdir, target.name))
def test_render_args(self):
with distribution() as dist:
with self.android_binary() as android_binary:
self.set_options(sdk_path=dist)
task = self.create_task(self.context())
target = android_binary
expected_args = [os.path.join(dist, 'build-tools', target.build_tools_version, 'zipalign'),
'-f', '4', 'package/path',
os.path.join(task._distdir, target.name,
'{0}.signed.apk'.format(target.manifest.package_name))]
self.assertEqual(task._render_args('package/path', target), expected_args)
| apache-2.0 | 1,583,761,434,071,562,200 | 38.25 | 99 | 0.653111 | false |
ghyde/letsencrypt | letsencrypt/plugins/disco_test.py | 1 | 9977 | """Tests for letsencrypt.plugins.disco."""
import pkg_resources
import unittest
import mock
import zope.interface
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt.plugins import standalone
EP_SA = pkg_resources.EntryPoint(
"sa", "letsencrypt.plugins.standalone",
attrs=("Authenticator",),
dist=mock.MagicMock(key="letsencrypt"))
class PluginEntryPointTest(unittest.TestCase):
"""Tests for letsencrypt.plugins.disco.PluginEntryPoint."""
def setUp(self):
self.ep1 = pkg_resources.EntryPoint(
"ep1", "p1.ep1", dist=mock.MagicMock(key="p1"))
self.ep1prim = pkg_resources.EntryPoint(
"ep1", "p2.ep2", dist=mock.MagicMock(key="p2"))
# nested
self.ep2 = pkg_resources.EntryPoint(
"ep2", "p2.foo.ep2", dist=mock.MagicMock(key="p2"))
# project name != top-level package name
self.ep3 = pkg_resources.EntryPoint(
"ep3", "a.ep3", dist=mock.MagicMock(key="p3"))
from letsencrypt.plugins.disco import PluginEntryPoint
self.plugin_ep = PluginEntryPoint(EP_SA)
def test_entry_point_to_plugin_name(self):
from letsencrypt.plugins.disco import PluginEntryPoint
names = {
self.ep1: "p1:ep1",
self.ep1prim: "p2:ep1",
self.ep2: "p2:ep2",
self.ep3: "p3:ep3",
EP_SA: "sa",
}
for entry_point, name in names.iteritems():
self.assertEqual(
name, PluginEntryPoint.entry_point_to_plugin_name(entry_point))
def test_description(self):
self.assertEqual("Standalone Authenticator", self.plugin_ep.description)
def test_description_with_name(self):
self.plugin_ep.plugin_cls = mock.MagicMock(description="Desc")
self.assertEqual(
"Desc (sa)", self.plugin_ep.description_with_name)
def test_ifaces(self):
self.assertTrue(self.plugin_ep.ifaces((interfaces.IAuthenticator,)))
self.assertFalse(self.plugin_ep.ifaces((interfaces.IInstaller,)))
self.assertFalse(self.plugin_ep.ifaces((
interfaces.IInstaller, interfaces.IAuthenticator)))
def test__init__(self):
self.assertFalse(self.plugin_ep.initialized)
self.assertFalse(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
self.assertFalse(self.plugin_ep.available)
self.assertTrue(self.plugin_ep.problem is None)
self.assertTrue(self.plugin_ep.entry_point is EP_SA)
self.assertEqual("sa", self.plugin_ep.name)
self.assertTrue(self.plugin_ep.plugin_cls is standalone.Authenticator)
def test_init(self):
config = mock.MagicMock()
plugin = self.plugin_ep.init(config=config)
self.assertTrue(self.plugin_ep.initialized)
self.assertTrue(plugin.config is config)
# memoize!
self.assertTrue(self.plugin_ep.init() is plugin)
self.assertTrue(plugin.config is config)
# try to give different config
self.assertTrue(self.plugin_ep.init(123) is plugin)
self.assertTrue(plugin.config is config)
self.assertFalse(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
self.assertFalse(self.plugin_ep.available)
def test_verify(self):
iface1 = mock.MagicMock(__name__="iface1")
iface2 = mock.MagicMock(__name__="iface2")
iface3 = mock.MagicMock(__name__="iface3")
# pylint: disable=protected-access
self.plugin_ep._initialized = plugin = mock.MagicMock()
exceptions = zope.interface.exceptions
with mock.patch("letsencrypt.plugins."
"disco.zope.interface") as mock_zope:
mock_zope.exceptions = exceptions
def verify_object(iface, obj): # pylint: disable=missing-docstring
assert obj is plugin
assert iface is iface1 or iface is iface2 or iface is iface3
if iface is iface3:
raise mock_zope.exceptions.BrokenImplementation(None, None)
mock_zope.verify.verifyObject.side_effect = verify_object
self.assertTrue(self.plugin_ep.verify((iface1,)))
self.assertTrue(self.plugin_ep.verify((iface1, iface2)))
self.assertFalse(self.plugin_ep.verify((iface3,)))
self.assertFalse(self.plugin_ep.verify((iface1, iface3)))
def test_prepare(self):
config = mock.MagicMock()
self.plugin_ep.init(config=config)
self.plugin_ep.prepare()
self.assertTrue(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
# output doesn't matter that much, just test if it runs
str(self.plugin_ep)
def test_prepare_misconfigured(self):
plugin = mock.MagicMock()
plugin.prepare.side_effect = errors.MisconfigurationError
# pylint: disable=protected-access
self.plugin_ep._initialized = plugin
self.assertTrue(isinstance(self.plugin_ep.prepare(),
errors.MisconfigurationError))
self.assertTrue(self.plugin_ep.prepared)
self.assertTrue(self.plugin_ep.misconfigured)
self.assertTrue(isinstance(self.plugin_ep.problem,
errors.MisconfigurationError))
self.assertTrue(self.plugin_ep.available)
def test_prepare_no_installation(self):
plugin = mock.MagicMock()
plugin.prepare.side_effect = errors.NoInstallationError
# pylint: disable=protected-access
self.plugin_ep._initialized = plugin
self.assertTrue(isinstance(self.plugin_ep.prepare(),
errors.NoInstallationError))
self.assertTrue(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
self.assertFalse(self.plugin_ep.available)
def test_prepare_generic_plugin_error(self):
plugin = mock.MagicMock()
plugin.prepare.side_effect = errors.PluginError
# pylint: disable=protected-access
self.plugin_ep._initialized = plugin
self.assertTrue(isinstance(self.plugin_ep.prepare(), errors.PluginError))
self.assertTrue(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
self.assertFalse(self.plugin_ep.available)
def test_repr(self):
self.assertEqual("PluginEntryPoint#sa", repr(self.plugin_ep))
class PluginsRegistryTest(unittest.TestCase):
"""Tests for letsencrypt.plugins.disco.PluginsRegistry."""
def setUp(self):
from letsencrypt.plugins.disco import PluginsRegistry
self.plugin_ep = mock.MagicMock(name="mock")
self.plugin_ep.__hash__.side_effect = TypeError
self.plugins = {"mock": self.plugin_ep}
self.reg = PluginsRegistry(self.plugins)
def test_find_all(self):
from letsencrypt.plugins.disco import PluginsRegistry
with mock.patch("letsencrypt.plugins.disco.pkg_resources") as mock_pkg:
mock_pkg.iter_entry_points.return_value = iter([EP_SA])
plugins = PluginsRegistry.find_all()
self.assertTrue(plugins["sa"].plugin_cls is standalone.Authenticator)
self.assertTrue(plugins["sa"].entry_point is EP_SA)
def test_getitem(self):
self.assertEqual(self.plugin_ep, self.reg["mock"])
def test_iter(self):
self.assertEqual(["mock"], list(self.reg))
def test_len(self):
self.assertEqual(1, len(self.reg))
self.plugins.clear()
self.assertEqual(0, len(self.reg))
def test_init(self):
self.plugin_ep.init.return_value = "baz"
self.assertEqual(["baz"], self.reg.init("bar"))
self.plugin_ep.init.assert_called_once_with("bar")
def test_filter(self):
self.plugins.update({
"foo": "bar",
"bar": "foo",
"baz": "boo",
})
self.assertEqual(
{"foo": "bar", "baz": "boo"},
self.reg.filter(lambda p_ep: str(p_ep).startswith("b")))
def test_ifaces(self):
self.plugin_ep.ifaces.return_value = True
# pylint: disable=protected-access
self.assertEqual(self.plugins, self.reg.ifaces()._plugins)
self.plugin_ep.ifaces.return_value = False
self.assertEqual({}, self.reg.ifaces()._plugins)
def test_verify(self):
self.plugin_ep.verify.return_value = True
# pylint: disable=protected-access
self.assertEqual(
self.plugins, self.reg.verify(mock.MagicMock())._plugins)
self.plugin_ep.verify.return_value = False
self.assertEqual({}, self.reg.verify(mock.MagicMock())._plugins)
def test_prepare(self):
self.plugin_ep.prepare.return_value = "baz"
self.assertEqual(["baz"], self.reg.prepare())
self.plugin_ep.prepare.assert_called_once_with()
def test_available(self):
self.plugin_ep.available = True
# pylint: disable=protected-access
self.assertEqual(self.plugins, self.reg.available()._plugins)
self.plugin_ep.available = False
self.assertEqual({}, self.reg.available()._plugins)
def test_find_init(self):
self.assertTrue(self.reg.find_init(mock.Mock()) is None)
    self.plugin_ep.initialized = True
self.assertTrue(
self.reg.find_init(self.plugin_ep.init()) is self.plugin_ep)
def test_repr(self):
self.plugin_ep.__repr__ = lambda _: "PluginEntryPoint#mock"
self.assertEqual("PluginsRegistry(PluginEntryPoint#mock)",
repr(self.reg))
def test_str(self):
self.plugin_ep.__str__ = lambda _: "Mock"
self.plugins["foo"] = "Mock"
self.assertEqual("Mock\n\nMock", str(self.reg))
self.plugins.clear()
self.assertEqual("No plugins", str(self.reg))
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 | 3,893,390,962,680,766,500 | 38.12549 | 81 | 0.636865 | false |
yujikato/DIRAC | src/DIRAC/Interfaces/scripts/dirac_admin_get_pilot_output.py | 2 | 1395 | #!/usr/bin/env python
########################################################################
# File : dirac-admin-get-pilot-output
# Author : Stuart Paterson
########################################################################
"""
Retrieve output of a Grid pilot
Usage:
dirac-admin-get-pilot-output [options] ... PilotID ...
Arguments:
PilotID: Grid ID of the pilot
Example:
$ dirac-admin-get-pilot-output https://marlb.in2p3.fr:9000/26KCLKBFtxXKHF4_ZrQjkw
$ ls -la
drwxr-xr-x 2 hamar marseill 2048 Feb 21 14:13 pilot_26KCLKBFtxXKHF4_ZrQjkw
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if len(args) < 1:
Script.showHelp()
from DIRAC import exit as DIRACExit
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
for gridID in args:
result = diracAdmin.getPilotOutput(gridID)
if not result['OK']:
errorList.append((gridID, result['Message']))
exitCode = 2
for error in errorList:
print("ERROR %s: %s" % error)
DIRACExit(exitCode)
if __name__ == "__main__":
main()
| gpl-3.0 | -7,738,591,539,812,243,000 | 23.051724 | 83 | 0.622222 | false |
balloob/home-assistant | homeassistant/components/deconz/sensor.py | 5 | 8316 | """Support for deCONZ sensors."""
from pydeconz.sensor import (
Battery,
Consumption,
Daylight,
Humidity,
LightLevel,
Power,
Pressure,
Switch,
Temperature,
Thermostat,
)
from homeassistant.components.sensor import DOMAIN
from homeassistant.const import (
ATTR_TEMPERATURE,
ATTR_VOLTAGE,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
ENERGY_KILO_WATT_HOUR,
LIGHT_LUX,
PERCENTAGE,
POWER_WATT,
PRESSURE_HPA,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from .const import ATTR_DARK, ATTR_ON, NEW_SENSOR
from .deconz_device import DeconzDevice
from .gateway import get_gateway_from_config_entry
ATTR_CURRENT = "current"
ATTR_POWER = "power"
ATTR_DAYLIGHT = "daylight"
ATTR_EVENT_ID = "event_id"
DEVICE_CLASS = {
Humidity: DEVICE_CLASS_HUMIDITY,
LightLevel: DEVICE_CLASS_ILLUMINANCE,
Power: DEVICE_CLASS_POWER,
Pressure: DEVICE_CLASS_PRESSURE,
Temperature: DEVICE_CLASS_TEMPERATURE,
}
ICON = {
Daylight: "mdi:white-balance-sunny",
Pressure: "mdi:gauge",
Temperature: "mdi:thermometer",
}
UNIT_OF_MEASUREMENT = {
Consumption: ENERGY_KILO_WATT_HOUR,
Humidity: PERCENTAGE,
LightLevel: LIGHT_LUX,
Power: POWER_WATT,
Pressure: PRESSURE_HPA,
Temperature: TEMP_CELSIUS,
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the deCONZ sensors."""
gateway = get_gateway_from_config_entry(hass, config_entry)
gateway.entities[DOMAIN] = set()
battery_handler = DeconzBatteryHandler(gateway)
@callback
def async_add_sensor(sensors):
"""Add sensors from deCONZ.
Create DeconzBattery if sensor has a battery attribute.
Create DeconzSensor if not a battery, switch or thermostat and not a binary sensor.
"""
entities = []
for sensor in sensors:
if not gateway.option_allow_clip_sensor and sensor.type.startswith("CLIP"):
continue
if sensor.battery is not None:
battery_handler.remove_tracker(sensor)
known_batteries = set(gateway.entities[DOMAIN])
new_battery = DeconzBattery(sensor, gateway)
if new_battery.unique_id not in known_batteries:
entities.append(new_battery)
else:
battery_handler.create_tracker(sensor)
if (
not sensor.BINARY
and sensor.type
not in Battery.ZHATYPE + Switch.ZHATYPE + Thermostat.ZHATYPE
and sensor.uniqueid not in gateway.entities[DOMAIN]
):
entities.append(DeconzSensor(sensor, gateway))
if entities:
async_add_entities(entities)
gateway.listeners.append(
async_dispatcher_connect(
hass, gateway.async_signal_new_device(NEW_SENSOR), async_add_sensor
)
)
async_add_sensor(
[gateway.api.sensors[key] for key in sorted(gateway.api.sensors, key=int)]
)
class DeconzSensor(DeconzDevice):
"""Representation of a deCONZ sensor."""
TYPE = DOMAIN
@callback
def async_update_callback(self, force_update=False):
"""Update the sensor's state."""
keys = {"on", "reachable", "state"}
if force_update or self._device.changed_keys.intersection(keys):
super().async_update_callback(force_update=force_update)
@property
def state(self):
"""Return the state of the sensor."""
return self._device.state
@property
def device_class(self):
"""Return the class of the sensor."""
return DEVICE_CLASS.get(type(self._device))
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON.get(type(self._device))
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this sensor."""
return UNIT_OF_MEASUREMENT.get(type(self._device))
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
attr = {}
if self._device.on is not None:
attr[ATTR_ON] = self._device.on
if self._device.secondary_temperature is not None:
attr[ATTR_TEMPERATURE] = self._device.secondary_temperature
if self._device.type in Consumption.ZHATYPE:
attr[ATTR_POWER] = self._device.power
elif self._device.type in Daylight.ZHATYPE:
attr[ATTR_DAYLIGHT] = self._device.daylight
elif self._device.type in LightLevel.ZHATYPE:
if self._device.dark is not None:
attr[ATTR_DARK] = self._device.dark
if self._device.daylight is not None:
attr[ATTR_DAYLIGHT] = self._device.daylight
elif self._device.type in Power.ZHATYPE:
attr[ATTR_CURRENT] = self._device.current
attr[ATTR_VOLTAGE] = self._device.voltage
return attr
class DeconzBattery(DeconzDevice):
"""Battery class for when a device is only represented as an event."""
TYPE = DOMAIN
@callback
def async_update_callback(self, force_update=False):
"""Update the battery's state, if needed."""
keys = {"battery", "reachable"}
if force_update or self._device.changed_keys.intersection(keys):
super().async_update_callback(force_update=force_update)
@property
def unique_id(self):
"""Return a unique identifier for this device."""
return f"{self.serial}-battery"
@property
def state(self):
"""Return the state of the battery."""
return self._device.battery
@property
def name(self):
"""Return the name of the battery."""
return f"{self._device.name} Battery Level"
@property
def device_class(self):
"""Return the class of the sensor."""
return DEVICE_CLASS_BATTERY
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return PERCENTAGE
@property
def device_state_attributes(self):
"""Return the state attributes of the battery."""
attr = {}
if self._device.type in Switch.ZHATYPE:
for event in self.gateway.events:
if self._device == event.device:
attr[ATTR_EVENT_ID] = event.event_id
return attr
class DeconzSensorStateTracker:
"""Track sensors without a battery state and signal when battery state exist."""
def __init__(self, sensor, gateway):
"""Set up tracker."""
self.sensor = sensor
self.gateway = gateway
sensor.register_callback(self.async_update_callback)
@callback
def close(self):
"""Clean up tracker."""
self.sensor.remove_callback(self.async_update_callback)
self.gateway = None
self.sensor = None
@callback
def async_update_callback(self, ignore_update=False):
"""Sensor state updated."""
if "battery" in self.sensor.changed_keys:
async_dispatcher_send(
self.gateway.hass,
self.gateway.async_signal_new_device(NEW_SENSOR),
[self.sensor],
)
class DeconzBatteryHandler:
"""Creates and stores trackers for sensors without a battery state."""
def __init__(self, gateway):
"""Set up battery handler."""
self.gateway = gateway
self._trackers = set()
@callback
def create_tracker(self, sensor):
"""Create new tracker for battery state."""
for tracker in self._trackers:
if sensor == tracker.sensor:
return
self._trackers.add(DeconzSensorStateTracker(sensor, self.gateway))
@callback
def remove_tracker(self, sensor):
"""Remove tracker of battery state."""
for tracker in self._trackers:
if sensor == tracker.sensor:
tracker.close()
self._trackers.remove(tracker)
break
| apache-2.0 | 4,234,025,814,958,995,000 | 27.775087 | 91 | 0.62001 | false |
jgao54/airflow | airflow/migrations/versions/64de9cddf6c9_add_task_fails_journal_table.py | 12 | 1697 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add task fails journal table
Revision ID: 64de9cddf6c9
Revises: 211e584da130
Create Date: 2016-08-03 14:02:59.203021
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '64de9cddf6c9'
down_revision = '211e584da130'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'task_fail',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('task_id', sa.String(length=250), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('execution_date', sa.DateTime(), nullable=False),
sa.Column('start_date', sa.DateTime(), nullable=True),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
)
def downgrade():
op.drop_table('task_fail')
| apache-2.0 | -4,374,993,686,527,747,600 | 32.27451 | 68 | 0.711844 | false |
dodocat/git-repo | progress.py | 143 | 2036 | #
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from time import time
from trace import IsTrace
_NOT_TTY = not os.isatty(2)
class Progress(object):
def __init__(self, title, total=0, units=''):
self._title = title
self._total = total
self._done = 0
self._lastp = -1
self._start = time()
self._show = False
self._units = units
def update(self, inc=1):
self._done += inc
if _NOT_TTY or IsTrace():
return
if not self._show:
if 0.5 <= time() - self._start:
self._show = True
else:
return
if self._total <= 0:
sys.stderr.write('\r%s: %d, ' % (
self._title,
self._done))
sys.stderr.flush()
else:
p = (100 * self._done) / self._total
if self._lastp != p:
self._lastp = p
sys.stderr.write('\r%s: %3d%% (%d%s/%d%s) ' % (
self._title,
p,
self._done, self._units,
self._total, self._units))
sys.stderr.flush()
def end(self):
if _NOT_TTY or IsTrace() or not self._show:
return
if self._total <= 0:
sys.stderr.write('\r%s: %d, done. \n' % (
self._title,
self._done))
sys.stderr.flush()
else:
p = (100 * self._done) / self._total
sys.stderr.write('\r%s: %3d%% (%d%s/%d%s), done. \n' % (
self._title,
p,
self._done, self._units,
self._total, self._units))
sys.stderr.flush()
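# Illustrative usage sketch (not part of the original module; the project names
# and sync() helper are made up): create a Progress, call update() once per
# finished item, then end():
#
#     p = Progress('Syncing projects', total=len(projects))
#     for project in projects:
#         sync(project)
#         p.update()
#     p.end()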
| apache-2.0 | 7,858,466,868,841,076,000 | 25.102564 | 74 | 0.578585 | false |
nevir/plexability | extern/gyp/pylib/gyp/generator/make.py | 17 | 88238 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last between different
# .mk files.
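#
# An illustrative aside (not part of the original notes) on the two kinds of
# make assignment that warning is about:
#   FOO = $(BAR)    deferred: expanded every time $(FOO) is used, so a later
#                   redefinition of BAR in another .mk file leaks in.
#   FOO := $(BAR)   immediate: expanded right here, capturing BAR's current
#                   value for this .mk file.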
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
'PRODUCT_DIR': '$(builddir)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(abspath $<)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Make generator.
import gyp.generator.xcode as xcode_generator
global generator_additional_non_configuration_keys
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
global generator_additional_path_sections
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
android_ndk_version = generator_flags.get('android_ndk_version', None)
# Android NDK requires a strict link order.
if android_ndk_version:
global generator_wants_sorted_dependencies
generator_wants_sorted_dependencies = True
def ensure_directory_exists(path):
dir = os.path.dirname(path)
if dir and not os.path.exists(dir):
os.makedirs(dir)
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
# TODO(thakis): Find out and document the difference between shared_library and
# loadable_module on mac.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
# TODO(thakis): The solink_module rule is likely wrong. Xcode seems to pass
# -bundle -single_module here (for osmesa.so).
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
# C++ apps need to be linked with g++.
#
# Note: flock is used to serialize linking. Linking is a memory-intensive
# process so running parallel links can often lead to thrashing. To disable
# the serialization, override LINK via an environment variable as follows:
#
# export LINK=g++
#
# This will allow make to invoke N linker processes as specified in -jN.
LINK ?= %(flock)s $(builddir)/linker.lock $(CXX.target)
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?=
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?=
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = ln -f "$<" "$@" 2>/dev/null || (rm -rf "$@" && cp -af "$<" "$@")
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
SHARED_HEADER_SUN_COMMANDS = """
# gyp-sun-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_sun_tool = SUNTOOL $(4) $<
cmd_sun_tool = ./gyp-sun-tool $(4) $< "$@"
"""
def WriteRootHeaderSuffixRules(writer):
extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
writer.write('# Suffix rules, putting all outputs into $(obj).\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n# Try building from generated source, too.\n')
for ext in extensions:
writer.write(
'$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
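# For reference, for the '.c' entry of COMPILABLE_EXTENSIONS (defined below)
# the function above emits rules of this shape:
#   $(obj).$(TOOLSET)/%.o: $(srcdir)/%.c FORCE_DO_CMD
#       @$(call do_cmd,cc,1)
# followed by the same pattern against $(obj).$(TOOLSET)/% and $(obj)/% for
# generated sources.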
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
}
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
for res in (filename.endswith(e) for e in COMPILABLE_EXTENSIONS):
if res:
return True
return False
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def Target(filename):
"""Translate a compilable filename to its .o target."""
return os.path.splitext(filename)[0] + '.o'
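# Illustrative behaviour of the helpers above (example inputs only):
#   Compilable('foo/bar.cc') -> True     Compilable('foo/bar.h') -> False
#   Linkable('foo/bar.o')    -> True
#   Target('foo/bar.cc')     -> 'foo/bar.o'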
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally."""
return s.replace('$', '$$')
def EscapeCppDefine(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = EscapeShellArgument(s)
s = EscapeMakeVariableExpansion(s)
# '#' characters must be escaped even embedded in a string, else Make will
# treat it as the start of a comment.
return s.replace('#', r'\#')
def QuoteIfNecessary(string):
"""TODO: Should this ideally be replaced with one or more of the above
functions?"""
if '"' in string:
string = '"' + string.replace('"', '\\"') + '"'
return string
def StringToMakefileVariable(string):
"""Convert a string to a value that is acceptable as a make variable name."""
return re.sub('[^a-zA-Z0-9_]', '_', string)
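# For example (input is illustrative): StringToMakefileVariable('my-target#1')
# returns 'my_target_1', since every character outside [a-zA-Z0-9_] becomes an
# underscore.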
srcdir_prefix = ''
def Sourceify(path):
"""Convert a path to its source directory form."""
if '$(' in path:
return path
if os.path.isabs(path):
return path
return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
return s.replace(' ', quote)
# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter:
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, generator_flags, flavor):
self.generator_flags = generator_flags
self.flavor = flavor
self.suffix_rules_srcdir = {}
self.suffix_rules_objdir1 = {}
self.suffix_rules_objdir2 = {}
# Generate suffix rules for all compilable extensions.
for ext in COMPILABLE_EXTENSIONS.keys():
# Suffix rules for source folder.
self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
# Suffix rules for generated source files.
self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
def Write(self, qualified_target, base_path, output_filename, spec, configs,
part_of_all):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
ensure_directory_exists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
else:
self.xcode_settings = None
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
extra_link_deps = []
extra_mac_bundle_resources = []
mac_bundle_deps = []
if self.is_mac_bundle:
self.output = self.ComputeMacBundleOutput(spec)
self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
else:
self.output = self.output_binary = self.ComputeOutput(spec)
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
'shared_library')
if (self.is_standalone_static_library or
self.type in self._INSTALLABLE_TARGETS):
self.alias = os.path.basename(self.output)
install_path = self._InstallableTargetInstallPath()
else:
self.alias = self.output
install_path = self.output
self.WriteLn("TOOLSET := " + self.toolset)
self.WriteLn("TARGET := " + self.target)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
# Bundle resources.
if self.is_mac_bundle:
all_mac_bundle_resources = (
spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
self.WriteMacInfoPlist(mac_bundle_deps)
# Sources.
all_sources = spec.get('sources', []) + extra_sources
if all_sources:
self.WriteSources(
configs, deps, all_sources, extra_outputs,
extra_link_deps, part_of_all,
gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
self.Pchify))
sources = filter(Compilable, all_sources)
if sources:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
extensions = set([os.path.splitext(s)[1] for s in sources])
for ext in extensions:
if ext in self.suffix_rules_srcdir:
self.WriteLn(self.suffix_rules_srcdir[ext])
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
for ext in extensions:
if ext in self.suffix_rules_objdir1:
self.WriteLn(self.suffix_rules_objdir1[ext])
for ext in extensions:
if ext in self.suffix_rules_objdir2:
self.WriteLn(self.suffix_rules_objdir2[ext])
self.WriteLn('# End of this set of suffix rules')
# Add dependency from bundle to bundle binary.
if self.is_mac_bundle:
mac_bundle_deps.append(self.output_binary)
self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
mac_bundle_deps, extra_outputs, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = install_path
# Update global list of link dependencies.
if self.type in ('static_library', 'shared_library'):
target_link_deps[qualified_target] = self.output_binary
    # Currently all versions have the same effect, but in the future the
    # behavior could be different.
if self.generator_flags.get('android_ndk_version', None):
self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small, wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
ensure_directory_exists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
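  # Sketch of the wrapper Makefile this produces (directory and target names
  # are hypothetical):
  #   export builddir_name ?= sub/project/out
  #   .PHONY: all
  #   all:
  #       $(MAKE) -C ../.. subtarget_a subtarget_b
  # i.e. it simply re-invokes the top-level Makefile for this sub-project's
  # targets.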
def WriteActions(self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for action in actions:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Write the actual command.
action_commands = action['action']
if self.flavor == 'mac':
action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action_commands]
command = gyp.common.EncodePOSIXShellList(action_commands)
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# command and cd_action get written to a toplevel variable called
# cmd_foo. Toplevel variables can't handle things that change per
# makefile like $(TARGET), so hardcode the target.
command = command.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
# actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
'export LD_LIBRARY_PATH; '
'%s%s'
% (name, cd_action, command))
self.WriteLn()
outputs = map(self.Absolutify, outputs)
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
# Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
# Same for environment.
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
for input in inputs:
assert ' ' not in input, (
"Spaces in action input filenames not supported (%s)" % input)
for output in outputs:
assert ' ' not in output, (
"Spaces in action output filenames not supported (%s)" % output)
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn()
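  # Roughly, for a single action the loop above emits Makefile text along
  # these lines (target, action and file names are hypothetical):
  #   quiet_cmd_foo_gyp_foo_gen_ver = ACTION Generating version header $@
  #   cmd_foo_gyp_foo_gen_ver = LD_LIBRARY_PATH=$(builddir)/lib.host:\
  #     $(builddir)/lib.target:$$LD_LIBRARY_PATH; export LD_LIBRARY_PATH; \
  #     cd src/foo; mkdir -p $(obj)/gen; python gen_ver.py
  #   $(obj)/gen/ver.h: obj := $(abs_obj)
  #   $(obj)/gen/ver.h: builddir := $(abs_builddir)
  #   $(obj)/gen/ver.h: src/foo/gen_ver.py FORCE_DO_CMD
  #       $(call do_cmd,foo_gyp_foo_gen_ver)
  #   all_deps += $(obj)/gen/ver.h
  #   action_foo_gyp_foo_gen_ver_outputs := $(obj)/gen/ver.h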
def WriteRules(self, rules, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for rule in rules:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule.get('rule_sources', []):
dirs = set()
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
for out in outputs:
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
outputs = map(self.Absolutify, outputs)
all_outputs += outputs
# Only write the 'obj' and 'builddir' rules for the "primary" output
# (:1); it's superfluous for the "extra outputs", and this avoids
# accidentally writing duplicate dummy rules for those outputs.
self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
self.WriteMakeRule(outputs, inputs + ['FORCE_DO_CMD'], actions)
for output in outputs:
assert ' ' not in output, (
"Spaces in rule filenames not yet supported (%s)" % output)
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root,
rule_source_dirname)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# action, cd_action, and mkdirs get written to a toplevel variable
# called cmd_foo. Toplevel variables can't handle things that change
# per makefile like $(TARGET), so hardcode the target.
if self.flavor == 'mac':
action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action]
action = gyp.common.EncodePOSIXShellList(action)
action = action.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
mkdirs = mkdirs.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the rule runs an executable from this
# build which links to shared libs from this build.
# rules run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn(
"cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
"$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
"export LD_LIBRARY_PATH; "
"%(cd_action)s%(mkdirs)s%(action)s" % {
'action': action,
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
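  # Each copied file becomes an ordinary do_cmd rule, roughly (file names
  # hypothetical):
  #   $(builddir)/data/icon.png: $(srcdir)/foo/icon.png FORCE_DO_CMD
  #       $(call do_cmd,copy)
  # All copy outputs are then collected into the <target>_copies variable that
  # callers pick up through extra_outputs.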
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
map(Sourceify, map(self.Absolutify, resources))):
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
part_of_all=True)
bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
"""Write Makefile code for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
lambda p: Sourceify(self.Absolutify(p)))
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
os.path.basename(info_plist))
self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
quoter=EscapeCppDefine)
self.WriteMakeRule([intermediate_plist], [info_plist],
['$(call do_cmd,infoplist)',
# "Convert" the plist so that any weird whitespace changes from the
# preprocessor do not affect the XML parser in mac_tool.
'@plutil -convert xml1 $@ $@'])
info_plist = intermediate_plist
    # plists can contain envvars; write the env so they get substituted into
    # the file.
self.WriteSortedXcodeEnv(
out, self.GetSortedXcodeEnv(additional_settings=extra_env))
self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
part_of_all=True)
bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
self.WriteLn("# Flags passed to all source files.");
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.");
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.");
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.");
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.");
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
compilable = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
    pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += filter(Linkable, sources)
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
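  # Illustrative example (hypothetical targets): if 'mybinary' depends on a
  # static_library 'liba' and on a 'none' target 'gen_headers', deps ends up
  # containing the outputs of both (so they are built first), while link_deps
  # contains only liba's archive, because only library-type targets are
  # recorded in target_link_deps.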
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
self.WriteMakeRule([self.output_binary], extra_outputs,
comment = 'Build our special outputs first.',
order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.GetTargetPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
      # so we must output their definitions first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
      # the bundle is packaged, not merely after the bundle binary is done.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
# Since this target depends on binary and resources which are in
# nested subfolders, the framework directory will be older than
# its dependencies usually. To prevent this rule from executing
      # on every build (expensive, especially with postbuilds), explicitly
# update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
if (self.flavor == 'mac' and not 'product_dir' in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
postbuilds=False):
"""Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
as well as support the V= make command line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
comment = comment,
force = True)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
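  # For example, WriteDoCmd(['foo.stamp'], ['a', 'b'], 'touch', part_of_all)
  # (arguments hypothetical) produces roughly:
  #   foo.stamp: TOOLSET := $(TOOLSET)
  #   foo.stamp: a b FORCE_DO_CMD
  #       $(call do_cmd,touch)
  #   all_deps += foo.stamp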
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
# TODO(evanm): just make order_only a list of deps instead of these hacks.
if order_only:
order_insert = '| '
pick_output = ' '.join(outputs)
else:
order_insert = ''
pick_output = outputs[0]
if force:
force_append = ' FORCE_DO_CMD'
else:
force_append = ''
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
self.WriteLn('%s: %s%s%s' % (pick_output, order_insert, ' '.join(inputs),
force_append))
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
if not order_only and len(outputs) > 1:
# If we have more than one output, a rule like
# foo bar: baz
      # means that for *each* output we must run the action, potentially
# in parallel. That is not what we're trying to write -- what
# we want is that we run the action once and it generates all
# the files.
# http://www.gnu.org/software/hello/manual/automake/Multiple-Outputs.html
# discusses this problem and has this solution:
# 1) Write the naive rule that would produce parallel runs of
# the action.
      #    2) Make the outputs serialized on each other, so we won't start
# a parallel run until the first run finishes, at which point
# we'll have generated all the outputs and we're done.
self.WriteLn('%s: %s' % (' '.join(outputs[1:]), outputs[0]))
# Add a dummy command to the "extra outputs" rule, otherwise make seems to
# think these outputs haven't (couldn't have?) changed, and thus doesn't
# flag them as changed (i.e. include in '$?') when evaluating dependent
# rules, which in turn causes do_cmd() to skip running dependent commands.
self.WriteLn('%s: ;' % (' '.join(outputs[1:])))
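      # Sketch of the emitted rules for hypothetical outputs gen/a.h gen/b.h
      # built from input x.in:
      #   gen/a.h: x.in            (with the action commands attached)
      #   gen/b.h: gen/a.h
      #   gen/b.h: ;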
self.WriteLn()
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
"""Write a set of LOCAL_XXX definitions for Android NDK.
These variable definitions will be used by Android NDK but do nothing for
non-Android applications.
Arguments:
module_name: Android NDK module name, which must be unique among all
module names.
all_sources: A list of source files (will be filtered by Compilable).
link_deps: A list of link dependencies, which must be sorted in
the order from dependencies to dependents.
"""
if self.type not in ('executable', 'shared_library', 'static_library'):
return
self.WriteLn('# Variable definitions for Android applications')
self.WriteLn('include $(CLEAR_VARS)')
self.WriteLn('LOCAL_MODULE := ' + module_name)
self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
'$(DEFS_$(BUILDTYPE)) '
                 # LOCAL_CFLAGS is applied to both C and C++. There is
# no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
# sources.
'$(CFLAGS_C_$(BUILDTYPE)) '
# $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
# LOCAL_C_INCLUDES does not expect it. So put it in
# LOCAL_CFLAGS.
'$(INCS_$(BUILDTYPE))')
# LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
self.WriteLn('LOCAL_C_INCLUDES :=')
self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
# Detect the C++ extension.
cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
default_cpp_ext = '.cpp'
for filename in all_sources:
ext = os.path.splitext(filename)[1]
if ext in cpp_ext:
cpp_ext[ext] += 1
if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
default_cpp_ext = ext
self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
'LOCAL_SRC_FILES')
# Filter out those which do not match prefix and suffix and produce
# the resulting list without prefix and suffix.
def DepsToModules(deps, prefix, suffix):
modules = []
for filepath in deps:
filename = os.path.basename(filepath)
if filename.startswith(prefix) and filename.endswith(suffix):
modules.append(filename[len(prefix):-len(suffix)])
return modules
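    # For example (hypothetical path), DepsToModules(['out/libfoo.so'], 'lib',
    # '.so') yields ['foo'].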
# Retrieve the default value of 'SHARED_LIB_SUFFIX'
params = {'flavor': 'linux'}
default_variables = {}
CalculateVariables(default_variables, params)
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['SHARED_LIB_PREFIX'],
default_variables['SHARED_LIB_SUFFIX']),
'LOCAL_SHARED_LIBRARIES')
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['STATIC_LIB_PREFIX'],
generator_default_variables['STATIC_LIB_SUFFIX']),
'LOCAL_STATIC_LIBRARIES')
if self.type == 'executable':
self.WriteLn('include $(BUILD_EXECUTABLE)')
elif self.type == 'shared_library':
self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
elif self.type == 'static_library':
self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, "$(abs_builddir)",
os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
additional_settings)
def GetSortedXcodePostbuildEnv(self):
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE', '')
# Even if strip_save_file is empty, explicitly write it. Else a postbuild
# might pick up an export from an earlier target.
return self.GetSortedXcodeEnv(
additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
# it does not -- the backslash is written to the env as literal character.
# So don't escape spaces in |env[k]|.
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
if not '$(obj)' in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
if (self.type == 'shared_library' and
(self.flavor != 'mac' or self.toolset != 'target')):
# Install all shared libs into a common directory (per toolset) for
# convenient access with LD_LIBRARY_PATH.
return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
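    # E.g. a linux shared library with toolset 'target' and alias 'libfoo.so'
    # (hypothetical) installs to '$(builddir)/lib.target/libfoo.so'; other
    # installable targets land directly in '$(builddir)/'.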
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
build_files):
"""Write the target to regenerate the Makefile."""
options = params['options']
build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
for filename in params['build_files_arg']]
gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
options.toplevel_dir)
if not gyp_binary.startswith(os.sep):
gyp_binary = os.path.join('.', gyp_binary)
root_makefile.write(
"quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
"cmd_regen_makefile = %(cmd)s\n"
"%(makefile_name)s: %(deps)s\n"
"\t$(call do_cmd,regen_makefile)\n\n" % {
'makefile_name': makefile_name,
'deps': ' '.join(map(Sourceify, build_files)),
'cmd': gyp.common.EncodePOSIXShellList(
[gyp_binary, '-fmake'] +
gyp.RegenerateFlags(options) +
build_files_args)})
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
arguments = ['make']
if options.toplevel_dir and options.toplevel_dir != '.':
arguments += '-C', options.toplevel_dir
arguments.append('BUILDTYPE=' + config)
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
android_ndk_version = generator_flags.get('android_ndk_version', None)
default_target = generator_flags.get('default_target', 'all')
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
if options.generator_output:
output_file = os.path.join(options.generator_output, output_file)
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
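  # For a hypothetical build_file 'foo/bar/baz.gyp', CalculateMakefilePath
  # returns base_path 'foo/bar' and an output_file under options.depth (and
  # under options.generator_output when that is set).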
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'Makefile' + options.suffix
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
if options.generator_output:
global srcdir_prefix
makefile_path = os.path.join(options.generator_output, makefile_path)
srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
srcdir_prefix = '$(srcdir)/'
  flock_command = 'flock'
header_params = {
'default_target': default_target,
'builddir': builddir_name,
'default_configuration': default_configuration,
'flock': flock_command,
'flock_index': 1,
'link_commands': LINK_COMMANDS_LINUX,
'extra_commands': '',
'srcdir': srcdir,
}
if flavor == 'mac':
flock_command = './gyp-mac-tool flock'
header_params.update({
'flock': flock_command,
'flock_index': 2,
'link_commands': LINK_COMMANDS_MAC,
'extra_commands': SHARED_HEADER_MAC_COMMANDS,
})
elif flavor == 'android':
header_params.update({
'link_commands': LINK_COMMANDS_ANDROID,
})
elif flavor == 'solaris':
header_params.update({
'flock': './gyp-sun-tool flock',
'flock_index': 2,
'extra_commands': SHARED_HEADER_SUN_COMMANDS,
})
elif flavor == 'freebsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
header_params.update({
'flock': 'lockf',
})
header_params.update({
'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
'LINK.target': GetEnvironFallback(('LD_target', 'LD'), '$(LINK)'),
'CC.host': GetEnvironFallback(('CC_host',), 'gcc'),
'AR.host': GetEnvironFallback(('AR_host',), 'ar'),
'CXX.host': GetEnvironFallback(('CXX_host',), 'g++'),
'LINK.host': GetEnvironFallback(('LD_host',), 'g++'),
})
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_array = data[build_file].get('make_global_settings', [])
wrappers = {}
wrappers['LINK'] = '%s $(builddir)/linker.lock' % flock_command
for key, value in make_global_settings_array:
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
make_global_settings = ''
for key, value in make_global_settings_array:
if re.match('.*_wrapper', key):
continue
if value[0] != '$':
value = '$(abspath %s)' % value
wrapper = wrappers.get(key)
if wrapper:
value = '%s %s' % (wrapper, value)
del wrappers[key]
if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
make_global_settings += (
'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
# Let gyp-time envvars win over global settings.
if key in os.environ:
value = os.environ[key]
make_global_settings += ' %s = %s\n' % (key, value)
make_global_settings += 'endif\n'
else:
make_global_settings += '%s ?= %s\n' % (key, value)
# TODO(ukai): define cmd when only wrapper is specified in
# make_global_settings.
header_params['make_global_settings'] = make_global_settings
ensure_directory_exists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(SHARED_HEADER % header_params)
  # Currently all versions have the same effect, but in the future the
  # behavior could be different.
if android_ndk_version:
root_makefile.write(
'# Define LOCAL_PATH for build of Android applications.\n'
'LOCAL_PATH := $(call my-dir)\n'
'\n')
for toolset in toolsets:
root_makefile.write('TOOLSET := %s\n' % toolset)
WriteRootHeaderSuffixRules(root_makefile)
# Put build-time support tools next to the root Makefile.
dest_path = os.path.dirname(makefile_path)
gyp.common.CopyTool(flavor, dest_path)
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings_array == this_make_global_settings, (
"make_global_settings needs to be the same for all targets.")
build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
writer = MakefileWriter(generator_flags, flavor)
writer.Write(qualified_target, base_path, output_file, spec, configs,
part_of_all=qualified_target in needed_targets)
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
# Write out per-gyp (sub-project) Makefiles.
depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
for build_file in build_files:
# The paths in build_files were relativized above, so undo that before
# testing against the non-relativized items in target_list and before
# calculating the Makefile path.
build_file = os.path.join(depth_rel_path, build_file)
gyp_targets = [target_dicts[target]['target_name'] for target in target_list
if target.startswith(build_file) and
target in needed_targets]
# Only generate Makefiles for gyp files with targets.
if not gyp_targets:
continue
base_path, output_file = CalculateMakefilePath(build_file,
os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
os.path.dirname(output_file))
writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
builddir_name)
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
# We wrap each .mk include in an if statement so users can tell make to
# not load a file by setting NO_LOAD. The below make code says, only
# load the .mk file if the .mk filename doesn't start with a token in
# NO_LOAD.
root_makefile.write(
"ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
" $(findstring $(join ^,$(prefix)),\\\n"
" $(join ^," + include_file + ")))),)\n")
root_makefile.write(" include " + include_file + "\n")
root_makefile.write("endif\n")
root_makefile.write('\n')
if (not generator_flags.get('standalone')
and generator_flags.get('auto_regeneration', True)):
WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
root_makefile.write(SHARED_FOOTER)
root_makefile.close()
| gpl-2.0 | -4,858,679,361,650,055,000 | 40.060028 | 180 | 0.631191 | false |
sobercoder/gem5 | src/arch/x86/isa/insts/x87/load_constants/load_0_1_or_pi.py | 70 | 2454 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Nilay Vaish
microcode = '''
def macroop FLDZ {
lfpimm ufp1, 0.0
movfp st(-1), ufp1, spm=-1
};
def macroop FLD1 {
lfpimm ufp1, 1.0
movfp st(-1), ufp1, spm=-1
};
def macroop FLDPI {
lfpimm ufp1, 3.14159265359
movfp st(-1), ufp1, spm=-1
};
'''
| bsd-3-clause | -4,661,555,339,145,643,000 | 42.052632 | 72 | 0.768134 | false |
nekrut/tools-iuc | tools/vsnp/vsnp_determine_ref_from_data.py | 12 | 9491 | #!/usr/bin/env python
import argparse
import gzip
import os
from collections import OrderedDict
import yaml
from Bio.SeqIO.QualityIO import FastqGeneralIterator
OUTPUT_DBKEY_DIR = 'output_dbkey'
OUTPUT_METRICS_DIR = 'output_metrics'
def get_sample_name(file_path):
base_file_name = os.path.basename(file_path)
if base_file_name.find(".") > 0:
# Eliminate the extension.
return os.path.splitext(base_file_name)[0]
return base_file_name
def get_dbkey(dnaprints_dict, key, s):
# dnaprints_dict looks something like this:
# {'brucella': {'NC_002945v4': ['11001110', '11011110', '11001100']}
# {'bovis': {'NC_006895': ['11111110', '00010010', '01111011']}}
d = dnaprints_dict.get(key, {})
for data_table_value, v_list in d.items():
if s in v_list:
return data_table_value
return ""
def get_dnaprints_dict(dnaprint_fields):
    # A dnaprint_fields entry looks something like this.
# [['AF2122', '/galaxy/tool-data/vsnp/AF2122/dnaprints/NC_002945v4.yml']]
dnaprints_dict = {}
for item in dnaprint_fields:
# Here item is a 2-element list of data
        # table components, value and path.
value = item[0]
path = item[1].strip()
with open(path, "rt") as fh:
# The format of all dnaprints yaml
# files is something like this:
# brucella:
# - 0111111111111111
print_dict = yaml.load(fh, Loader=yaml.Loader)
for print_dict_k, print_dict_v in print_dict.items():
dnaprints_v_dict = dnaprints_dict.get(print_dict_k, {})
if len(dnaprints_v_dict) > 0:
                # dnaprints_dict already contains k (e.g., 'brucella'),
                # and dnaprints_v_dict will be a dictionary that
# looks something like this:
# {'NC_002945v4': ['11001110', '11011110', '11001100']}
value_list = dnaprints_v_dict.get(value, [])
value_list = value_list + print_dict_v
dnaprints_v_dict[value] = value_list
else:
# dnaprints_v_dict is an empty dictionary.
dnaprints_v_dict[value] = print_dict_v
dnaprints_dict[print_dict_k] = dnaprints_v_dict
# dnaprints_dict looks something like this:
# {'brucella': {'NC_002945v4': ['11001110', '11011110', '11001100']}
# {'bovis': {'NC_006895': ['11111110', '00010010', '01111011']}}
return dnaprints_dict
def get_group_and_dbkey(dnaprints_dict, brucella_string, brucella_sum, bovis_string, bovis_sum, para_string, para_sum):
if brucella_sum > 3:
group = "Brucella"
dbkey = get_dbkey(dnaprints_dict, "brucella", brucella_string)
elif bovis_sum > 3:
group = "TB"
dbkey = get_dbkey(dnaprints_dict, "bovis", bovis_string)
elif para_sum >= 1:
group = "paraTB"
dbkey = get_dbkey(dnaprints_dict, "para", para_string)
else:
group = ""
dbkey = ""
return group, dbkey
def get_oligo_dict():
oligo_dict = {}
oligo_dict["01_ab1"] = "AATTGTCGGATAGCCTGGCGATAACGACGC"
oligo_dict["02_ab3"] = "CACACGCGGGCCGGAACTGCCGCAAATGAC"
oligo_dict["03_ab5"] = "GCTGAAGCGGCAGACCGGCAGAACGAATAT"
oligo_dict["04_mel"] = "TGTCGCGCGTCAAGCGGCGTGAAATCTCTG"
oligo_dict["05_suis1"] = "TGCGTTGCCGTGAAGCTTAATTCGGCTGAT"
oligo_dict["06_suis2"] = "GGCAATCATGCGCAGGGCTTTGCATTCGTC"
oligo_dict["07_suis3"] = "CAAGGCAGATGCACATAATCCGGCGACCCG"
oligo_dict["08_ceti1"] = "GTGAATATAGGGTGAATTGATCTTCAGCCG"
oligo_dict["09_ceti2"] = "TTACAAGCAGGCCTATGAGCGCGGCGTGAA"
oligo_dict["10_canis4"] = "CTGCTACATAAAGCACCCGGCGACCGAGTT"
oligo_dict["11_canis"] = "ATCGTTTTGCGGCATATCGCTGACCACAGC"
oligo_dict["12_ovis"] = "CACTCAATCTTCTCTACGGGCGTGGTATCC"
oligo_dict["13_ether2"] = "CGAAATCGTGGTGAAGGACGGGACCGAACC"
oligo_dict["14_63B1"] = "CCTGTTTAAAAGAATCGTCGGAACCGCTCT"
oligo_dict["15_16M0"] = "TCCCGCCGCCATGCCGCCGAAAGTCGCCGT"
oligo_dict["16_mel1b"] = "TCTGTCCAAACCCCGTGACCGAACAATAGA"
oligo_dict["17_tb157"] = "CTCTTCGTATACCGTTCCGTCGTCACCATGGTCCT"
oligo_dict["18_tb7"] = "TCACGCAGCCAACGATATTCGTGTACCGCGACGGT"
oligo_dict["19_tbbov"] = "CTGGGCGACCCGGCCGACCTGCACACCGCGCATCA"
oligo_dict["20_tb5"] = "CCGTGGTGGCGTATCGGGCCCCTGGATCGCGCCCT"
oligo_dict["21_tb2"] = "ATGTCTGCGTAAAGAAGTTCCATGTCCGGGAAGTA"
oligo_dict["22_tb3"] = "GAAGACCTTGATGCCGATCTGGGTGTCGATCTTGA"
oligo_dict["23_tb4"] = "CGGTGTTGAAGGGTCCCCCGTTCCAGAAGCCGGTG"
oligo_dict["24_tb6"] = "ACGGTGATTCGGGTGGTCGACACCGATGGTTCAGA"
oligo_dict["25_para"] = "CCTTTCTTGAAGGGTGTTCG"
oligo_dict["26_para_sheep"] = "CGTGGTGGCGACGGCGGCGGGCCTGTCTAT"
oligo_dict["27_para_cattle"] = "TCTCCTCGGTCGGTGATTCGGGGGCGCGGT"
return oligo_dict
def get_seq_counts(value, fastq_list, gzipped):
count = 0
for fastq_file in fastq_list:
if gzipped:
with gzip.open(fastq_file, 'rt') as fh:
for title, seq, qual in FastqGeneralIterator(fh):
count += seq.count(value)
else:
with open(fastq_file, 'r') as fh:
for title, seq, qual in FastqGeneralIterator(fh):
count += seq.count(value)
    return value, count
def get_species_counts(fastq_list, gzipped):
count_summary = {}
oligo_dict = get_oligo_dict()
for v1 in oligo_dict.values():
returned_value, count = get_seq_counts(v1, fastq_list, gzipped)
for key, v2 in oligo_dict.items():
if returned_value == v2:
count_summary.update({key: count})
count_list = []
for v in count_summary.values():
count_list.append(v)
brucella_sum = sum(count_list[:16])
bovis_sum = sum(count_list[16:24])
para_sum = sum(count_list[24:])
return count_summary, count_list, brucella_sum, bovis_sum, para_sum
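    # Note: count_list[:16] holds the Brucella oligo hits, [16:24] the TB
    # (bovis) hits and [24:] the paraTB hits; the three sums drive the group
    # choice in get_group_and_dbkey() above.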
def get_species_strings(count_summary):
binary_dictionary = {}
for k, v in count_summary.items():
if v > 1:
binary_dictionary.update({k: 1})
else:
binary_dictionary.update({k: 0})
binary_dictionary = OrderedDict(sorted(binary_dictionary.items()))
binary_list = []
for v in binary_dictionary.values():
binary_list.append(v)
brucella_binary = binary_list[:16]
brucella_string = ''.join(str(e) for e in brucella_binary)
bovis_binary = binary_list[16:24]
bovis_string = ''.join(str(e) for e in bovis_binary)
para_binary = binary_list[24:]
para_string = ''.join(str(e) for e in para_binary)
return brucella_string, bovis_string, para_string
def output_dbkey(file_name, dbkey, output_file):
# Output the dbkey.
with open(output_file, "w") as fh:
fh.write("%s" % dbkey)
def output_files(fastq_file, count_list, group, dbkey, dbkey_file, metrics_file):
base_file_name = get_sample_name(fastq_file)
output_dbkey(base_file_name, dbkey, dbkey_file)
output_metrics(base_file_name, count_list, group, dbkey, metrics_file)
def output_metrics(file_name, count_list, group, dbkey, output_file):
# Output the metrics.
with open(output_file, "w") as fh:
fh.write("Sample: %s\n" % file_name)
fh.write("Brucella counts: ")
for i in count_list[:16]:
fh.write("%d," % i)
fh.write("\nTB counts: ")
for i in count_list[16:24]:
fh.write("%d," % i)
fh.write("\nPara counts: ")
for i in count_list[24:]:
fh.write("%d," % i)
fh.write("\nGroup: %s" % group)
fh.write("\ndbkey: %s\n" % dbkey)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dnaprint_fields', action='append', dest='dnaprint_fields', nargs=2, help="List of dnaprints data table value, name and path fields")
parser.add_argument('--read1', action='store', dest='read1', help='Required: single read')
parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')
parser.add_argument('--gzipped', action='store_true', dest='gzipped', help='Input files are gzipped')
parser.add_argument('--output_dbkey', action='store', dest='output_dbkey', help='Output reference file')
parser.add_argument('--output_metrics', action='store', dest='output_metrics', help='Output metrics file')
args = parser.parse_args()
fastq_list = [args.read1]
if args.read2 is not None:
fastq_list.append(args.read2)
# The value of dnaprint_fields is a list of lists, where each list is
# the [value, name, path] components of the vsnp_dnaprints data table.
# The data_manager_vsnp_dnaprints tool assigns the dbkey column from the
# all_fasta data table to the value column in the vsnp_dnaprints data
# table to ensure a proper mapping for discovering the dbkey.
dnaprints_dict = get_dnaprints_dict(args.dnaprint_fields)
# Here fastq_list consists of either a single read
# or a set of paired reads, producing single outputs.
count_summary, count_list, brucella_sum, bovis_sum, para_sum = get_species_counts(fastq_list, args.gzipped)
brucella_string, bovis_string, para_string = get_species_strings(count_summary)
group, dbkey = get_group_and_dbkey(dnaprints_dict, brucella_string, brucella_sum, bovis_string, bovis_sum, para_string, para_sum)
output_files(args.read1, count_list, group, dbkey, dbkey_file=args.output_dbkey, metrics_file=args.output_metrics)
| mit | -4,905,994,647,840,912,000 | 41.182222 | 159 | 0.645664 | false |
robojukie/myrobotlab | src/resource/VirtualDevice/Arduino.py | 3 | 3258 | #############################################
# This is a basic script to emulate the hardware of
# an Arduino microcontroller. The VirtualDevice
# service will execute this script when
# createVirtualArduino(port) is called
import time
import math
import threading
from random import randint
from org.myrobotlab.codec import ArduinoMsgCodec
working = False
worker = None
analogReadPollingPins = []
digitalReadPollingPins = []
def work():
"""thread worker function"""
global working, analogReadPollingPins
x = 0
working = True
while(working):
x = x + 0.09
y = int(math.cos(x) * 100 + 150)
# retcmd = "publishPin/" + str(pin) + "/3/"+ str(y) +"\n"
# uart.write(codec.encode(retcmd))
for pinx in digitalReadPollingPins:
retcmd = "publishPin/" + str(pinx) + "/0/"+str(randint(0,1))+"\n"
uart.write(codec.encode(retcmd))
for pinx in analogReadPollingPins:
#retcmd = "publishPin/" + str(pinx) + "/4/"+ str(y) +"\n"
retcmd = "publishPin/" + str(pinx) + "/" + str(int(pinx)%4) + "/"+ str(y) +"\n"
uart.write(codec.encode(retcmd))
sleep(0.001)
#print (y)
  # TODO: stop the worker once nothing is left to poll, e.g.
  # if len(digitalReadPollingPins) == 0 and len(analogReadPollingPins) == 0:
  #     working = False
print("I am done !")
codec = ArduinoMsgCodec()
virtual = Runtime.getService("virtual")
uart = virtual.getUART()
uart.setCodec("arduino")
logic = virtual.getLogic()
logic.subscribe(uart, "publishRX", "onByte")
logic.subscribe(uart, "publishConnect", "onConnect")
logic.subscribe(uart, "publishPortNames", "onPortNames")
logic.subscribe(uart, "publishDisconnect", "onDisconnect")
def onByte(b):
global working, worker, analogReadPollingPins
print("onByte", b)
command = codec.decode(b)
if command != None and len(command) > 0 :
print("decoded", command)
# rstrip trips the \n from the record
command = command.rstrip()
clist = command.split('/')
if command == "getVersion":
uart.write(codec.encode("publishVersion/21\n"))
elif command.startswith("digitalReadPollingStart"):
print("digitalReadPollingStart")
pin = clist[1]
digitalReadPollingPins.append(pin)
if worker == None:
worker = threading.Thread(name='worker', target=work)
worker.setDaemon(True)
worker.start()
elif command.startswith("digitalReadPollingStop"):
print("digitalReadPollingStop")
pin = clist[1]
digitalReadPollingPins.remove(pin)
elif command.startswith("analogReadPollingStart"):
print("analogReadPollingStart")
pin = clist[1]
analogReadPollingPins.append(pin)
if worker == None:
worker = threading.Thread(name='worker', target=work)
worker.setDaemon(True)
worker.start()
elif command.startswith("analogReadPollingStop"):
print("analogReadPollingStop")
pin = clist[1]
analogReadPollingPins.remove(pin)
def off():
working = False
worker = None
def onConnect(portName):
print("connected to ", portName)
def onPortNames(portName):
print("TODO - list portNames")
def onDisconnect(portName):
print("disconnected from ", portName)
| apache-2.0 | -504,969,585,771,665,660 | 27.578947 | 86 | 0.640577 | false |
CouchPotato/CouchPotatoServer | libs/tornado/gen.py | 64 | 35105 | """``tornado.gen`` is a generator-based interface to make it easier to
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
could be written with ``gen`` as::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`.
You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
If the `~functools.singledispatch` library is available (standard in
Python 3.4, available via the `singledispatch
<https://pypi.python.org/pypi/singledispatch>`_ package on older
versions), additional types of objects may be yielded. Tornado includes
support for ``asyncio.Future`` and Twisted's ``Deferred`` class when
``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported.
See the `convert_yielded` function to extend this mechanism.
.. versionchanged:: 3.2
Dict support added.
.. versionchanged:: 4.1
Support added for yielding ``asyncio`` Futures and Twisted Deferreds
via ``singledispatch``.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import functools
import itertools
import sys
import types
import weakref
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
try:
from functools import singledispatch # py34+
except ImportError as e:
try:
from singledispatch import singledispatch # backport
except ImportError:
singledispatch = None
class KeyReuseError(Exception):
pass
class UnknownKeyError(Exception):
pass
class LeakedCallbackError(Exception):
pass
class BadYieldError(Exception):
pass
class ReturnValueIgnoredError(Exception):
pass
class TimeoutError(Exception):
"""Exception raised by ``with_timeout``."""
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError(
"@gen.engine functions cannot return values: %r" %
(future.result(),))
# The engine interface doesn't give us any way to return
            # errors other than to raise them into the stack context.
# Save the stack context here to use when the Future has resolved.
future.add_done_callback(stack_context.wrap(final_callback))
return wrapper
def coroutine(func, replace_callback=True):
"""Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped
in either this decorator or `engine`.
Coroutines may "return" by raising the special exception
`Return(value) <Return>`. In Python 3.3+, it is also possible for
the function to simply use the ``return value`` statement (prior to
Python 3.3 generators were not allowed to also return values).
In all versions of Python a coroutine that simply wishes to exit
early may use the ``return`` statement without a value.
Functions with this decorator return a `.Future`. Additionally,
they may be called with a ``callback`` keyword argument, which
will be invoked with the future's result when it resolves. If the
coroutine fails, the callback will not be run and an exception
will be raised into the surrounding `.StackContext`. The
``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
.. warning::
When exceptions occur inside a coroutine, the exception
information will be stored in the `.Future` object. You must
examine the result of the `.Future` object, or the exception
may go unnoticed by your code. This means yielding the function
if called from another coroutine, using something like
`.IOLoop.run_sync` for top-level calls, or passing the `.Future`
to `.IOLoop.add_future`.
"""
return _make_coroutine_wrapper(func, replace_callback=True)
def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
The two decorators differ in their treatment of the ``callback``
argument, so we cannot simply implement ``@engine`` in terms of
``@coroutine``.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = TracebackFuture()
if replace_callback and 'callback' in kwargs:
callback = kwargs.pop('callback')
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
try:
result = func(*args, **kwargs)
except (Return, StopIteration) as e:
result = getattr(e, 'value', None)
except Exception:
future.set_exc_info(sys.exc_info())
return future
else:
if isinstance(result, types.GeneratorType):
# Inline the first iteration of Runner.run. This lets us
# avoid the cost of creating a Runner when the coroutine
# never actually yields, which in turn allows us to
# use "optional" coroutines in critical path code without
# performance penalty for the synchronous case.
try:
orig_stack_contexts = stack_context._state.contexts
yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts:
yielded = TracebackFuture()
yielded.set_exception(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
future.set_result(getattr(e, 'value', None))
except Exception:
future.set_exc_info(sys.exc_info())
else:
Runner(result, future, yielded)
try:
return future
finally:
# Subtle memory optimization: if next() raised an exception,
# the future's exc_info contains a traceback which
# includes this stack frame. This creates a cycle,
# which will be collected at the next full GC but has
# been shown to greatly increase memory usage of
# benchmarks (relative to the refcount-based scheme
# used in the absence of cycles). We can avoid the
# cycle by clearing the local variable after we return it.
future = None
future.set_result(result)
return future
return wrapper
class Return(Exception):
"""Special exception to return a value from a `coroutine`.
If this exception is raised, its value argument is used as the
result of the coroutine::
@gen.coroutine
def fetch_json(url):
response = yield AsyncHTTPClient().fetch(url)
raise gen.Return(json_decode(response.body))
In Python 3.3, this exception is no longer necessary: the ``return``
statement can be used directly to return a value (previously
``yield`` and ``return`` with a value could not be combined in the
same function).
By analogy with the return statement, the value argument is optional,
but it is never necessary to ``raise gen.Return()``. The ``return``
statement can be used with no arguments instead.
"""
def __init__(self, value=None):
super(Return, self).__init__()
self.value = value
class WaitIterator(object):
"""Provides an iterator to yield the results of futures as they finish.
Yielding a set of futures like this:
``results = yield [future1, future2]``
pauses the coroutine until both ``future1`` and ``future2``
return, and then restarts the coroutine with the results of both
futures. If either future is an exception, the expression will
raise that exception and all the results will be lost.
If you need to get the result of each future as soon as possible,
or if you need the result of some futures even if others produce
errors, you can use ``WaitIterator``:
::
wait_iterator = gen.WaitIterator(future1, future2)
while not wait_iterator.done():
try:
result = yield wait_iterator.next()
except Exception as e:
print "Error {} from {}".format(e, wait_iterator.current_future)
else:
print "Result {} recieved from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index)
Because results are returned as soon as they are available the
output from the iterator *will not be in the same order as the
input arguments*. If you need to know which future produced the
current result, you can use the attributes
``WaitIterator.current_future``, or ``WaitIterator.current_index``
to get the index of the future from the input list. (if keyword
arguments were used in the construction of the `WaitIterator`,
``current_index`` will use the corresponding keyword).
.. versionadded:: 4.1
"""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise ValueError(
"You must provide args or kwargs, not both")
if kwargs:
self._unfinished = dict((f, k) for (k, f) in kwargs.items())
futures = list(kwargs.values())
else:
self._unfinished = dict((f, i) for (i, f) in enumerate(args))
futures = args
self._finished = collections.deque()
self.current_index = self.current_future = None
self._running_future = None
self_ref = weakref.ref(self)
for future in futures:
future.add_done_callback(functools.partial(
self._done_callback, self_ref))
def done(self):
"""Returns True if this iterator has no more results."""
if self._finished or self._unfinished:
return False
# Clear the 'current' values when iteration is done.
self.current_index = self.current_future = None
return True
def next(self):
"""Returns a `.Future` that will yield the next available result.
Note that this `.Future` will not be the same object as any of
the inputs.
"""
self._running_future = TracebackFuture()
if self._finished:
self._return_result(self._finished.popleft())
return self._running_future
@staticmethod
def _done_callback(self_ref, done):
self = self_ref()
if self is not None:
if self._running_future and not self._running_future.done():
self._return_result(done)
else:
self._finished.append(done)
def _return_result(self, done):
"""Called set the returned future's state that of the future
we yielded, and set the current future for the iterator.
"""
chain_future(done, self._running_future)
self.current_future = done
self.current_index = self._unfinished.pop(done)
class YieldPoint(object):
"""Base class for objects that may be yielded from the generator.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def start(self, runner):
"""Called by the runner after the generator has yielded.
No other methods will be called on this object before ``start``.
"""
raise NotImplementedError()
def is_ready(self):
"""Called by the runner to determine whether to resume the generator.
Returns a boolean; may be called more than once.
"""
raise NotImplementedError()
def get_result(self):
"""Returns the value to use as the result of the yield expression.
This method will only be called once, and only after `is_ready`
has returned true.
"""
raise NotImplementedError()
class Callback(YieldPoint):
"""Returns a callable object that will allow a matching `Wait` to proceed.
The key may be any value suitable for use as a dictionary key, and is
used to match ``Callbacks`` to their corresponding ``Waits``. The key
must be unique among outstanding callbacks within a single run of the
generator function, but may be reused across different runs of the same
function (so constants generally work fine).
The callback may be called with zero or one arguments; if an argument
is given it will be returned by `Wait`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
runner.register_callback(self.key)
def is_ready(self):
return True
def get_result(self):
return self.runner.result_callback(self.key)
class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
def is_ready(self):
return self.runner.is_ready(self.key)
def get_result(self):
return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
"""Returns the results of multiple previous `Callbacks <Callback>`.
The argument is a sequence of `Callback` keys, and the result is
a list of results in the same order.
`WaitAll` is equivalent to yielding a list of `Wait` objects.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, keys):
self.keys = keys
def start(self, runner):
self.runner = runner
def is_ready(self):
return all(self.runner.is_ready(key) for key in self.keys)
def get_result(self):
return [self.runner.pop_result(key) for key in self.keys]
def Task(func, *args, **kwargs):
"""Adapts a callback-based asynchronous function for use in coroutines.
Takes a function (and optional additional arguments) and runs it with
those arguments plus a ``callback`` keyword argument. The argument passed
to the callback is returned as the result of the yield expression.
.. versionchanged:: 4.0
``gen.Task`` is now a function that returns a `.Future`, instead of
a subclass of `YieldPoint`. It still behaves the same way when
yielded.
"""
future = Future()
def handle_exception(typ, value, tb):
if future.done():
return False
future.set_exc_info((typ, value, tb))
return True
def set_result(result):
if future.done():
return
future.set_result(result)
with stack_context.ExceptionStackContext(handle_exception):
func(*args, callback=_argument_adapter(set_result), **kwargs)
return future
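# Task usage sketch (hypothetical callback-style function some_async_op):
#   result = yield gen.Task(some_async_op, arg)
# is roughly equivalent to calling some_async_op(arg, callback=...) and
# waiting for the argument passed to that callback.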
class YieldFuture(YieldPoint):
def __init__(self, future, io_loop=None):
"""Adapts a `.Future` to the `YieldPoint` interface.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
self.future = future
self.io_loop = io_loop or IOLoop.current()
def start(self, runner):
if not self.future.done():
self.runner = runner
self.key = object()
runner.register_callback(self.key)
self.io_loop.add_future(self.future, runner.result_callback(self.key))
else:
self.runner = None
self.result = self.future.result()
def is_ready(self):
if self.runner is not None:
return self.runner.is_ready(self.key)
else:
return True
def get_result(self):
if self.runner is not None:
return self.runner.pop_result(self.key).result()
else:
return self.result
class Multi(YieldPoint):
"""Runs multiple asynchronous operations in parallel.
Takes a list of ``YieldPoints`` or ``Futures`` and returns a list of
their responses. It is not necessary to call `Multi` explicitly,
since the engine will do so automatically when the generator yields
a list of ``YieldPoints`` or a mixture of ``YieldPoints`` and ``Futures``.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
"""
def __init__(self, children):
self.keys = None
if isinstance(children, dict):
self.keys = list(children.keys())
children = children.values()
self.children = []
for i in children:
if is_future(i):
i = YieldFuture(i)
self.children.append(i)
assert all(isinstance(i, YieldPoint) for i in self.children)
self.unfinished_children = set(self.children)
def start(self, runner):
for i in self.children:
i.start(runner)
def is_ready(self):
finished = list(itertools.takewhile(
lambda i: i.is_ready(), self.unfinished_children))
self.unfinished_children.difference_update(finished)
return not self.unfinished_children
def get_result(self):
result = (i.get_result() for i in self.children)
if self.keys is not None:
return dict(zip(self.keys, result))
else:
return list(result)
def multi_future(children):
"""Wait for multiple asynchronous futures in parallel.
Takes a list of ``Futures`` (but *not* other ``YieldPoints``) and returns
a new Future that resolves when all the other Futures are done.
If all the ``Futures`` succeeded, the returned Future's result is a list
of their results. If any failed, the returned Future raises the exception
of the first one to fail.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
    It is not necessary to call `multi_future` explicitly, since the engine will
do so automatically when the generator yields a list of `Futures`.
This function is faster than the `Multi` `YieldPoint` because it does not
require the creation of a stack context.
.. versionadded:: 4.0
"""
if isinstance(children, dict):
keys = list(children.keys())
children = children.values()
else:
keys = None
assert all(is_future(i) for i in children)
unfinished_children = set(children)
future = Future()
if not children:
future.set_result({} if keys is not None else [])
def callback(f):
unfinished_children.remove(f)
if not unfinished_children:
try:
result_list = [i.result() for i in children]
except Exception:
future.set_exc_info(sys.exc_info())
else:
if keys is not None:
future.set_result(dict(zip(keys, result_list)))
else:
future.set_result(result_list)
for f in children:
f.add_done_callback(callback)
return future
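# multi_future usage sketch (hypothetical futures f1, f2): yielding [f1, f2]
# from a coroutine resolves to [f1.result(), f2.result()] once both are done;
# a dict of futures resolves to a dict of results under the same keys.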
def maybe_future(x):
"""Converts ``x`` into a `.Future`.
If ``x`` is already a `.Future`, it is simply returned; otherwise
it is wrapped in a new `.Future`. This is suitable for use as
``result = yield gen.maybe_future(f())`` when you don't know whether
``f()`` returns a `.Future` or not.
"""
if is_future(x):
return x
else:
fut = Future()
fut.set_result(x)
return fut
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
"""Wraps a `.Future` in a timeout.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
Currently only supports Futures, not other `YieldPoint` classes.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
"""
# TODO: allow yield points in addition to futures?
# Tricky to do with stack_context semantics.
#
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
result = Future()
chain_future(future, result)
if io_loop is None:
io_loop = IOLoop.current()
def error_callback(future):
try:
future.result()
except Exception as e:
if not isinstance(e, quiet_exceptions):
app_log.error("Exception in Future %r after timeout",
future, exc_info=True)
def timeout_callback():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it.
future.add_done_callback(error_callback)
timeout_handle = io_loop.add_timeout(
timeout, timeout_callback)
if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here).
future.add_done_callback(
lambda future: io_loop.remove_timeout(timeout_handle))
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future, lambda future: io_loop.remove_timeout(timeout_handle))
return result
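# with_timeout usage sketch (values hypothetical):
#   result = yield gen.with_timeout(datetime.timedelta(seconds=5), some_future)
# raises gen.TimeoutError if some_future is not done within five seconds.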
def sleep(duration):
"""Return a `.Future` that resolves after the given number of seconds.
When used with ``yield`` in a coroutine, this is a non-blocking
analogue to `time.sleep` (which should not be used in coroutines
because it is blocking)::
yield gen.sleep(0.5)
Note that calling this function on its own does nothing; you must
wait on the `.Future` it returns (usually by yielding it).
.. versionadded:: 4.1
"""
f = Future()
IOLoop.current().call_later(duration, lambda: f.set_result(None))
return f
_null_future = Future()
_null_future.set_result(None)
moment = Future()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
.. versionadded:: 4.0
"""
moment.set_result(None)
class Runner(object):
"""Internal implementation of `tornado.gen.engine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.TracebackFuture`)
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
self.result_future = result_future
self.future = _null_future
self.yield_point = None
self.pending_callbacks = None
self.results = None
self.running = False
self.finished = False
self.had_exception = False
self.io_loop = IOLoop.current()
# For efficiency, we do not create a stack context until we
# reach a YieldPoint (stack contexts are required for the historical
# semantics of YieldPoints, but not for Futures). When we have
# done so, this field will be set and must be called at the end
# of the coroutine.
self.stack_context_deactivate = None
if self.handle_yield(first_yielded):
self.run()
def register_callback(self, key):
"""Adds ``key`` to the list of callbacks."""
if self.pending_callbacks is None:
# Lazily initialize the old-style YieldPoint data structures.
self.pending_callbacks = set()
self.results = {}
if key in self.pending_callbacks:
raise KeyReuseError("key %r is already pending" % (key,))
self.pending_callbacks.add(key)
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
self.yield_point = None
self.run()
def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key)
def run(self):
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if not future.done():
return
self.future = None
try:
orig_stack_contexts = stack_context._state.contexts
try:
value = future.result()
except Exception:
self.had_exception = True
yielded = self.gen.throw(*sys.exc_info())
else:
yielded = self.gen.send(value)
if stack_context._state.contexts is not orig_stack_contexts:
self.gen.throw(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
if self.pending_callbacks and not self.had_exception:
# If we ran cleanly without waiting on all callbacks
# raise an error (really more of a warning). If we
# had an exception then some callbacks may have been
# orphaned, so skip the check in that case.
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
self.result_future.set_result(getattr(e, 'value', None))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
self.result_future.set_exc_info(sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
if not self.handle_yield(yielded):
return
finally:
self.running = False
def handle_yield(self, yielded):
# Lists containing YieldPoints require stack contexts;
# other lists are handled via multi_future in convert_yielded.
if (isinstance(yielded, list) and
any(isinstance(f, YieldPoint) for f in yielded)):
yielded = Multi(yielded)
elif (isinstance(yielded, dict) and
any(isinstance(f, YieldPoint) for f in yielded.values())):
yielded = Multi(yielded)
if isinstance(yielded, YieldPoint):
# YieldPoints are too closely coupled to the Runner to go
# through the generic convert_yielded mechanism.
self.future = TracebackFuture()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
self.future.set_result(
yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
# YieldPoint we've seen.
with stack_context.ExceptionStackContext(
self.handle_exception) as deactivate:
self.stack_context_deactivate = deactivate
def cb():
start_yield_point()
self.run()
self.io_loop.add_callback(cb)
return False
else:
start_yield_point()
else:
try:
self.future = convert_yielded(yielded)
except BadYieldError:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if not self.future.done() or self.future is moment:
self.io_loop.add_future(
self.future, lambda f: self.run())
return False
return True
def result_callback(self, key):
return stack_context.wrap(_argument_adapter(
functools.partial(self.set_result, key)))
def handle_exception(self, typ, value, tb):
if not self.running and not self.finished:
self.future = TracebackFuture()
self.future.set_exc_info((typ, value, tb))
self.run()
return True
else:
return False
def _deactivate_stack_context(self):
if self.stack_context_deactivate is not None:
self.stack_context_deactivate()
self.stack_context_deactivate = None
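# A rough usage sketch of the old-style callback protocol implemented by the
# Runner methods above (illustrative only; ``runner``, ``key`` and ``async_op``
# are hypothetical names, not part of this module):
#
#     runner.register_callback(key)                    # reserve a pending slot
#     async_op(callback=runner.result_callback(key))   # fires set_result(key, value)
#     if runner.is_ready(key):
#         value = runner.pop_result(key)               # fetch the value, unregister key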
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
def _argument_adapter(callback):
"""Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
"""
def wrapper(*args, **kwargs):
if kwargs or len(args) > 1:
callback(Arguments(args, kwargs))
elif args:
callback(args[0])
else:
callback(None)
return wrapper
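# For illustration, the adapter collapses call signatures like this
# (``cb`` is a hypothetical callback that records what it receives):
#
#     adapted = _argument_adapter(cb)
#     adapted(42)            # calls cb(42)
#     adapted()              # calls cb(None)
#     adapted(1, 2, x=3)     # calls cb(Arguments(args=(1, 2), kwargs={'x': 3}))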
def convert_yielded(yielded):
"""Convert a yielded object into a `.Future`.
The default implementation accepts lists, dictionaries, and Futures.
If the `~functools.singledispatch` library is available, this function
may be extended to support additional types. For example::
@convert_yielded.register(asyncio.Future)
def _(asyncio_future):
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
.. versionadded:: 4.1
"""
# Lists and dicts containing YieldPoints were handled separately
# via Multi().
if isinstance(yielded, (list, dict)):
return multi_future(yielded)
elif is_future(yielded):
return yielded
else:
raise BadYieldError("yielded unknown object %r" % (yielded,))
if singledispatch is not None:
convert_yielded = singledispatch(convert_yielded)
| gpl-3.0 | -1,741,868,975,850,850,800 | 35.605839 | 82 | 0.616408 | false |
aebrahim/cobrapy | cobra/solvers/esolver.py | 5 | 6286 | from subprocess import check_output, check_call, CalledProcessError
from os import unlink, devnull
from os.path import isfile
from tempfile import NamedTemporaryFile
from fractions import Fraction
from six.moves import zip
from . import cglpk
from .wrappers import *
# detect paths to system calls for esolver and gzip
with open(devnull, "w") as DEVNULL:
try:
ESOLVER_COMMAND = check_output(["which", "esolver"],
stderr=DEVNULL).strip()
__esolver_version__ = check_output(["esolver", "-v"], stderr=DEVNULL)
except CalledProcessError:
raise RuntimeError("esolver command not found")
try:
GZIP_COMMAND = check_output(["which", "gzip"], stderr=DEVNULL).strip()
except CalledProcessError:
raise RuntimeError("gzip command not found")
del DEVNULL
solver_name = "esolver"
class Esolver(cglpk.GLP):
"""contain an LP which will be solved through the QSopt_ex
The LP is stored using a GLPK object, and written out to an
LP file which is then solved by the esolver command."""
def __init__(self, cobra_model=None):
cglpk.GLP.__init__(self, cobra_model)
self.solution_filepath = None
self.basis_filepath = None
self.rational_solution = False
self.verbose = False
self.clean_up = True # clean up files
def _clean(self, filename):
"""remove old files"""
if self.clean_up and filename is not None and isfile(filename):
unlink(filename)
def set_parameter(self, parameter_name, value):
if parameter_name == "GLP":
raise Exception("can not be set this way")
if parameter_name == "objective_sense":
self.set_objective_sense(value)
if not hasattr(self, parameter_name):
raise ValueError("Unkonwn parameter '%s'" % parameter_name)
setattr(self, parameter_name, value)
def solve_problem(self, **solver_parameters):
if "objective_sense" in solver_parameters:
self.set_objective_sense(solver_parameters.pop("objective_sense"))
for key, value in solver_parameters.items():
self.set_parameter(key, value)
# remove the old solution file
self._clean(self.solution_filepath)
with NamedTemporaryFile(suffix=".lp", delete=False) as f:
lp_filepath = f.name
self.write(lp_filepath)
existing_basis = self.basis_filepath
with NamedTemporaryFile(suffix=".bas", delete=False) as f:
self.basis_filepath = f.name
with NamedTemporaryFile(suffix=".sol") as f:
self.solution_filepath = f.name
command = [ESOLVER_COMMAND, "-b", self.basis_filepath,
"-O", self.solution_filepath[:-4]]
if existing_basis is not None and isfile(existing_basis):
command.extend(["-B", existing_basis])
command.extend(["-L", lp_filepath])
command_kwargs = {}
if self.verbose:
print(" ".join(command))
DEVNULL = None
else:
DEVNULL = open(devnull, 'wb')
command_kwargs["stdout"] = DEVNULL
command_kwargs["stderr"] = DEVNULL
try:
check_call(command, **command_kwargs)
failed = False
except CalledProcessError as e:
failed = True
if failed:
self.basis_filepath = existing_basis
existing_basis = None
# Sometimes on failure a solution isn't written out
if not isfile(self.solution_filepath):
with open(self.solution_filepath, "w") as outfile:
outfile.write("=infeasible\n")
elif isfile(self.solution_filepath + ".gz"):
# the solution may be written out compressed
check_call([GZIP_COMMAND, "-d", self.solution_filepath + ".gz"])
if DEVNULL is not None:
DEVNULL.close()
self._clean(lp_filepath)
self._clean(existing_basis) # replaced with the new basis
def get_status(self):
with open(self.solution_filepath) as infile:
return infile.readline().split("=")[1].strip().lower()
def _format(self, value):
"""convert a string value into either a fraction or float"""
value = Fraction(value)
return value if self.rational_solution else float(value)
def get_objective_value(self):
with open(self.solution_filepath) as infile:
status = infile.readline().split("=")[1].strip().lower()
if status != "optimal":
raise RuntimeError("status not optimal")
infile.readline()
return self._format(infile.readline().split("=")[1].strip())
def format_solution(self, cobra_model):
m = cobra_model
solution = m.solution.__class__(None)
with open(self.solution_filepath) as infile:
solution.status = infile.readline().split("=")[1].strip().lower()
if solution.status != "optimal":
return solution
infile.readline()
solution.f = self._format(Fraction(infile.readline()
.split("=")[1].strip()))
infile.readline()
value_dict = {}
for line in infile:
if line.endswith(":\n"):
break
varname, value = line.split("=")
value_dict[varname.strip()] = self._format(value.strip())
dual_dict = {}
for line in infile:
if line.endswith(":\n"):
break
varname, value = line.split("=")
dual_dict[varname.strip()] = self._format(value.strip())
solution.x = [value_dict.get("x_%d" % (i + 1), 0)
for i in range(len(m.reactions))]
solution.x_dict = {r.id: v for r, v in zip(m.reactions, solution.x)}
solution.y = [dual_dict.get("r_%d" % (i + 1), 0)
for i in range(len(m.metabolites))]
solution.y_dict = {m.id: v for m, v in zip(m.metabolites, solution.y)}
return solution
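# A minimal usage sketch (assumes ``model`` is an already-built cobra.Model and
# that the esolver and gzip binaries are on PATH, as verified above):
#
#     lp = Esolver(model)
#     lp.solve_problem(objective_sense="maximize", rational_solution=True)
#     if lp.get_status() == "optimal":
#         solution = lp.format_solution(model)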
# wrappers for the classmethods at the module level
create_problem = Esolver.create_problem
solve = Esolver.solve
| lgpl-2.1 | 5,368,479,664,408,009,000 | 39.554839 | 78 | 0.584314 | false |
shawnadelic/shuup | shuup/front/apps/auth/urls.py | 2 | 1132 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.conf.urls import patterns, url
from .views import (
LoginView, LogoutView, RecoverPasswordCompleteView,
RecoverPasswordConfirmView, RecoverPasswordSentView, RecoverPasswordView
)
urlpatterns = patterns(
'',
url(r'^login/$',
LoginView.as_view(),
name='login'),
url(r'^logout/$',
LogoutView.as_view(),
name='logout'),
url(r'^recover-password/$',
RecoverPasswordView.as_view(),
name='recover_password'),
url(r'^recover-password/(?P<uidb64>.+)/(?P<token>.+)/$',
RecoverPasswordConfirmView.as_view(),
name='recover_password_confirm'),
url(r'^recover-password/sent/$',
RecoverPasswordSentView.as_view(),
name='recover_password_sent'),
url(r'^recover-password/complete/$',
RecoverPasswordCompleteView.as_view(),
name='recover_password_complete'),
)
| agpl-3.0 | -7,619,571,747,156,614,000 | 31.342857 | 76 | 0.654594 | false |
tuxcoindev/tuxcoin | contrib/pyminer/pyminer.py | 2 | 6435 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 10337
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit | -8,682,132,293,758,735,000 | 24.535714 | 84 | 0.648951 | false |
louietsai/python-for-android | python-modules/twisted/twisted/test/test_persisted.py | 60 | 8648 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
# System Imports
import sys
from twisted.trial import unittest
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import cStringIO as StringIO
except ImportError:
import StringIO
# Twisted Imports
from twisted.persisted import styles, aot, crefutil
class VersionTestCase(unittest.TestCase):
def testNullVersionUpgrade(self):
global NullVersioned
class NullVersioned:
ok = 0
pkcl = pickle.dumps(NullVersioned())
class NullVersioned(styles.Versioned):
persistenceVersion = 1
def upgradeToVersion1(self):
self.ok = 1
mnv = pickle.loads(pkcl)
styles.doUpgrade()
assert mnv.ok, "initial upgrade not run!"
def testVersionUpgrade(self):
global MyVersioned
class MyVersioned(styles.Versioned):
persistenceVersion = 2
persistenceForgets = ['garbagedata']
v3 = 0
v4 = 0
def __init__(self):
self.somedata = 'xxx'
self.garbagedata = lambda q: 'cant persist'
def upgradeToVersion3(self):
self.v3 += 1
def upgradeToVersion4(self):
self.v4 += 1
mv = MyVersioned()
assert not (mv.v3 or mv.v4), "hasn't been upgraded yet"
pickl = pickle.dumps(mv)
MyVersioned.persistenceVersion = 4
obj = pickle.loads(pickl)
styles.doUpgrade()
assert obj.v3, "didn't do version 3 upgrade"
assert obj.v4, "didn't do version 4 upgrade"
pickl = pickle.dumps(obj)
obj = pickle.loads(pickl)
styles.doUpgrade()
assert obj.v3 == 1, "upgraded unnecessarily"
assert obj.v4 == 1, "upgraded unnecessarily"
def testNonIdentityHash(self):
global ClassWithCustomHash
class ClassWithCustomHash(styles.Versioned):
def __init__(self, unique, hash):
self.unique = unique
self.hash = hash
def __hash__(self):
return self.hash
v1 = ClassWithCustomHash('v1', 0)
v2 = ClassWithCustomHash('v2', 0)
pkl = pickle.dumps((v1, v2))
del v1, v2
ClassWithCustomHash.persistenceVersion = 1
ClassWithCustomHash.upgradeToVersion1 = lambda self: setattr(self, 'upgraded', True)
v1, v2 = pickle.loads(pkl)
styles.doUpgrade()
self.assertEquals(v1.unique, 'v1')
self.assertEquals(v2.unique, 'v2')
self.failUnless(v1.upgraded)
self.failUnless(v2.upgraded)
def testUpgradeDeserializesObjectsRequiringUpgrade(self):
global ToyClassA, ToyClassB
class ToyClassA(styles.Versioned):
pass
class ToyClassB(styles.Versioned):
pass
x = ToyClassA()
y = ToyClassB()
pklA, pklB = pickle.dumps(x), pickle.dumps(y)
del x, y
ToyClassA.persistenceVersion = 1
def upgradeToVersion1(self):
self.y = pickle.loads(pklB)
styles.doUpgrade()
ToyClassA.upgradeToVersion1 = upgradeToVersion1
ToyClassB.persistenceVersion = 1
ToyClassB.upgradeToVersion1 = lambda self: setattr(self, 'upgraded', True)
x = pickle.loads(pklA)
styles.doUpgrade()
self.failUnless(x.y.upgraded)
class MyEphemeral(styles.Ephemeral):
def __init__(self, x):
self.x = x
class EphemeralTestCase(unittest.TestCase):
def testEphemeral(self):
o = MyEphemeral(3)
self.assertEquals(o.__class__, MyEphemeral)
self.assertEquals(o.x, 3)
pickl = pickle.dumps(o)
o = pickle.loads(pickl)
self.assertEquals(o.__class__, styles.Ephemeral)
self.assert_(not hasattr(o, 'x'))
class Pickleable:
def __init__(self, x):
self.x = x
def getX(self):
return self.x
class A:
"""
dummy class
"""
def amethod(self):
pass
class B:
"""
dummy class
"""
def bmethod(self):
pass
def funktion():
pass
class PicklingTestCase(unittest.TestCase):
"""Test pickling of extra object types."""
def testModule(self):
pickl = pickle.dumps(styles)
o = pickle.loads(pickl)
self.assertEquals(o, styles)
def testClassMethod(self):
pickl = pickle.dumps(Pickleable.getX)
o = pickle.loads(pickl)
self.assertEquals(o, Pickleable.getX)
def testInstanceMethod(self):
obj = Pickleable(4)
pickl = pickle.dumps(obj.getX)
o = pickle.loads(pickl)
self.assertEquals(o(), 4)
self.assertEquals(type(o), type(obj.getX))
def testStringIO(self):
f = StringIO.StringIO()
f.write("abc")
pickl = pickle.dumps(f)
o = pickle.loads(pickl)
self.assertEquals(type(o), type(f))
self.assertEquals(f.getvalue(), "abc")
class EvilSourceror:
def __init__(self, x):
self.a = self
self.a.b = self
self.a.b.c = x
class NonDictState:
def __getstate__(self):
return self.state
def __setstate__(self, state):
self.state = state
class AOTTestCase(unittest.TestCase):
def testSimpleTypes(self):
obj = (1, 2.0, 3j, True, slice(1, 2, 3), 'hello', u'world', sys.maxint + 1, None, Ellipsis)
rtObj = aot.unjellyFromSource(aot.jellyToSource(obj))
self.assertEquals(obj, rtObj)
def testMethodSelfIdentity(self):
a = A()
b = B()
a.bmethod = b.bmethod
b.a = a
im_ = aot.unjellyFromSource(aot.jellyToSource(b)).a.bmethod
self.assertEquals(im_.im_class, im_.im_self.__class__)
def test_methodNotSelfIdentity(self):
"""
        If a class changes after an instance has been created,
        L{aot.unjellyFromSource} should raise a C{TypeError} when trying to
unjelly the instance.
"""
a = A()
b = B()
a.bmethod = b.bmethod
b.a = a
savedbmethod = B.bmethod
del B.bmethod
try:
self.assertRaises(TypeError, aot.unjellyFromSource,
aot.jellyToSource(b))
finally:
B.bmethod = savedbmethod
def test_unsupportedType(self):
"""
L{aot.jellyToSource} should raise a C{TypeError} when trying to jelly
an unknown type.
"""
try:
set
except:
from sets import Set as set
self.assertRaises(TypeError, aot.jellyToSource, set())
def testBasicIdentity(self):
# Anyone wanting to make this datastructure more complex, and thus this
# test more comprehensive, is welcome to do so.
aj = aot.AOTJellier().jellyToAO
d = {'hello': 'world', "method": aj}
l = [1, 2, 3,
"he\tllo\n\n\"x world!",
u"goodbye \n\t\u1010 world!",
1, 1.0, 100 ** 100l, unittest, aot.AOTJellier, d,
funktion
]
t = tuple(l)
l.append(l)
l.append(t)
l.append(t)
uj = aot.unjellyFromSource(aot.jellyToSource([l, l]))
assert uj[0] is uj[1]
assert uj[1][0:5] == l[0:5]
def testNonDictState(self):
a = NonDictState()
a.state = "meringue!"
assert aot.unjellyFromSource(aot.jellyToSource(a)).state == a.state
def testCopyReg(self):
s = "foo_bar"
sio = StringIO.StringIO()
sio.write(s)
uj = aot.unjellyFromSource(aot.jellyToSource(sio))
# print repr(uj.__dict__)
assert uj.getvalue() == s
def testFunkyReferences(self):
o = EvilSourceror(EvilSourceror([]))
j1 = aot.jellyToAOT(o)
oj = aot.unjellyFromAOT(j1)
assert oj.a is oj
assert oj.a.b is oj.b
assert oj.c is not oj.c.c
class CrefUtilTestCase(unittest.TestCase):
"""
Tests for L{crefutil}.
"""
def test_dictUnknownKey(self):
"""
L{crefutil._DictKeyAndValue} only support keys C{0} and C{1}.
"""
d = crefutil._DictKeyAndValue({})
self.assertRaises(RuntimeError, d.__setitem__, 2, 3)
def test_deferSetMultipleTimes(self):
"""
L{crefutil._Defer} can be assigned a key only one time.
"""
d = crefutil._Defer()
d[0] = 1
self.assertRaises(RuntimeError, d.__setitem__, 0, 1)
testCases = [VersionTestCase, EphemeralTestCase, PicklingTestCase]
| apache-2.0 | 7,265,969,973,151,563,000 | 26.541401 | 99 | 0.576318 | false |
ContinuumIO/ashiba | enaml/enaml/wx/wx_spin_box.py | 1 | 16857 | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
import wx
import wx.lib.newevent
from atom.api import Int, Typed
from enaml.widgets.spin_box import ProxySpinBox
from .wx_control import WxControl
#: The changed event for the custom spin box
wxSpinBoxEvent, EVT_SPIN_BOX = wx.lib.newevent.NewEvent()
class wxProperSpinBox(wx.SpinCtrl):
""" A custom wx spin control that acts more like QSpinBox.
The standard wx.SpinCtrl doesn't support too many features, and
    the ones it does support (like wrapping) are limited. So,
this custom control hard codes the internal range to the maximum
range of the wx.SpinCtrl and implements wrapping manually.
For changed events, users should bind to EVT_SPIN_BOX rather than
EVT_SPINCTRL.
See the method docstrings for supported functionality.
This control is really a god awful hack and needs to be rewritten
    using a combination of wx.SpinButton and wx.TextCtrl.
"""
def __init__(self, *args, **kwargs):
""" CustomSpinCtrl constructor.
Parameters
----------
*args, **kwargs
The positional and keyword arguments for initializing a
wx.SpinCtrl.
"""
# The max range of the wx.SpinCtrl is the range of a signed
# 32bit integer. We don't care about wx's internal value of
# the control, since we maintain our own internal counter.
# and because the internal value of the widget gets reset to
# the minimum of the range whenever SetValueString is called.
self._hard_min = -(1 << 31)
self._hard_max = (1 << 31) - 1
self._internal_value = 0
self._low = 0
self._high = 100
self._step = 1
self._prefix = u''
self._suffix = u''
self._special_value_text = u''
self._value_string = unicode(self._low)
self._wrap = False
self._read_only = False
# Stores whether spin-up or spin-down was pressed.
self._spin_state = None
super(wxProperSpinBox, self).__init__(*args, **kwargs)
super(wxProperSpinBox, self).SetRange(self._hard_min, self._hard_max)
# Setting the spin control to process the enter key removes
# its processing of the Tab key. This is desired for two reasons:
# 1) It is consistent with the Qt version of the control.
# 2) The default tab processing is kinda wacky in that when
# tab is pressed, it emits a text event with the string
# representation of the integer value of the control,
# regardless of the value of the user supplied string.
# This is definitely not correct and so processing on
# Enter allows us to avoid the issue entirely.
self.WindowStyle |= wx.TE_PROCESS_ENTER
self.Bind(wx.EVT_SPIN_UP, self.OnSpinUp)
self.Bind(wx.EVT_SPIN_DOWN, self.OnSpinDown)
self.Bind(wx.EVT_SPINCTRL, self.OnSpinCtrl)
self.Bind(wx.EVT_TEXT, self.OnText)
self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
self.Bind(wx.EVT_TEXT_ENTER, self.OnEnterPressed)
#--------------------------------------------------------------------------
# Event Handlers
#--------------------------------------------------------------------------
def OnEnterPressed(self, event):
""" The event handler for an enter key press. It forces an
interpretation of the current text control value.
"""
self.InterpretText()
def OnKillFocus(self, event):
""" Handles evaluating the text in the control when the control
loses focus.
"""
# The spin control doesn't emit a spin event when losing focus
# to process typed input change unless it results in a different
# value, so we have to handle it manually and update the control
# again after the event. It must be invoked on a CallAfter or it
# doesn't work properly. The lambda avoids a DeadObjectError if
# the app is exited before the callback executes.
wx.CallAfter(lambda: self.InterpretText() if self else None)
def OnText(self, event):
""" Handles the text event of the spin control to store away the
user typed text for later conversion.
"""
if self._read_only:
return
# Do not be tempted to try to implement the 'tracking' feature
# by adding logic to this method. Wx emits this event at weird
# times such as ctrl-a select all as well as when SetValueString
# is called. Granted, this can be avoided with a recursion guard,
# however, there is no way to get/set the caret position on the
# control and every call to SetValueString resets the caret
# position to Zero. So, there is really no possible way to
# implement 'tracking' without creating an entirely new custom
# control. So for now, the wx backend just lacks that feature.
self._value_string = event.GetString()
def OnSpinUp(self, event):
""" The event handler for the spin up event. We veto the spin
        event to prevent the control from changing its internal value.
Instead, we maintain complete control of the value.
"""
event.Veto()
if self._read_only:
return
self._spin_state = 'up'
self.OnSpinCtrl(event)
self._spin_state = None
def OnSpinDown(self, event):
""" The event handler for the spin down event. We veto the spin
event to prevent the control from changing it's internal value.
Instead, we maintain complete control of the value.
"""
event.Veto()
if self._read_only:
return
self._spin_state = 'down'
self.OnSpinCtrl(event)
self._spin_state = None
def OnSpinCtrl(self, event):
""" Handles the spin control being changed by user interaction.
All of the manual stepping and wrapping logic is computed by
this method.
"""
if self._read_only:
return
last = self._internal_value
low = self._low
high = self._high
step = self._step
wrap = self._wrap
spin_state = self._spin_state
if spin_state == 'down':
if last == low:
if wrap:
computed = high
else:
computed = low
else:
computed = last - step
if computed < low:
computed = low
self.SetValue(computed)
elif spin_state == 'up':
if last == high:
if wrap:
computed = low
else:
computed = high
else:
computed = last + step
if computed > high:
computed = high
self.SetValue(computed)
else:
            # A spurious spin event generated by wx when the widget loses
            # focus. We can safely ignore it.
pass
#--------------------------------------------------------------------------
# Getters/Setters
#--------------------------------------------------------------------------
def GetLow(self):
""" Returns the minimum value of the control.
"""
return self._low
def GetMin(self):
""" Equivalent to GetLow().
"""
return self._low
def SetLow(self, low):
""" Sets the minimum value of the control and changes the
value to the min if the current value would be out of range.
"""
if low < self._hard_min:
raise ValueError('%s is too low for wxProperSpinBox.' % low)
self._low = low
if self.GetValue() < low:
self.SetValue(low)
def GetHigh(self):
""" Returns the maximum value of the control.
"""
return self._high
def GetMax(self):
""" Equivalent to GetHigh().
"""
return self._high
def SetHigh(self, high):
""" Sets the maximum value of the control and changes the
value to the max if the current value would be out of range.
"""
if high > self._hard_max:
raise ValueError('%s is too high for wxProperSpinBox.' % high)
self._high = high
if self.GetValue() > high:
self.SetValue(high)
def SetRange(self, low, high):
""" Sets the low and high values of the control.
"""
self.SetLow(low)
self.SetHigh(high)
def GetStep(self):
""" Returns the step size of the control.
"""
return self._step
def SetStep(self, step):
""" Sets the step size of the control.
"""
self._step = step
def GetWrap(self):
""" Gets the wrap flag of the control.
"""
return self._wrap
def SetWrap(self, wrap):
""" Sets the wrap flag of the control.
"""
self._wrap = wrap
def GetPrefix(self):
""" Get the prefix text for the control.
Returns
-------
result : unicode
The unicode prefix text.
"""
return self._prefix
def SetPrefix(self, prefix):
""" Set the prefix text for the control.
Parameters
----------
prefix : unicode
The unicode prefix text for the control.
"""
self._prefix = prefix
def GetSuffix(self):
""" Get the suffix text for the control.
Returns
-------
result : unicode
The unicode suffix text.
"""
return self._suffix
def SetSuffix(self, suffix):
""" Set the suffix text for the control.
Parameters
----------
suffix : unicode
The unicode suffix text for the control.
"""
self._suffix = suffix
def GetSpecialValueText(self):
""" Returns the special value text for the spin box.
Returns
-------
result : unicode
The unicode special value text.
"""
return self._special_value_text
def SetSpecialValueText(self, text):
""" Set the special value text for the control.
Parameters
----------
text : unicode
The unicode special value text for the control.
"""
self._special_value_text = text
def GetReadOnly(self):
""" Get the read only flag for the control.
Returns
-------
result : bool
True if the control is read only, False otherwise.
"""
        return self._read_only
def SetReadOnly(self, read_only):
""" Set the read only flag for the control
Parameters
----------
read_only : bool
True if the control should be read only, False otherwise.
"""
self._read_only = read_only
def GetValue(self):
""" Returns the internal integer value of the control.
"""
return self._internal_value
def SetValue(self, value):
""" Sets the value of the control to the given value, provided
that the value is within the range of the control. If the
given value is within range, and is different from the current
value of the control, an EVT_SPIN_BOX will be emitted.
"""
different = False
if self._low <= value <= self._high:
different = (self._internal_value != value)
self._internal_value = value
# Always set the value string, just to be overly
# safe that we don't fall out of sync.
self._value_string = self.TextFromValue(self._internal_value)
self.SetValueString(self._value_string)
if different:
evt = wxSpinBoxEvent()
wx.PostEvent(self, evt)
#--------------------------------------------------------------------------
# Support Methods
#--------------------------------------------------------------------------
def InterpretText(self):
""" Interprets the user supplied text and updates the control.
"""
prefix = self._prefix
suffix = self._suffix
svt = self._special_value_text
text = self._value_string
if svt and text == svt:
self.SetValue(self._low)
return
if prefix and text.startswith(prefix):
text = text[len(prefix):]
if suffix and text.endswith(suffix):
text = text[:-len(suffix)]
try:
value = int(text)
except ValueError:
value = self._internal_value
self.SetValue(value)
def TextFromValue(self, value):
""" Converts the given integer to a string for display.
"""
prefix = self._prefix
suffix = self._suffix
svt = self._special_value_text
if value == self._low and svt:
return svt
text = unicode(value)
if prefix:
text = '%s%s' % (prefix, text)
if suffix:
text = '%s%s' % (text, suffix)
return text
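# A minimal usage sketch of the control on its own (outside Enaml; ``parent``
# and ``on_changed`` are assumed to exist in the calling code):
#
#     spin = wxProperSpinBox(parent)
#     spin.SetRange(0, 10)
#     spin.SetWrap(True)
#     spin.SetPrefix(u'Qty ')
#     spin.Bind(EVT_SPIN_BOX, on_changed)   # fires only when the value actually changes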
#: Cyclic guard flag
VALUE_FLAG = 0x1
class WxSpinBox(WxControl, ProxySpinBox):
""" A Wx implementation of an Enaml ProxySpinBox.
"""
#: A reference to the widget created by the proxy.
widget = Typed(wxProperSpinBox)
#: Cyclic guard flags
_guard = Int(0)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying wxProperSpinBox widget.
"""
self.widget = wxProperSpinBox(self.parent_widget())
    def init_widget(self):
""" Create and initialize the slider control.
"""
super(WxSpinBox, self).init_widget()
d = self.declaration
self.set_maximum(d.maximum)
self.set_minimum(d.minimum)
self.set_value(d.value)
self.set_prefix(d.prefix)
self.set_suffix(d.suffix)
self.set_special_value_text(d.special_value_text)
self.set_single_step(d.single_step)
self.set_read_only(d.read_only)
self.set_wrapping(d.wrapping)
self.widget.Bind(EVT_SPIN_BOX, self.on_value_changed)
#--------------------------------------------------------------------------
# Event Handlers
#--------------------------------------------------------------------------
def on_value_changed(self, event):
""" The event handler for the 'EVT_SPIN_BOX' event.
"""
if not self._guard & VALUE_FLAG:
self._guard |= VALUE_FLAG
try:
self.declaration.value = self.widget.GetValue()
finally:
self._guard &= ~VALUE_FLAG
#--------------------------------------------------------------------------
# ProxySpinBox API
#--------------------------------------------------------------------------
def set_maximum(self, maximum):
""" Set the widget's maximum value.
"""
self.widget.SetHigh(maximum)
def set_minimum(self, minimum):
""" Set the widget's minimum value.
"""
self.widget.SetLow(minimum)
def set_value(self, value):
""" Set the spin box's value.
"""
if not self._guard & VALUE_FLAG:
self._guard |= VALUE_FLAG
try:
self.widget.SetValue(value)
finally:
self._guard &= ~VALUE_FLAG
def set_prefix(self, prefix):
""" Set the prefix for the spin box.
"""
self.widget.SetPrefix(prefix)
def set_suffix(self, suffix):
""" Set the suffix for the spin box.
"""
self.widget.SetSuffix(suffix)
def set_special_value_text(self, text):
""" Set the special value text for the spin box.
"""
self.widget.SetSpecialValueText(text)
def set_single_step(self, step):
""" Set the widget's single step value.
"""
self.widget.SetStep(step)
def set_read_only(self, read_only):
""" Set the widget's read only flag.
"""
self.widget.SetReadOnly(read_only)
def set_wrapping(self, wrapping):
""" Set the widget's wrapping flag.
"""
self.widget.SetWrap(wrapping)
| bsd-3-clause | -6,469,759,024,124,321,000 | 29.649091 | 79 | 0.53764 | false |
Finntack/pootle | pootle/apps/staticpages/templatetags/staticpages.py | 3 | 2425 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django import template
from django.core.urlresolvers import reverse
from ..models import LegalPage
register = template.Library()
class LegalPageNode(template.Node):
def __init__(self, context_name):
self.context_name = context_name
def render(self, context):
lps = LegalPage.objects.live().all()
context[self.context_name] = lps
return ''
@register.tag
def get_legalpages(parser, token):
"""
Retrieves all active LegalPage objects.
Populates the template context with them in a variable
whose name is defined by the ``as`` clause.
Syntax::
{% get_legalpages as context_name %}
"""
bits = token.split_contents()
syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
"as context_name" %
dict(tag_name=bits[0]))
if len(bits) == 3:
if bits[1] != 'as':
raise template.TemplateSyntaxError(syntax_message)
context_name = bits[2]
return LegalPageNode(context_name)
else:
raise template.TemplateSyntaxError(syntax_message)
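# Example template usage (``url`` and ``title`` are illustrative LegalPage
# field names, not guaranteed by this module):
#
#     {% load staticpages %}
#     {% get_legalpages as legal_pages %}
#     {% for page in legal_pages %}
#         <a href="{{ page.url }}">{{ page.title }}</a>
#     {% endfor %}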
@register.tag
def staticpage_url(parser, token):
"""Returns the internal URL for a static page based on its virtual path.
Syntax::
{% staticpage_url 'virtual/path' %}
"""
bits = token.split_contents()
syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
"'virtual/path'" %
dict(tag_name=bits[0]))
quote_message = "%s tag's argument should be in quotes" % bits[0]
if len(bits) == 2:
virtual_path = bits[1]
if (not (virtual_path[0] == virtual_path[-1] and
virtual_path[0] in ('"', "'"))):
raise template.TemplateSyntaxError(quote_message)
return StaticPageURLNode(virtual_path[1:-1])
raise template.TemplateSyntaxError(syntax_message)
class StaticPageURLNode(template.Node):
def __init__(self, virtual_path):
self.virtual_path = virtual_path
def render(self, context):
return reverse('pootle-staticpages-display', args=[self.virtual_path])
| gpl-3.0 | 5,718,019,808,574,165,000 | 25.944444 | 78 | 0.628041 | false |
eworm-de/systemd | test/sd-script.py | 12 | 21508 | #!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# sd-script.py: create LOTS of sd device entries in fake sysfs
#
# (C) 2018 Martin Wilck, SUSE Linux GmbH
#
# Run after sys-script.py
# Usage: sd-script.py <directory> <num>
# <num> is the number of device nodes (disks + partitions)
# to create in addition to what sys-script.py already did.
# The script can be run several times in a row if <num> is increased,
# adding yet more device entries.
# Tested up to 1000 entries, more are possible.
# Note that sys-script.py already creates 10 sd device nodes
# (sda+sdb and partitions). This script starts with sdc.
import re
import os
import errno
import sys
def d(path, mode):
    """Create a directory (used by the sysfs templates below)."""
    os.mkdir(path, mode)
def l(path, src):
    """Create a symlink at ``path`` pointing to ``src``."""
    os.symlink(src, path)
def f(path, mode, contents):
    """Create a file with the given contents and mode."""
    with open(path, "wb") as f:
        f.write(contents)
    os.chmod(path, mode)
class SD(object):
sd_major = [8] + list(range(65, 72)) + list(range(128, 136))
_name_re = re.compile(r'sd(?P<f>[a-z]*)$')
def _init_from_name(self, name):
mt = self._name_re.match(name)
if mt is None:
raise RuntimeError("invalid name %s" % name)
nm = mt.group("f")
base = 1
ls = nm[-1]
nm = nm[:-1]
n = base * (ord(ls)-ord('a'))
while len(nm) > 0:
ls = nm[-1]
nm = nm[:-1]
base *= 26
n += base * (1 + ord(ls)-ord('a'))
self._num = n
def _init_from_dev(self, dev):
maj, min = dev.split(":")
maj = self.sd_major.index(int(maj, 10))
min = int(min, 10)
num = int(min / 16)
self._num = 16*maj + num%16 + 256*int(num/16)
@staticmethod
def _disk_num(a, b):
n = ord(a)-ord('a')
if b != '':
n = 26 * (n+1) + ord(b)-ord('a')
return n
@staticmethod
def _get_major(n):
return SD.sd_major[(n%256)//16]
@staticmethod
def _get_minor(n):
return 16 * (n % 16 + 16 * n//256)
@staticmethod
def _get_name(n):
# see sd_format_disk_name() (sd.c)
s = chr(n % 26 + ord('a'))
n = n // 26 - 1
while n >= 0:
s = chr(n % 26 + ord('a')) + s
n = n // 26 - 1
return "sd" + s
@staticmethod
def _get_dev_t(n):
maj = SD._get_major(n)
min = SD._get_minor(n)
return (maj << 20) + min
def __init__(self, arg):
if type(arg) is type(0):
self._num = arg
elif arg.startswith("sd"):
self._init_from_name(arg)
else:
self._init_from_dev(arg)
    def __eq__(self, other):
        return self._num == other._num
    def __lt__(self, other):
        return self._num < other._num
def __hash__(self):
return hash(self._num)
def __str__(self):
return "%s/%s" % (
self.devstr(),
self._get_name(self._num))
def major(self):
return self._get_major(self._num)
def minor(self):
return self._get_minor(self._num)
def devstr(self):
return "%d:%d" % (self._get_major(self._num),
self._get_minor(self._num))
def namestr(self):
return self._get_name(self._num)
def longstr(self):
return "%d\t%s\t%s\t%08x" % (self._num,
self.devstr(),
self.namestr(),
self._get_dev_t(self._num))
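# Quick sanity examples of the name/number mapping implemented by SD above
# (mirrors the kernel's sd_format_disk_name()):
#
#     SD._get_name(0)   -> 'sda'
#     SD._get_name(25)  -> 'sdz'
#     SD._get_name(26)  -> 'sdaa'
#     SD('sdaa')._num   -> 26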
class MySD(SD):
def subst(self, first_sg):
disk = {
"lun": self._num,
"major": self.major(),
"devnode": self.namestr(),
"disk_minor": self.minor(),
"sg_minor": first_sg + self._num,
}
return disk
disk_template = r"""\
l('sys/bus/scsi/drivers/sd/7:0:0:{lun}', '../../../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}')
l('sys/bus/scsi/devices/7:0:0:{lun}', '../../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}')
l('sys/dev/char/254:{sg_minor}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}')
l('sys/dev/char/21:{sg_minor}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}')
l('sys/class/scsi_disk/7:0:0:{lun}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}')
l('sys/class/scsi_generic/sg{sg_minor}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}')
l('sys/class/bsg/7:0:0:{lun}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}')
l('sys/class/scsi_device/7:0:0:{lun}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device/7:0:0:{lun}')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/generic', 'scsi_generic/sg{sg_minor}')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/subsystem', '../../../../../../../../../bus/scsi')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/driver', '../../../../../../../../../bus/scsi/drivers/sd')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/iodone_cnt', 0o644, b'0xc3\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/device_blocked', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/max_sectors', 0o644, b'240\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/modalias', 0o644, b'scsi:t-0x00\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_level', 0o644, b'3\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/queue_depth', 0o644, b'1\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/rev', 0o644, b'1.00\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/type', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/iocounterbits', 0o644, b'32\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/vendor', 0o644, b'Generic \n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/state', 0o644, b'running\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/queue_type', 0o644, b'none\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/iorequest_cnt', 0o644, b'0xc3\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/evt_media_change', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/model', 0o644, b'USB Flash Drive \n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/ioerr_cnt', 0o644, b'0x2\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/uevent', 0o644, b'''DEVTYPE=scsi_device
DRIVER=sd
MODALIAS=scsi:t-0x00
''')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/timeout', 0o644, b'60\n')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk', 0o755)
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/subsystem', '../../../../../../../../../../../class/scsi_disk')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/device', '../../../7:0:0:{lun}')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/app_tag_own', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/FUA', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/cache_type', 0o644, b'write through\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/protection_type', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/manage_start_stop', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/allow_restart', 0o644, b'1\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/uevent', 0o644, b'')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/power', 0o755)
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/power/wakeup', 0o644, b'\n')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/power', 0o755)
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/power/wakeup', 0o644, b'\n')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic', 0o755)
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}/subsystem', '../../../../../../../../../../../class/scsi_generic')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}/device', '../../../7:0:0:{lun}')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}/dev', 0o644, b'21:{sg_minor}\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}/uevent', 0o644, b'''MAJOR=21
MINOR={sg_minor}
''')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}/power', 0o755)
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}/power/wakeup', 0o644, b'\n')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg', 0o755)
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}/subsystem', '../../../../../../../../../../../class/bsg')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}/device', '../../../7:0:0:{lun}')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}/dev', 0o644, b'254:{sg_minor}\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}/uevent', 0o644, b'''MAJOR=254
MINOR={sg_minor}
''')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}/power', 0o755)
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}/power/wakeup', 0o644, b'\n')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block', 0o755)
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device', 0o755)
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device/7:0:0:{lun}', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device/7:0:0:{lun}/subsystem', '../../../../../../../../../../../class/scsi_device')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device/7:0:0:{lun}/device', '../../../7:0:0:{lun}')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device/7:0:0:{lun}/uevent', 0o644, b'')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device/7:0:0:{lun}/power', 0o755)
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device/7:0:0:{lun}/power/wakeup', 0o644, b'\n')
l('sys/dev/block/{major}:{disk_minor}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}')
l('sys/class/block/{devnode}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}')
l('sys/block/{devnode}', '../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/subsystem', '../../../../../../../../../../../class/block')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/bdi', '../../../../../../../../../../virtual/bdi/{major}:{disk_minor}')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/device', '../../../7:0:0:{lun}')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/capability', 0o644, b'13\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/ro', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/make-it-fail', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/size', 0o644, b'257024\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/dev', 0o644, b'{major}:{disk_minor}\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/range', 0o644, b'16\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/removable', 0o644, b'1\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/stat', 0o644, b' 117 409 2103 272 0 0 0 0 0 194 272\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/uevent', 0o644, b'''MAJOR={major}
MINOR={disk_minor}
DEVTYPE=disk
DEVNAME={devnode}
''')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/bsg', '../../../bsg/7:0:0:{lun}')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/nr_requests', 0o644, b'128\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/nomerges', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/scheduler', 0o644, b'noop anticipatory deadline [cfq] \n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/hw_sector_size', 0o644, b'512\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/max_hw_sectors_kb', 0o644, b'120\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/read_ahead_kb', 0o644, b'128\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/max_sectors_kb', 0o644, b'120\n')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched', 0o755)
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/slice_async_rq', 0o644, b'2\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/back_seek_max', 0o644, b'16384\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/slice_sync', 0o644, b'100\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/slice_async', 0o644, b'40\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/fifo_expire_sync', 0o644, b'125\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/slice_idle', 0o644, b'8\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/back_seek_penalty', 0o644, b'2\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/fifo_expire_async', 0o644, b'250\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/quantum', 0o644, b'4\n')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/power', 0o755)
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/power/wakeup', 0o644, b'\n')
"""
part_template = r"""\
l('sys/dev/block/{major}:{part_minor}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}')
l('sys/class/block/{devnode}{part_num}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}/subsystem', '../../../../../../../../../../../../class/block')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}/start', 0o644, b'32\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}/make-it-fail', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}/size', 0o644, b'256992\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}/dev', 0o644, b'{major}:{part_minor}\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}/stat', 0o644, b' 109 392 1903 246 0 0 0 0 0 169 246\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}/uevent', 0o644, b'''MAJOR={major}
MINOR={part_minor}
DEVTYPE=partition
DEVNAME={devnode}{part_num}
''')
"""
if len(sys.argv) != 3:
exit("Usage: {} <target dir> <number>".format(sys.argv[0]))
if not os.path.isdir(sys.argv[1]):
exit("Target dir {} not found".format(sys.argv[1]))
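# Create the fake sysfs entries for one partition of a disk by expanding part_template.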
def create_part_sysfs(disk, sd, prt):
part = disk
    part.update({
"part_num": prt,
"part_minor": disk["disk_minor"] + prt,
})
try:
exec(part_template.format(**part))
except OSError:
si = sys.exc_info()[1]
if (si.errno == errno.EEXIST):
print("sysfs structures for %s%d exist" % (sd.namestr(), prt))
else:
            print("error for %s%d: %s" % (sd.namestr(), prt, si.strerror))
raise
else:
print("sysfs structures for %s%d created" % (sd.namestr(), prt))
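# Create the fake sysfs entries for one disk (via disk_template), then for up to 15
# partitions, incrementing the running count n and stopping once `last` is reached.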
def create_disk_sysfs(dsk, first_sg, n):
sd = MySD(dsk)
disk = sd.subst(first_sg)
try:
exec(disk_template.format(**disk))
except OSError:
si = sys.exc_info()[1]
if (si.errno == errno.EEXIST):
print("sysfs structures for %s exist" % sd.namestr())
elif (si.errno == errno.ENOENT):
print("error for %s: %s - have you run sys-script py first?" %
(sd.namestr(), si.strerror))
return -1
else:
print("error for %s: %s" % (sd.namestr(), si.strerror))
raise
else:
print("sysfs structures for %s created" % sd.namestr())
n += 1
if n >= last:
return n
for prt in range(1, 16):
create_part_sysfs(disk, sd, prt)
n += 1
if n >= last:
return n
return n
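# Main loop: work inside the target directory and keep adding disks until the
# requested number of block devices has been created.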
os.chdir(sys.argv[1])
n = 0
last = int(sys.argv[2])
first_sg = 2
for dsk in range(2, 1000):
n = create_disk_sysfs(dsk, first_sg, n)
if n >= last or n == -1:
break
| gpl-2.0 | -1,557,924,637,991,903,200 | 61.888889 | 240 | 0.617119 | false |
MiLk/ansible | lib/ansible/modules/network/nxos/nxos_rollback.py | 45 | 3780 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_rollback
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Set a checkpoint or rollback to a checkpoint.
description:
- This module offers the ability to set a configuration checkpoint
file or rollback to a configuration checkpoint file on Cisco NXOS
switches.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Sometimes C(transport=nxapi) may cause a timeout error.
options:
checkpoint_file:
description:
- Name of checkpoint file to create. Mutually exclusive
with rollback_to.
required: false
default: null
rollback_to:
description:
- Name of checkpoint file to rollback to. Mutually exclusive
with checkpoint_file.
required: false
default: null
'''
EXAMPLES = '''
- nxos_rollback:
checkpoint_file: backup.cfg
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
- nxos_rollback:
rollback_to: backup.cfg
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
filename:
description: The filename of the checkpoint/rollback file.
returned: success
type: string
sample: 'backup.cfg'
status:
description: Which operation took place and whether it was successful.
returned: success
type: string
sample: 'rollback executed'
'''
from ansible.module_utils.nxos import nxos_argument_spec, run_commands
from ansible.module_utils.basic import AnsibleModule
def checkpoint(filename, module):
commands = ['terminal dont-ask', 'checkpoint file %s' % filename]
run_commands(module, commands)
def rollback(filename, module):
commands = ['rollback running-config file %s' % filename]
run_commands(module, commands)
def main():
argument_spec = dict(
checkpoint_file=dict(required=False),
rollback_to=dict(required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['checkpoint_file',
'rollback_to']],
supports_check_mode=False)
checkpoint_file = module.params['checkpoint_file']
rollback_to = module.params['rollback_to']
status = None
filename = None
changed = False
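    # Either create a checkpoint file or roll back to an existing one; the two
    # options are mutually exclusive (enforced via mutually_exclusive above).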
if checkpoint_file:
checkpoint(checkpoint_file, module)
status = 'checkpoint file created'
elif rollback_to:
rollback(rollback_to, module)
status = 'rollback executed'
changed = True
filename = rollback_to or checkpoint_file
module.exit_json(changed=changed, status=status, filename=filename)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,047,403,224,673,898,000 | 27.854962 | 74 | 0.653968 | false |
mandx/pyrax | tests/unit/test_cloud_blockstorage.py | 10 | 21816 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import unittest
from mock import patch
from mock import MagicMock as Mock
import pyrax.cloudblockstorage
from pyrax.cloudblockstorage import CloudBlockStorageClient
from pyrax.cloudblockstorage import CloudBlockStorageVolume
from pyrax.cloudblockstorage import CloudBlockStorageVolumeType
from pyrax.cloudblockstorage import CloudBlockStorageSnapshot
from pyrax.cloudblockstorage import CloudBlockStorageSnapshotManager
from pyrax.cloudblockstorage import _resolve_id
from pyrax.cloudblockstorage import _resolve_name
from pyrax.cloudblockstorage import assure_volume
from pyrax.cloudblockstorage import assure_snapshot
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
import pyrax.utils as utils
from pyrax import fakes
example_uri = "http://example.com"
class CloudBlockStorageTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(CloudBlockStorageTest, self).__init__(*args, **kwargs)
def setUp(self):
self.client = fakes.FakeBlockStorageClient()
self.volume = fakes.FakeBlockStorageVolume()
self.snapshot = fakes.FakeBlockStorageSnapshot()
def tearDown(self):
pass
def test_resolve_id(self):
target = "test_id"
class Obj_with_id(object):
id = target
obj = Obj_with_id()
self.assertEqual(_resolve_id(obj), target)
self.assertEqual(_resolve_id(obj), target)
self.assertEqual(_resolve_id(obj.id), target)
def test_resolve_name(self):
target = "test_name"
class Obj_with_name(object):
name = target
obj = Obj_with_name()
self.assertEqual(_resolve_name(obj), target)
self.assertEqual(_resolve_name(obj), target)
self.assertEqual(_resolve_name(obj.name), target)
def test_assure_volume(self):
class TestClient(object):
_manager = fakes.FakeBlockStorageManager()
@assure_volume
def test_method(self, volume):
return volume
client = TestClient()
client._manager.get = Mock(return_value=self.volume)
# Pass the volume
ret = client.test_method(self.volume)
self.assertTrue(ret is self.volume)
# Pass the ID
ret = client.test_method(self.volume.id)
self.assertTrue(ret is self.volume)
def test_assure_snapshot(self):
class TestClient(object):
_snapshot_manager = fakes.FakeSnapshotManager()
@assure_snapshot
def test_method(self, snapshot):
return snapshot
client = TestClient()
client._snapshot_manager.get = Mock(return_value=self.snapshot)
# Pass the snapshot
ret = client.test_method(self.snapshot)
self.assertTrue(ret is self.snapshot)
# Pass the ID
ret = client.test_method(self.snapshot.id)
self.assertTrue(ret is self.snapshot)
def test_create_volume(self):
mgr = fakes.FakeManager()
mgr.api.region_name = "FAKE"
sav = pyrax.connect_to_cloudservers
fakenovavol = utils.random_unicode()
class FakeVol(object):
def __init__(self, *args, **kwargs):
self.volumes = fakenovavol
pyrax.connect_to_cloudservers = Mock(return_value=FakeVol())
vol = CloudBlockStorageVolume(mgr, {})
self.assertTrue(isinstance(vol, CloudBlockStorageVolume))
self.assertEqual(vol._nova_volumes, fakenovavol)
pyrax.connect_to_cloudservers = sav
def test_attach_to_instance(self):
vol = self.volume
inst = fakes.FakeServer()
mp = utils.random_unicode()
vol._nova_volumes.create_server_volume = Mock(return_value=vol)
vol.attach_to_instance(inst, mp)
vol._nova_volumes.create_server_volume.assert_called_once_with(inst.id,
vol.id, mp)
def test_attach_to_instance_fail(self):
vol = self.volume
inst = fakes.FakeServer()
mp = utils.random_unicode()
vol._nova_volumes.create_server_volume = Mock(
side_effect=Exception("test"))
self.assertRaises(exc.VolumeAttachmentFailed, vol.attach_to_instance,
inst, mp)
def test_detach_from_instance(self):
vol = self.volume
srv_id = utils.random_unicode()
att_id = utils.random_unicode()
vol.attachments = [{"server_id": srv_id, "id": att_id}]
vol._nova_volumes.delete_server_volume = Mock()
vol.detach()
vol._nova_volumes.delete_server_volume.assert_called_once_with(srv_id,
att_id)
def test_detach_from_instance_fail(self):
vol = self.volume
srv_id = utils.random_unicode()
att_id = utils.random_unicode()
vol.attachments = [{"server_id": srv_id, "id": att_id}]
vol._nova_volumes.delete_server_volume = Mock(
side_effect=Exception("test"))
self.assertRaises(exc.VolumeDetachmentFailed, vol.detach)
def test_detach_from_instance_no_attachment(self):
vol = self.volume
srv_id = utils.random_unicode()
att_id = utils.random_unicode()
vol.attachments = []
vol._nova_volumes.delete_server_volume = Mock()
ret = vol.detach()
self.assertTrue(ret is None)
self.assertFalse(vol._nova_volumes.delete_server_volume.called)
def test_create_snapshot(self):
vol = self.volume
vol.manager.create_snapshot = Mock()
name = utils.random_unicode()
desc = utils.random_unicode()
vol.create_snapshot(name=name, description=desc, force=False)
vol.manager.create_snapshot.assert_called_once_with(volume=vol,
name=name, description=desc, force=False)
def test_create_snapshot_bad_request(self):
vol = self.volume
sav = BaseManager.create
BaseManager.create = Mock(side_effect=exc.BadRequest(
"Invalid volume: must be available"))
name = utils.random_unicode()
desc = utils.random_unicode()
self.assertRaises(exc.VolumeNotAvailable, vol.create_snapshot,
name=name, description=desc, force=False)
BaseManager.create = sav
def test_create_snapshot_bad_request_other(self):
vol = self.volume
sav = BaseManager.create
BaseManager.create = Mock(side_effect=exc.BadRequest("FAKE"))
name = utils.random_unicode()
desc = utils.random_unicode()
self.assertRaises(exc.BadRequest, vol.create_snapshot,
name=name, description=desc, force=False)
BaseManager.create = sav
def test_vol_update_volume(self):
vol = self.volume
mgr = vol.manager
mgr.update = Mock()
nm = utils.random_unicode()
desc = utils.random_unicode()
vol.update(display_name=nm, display_description=desc)
mgr.update.assert_called_once_with(vol, display_name=nm,
display_description=desc)
def test_vol_rename(self):
vol = self.volume
nm = utils.random_unicode()
vol.update = Mock()
vol.rename(nm)
vol.update.assert_called_once_with(display_name=nm)
def test_mgr_update_volume(self):
clt = self.client
vol = self.volume
mgr = clt._manager
mgr.api.method_put = Mock(return_value=(None, None))
name = utils.random_unicode()
desc = utils.random_unicode()
exp_uri = "/%s/%s" % (mgr.uri_base, vol.id)
exp_body = {"volume": {"display_name": name,
"display_description": desc}}
mgr.update(vol, display_name=name, display_description=desc)
mgr.api.method_put.assert_called_once_with(exp_uri, body=exp_body)
def test_mgr_update_volume_empty(self):
clt = self.client
vol = self.volume
mgr = clt._manager
mgr.api.method_put = Mock(return_value=(None, None))
mgr.update(vol)
self.assertEqual(mgr.api.method_put.call_count, 0)
def test_list_types(self):
clt = self.client
clt._types_manager.list = Mock()
clt.list_types()
clt._types_manager.list.assert_called_once_with()
def test_list_snapshots(self):
clt = self.client
clt._snapshot_manager.list = Mock()
clt.list_snapshots()
clt._snapshot_manager.list.assert_called_once_with()
def test_vol_list_snapshots(self):
vol = self.volume
vol.manager.list_snapshots = Mock()
vol.list_snapshots()
vol.manager.list_snapshots.assert_called_once_with()
def test_vol_mgr_list_snapshots(self):
vol = self.volume
mgr = vol.manager
mgr.api.list_snapshots = Mock()
mgr.list_snapshots()
mgr.api.list_snapshots.assert_called_once_with()
def test_create_body_volume_bad_size(self):
mgr = self.client._manager
self.assertRaises(exc.InvalidSize, mgr._create_body, "name",
size='foo')
def test_create_volume_bad_clone_size(self):
mgr = self.client._manager
mgr._create = Mock(side_effect=exc.BadRequest(400,
"Clones currently must be >= original volume size"))
self.assertRaises(exc.VolumeCloneTooSmall, mgr.create, "name",
size=100, clone_id=utils.random_unicode())
def test_create_volume_fail_other(self):
mgr = self.client._manager
mgr._create = Mock(side_effect=exc.BadRequest(400, "FAKE"))
self.assertRaises(exc.BadRequest, mgr.create, "name",
size=100, clone_id=utils.random_unicode())
def test_create_body_volume(self):
mgr = self.client._manager
size = random.randint(100, 1024)
name = utils.random_unicode()
snapshot_id = utils.random_unicode()
clone_id = utils.random_unicode()
display_description = None
volume_type = None
metadata = None
availability_zone = utils.random_unicode()
fake_body = {"volume": {
"size": size,
"snapshot_id": snapshot_id,
"source_volid": clone_id,
"display_name": name,
"display_description": "",
"volume_type": "SATA",
"metadata": {},
"availability_zone": availability_zone,
"imageRef": None,
}}
ret = mgr._create_body(name=name, size=size, volume_type=volume_type,
description=display_description, metadata=metadata,
snapshot_id=snapshot_id, clone_id=clone_id,
availability_zone=availability_zone)
self.assertEqual(ret, fake_body)
def test_create_body_volume_defaults(self):
mgr = self.client._manager
size = random.randint(100, 1024)
name = utils.random_unicode()
snapshot_id = utils.random_unicode()
clone_id = utils.random_unicode()
display_description = utils.random_unicode()
volume_type = utils.random_unicode()
metadata = {}
availability_zone = utils.random_unicode()
fake_body = {"volume": {
"size": size,
"snapshot_id": snapshot_id,
"source_volid": clone_id,
"display_name": name,
"display_description": display_description,
"volume_type": volume_type,
"metadata": metadata,
"availability_zone": availability_zone,
"imageRef": None,
}}
ret = mgr._create_body(name=name, size=size, volume_type=volume_type,
description=display_description, metadata=metadata,
snapshot_id=snapshot_id, clone_id=clone_id,
availability_zone=availability_zone)
self.assertEqual(ret, fake_body)
def test_create_body_snapshot(self):
mgr = self.client._snapshot_manager
vol = self.volume
name = utils.random_unicode()
display_description = utils.random_unicode()
force = True
fake_body = {"snapshot": {
"display_name": name,
"display_description": display_description,
"volume_id": vol.id,
"force": str(force).lower(),
}}
ret = mgr._create_body(name=name, description=display_description,
volume=vol, force=force)
self.assertEqual(ret, fake_body)
def test_client_attach_to_instance(self):
clt = self.client
vol = self.volume
inst = fakes.FakeServer()
mp = utils.random_unicode()
vol.attach_to_instance = Mock()
clt.attach_to_instance(vol, inst, mp)
vol.attach_to_instance.assert_called_once_with(inst, mp)
def test_client_detach(self):
clt = self.client
vol = self.volume
vol.detach = Mock()
clt.detach(vol)
vol.detach.assert_called_once_with()
def test_client_delete_volume(self):
clt = self.client
vol = self.volume
vol.delete = Mock()
clt.delete_volume(vol)
vol.delete.assert_called_once_with(force=False)
def test_client_delete_volume_not_available(self):
clt = self.client
vol = self.volume
vol.manager.delete = Mock(side_effect=exc.VolumeNotAvailable(""))
self.assertRaises(exc.VolumeNotAvailable, clt.delete_volume, vol)
def test_client_delete_volume_force(self):
clt = self.client
vol = self.volume
vol.manager.delete = Mock()
vol.detach = Mock()
vol.delete_all_snapshots = Mock()
clt.delete_volume(vol, force=True)
vol.manager.delete.assert_called_once_with(vol)
vol.detach.assert_called_once_with()
vol.delete_all_snapshots.assert_called_once_with()
def test_volume_delete_all_snapshots(self):
vol = self.volume
snap = fakes.FakeBlockStorageSnapshot()
snap.delete = Mock()
vol.list_snapshots = Mock(return_value=[snap])
vol.delete_all_snapshots()
snap.delete.assert_called_once_with()
def test_client_snap_mgr_create_snapshot(self):
clt = self.client
vol = self.volume
name = utils.random_ascii()
description = utils.random_ascii()
mgr = clt._snapshot_manager
snap = fakes.FakeBlockStorageSnapshot()
mgr._create = Mock(return_value=snap)
ret = mgr.create(name, vol, description=description, force=True)
self.assertTrue(isinstance(ret, CloudBlockStorageSnapshot))
def test_client_create_snapshot(self):
clt = self.client
vol = self.volume
name = utils.random_unicode()
description = utils.random_unicode()
clt._snapshot_manager.create = Mock()
clt.create_snapshot(vol, name=name, description=description,
force=True)
clt._snapshot_manager.create.assert_called_once_with(volume=vol,
name=name, description=description, force=True)
def test_client_create_snapshot_not_available(self):
clt = self.client
vol = self.volume
name = utils.random_unicode()
description = utils.random_unicode()
cli_exc = exc.ClientException(409, "Request conflicts with in-progress")
sav = BaseManager.create
BaseManager.create = Mock(side_effect=cli_exc)
self.assertRaises(exc.VolumeNotAvailable, clt.create_snapshot, vol,
name=name, description=description)
BaseManager.create = sav
def test_client_create_snapshot_409_other(self):
clt = self.client
vol = self.volume
name = utils.random_unicode()
description = utils.random_unicode()
cli_exc = exc.ClientException(409, "FAKE")
sav = BaseManager.create
BaseManager.create = Mock(side_effect=cli_exc)
self.assertRaises(exc.ClientException, clt.create_snapshot, vol,
name=name, description=description)
BaseManager.create = sav
def test_client_create_snapshot_not_409(self):
clt = self.client
vol = self.volume
name = utils.random_unicode()
description = utils.random_unicode()
cli_exc = exc.ClientException(420, "FAKE")
sav = BaseManager.create
BaseManager.create = Mock(side_effect=cli_exc)
self.assertRaises(exc.ClientException, clt.create_snapshot, vol,
name=name, description=description)
BaseManager.create = sav
def test_client_delete_snapshot(self):
clt = self.client
snap = fakes.FakeBlockStorageSnapshot()
snap.delete = Mock()
clt.delete_snapshot(snap)
snap.delete.assert_called_once_with()
def test_snapshot_delete(self):
snap = self.snapshot
snap.manager.delete = Mock()
snap.delete()
snap.manager.delete.assert_called_once_with(snap)
def test_snapshot_delete_unavailable(self):
snap = self.snapshot
snap.status = "busy"
self.assertRaises(exc.SnapshotNotAvailable, snap.delete)
def test_snapshot_delete_retry(self):
snap = self.snapshot
snap.manager.delete = Mock(side_effect=exc.ClientException(
"Request conflicts with in-progress 'DELETE"))
pyrax.cloudblockstorage.RETRY_INTERVAL = 0.1
self.assertRaises(exc.ClientException, snap.delete)
def test_snapshot_update(self):
snap = self.snapshot
snap.manager.update = Mock()
nm = utils.random_unicode()
desc = utils.random_unicode()
snap.update(display_name=nm, display_description=desc)
snap.manager.update.assert_called_once_with(snap, display_name=nm,
display_description=desc)
def test_snapshot_rename(self):
snap = self.snapshot
snap.update = Mock()
nm = utils.random_unicode()
snap.rename(nm)
snap.update.assert_called_once_with(display_name=nm)
def test_volume_name_property(self):
vol = self.volume
nm = utils.random_unicode()
vol.display_name = nm
self.assertEqual(vol.name, vol.display_name)
nm = utils.random_unicode()
vol.name = nm
self.assertEqual(vol.name, vol.display_name)
def test_volume_description_property(self):
vol = self.volume
nm = utils.random_unicode()
vol.display_description = nm
self.assertEqual(vol.description, vol.display_description)
nm = utils.random_unicode()
vol.description = nm
self.assertEqual(vol.description, vol.display_description)
def test_snapshot_name_property(self):
snap = self.snapshot
nm = utils.random_unicode()
snap.display_name = nm
self.assertEqual(snap.name, snap.display_name)
nm = utils.random_unicode()
snap.name = nm
self.assertEqual(snap.name, snap.display_name)
def test_snapshot_description_property(self):
snap = self.snapshot
nm = utils.random_unicode()
snap.display_description = nm
self.assertEqual(snap.description, snap.display_description)
nm = utils.random_unicode()
snap.description = nm
self.assertEqual(snap.description, snap.display_description)
def test_mgr_update_snapshot(self):
clt = self.client
snap = self.snapshot
mgr = clt._snapshot_manager
mgr.api.method_put = Mock(return_value=(None, None))
name = utils.random_unicode()
desc = utils.random_unicode()
exp_uri = "/%s/%s" % (mgr.uri_base, snap.id)
exp_body = {"snapshot": {"display_name": name,
"display_description": desc}}
mgr.update(snap, display_name=name, display_description=desc)
mgr.api.method_put.assert_called_once_with(exp_uri, body=exp_body)
def test_mgr_update_snapshot_empty(self):
clt = self.client
snap = self.snapshot
mgr = clt._snapshot_manager
mgr.api.method_put = Mock(return_value=(None, None))
mgr.update(snap)
self.assertEqual(mgr.api.method_put.call_count, 0)
def test_clt_update_volume(self):
clt = self.client
vol = self.volume
name = utils.random_unicode()
desc = utils.random_unicode()
vol.update = Mock()
clt.update(vol, display_name=name, display_description=desc)
vol.update.assert_called_once_with(display_name=name,
display_description=desc)
def test_clt_rename(self):
clt = self.client
vol = self.volume
nm = utils.random_unicode()
clt.update = Mock()
clt.rename(vol, nm)
clt.update.assert_called_once_with(vol, display_name=nm)
def test_clt_update_snapshot(self):
clt = self.client
snap = self.snapshot
name = utils.random_unicode()
desc = utils.random_unicode()
snap.update = Mock()
clt.update_snapshot(snap, display_name=name, display_description=desc)
snap.update.assert_called_once_with(display_name=name,
display_description=desc)
def test_clt_rename_snapshot(self):
clt = self.client
snap = self.snapshot
nm = utils.random_unicode()
clt.update_snapshot = Mock()
clt.rename_snapshot(snap, nm)
clt.update_snapshot.assert_called_once_with(snap, display_name=nm)
def test_get_snapshot(self):
clt = self.client
mgr = clt._snapshot_manager
mgr.get = Mock()
snap = utils.random_unicode()
clt.get_snapshot(snap)
mgr.get.assert_called_once_with(snap)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 2,962,352,775,557,892,600 | 35.913706 | 80 | 0.616703 | false |
browseinfo/odoo_saas3_nicolas | addons/purchase/report/request_quotation.py | 3 | 1571 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class request_quotation(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(request_quotation, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'user': self.pool.get('res.users').browse(cr, uid, uid, context)
})
report_sxw.report_sxw('report.purchase.quotation','purchase.order','addons/purchase/report/request_quotation.rml',parser=request_quotation)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,832,913,256,495,361,000 | 43.885714 | 139 | 0.628262 | false |
ShashaQin/erpnext | erpnext/setup/doctype/authorization_rule/authorization_rule.py | 121 | 2292 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt
from frappe import _, msgprint
from frappe.model.document import Document
class AuthorizationRule(Document):
def check_duplicate_entry(self):
exists = frappe.db.sql("""select name, docstatus from `tabAuthorization Rule`
where transaction = %s and based_on = %s and system_user = %s
and system_role = %s and approving_user = %s and approving_role = %s
and to_emp =%s and to_designation=%s and name != %s""",
(self.transaction, self.based_on, cstr(self.system_user),
cstr(self.system_role), cstr(self.approving_user),
cstr(self.approving_role), cstr(self.to_emp),
cstr(self.to_designation), self.name))
auth_exists = exists and exists[0][0] or ''
if auth_exists:
frappe.throw(_("Duplicate Entry. Please check Authorization Rule {0}").format(auth_exists))
def validate_rule(self):
if self.transaction != 'Appraisal':
if not self.approving_role and not self.approving_user:
frappe.throw(_("Please enter Approving Role or Approving User"))
elif self.system_user and self.system_user == self.approving_user:
frappe.throw(_("Approving User cannot be same as user the rule is Applicable To"))
elif self.system_role and self.system_role == self.approving_role:
frappe.throw(_("Approving Role cannot be same as role the rule is Applicable To"))
elif self.transaction in ['Purchase Order', 'Purchase Receipt', \
'Purchase Invoice', 'Stock Entry'] and self.based_on \
in ['Average Discount', 'Customerwise Discount', 'Itemwise Discount']:
frappe.throw(_("Cannot set authorization on basis of Discount for {0}").format(self.transaction))
elif self.based_on == 'Average Discount' and flt(self.value) > 100.00:
frappe.throw(_("Discount must be less than 100"))
elif self.based_on == 'Customerwise Discount' and not self.master_name:
frappe.throw(_("Customer required for 'Customerwise Discount'"))
else:
if self.transaction == 'Appraisal':
self.based_on = "Not Applicable"
def validate(self):
self.check_duplicate_entry()
self.validate_rule()
if not self.value: self.value = 0.0
| agpl-3.0 | 3,154,960,566,215,696,000 | 44.84 | 101 | 0.714223 | false |
ChemiKhazi/Sprytile | rx/concurrency/virtualtimescheduler.py | 2 | 4699 | import logging
from rx.internal import PriorityQueue, ArgumentOutOfRangeException
from .schedulerbase import SchedulerBase
from .scheduleditem import ScheduledItem
from .scheduleperiodic import SchedulePeriodic
log = logging.getLogger("Rx")
class VirtualTimeScheduler(SchedulerBase):
"""Virtual Scheduler. This scheduler should work with either
datetime/timespan or ticks as int/int"""
    def __init__(self, initial_clock=0):
        """Creates a new virtual time scheduler with the specified initial
        clock value.
        Keyword arguments:
        initial_clock -- Initial value for the clock.
        """
self.clock = initial_clock
self.is_enabled = False
self.queue = PriorityQueue(1024)
super(VirtualTimeScheduler, self).__init__()
@property
    def now(self):
        """Gets the scheduler's absolute time clock value as datetime offset."""
return self.to_datetime(self.clock)
def schedule(self, action, state=None):
"""Schedules an action to be executed."""
return self.schedule_absolute(self.clock, action, state)
def schedule_relative(self, duetime, action, state=None):
"""Schedules an action to be executed at duetime. Return the disposable
object used to cancel the scheduled action (best effort)
Keyword arguments:
duetime -- Relative time after which to execute the action.
action -- Action to be executed.
state -- [Optional] State passed to the action to be executed."""
runat = self.add(self.clock, self.to_relative(duetime))
return self.schedule_absolute(duetime=runat, action=action, state=state)
def schedule_absolute(self, duetime, action, state=None):
"""Schedules an action to be executed at duetime."""
si = ScheduledItem(self, state, action, duetime)
self.queue.enqueue(si)
return si.disposable
def schedule_periodic(self, period, action, state=None):
scheduler = SchedulePeriodic(self, period, action, state)
return scheduler.start()
def start(self):
"""Starts the virtual time scheduler."""
if self.is_enabled:
return
self.is_enabled = True
while self.is_enabled:
next = self.get_next()
if not next:
break
if next.duetime > self.clock:
self.clock = next.duetime
next.invoke()
self.is_enabled = False
def stop(self):
"""Stops the virtual time scheduler."""
self.is_enabled = False
    def advance_to(self, time):
        """Advances the scheduler's clock to the specified time, running all
        work until that point.
        Keyword arguments:
        time -- Absolute time to advance the scheduler's clock to."""
if self.clock > time:
raise ArgumentOutOfRangeException()
if self.clock == time:
return
if self.is_enabled:
return
self.is_enabled = True
while self.is_enabled:
next = self.get_next()
if not next:
break
if next.duetime > time:
self.queue.enqueue(next)
break
if next.duetime > self.clock:
self.clock = next.duetime
next.invoke()
self.is_enabled = False
self.clock = time
    def advance_by(self, time):
        """Advances the scheduler's clock by the specified relative time,
        running all work scheduled for that timespan.
        Keyword arguments:
        time -- Relative time to advance the scheduler's clock by."""
log.debug("VirtualTimeScheduler.advance_by(time=%s)", time)
dt = self.add(self.clock, time)
if self.clock > dt:
raise ArgumentOutOfRangeException()
return self.advance_to(dt)
    def sleep(self, time):
        """Advances the scheduler's clock by the specified relative time.
        Keyword arguments:
        time -- Relative time to advance the scheduler's clock by."""
dt = self.add(self.clock, time)
if self.clock > dt:
raise ArgumentOutOfRangeException()
self.clock = dt
def get_next(self):
"""Returns the next scheduled item to be executed."""
while len(self.queue):
next = self.queue.dequeue()
if not next.is_cancelled():
return next
return None
@staticmethod
def add(absolute, relative):
raise NotImplementedError
| mit | -7,471,426,375,865,016,000 | 28.006173 | 80 | 0.614599 | false |