repo_name (string, 5–100 chars) | path (string, 4–299 chars) | copies (990 classes) | size (string, 4–7 chars) | content (string, 666–1.03M chars) | license (15 classes) | hash (int64) | line_mean (float64, 3.17–100) | line_max (int64, 7–1k) | alpha_frac (float64, 0.25–0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
jmiserez/pox | pox/forwarding/l2_ofcommand_learning.py | 2 | 5028 | # Copyright 2011 Kyriakos Zarifis
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
This is an L2 learning switch derived originally from NOX's pyswitch
example. It is now a demonstration of the ofcommand library for constructing
OpenFlow messages.
"""
from time import time
# TODO: mac_to_str and mac_to_int aren't currently defined in packet_utils...
#from pox.lib.packet.packet_utils import mac_to_str, mac_to_int
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.tcp import tcp
from pox.lib.packet.udp import udp
from pox.lib.packet.vlan import vlan
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.icmp import icmp
from pox.core import core
from pox.lib.revent import *
from pox.lib.addresses import EthAddr
log = core.getLogger()
import pox.openflow.ofcommand as ofcommand
class dumb_l2_switch (EventMixin):
def __init__ (self):
log.info("Starting")
self.listenTo(core)
self.st = {}
def _handle_GoingUpEvent (self, event):
self.listenTo(core.openflow)
def _handle_PacketIn (self, event):
"""Packet entry method.
Drop LLDP packets (or we get confused) and attempt learning and forwarding
"""
con = event.connection
dpid = event.connection.dpid
inport = event.port
packet = event.parse()
buffer_id = event.ofp.buffer_id
if not packet.parsed:
log.warning("%i %i ignoring unparsed packet", dpid, inport)
return
if not con in self.st:
log.info('registering new switch %s', str(dpid))
self.st[con] = {}
# don't forward lldp packets
if packet.type == ethernet.LLDP_TYPE:
return
# learn MAC on incoming port
self.do_l2_learning(con, inport, packet)
# forward packet
self.forward_l2_packet(con, inport, packet, packet.arr, buffer_id)
def do_l2_learning(self, con, inport, packet):
"""Given a packet, learn the source and peg to a switch/inport
"""
# learn MAC on incoming port
srcaddr = EthAddr(packet.src)
#if ord(srcaddr[0]) & 1:
# return
if self.st[con].has_key(srcaddr.toStr()): # change to raw?
# we had already heard from this switch
dst = self.st[con][srcaddr.toStr()] # raw?
if dst[0] != inport:
# but from a different port
log.info('MAC has moved from %s to %s', str(dst), str(inport))
else:
return
else:
log.info('learned MAC %s on Switch %s, Port %d', srcaddr.toStr(), con.dpid,inport)
# learn or update timestamp of entry
self.st[con][srcaddr.toStr()] = (inport, time(), packet) # raw?
# Replace any old entry for (switch,mac).
#mac = mac_to_int(packet.src)
def forward_l2_packet(self, con, inport, packet, buf, bufid):
"""If we've learned the destination MAC set up a flow and
send only out of its inport. Else, flood.
"""
dstaddr = EthAddr(packet.dst)
#if not ord(dstaddr[0]) & 1 and # what did this do?
if self.st[con].has_key(dstaddr.toStr()): # raw?
prt = self.st[con][dstaddr.toStr()] # raw?
if prt[0] == inport:
log.warning('**warning** learned port = inport')
ofcommand.floodPacket(con, inport, packet, buf, bufid)
else:
# We know the outport, set up a flow
log.info('installing flow for %s', str(packet))
match = ofcommand.extractMatch(packet)
actions = [ofcommand.Output(prt[0])]
ofcommand.addFlowEntry(con, inport, match, actions, bufid)
# Separate bufid, make addFlowEntry() only ADD the entry
# send/wait for Barrier
# sendBufferedPacket(bufid)
else:
# haven't learned destination MAC. Flood
ofcommand.floodPacket(con, inport, packet, buf, bufid)
'''
add arp cache timeout?
# Timeout for cached MAC entries
CACHE_TIMEOUT = 5
def timer_callback():
"""Responsible for timing out cache entries. Called every 1 second.
"""
global st
curtime = time()
for con in st.keys():
for entry in st[con].keys():
if (curtime - st[con][entry][1]) > CACHE_TIMEOUT:
con.msg('timing out entry '+mac_to_str(entry)+" -> "+str(st[con][entry][0])+' on switch ' + str(con))
st[con].pop(entry)
'''
| gpl-3.0 | 3,187,490,792,193,649,000 | 31.43871 | 109 | 0.640613 | false |
iledarn/addons-yelizariev | mail_wall_widgets/models.py | 16 | 12363 | from openerp.osv import osv,fields as old_fields
from openerp import api, models, fields, tools
from openerp.tools.safe_eval import safe_eval
try:
from openerp.addons.email_template.email_template import mako_template_env
except ImportError:
try:
from openerp.addons.mail.mail_template import mako_template_env
except ImportError:
pass
import copy
from openerp.tools.translate import _
from datetime import date, datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
class mail_wall_widgets_widget(models.Model):
_name = 'mail.wall.widgets.widget'
_order = "sequence, id"
_columns = {
'name': old_fields.char('Name', required=True, translate=True),
'type': old_fields.selection(string='Type', selection=[
('list', 'List'),
('funnel', 'Funnel'),
('slice', 'Slice'),
#('', ''),
#('', ''),
#('', ''),
#('', ''),
], help='''
Slice - use "domain" for total and "won_domain" for target
'''),
'description': old_fields.text('Description', translate=True),
'group_ids': old_fields.many2many('res.groups', relation='mail_wall_widgets_widget_group', column1='widget_id', column2='group_id', string='Groups', help="User groups to show widget"),
'model_id': old_fields.many2one('ir.model', string='Model', help='The model object for the field to evaluate'),
'domain': old_fields.char("Filter Domain", help="Domain for filtering records. General rule, not user depending, e.g. [('state', '=', 'done')]. The expression can contain reference to 'user' which is a browse record of the current user if not in batch mode.", required=True),
'limit': old_fields.integer('Limit', help='Limit count of records to show'),
'order': old_fields.char('Order', help='Order of records to show'),
'value_field_id': old_fields.many2one('ir.model.fields',
string='Value field',
help='The field containing the value of record'),
'stage_field_id': old_fields.many2one('ir.model.fields',
string='Stage field',
help='Field to split records in funnel. It can be selection type or many2one (the later should have "sequence" field)'),
#'stage_field_domain': old_fields.many2one('ir.model.fields',
# string='Stage field domain',
# help='(for many2one stage_field_id) Domain to find stage objects'),
'won_domain': old_fields.char('Won domain',
help='Domain to find won objects'),
'field_date_id': old_fields.many2one('ir.model.fields',
string='Date Field',
help='The date to use for the time period evaluated'),
'start_date': old_fields.date('Start Date'),
'end_date': old_fields.date('End Date'), # no start and end = always active
'content': old_fields.char('Line template', help='Mako template to show content'),
'value_field_monetary': old_fields.boolean('Value is monetary'),
'cache': old_fields.boolean('Cache'),
'active': old_fields.boolean('Active'),
'sequence': old_fields.integer('Sequence', help='Sequence number for ordering'),
}
precision = fields.Float('Precision', help='round(Value/precision) * precision. E.g. 12345,333333 will be rounded to 12345,33 for precision=0.01, and to 12000 for precision=1000', default=0.01)
agenda = fields.Boolean('Agenda', help='Split records by date: overdue, today, tomorrow, later')
_defaults = {
'active': True,
'cache': False,
'limit': None,
'order': None,
}
@api.one
def get_data(self, user):
domain = safe_eval(self.domain, {'user': user})
won_domain = safe_eval(self.won_domain or '[]', {'user': user})
field_date_name = self.field_date_id and self.field_date_id.name
if self.start_date and field_date_name:
domain.append((field_date_name, '>=', self.start_date))
if self.end_date and field_date_name:
domain.append((field_date_name, '<=', self.end_date))
res = {
'name': self.name,
'type': self.type,
'model': self.model_id.model,
'domain': str(domain),
'precision': self.precision,
}
obj = self.env[self.model_id.model]
if self.type == 'list':
total_count = obj.search_count(domain)
groups = [{'test': lambda r: True}]
if self.agenda:
today = date.today()
tomorrow = today + timedelta(days=1)
def r2date(r):
d = getattr(r, field_date_name)
if d:
d = datetime.strptime(d, self.field_date_id.ttype=='date' and DEFAULT_SERVER_DATE_FORMAT or DEFAULT_SERVER_DATETIME_FORMAT)
d = d.date()
else:
d = date.today()
return d
groups = [
{
'label': _('Overdue'),
'class': 'overdue',
'test': lambda r: r2date(r) < today,
'mandatory': False,
},
{
'label': _('Today'),
'class': 'today',
'test': lambda r: r2date(r) == today,
'mandatory': True,
},
{
'label': _('Tomorrow'),
'class': 'tomorrow',
'test': lambda r: r2date(r) == tomorrow,
'mandatory': False,
},
{
'label': _('Later'),
'class': 'later',
'test': lambda r: r2date(r) > tomorrow,
'mandatory': False,
},
]
for g in groups:
g['lines'] = []
res.update({
'more': self.limit and self.limit < total_count,
'total_count': total_count,
'agenda': self.agenda,
'groups': groups,
})
for r in obj.search(domain, limit=self.limit, order=self.order):
mako = mako_template_env.from_string(tools.ustr(self.content))
content = mako.render({'record':r})
r_json = {
'id': r.id,
#'fields': dict( (f,getattr(r,f)) for f in fields),
'display_mode': 'progress',
'state': 'inprogress',
'completeness': 0,
'name': content,
'description': '',
}
if self.value_field_id:
r_json['current'] = getattr(r, self.value_field_id.name)
if self.value_field_monetary:
r_json['monetary'] = 1
for g in groups:
if g['test'](r):
g['lines'].append(r_json)
break
for g in groups:
del g['test']
elif self.type == 'funnel':
stage_ids = [] # [key]
for group in obj.read_group(domain, [], [self.stage_field_id.name]):
key = group[self.stage_field_id.name]
if isinstance(key, (list, tuple)):
key = key[0]
stage_ids.append(key)
stages = [] # [{'name':Name, 'id': key}]
if self.stage_field_id.ttype == 'selection':
d = dict (self.stage_field_id.selection)
stages = [ {'id':id, 'name':d[id]} for id in stage_ids ]
else: # many2one
stage_model = self.stage_field_id.relation
for r in self.env[stage_model].browse(stage_ids):
stages.append({'id': r.id, 'name':r.name_get()[0][1]})
value_field_name = self.value_field_id.name
for stage in stages:
d = copy.copy(domain)
d.append( (self.stage_field_id.name, '=', stage['id']) )
result = obj.read_group(d, [value_field_name], [])
stage['closed_value'] = result and result[0][value_field_name] or 0.0
stage['domain'] = str(d)
# won value
d = domain + won_domain
result = obj.read_group(domain, [value_field_name], [])
won = {'name': _('Won'),
'id':'__won__',
'closed_value': result and result[0][value_field_name] or 0.0
}
stages.append(won)
cur = 0
for stage in reversed(stages):
cur += stage['closed_value']
stage['abs_value'] = cur
total_value = stages[0]['abs_value']
precision = self.precision
for s in stages:
s['rel_value'] = round(100*s['abs_value']/total_value/precision)*precision if total_value else 100
# dummy fields
s['display_mode'] = 'progress'
s['monetary'] = 1
res['stages'] = stages
res['won'] = won
res['conversion_rate'] = stages[-1]['rel_value']
elif self.type == 'slice':
value_field_name = self.value_field_id.name
for f,d in [('total', domain), ('won', won_domain)]:
result = obj.read_group(d, [value_field_name], [])
res[f] = result and result[0][value_field_name] or 0.0
res['domain'] = str(domain)
res['won_domain'] = str(won_domain)
precision = self.precision
total_value = res['total']
res['slice'] = round(100*res['won']/res['total']/precision)*precision if res['total'] else 100
# dummy fields
res['display_mode'] = 'progress'
res['monetary'] = self.value_field_monetary
return res
class mail_wall_widgets_cache(models.Model):
_name = 'mail.wall.widgets.cache'
cache = fields.Text('Cached data')
res_id = fields.Integer('Resource ID')
res_model = fields.Integer('Resource Model')
user_id = fields.Many2one('res.users')
class res_users(models.Model):
_inherit = 'res.users'
@api.v7
def get_serialised_mail_wall_widgets_summary(self, cr, uid, excluded_categories=None, context=None):
return self._get_serialised_mail_wall_widgets_summary(cr, uid, uid, excluded_categories=excluded_categories, context=context)[0]
@api.one
def _get_serialised_mail_wall_widgets_summary(self, excluded_categories=None):
"""
[
{
'id': ...,
'model': ...,
'currency': <res.currency id>,
'data': (depend on model)
},
]
"""
user = self.env.user
res = []
model = 'mail.wall.widgets.widget'
domain = [('group_ids', 'in', user.groups_id.ids), ('active', '=', True)]
for widget in self.env[model].search(domain, order='sequence'):
if widget.cache:
#TODO
continue
res.append({
'model': model,
'id': widget.id,
'currency': user.company_id.currency_id.id,
'data': widget.get_data(user)[0],
})
return res
#def get_challenge_suggestions(self, cr, uid, context=None):
# """Return the list of challenges suggested to the user"""
# challenge_info = []
# challenge_obj = self.pool.get('mail_wall_widgets.challenge')
# challenge_ids = challenge_obj.search(cr, uid, [('invited_user_ids', 'in', uid), ('state', '=', 'inprogress')], context=context)
# for challenge in challenge_obj.browse(cr, uid, challenge_ids, context=context):
# values = {
# 'id': challenge.id,
# 'name': challenge.name,
# 'description': challenge.description,
# }
# challenge_info.append(values)
# return challenge_info
| lgpl-3.0 | 5,038,466,031,038,543,000 | 42.685512 | 283 | 0.512093 | false |
tickbh/tdengine_cocos2dx_demo | tdengine_ddz/third_part/jsoncpp/test/pyjsontestrunner.py | 257 | 2137 | # Simple implementation of a json test runner to run the test against json-py.
import sys
import os.path
import json
import types
if len(sys.argv) != 2:
print "Usage: %s input-json-file", sys.argv[0]
sys.exit(3)
input_path = sys.argv[1]
base_path = os.path.splitext(input_path)[0]
actual_path = base_path + '.actual'
rewrite_path = base_path + '.rewrite'
rewrite_actual_path = base_path + '.actual-rewrite'
def valueTreeToString( fout, value, path = '.' ):
ty = type(value)
if ty is types.DictType:
fout.write( '%s={}\n' % path )
suffix = path[-1] != '.' and '.' or ''
names = value.keys()
names.sort()
for name in names:
valueTreeToString( fout, value[name], path + suffix + name )
elif ty is types.ListType:
fout.write( '%s=[]\n' % path )
for index, childValue in zip( xrange(0,len(value)), value ):
valueTreeToString( fout, childValue, path + '[%d]' % index )
elif ty is types.StringType:
fout.write( '%s="%s"\n' % (path,value) )
elif ty is types.IntType:
fout.write( '%s=%d\n' % (path,value) )
elif ty is types.FloatType:
fout.write( '%s=%.16g\n' % (path,value) )
elif value is True:
fout.write( '%s=true\n' % path )
elif value is False:
fout.write( '%s=false\n' % path )
elif value is None:
fout.write( '%s=null\n' % path )
else:
assert False and "Unexpected value type"
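# Example of the flattened form produced by valueTreeToString: the JSON value
# {"a": [1, true]} is written out as
#   .={}
#   .a=[]
#   .a[0]=1
#   .a[1]=true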
def parseAndSaveValueTree( input, actual_path ):
root = json.loads( input )
fout = file( actual_path, 'wt' )
valueTreeToString( fout, root )
fout.close()
return root
def rewriteValueTree( value, rewrite_path ):
rewrite = json.dumps( value )
#rewrite = rewrite[1:-1] # Somehow the string is quoted ! jsonpy bug ?
file( rewrite_path, 'wt').write( rewrite + '\n' )
return rewrite
input = file( input_path, 'rt' ).read()
root = parseAndSaveValueTree( input, actual_path )
rewrite = rewriteValueTree( json.write( root ), rewrite_path )
rewrite_root = parseAndSaveValueTree( rewrite, rewrite_actual_path )
sys.exit( 0 )
| apache-2.0 | 6,716,772,136,147,014,000 | 32.390625 | 78 | 0.610201 | false |
rickhurst/Django-non-rel-blog | django/utils/unittest/suite.py | 353 | 9293 | """TestSuite"""
import sys
import unittest
from django.utils.unittest import case, util
__unittest = True
class BaseTestSuite(unittest.TestSuite):
"""A simple test suite that doesn't provide class or module shared fixtures.
"""
def __init__(self, tests=()):
self._tests = []
self.addTests(tests)
def __repr__(self):
return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return list(self) == list(other)
def __ne__(self, other):
return not self == other
# Can't guarantee hash invariant, so flag as unhashable
__hash__ = None
def __iter__(self):
return iter(self._tests)
def countTestCases(self):
cases = 0
for test in self:
cases += test.countTestCases()
return cases
def addTest(self, test):
# sanity checks
if not hasattr(test, '__call__'):
raise TypeError("%r is not callable" % (repr(test),))
if isinstance(test, type) and issubclass(test,
(case.TestCase, TestSuite)):
raise TypeError("TestCases and TestSuites must be instantiated "
"before passing them to addTest()")
self._tests.append(test)
def addTests(self, tests):
if isinstance(tests, basestring):
raise TypeError("tests must be an iterable of tests, not a string")
for test in tests:
self.addTest(test)
def run(self, result):
for test in self:
if result.shouldStop:
break
test(result)
return result
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
for test in self:
test.debug()
class TestSuite(BaseTestSuite):
"""A test suite is a composite test consisting of a number of TestCases.
For use, create an instance of TestSuite, then add test case instances.
When all tests have been added, the suite can be passed to a test
runner, such as TextTestRunner. It will run the individual test cases
in the order in which they were added, aggregating the results. When
subclassing, do not forget to call the base class constructor.
"""
def run(self, result):
self._wrapped_run(result)
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
return result
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
debug = _DebugResult()
self._wrapped_run(debug, True)
self._tearDownPreviousClass(None, debug)
self._handleModuleTearDown(debug)
################################
# private methods
def _wrapped_run(self, result, debug=False):
for test in self:
if result.shouldStop:
break
if _isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if (getattr(test.__class__, '_classSetupFailed', False) or
getattr(result, '_moduleSetUpFailed', False)):
continue
if hasattr(test, '_wrapped_run'):
test._wrapped_run(result, debug)
elif not debug:
test(result)
else:
test.debug()
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if result._moduleSetUpFailed:
return
if getattr(currentClass, "__unittest_skip__", False):
return
try:
currentClass._classSetupFailed = False
except TypeError:
# test may actually be a function
# so its class will be a builtin-type
pass
setUpClass = getattr(currentClass, 'setUpClass', None)
if setUpClass is not None:
try:
setUpClass()
except Exception, e:
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
errorName = 'setUpClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
def _get_previous_module(self, result):
previousModule = None
previousClass = getattr(result, '_previousTestClass', None)
if previousClass is not None:
previousModule = previousClass.__module__
return previousModule
def _handleModuleFixture(self, test, result):
previousModule = self._get_previous_module(result)
currentModule = test.__class__.__module__
if currentModule == previousModule:
return
self._handleModuleTearDown(result)
result._moduleSetUpFailed = False
try:
module = sys.modules[currentModule]
except KeyError:
return
setUpModule = getattr(module, 'setUpModule', None)
if setUpModule is not None:
try:
setUpModule()
except Exception, e:
if isinstance(result, _DebugResult):
raise
result._moduleSetUpFailed = True
errorName = 'setUpModule (%s)' % currentModule
self._addClassOrModuleLevelException(result, e, errorName)
def _addClassOrModuleLevelException(self, result, exception, errorName):
error = _ErrorHolder(errorName)
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None and isinstance(exception, case.SkipTest):
addSkip(error, str(exception))
else:
result.addError(error, sys.exc_info())
def _handleModuleTearDown(self, result):
previousModule = self._get_previous_module(result)
if previousModule is None:
return
if result._moduleSetUpFailed:
return
try:
module = sys.modules[previousModule]
except KeyError:
return
tearDownModule = getattr(module, 'tearDownModule', None)
if tearDownModule is not None:
try:
tearDownModule()
except Exception, e:
if isinstance(result, _DebugResult):
raise
errorName = 'tearDownModule (%s)' % previousModule
self._addClassOrModuleLevelException(result, e, errorName)
def _tearDownPreviousClass(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if getattr(previousClass, '_classSetupFailed', False):
return
if getattr(result, '_moduleSetUpFailed', False):
return
if getattr(previousClass, "__unittest_skip__", False):
return
tearDownClass = getattr(previousClass, 'tearDownClass', None)
if tearDownClass is not None:
try:
tearDownClass()
except Exception, e:
if isinstance(result, _DebugResult):
raise
className = util.strclass(previousClass)
errorName = 'tearDownClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
class _ErrorHolder(object):
"""
Placeholder for a TestCase inside a result. As far as a TestResult
is concerned, this looks exactly like a unit test. Used to insert
arbitrary errors into a test suite run.
"""
# Inspired by the ErrorHolder from Twisted:
# http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
# attribute used by TestResult._exc_info_to_string
failureException = None
def __init__(self, description):
self.description = description
def id(self):
return self.description
def shortDescription(self):
return None
def __repr__(self):
return "<ErrorHolder description=%r>" % (self.description,)
def __str__(self):
return self.id()
def run(self, result):
# could call result.addError(...) - but this test-like object
# shouldn't be run anyway
pass
def __call__(self, result):
return self.run(result)
def countTestCases(self):
return 0
def _isnotsuite(test):
"A crude way to tell apart testcases and suites with duck-typing"
try:
iter(test)
except TypeError:
return True
return False
class _DebugResult(object):
"Used by the TestSuite to hold previous class when running in debug."
_previousTestClass = None
_moduleSetUpFailed = False
shouldStop = False
| bsd-3-clause | 8,920,969,436,171,339,000 | 31.379791 | 80 | 0.586248 | false |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/pip/req/req_uninstall.py | 510 | 6897 | from __future__ import absolute_import
import logging
import os
import tempfile
from pip.compat import uses_pycache, WINDOWS, cache_from_source
from pip.exceptions import UninstallationError
from pip.utils import rmtree, ask, is_local, renames, normalize_path
from pip.utils.logging import indent_log
logger = logging.getLogger(__name__)
class UninstallPathSet(object):
"""A set of file paths to be removed in the uninstallation of a
requirement."""
def __init__(self, dist):
self.paths = set()
self._refuse = set()
self.pth = {}
self.dist = dist
self.save_dir = None
self._moved_paths = []
def _permitted(self, path):
"""
Return True if the given path is one we are permitted to
remove/modify, False otherwise.
"""
return is_local(path)
def add(self, path):
head, tail = os.path.split(path)
# we normalize the head to resolve parent directory symlinks, but not
# the tail, since we only want to uninstall symlinks, not their targets
path = os.path.join(normalize_path(head), os.path.normcase(tail))
if not os.path.exists(path):
return
if self._permitted(path):
self.paths.add(path)
else:
self._refuse.add(path)
# __pycache__ files can show up after 'installed-files.txt' is created,
# due to imports
if os.path.splitext(path)[1] == '.py' and uses_pycache:
self.add(cache_from_source(path))
def add_pth(self, pth_file, entry):
pth_file = normalize_path(pth_file)
if self._permitted(pth_file):
if pth_file not in self.pth:
self.pth[pth_file] = UninstallPthEntries(pth_file)
self.pth[pth_file].add(entry)
else:
self._refuse.add(pth_file)
def compact(self, paths):
"""Compact a path set to contain the minimal number of paths
necessary to contain all paths in the set. If /a/path/ and
/a/path/to/a/file.txt are both in the set, leave only the
shorter path."""
short_paths = set()
for path in sorted(paths, key=len):
if not any([
(path.startswith(shortpath) and
path[len(shortpath.rstrip(os.path.sep))] == os.path.sep)
for shortpath in short_paths]):
short_paths.add(path)
return short_paths
def _stash(self, path):
return os.path.join(
self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep))
def remove(self, auto_confirm=False):
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self.paths:
logger.info(
"Can't uninstall '%s'. No files were found to uninstall.",
self.dist.project_name,
)
return
logger.info(
'Uninstalling %s-%s:',
self.dist.project_name, self.dist.version
)
with indent_log():
paths = sorted(self.compact(self.paths))
if auto_confirm:
response = 'y'
else:
for path in paths:
logger.info(path)
response = ask('Proceed (y/n)? ', ('y', 'n'))
if self._refuse:
logger.info('Not removing or modifying (outside of prefix):')
for path in self.compact(self._refuse):
logger.info(path)
if response == 'y':
self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
prefix='pip-')
for path in paths:
new_path = self._stash(path)
logger.debug('Removing file or directory %s', path)
self._moved_paths.append(path)
renames(path, new_path)
for pth in self.pth.values():
pth.remove()
logger.info(
'Successfully uninstalled %s-%s',
self.dist.project_name, self.dist.version
)
def rollback(self):
"""Rollback the changes previously made by remove()."""
if self.save_dir is None:
logger.error(
"Can't roll back %s; was not uninstalled",
self.dist.project_name,
)
return False
logger.info('Rolling back uninstall of %s', self.dist.project_name)
for path in self._moved_paths:
tmp_path = self._stash(path)
logger.debug('Replacing %s', path)
renames(tmp_path, path)
for pth in self.pth.values():
pth.rollback()
def commit(self):
"""Remove temporary save dir: rollback will no longer be possible."""
if self.save_dir is not None:
rmtree(self.save_dir)
self.save_dir = None
self._moved_paths = []
class UninstallPthEntries(object):
def __init__(self, pth_file):
if not os.path.isfile(pth_file):
raise UninstallationError(
"Cannot remove entries from nonexistent file %s" % pth_file
)
self.file = pth_file
self.entries = set()
self._saved_lines = None
def add(self, entry):
entry = os.path.normcase(entry)
# On Windows, os.path.normcase converts the entry to use
# backslashes. This is correct for entries that describe absolute
# paths outside of site-packages, but all the others use forward
# slashes.
if WINDOWS and not os.path.splitdrive(entry)[0]:
entry = entry.replace('\\', '/')
self.entries.add(entry)
def remove(self):
logger.debug('Removing pth entries from %s:', self.file)
with open(self.file, 'rb') as fh:
# windows uses '\r\n' with py3k, but uses '\n' with py2.x
lines = fh.readlines()
self._saved_lines = lines
if any(b'\r\n' in line for line in lines):
endline = '\r\n'
else:
endline = '\n'
for entry in self.entries:
try:
logger.debug('Removing entry: %s', entry)
lines.remove((entry + endline).encode("utf-8"))
except ValueError:
pass
with open(self.file, 'wb') as fh:
fh.writelines(lines)
def rollback(self):
if self._saved_lines is None:
logger.error(
'Cannot roll back changes to %s, none were made', self.file
)
return False
logger.debug('Rolling %s back to previous state', self.file)
with open(self.file, 'wb') as fh:
fh.writelines(self._saved_lines)
return True
| mit | -2,603,625,947,438,188,500 | 34.369231 | 79 | 0.543425 | false |
Autonomi/limn | Printrun/pronsole.py | 15 | 1106 | #!/usr/bin/env python
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import sys
import traceback
import logging
from printrun.pronsole import pronsole
if __name__ == "__main__":
interp = pronsole()
interp.parse_cmdline(sys.argv[1:])
try:
interp.cmdloop()
except SystemExit:
interp.p.disconnect()
except:
logging.error(_("Caught an exception, exiting:")
+ "\n" + traceback.format_exc())
interp.p.disconnect()
| mit | -6,605,440,017,058,288,000 | 31.529412 | 70 | 0.698011 | false |
rvalyi/OpenUpgrade | addons/hr_timesheet_invoice/report/hr_timesheet_invoice_report.py | 40 | 9518 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.sql import drop_view_if_exists
class report_timesheet_line(osv.osv):
_name = "report.timesheet.line"
_description = "Timesheet Line"
_auto = False
_columns = {
'name': fields.char('Year',size=64,required=False, readonly=True),
'user_id': fields.many2one('res.users', 'User', readonly=True),
'date': fields.date('Date', readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'quantity': fields.float('Time', readonly=True),
'cost': fields.float('Cost', readonly=True),
'product_id': fields.many2one('product.product', 'Product',readonly=True),
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'general_account_id': fields.many2one('account.account', 'General Account', readonly=True),
'invoice_id': fields.many2one('account.invoice', 'Invoiced', readonly=True),
'month': fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True),
}
_order = 'name desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_line')
cr.execute("""
create or replace view report_timesheet_line as (
select
min(l.id) as id,
l.date as date,
to_char(l.date,'YYYY') as name,
to_char(l.date,'MM') as month,
l.user_id,
to_char(l.date, 'YYYY-MM-DD') as day,
l.invoice_id,
l.product_id,
l.account_id,
l.general_account_id,
sum(l.unit_amount) as quantity,
sum(l.amount) as cost
from
account_analytic_line l
where
l.user_id is not null
group by
l.date,
l.user_id,
l.product_id,
l.account_id,
l.general_account_id,
l.invoice_id
)
""")
class report_timesheet_user(osv.osv):
_name = "report_timesheet.user"
_description = "Timesheet per day"
_auto = False
_columns = {
'name': fields.char('Year',size=64,required=False, readonly=True),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'quantity': fields.float('Time', readonly=True),
'cost': fields.float('Cost', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True),
}
_order = 'name desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_user')
cr.execute("""
create or replace view report_timesheet_user as (
select
min(l.id) as id,
to_char(l.date,'YYYY') as name,
to_char(l.date,'MM') as month,
l.user_id,
sum(l.unit_amount) as quantity,
sum(l.amount) as cost
from
account_analytic_line l
where
user_id is not null
group by l.date, to_char(l.date,'YYYY'),to_char(l.date,'MM'), l.user_id
)
""")
class report_timesheet_account(osv.osv):
_name = "report_timesheet.account"
_description = "Timesheet per account"
_auto = False
_columns = {
'name': fields.char('Year',size=64,required=False, readonly=True),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'quantity': fields.float('Time', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True),
}
_order = 'name desc,account_id desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_account')
cr.execute("""
create or replace view report_timesheet_account as (
select
min(id) as id,
to_char(create_date, 'YYYY') as name,
to_char(create_date,'MM') as month,
user_id,
account_id,
sum(unit_amount) as quantity
from
account_analytic_line
group by
to_char(create_date, 'YYYY'),to_char(create_date, 'MM'), user_id, account_id
)
""")
class report_timesheet_account_date(osv.osv):
_name = "report_timesheet.account.date"
_description = "Daily timesheet per account"
_auto = False
_columns = {
'name': fields.char('Year',size=64,required=False, readonly=True),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'quantity': fields.float('Time', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True),
}
_order = 'name desc,account_id desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_account_date')
cr.execute("""
create or replace view report_timesheet_account_date as (
select
min(id) as id,
to_char(date,'YYYY') as name,
to_char(date,'MM') as month,
user_id,
account_id,
sum(unit_amount) as quantity
from
account_analytic_line
group by
to_char(date,'YYYY'),to_char(date,'MM'), user_id, account_id
)
""")
class report_timesheet_invoice(osv.osv):
_name = "report_timesheet.invoice"
_description = "Costs to invoice"
_auto = False
_columns = {
'user_id':fields.many2one('res.users', 'User', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Project', readonly=True),
'manager_id':fields.many2one('res.users', 'Manager', readonly=True),
'quantity': fields.float('Time', readonly=True),
'amount_invoice': fields.float('To invoice', readonly=True)
}
_rec_name = 'user_id'
_order = 'user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_invoice')
cr.execute("""
create or replace view report_timesheet_invoice as (
select
min(l.id) as id,
l.user_id as user_id,
l.account_id as account_id,
a.user_id as manager_id,
sum(l.unit_amount) as quantity,
sum(l.unit_amount * t.list_price) as amount_invoice
from account_analytic_line l
left join hr_timesheet_invoice_factor f on (l.to_invoice=f.id)
left join account_analytic_account a on (l.account_id=a.id)
left join product_product p on (l.to_invoice=f.id)
left join product_template t on (l.to_invoice=f.id)
where
l.to_invoice is not null and
l.invoice_id is null
group by
l.user_id,
l.account_id,
a.user_id
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,952,013,429,644,142,000 | 43.064815 | 165 | 0.519752 | false |
mozillazg/redis-py-doc | tests/conftest.py | 2 | 6783 | import pytest
import random
import redis
from distutils.version import StrictVersion
from redis.connection import parse_url
from unittest.mock import Mock
from urllib.parse import urlparse
# redis 6 release candidates report a version number of 5.9.x. Use this
# constant for skip_if decorators as a placeholder until 6.0.0 is officially
# released
REDIS_6_VERSION = '5.9.0'
REDIS_INFO = {}
default_redis_url = "redis://localhost:6379/9"
def pytest_addoption(parser):
parser.addoption('--redis-url', default=default_redis_url,
action="store",
help="Redis connection string,"
" defaults to `%(default)s`")
def _get_info(redis_url):
client = redis.Redis.from_url(redis_url)
info = client.info()
client.connection_pool.disconnect()
return info
def pytest_sessionstart(session):
redis_url = session.config.getoption("--redis-url")
info = _get_info(redis_url)
version = info["redis_version"]
arch_bits = info["arch_bits"]
REDIS_INFO["version"] = version
REDIS_INFO["arch_bits"] = arch_bits
def skip_if_server_version_lt(min_version):
redis_version = REDIS_INFO["version"]
check = StrictVersion(redis_version) < StrictVersion(min_version)
return pytest.mark.skipif(
check,
reason="Redis version required >= {}".format(min_version))
def skip_if_server_version_gte(min_version):
redis_version = REDIS_INFO["version"]
check = StrictVersion(redis_version) >= StrictVersion(min_version)
return pytest.mark.skipif(
check,
reason="Redis version required < {}".format(min_version))
def skip_unless_arch_bits(arch_bits):
return pytest.mark.skipif(REDIS_INFO["arch_bits"] != arch_bits,
reason="server is not {}-bit".format(arch_bits))
def _get_client(cls, request, single_connection_client=True, flushdb=True,
**kwargs):
"""
Helper for fixtures or tests that need a Redis client
Uses the "--redis-url" command line argument for connection info. Unlike
ConnectionPool.from_url, keyword arguments to this function override
values specified in the URL.
"""
redis_url = request.config.getoption("--redis-url")
url_options = parse_url(redis_url)
url_options.update(kwargs)
pool = redis.ConnectionPool(**url_options)
client = cls(connection_pool=pool)
if single_connection_client:
client = client.client()
if request:
def teardown():
if flushdb:
try:
client.flushdb()
except redis.ConnectionError:
# handle cases where a test disconnected a client
# just manually retry the flushdb
client.flushdb()
client.close()
client.connection_pool.disconnect()
request.addfinalizer(teardown)
return client
@pytest.fixture()
def r(request):
with _get_client(redis.Redis, request) as client:
yield client
@pytest.fixture()
def r2(request):
"A second client for tests that need multiple"
with _get_client(redis.Redis, request) as client:
yield client
def _gen_cluster_mock_resp(r, response):
connection = Mock()
connection.read_response.return_value = response
r.connection = connection
return r
@pytest.fixture()
def mock_cluster_resp_ok(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
return _gen_cluster_mock_resp(r, 'OK')
@pytest.fixture()
def mock_cluster_resp_int(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
return _gen_cluster_mock_resp(r, '2')
@pytest.fixture()
def mock_cluster_resp_info(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
response = ('cluster_state:ok\r\ncluster_slots_assigned:16384\r\n'
'cluster_slots_ok:16384\r\ncluster_slots_pfail:0\r\n'
'cluster_slots_fail:0\r\ncluster_known_nodes:7\r\n'
'cluster_size:3\r\ncluster_current_epoch:7\r\n'
'cluster_my_epoch:2\r\ncluster_stats_messages_sent:170262\r\n'
'cluster_stats_messages_received:105653\r\n')
return _gen_cluster_mock_resp(r, response)
@pytest.fixture()
def mock_cluster_resp_nodes(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
response = ('c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 '
'slave aa90da731f673a99617dfe930306549a09f83a6b 0 '
'1447836263059 5 connected\n'
'9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 '
'master - 0 1447836264065 0 connected\n'
'aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 '
'myself,master - 0 0 2 connected 5461-10922\n'
'1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 '
'slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 '
'1447836262556 3 connected\n'
'4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 '
'master - 0 1447836262555 7 connected 0-5460\n'
'19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 '
'master - 0 1447836263562 3 connected 10923-16383\n'
'fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 '
'master,fail - 1447829446956 1447829444948 1 disconnected\n'
)
return _gen_cluster_mock_resp(r, response)
@pytest.fixture()
def mock_cluster_resp_slaves(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
response = ("['1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 "
"slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 "
"1447836789290 3 connected']")
return _gen_cluster_mock_resp(r, response)
@pytest.fixture(scope="session")
def master_host(request):
url = request.config.getoption("--redis-url")
parts = urlparse(url)
yield parts.hostname
def wait_for_command(client, monitor, command):
# issue a command with a key name that's local to this process.
# if we find a command with our key before the command we're waiting
# for, something went wrong
redis_version = REDIS_INFO["version"]
if StrictVersion(redis_version) >= StrictVersion('5.0.0'):
id_str = str(client.client_id())
else:
id_str = '%08x' % random.randrange(2**32)
key = '__REDIS-PY-%s__' % id_str
client.get(key)
while True:
monitor_response = monitor.next_command()
if command in monitor_response['command']:
return monitor_response
if key in monitor_response['command']:
return None
| mit | 3,884,862,944,654,136,300 | 33.963918 | 78 | 0.645879 | false |
fhartwig/adhocracy3.mercator | src/adhocracy_mercator/adhocracy_mercator/catalog/adhocracy.py | 2 | 3435 | """ Adhocracy catalog extensions."""
from substanced.catalog import Keyword
from adhocracy_core.catalog.adhocracy import AdhocracyCatalogIndexes
from adhocracy_core.interfaces import IResource
from adhocracy_core.utils import get_sheet_field
from adhocracy_mercator.sheets.mercator import IMercatorSubResources
from adhocracy_mercator.sheets.mercator import IFinance
from adhocracy_mercator.sheets.mercator import ILocation
class MercatorCatalogIndexes(AdhocracyCatalogIndexes):
"""Mercator indexes for the adhocracy catalog."""
mercator_location = Keyword()
mercator_requested_funding = Keyword()
mercator_budget = Keyword()
LOCATION_INDEX_KEYWORDS = ['specific', 'online', 'linked_to_ruhr']
def index_location(resource, default) -> list:
"""Return search index keywords based on the "location_is_..." fields."""
location = get_sheet_field(resource, IMercatorSubResources, 'location')
# TODO: Why is location '' in the first pass of that function
# during MercatorProposal create?
if location is None or location == '':
return default
locations = []
for keyword in LOCATION_INDEX_KEYWORDS:
if get_sheet_field(location, ILocation, 'location_is_' + keyword):
locations.append(keyword)
return locations if locations else default
BUDGET_INDEX_LIMIT_KEYWORDS = [5000, 10000, 20000, 50000]
def index_requested_funding(resource: IResource, default) -> str:
"""Return search index keyword based on the "requested_funding" field."""
# TODO: Why is finance '' in the first pass of that function
# during MercatorProposal create?
# This sounds like a bug, the default value for References is None,
finance = get_sheet_field(resource, IMercatorSubResources, 'finance')
if finance is None or finance == '':
return default
funding = get_sheet_field(finance, IFinance, 'requested_funding')
for limit in BUDGET_INDEX_LIMIT_KEYWORDS:
if funding <= limit:
return [str(limit)]
return default
def index_budget(resource: IResource, default) -> str:
"""
Return search index keyword based on the "budget" field.
The returned values are the same values as per the "requested_funding"
field, or "above_50000" if the total budget value is more than 50,000 euro.
"""
finance = get_sheet_field(resource, IMercatorSubResources, 'finance')
if finance is None or finance == '':
return default
funding = get_sheet_field(finance, IFinance, 'budget')
for limit in BUDGET_INDEX_LIMIT_KEYWORDS:
if funding <= limit:
return [str(limit)]
return ['above_50000']
def includeme(config):
"""Register catalog utilities and index functions."""
config.add_catalog_factory('adhocracy', MercatorCatalogIndexes)
config.add_indexview(index_location,
catalog_name='adhocracy',
index_name='mercator_location',
context=IMercatorSubResources)
config.add_indexview(index_requested_funding,
catalog_name='adhocracy',
index_name='mercator_requested_funding',
context=IMercatorSubResources)
config.add_indexview(index_budget,
catalog_name='adhocracy',
index_name='mercator_budget',
context=IMercatorSubResources)
| agpl-3.0 | 7,516,452,472,328,973,000 | 38.94186 | 79 | 0.675983 | false |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Demo/sockets/rpythond.py | 47 | 1214 | #! /usr/bin/env python
# Remote python server.
# Execute Python commands remotely and send output back.
# WARNING: This version has a gaping security hole -- it accepts requests
# from any host on the Internet!
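# Example client session (sketch; assumes the server is listening on
# localhost:4127, the default PORT below):
#   from socket import socket, AF_INET, SOCK_STREAM
#   s = socket(AF_INET, SOCK_STREAM)
#   s.connect(('localhost', 4127))
#   s.sendall('print 2 + 2')
#   s.shutdown(1)        # close the sending side so the server stops reading
#   print s.recv(1024)   # prints the captured output, here '4'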
import sys
from socket import *
import StringIO
import traceback
PORT = 4127
BUFSIZE = 1024
def main():
if len(sys.argv) > 1:
port = int(eval(sys.argv[1]))
else:
port = PORT
s = socket(AF_INET, SOCK_STREAM)
s.bind(('', port))
s.listen(1)
while 1:
conn, (remotehost, remoteport) = s.accept()
print 'connected by', remotehost, remoteport
request = ''
while 1:
data = conn.recv(BUFSIZE)
if not data:
break
request = request + data
reply = execute(request)
conn.send(reply)
conn.close()
def execute(request):
stdout = sys.stdout
stderr = sys.stderr
sys.stdout = sys.stderr = fakefile = StringIO.StringIO()
try:
try:
exec request in {}, {}
except:
print
traceback.print_exc(100)
finally:
sys.stderr = stderr
sys.stdout = stdout
return fakefile.getvalue()
main()
| apache-2.0 | 296,158,569,930,777,500 | 22.346154 | 73 | 0.579901 | false |
ddy88958620/lib | Python/scrapy/getinthemix/dv247.py | 2 | 2212 | import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from productloader import load_product
from scrapy.http import FormRequest
class DV247(BaseSpider):
name = 'dv247.com'
allowed_domains = ['dv247.com', 'www.dv247.com']
start_urls = ('http://www.dv247.com',)
def parse_product(self, response):
URL_BASE = 'http://www.dv247.com'
hxs = HtmlXPathSelector(response)
products = hxs.select('//div[@class="listItem clearfix"]')
for p in products:
res = {}
name = ' '.join(p.select('.//a//text()').extract())
url = p.select('.//a/@href')[0].extract()
url = urljoin_rfc(URL_BASE, url)
price = p.select('.//li[@class="price"]/text()').re('\xa3(.*)')[0]
res['url'] = url
res['description'] = name
res['price'] = price
yield load_product(res, response)
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
URL_BASE = 'http://www.dv247.com'
#categories
hxs = HtmlXPathSelector(response)
category_urls = hxs.select('//nav[@id="megamenu"]/ul/li/a/@href | \
//nav[@id="megamenu"]//li[@class="accessories threeCol"]//a/@href').extract()
#the following category had to be added manually because the link is broken.
category_urls.append('/computer-music-software/')
for url in category_urls:
if url == '#':
continue
url = urljoin_rfc(URL_BASE, url)
yield Request(url)
#next page
next_pages = hxs.select('//div[@class="listPaging"]')
if next_pages:
next_pages = next_pages[0].select('.//a[not(@class="selectedpage")]/@href').extract()
for page in next_pages:
url = urljoin_rfc(URL_BASE, page)
yield Request(url)
# products
for p in self.parse_product(response):
yield p
| apache-2.0 | -1,069,311,827,569,749,600 | 35.262295 | 113 | 0.572785 | false |
CityGrid/arsenal | server/arsenalweb/views/login.py | 1 | 4340 | '''Arsenal login page.'''
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from pyramid.view import view_config, forbidden_view_config
from pyramid.httpexceptions import HTTPOk
from pyramid.httpexceptions import HTTPFound
from pyramid.httpexceptions import HTTPUnauthorized
from pyramid.httpexceptions import HTTPForbidden
from pyramid.security import remember
from pyramid.session import signed_serialize
from pyramid_ldap import get_ldap_connector
from arsenalweb.views import (
db_authenticate,
get_authenticated_user,
pam_authenticate,
site_layout,
)
LOG = logging.getLogger(__name__)
@view_config(route_name='login', renderer='arsenalweb:templates/login.pt')
@forbidden_view_config(renderer='arsenalweb:templates/login.pt')
def login(request):
'''Process requests for the /login route.'''
page_title = 'Login'
LOG.debug('Processing login request...')
auth_user = get_authenticated_user(request)
if request.referer:
referer_host = request.referer.split('/')[2]
else:
referer_host = None
# Need to send the client a 401 so it can send a user/pass to auth.
# Without this the client just gets the login page with a 200 and
# thinks the command was successful.
if request.path_info.split('/')[1][:3] == 'api' and not request.authenticated_userid:
LOG.debug('request came from the api, sending request to re-auth')
return HTTPUnauthorized()
if request.referer and referer_host == request.host \
and request.referer.split('/')[3][:6] != 'logout':
return_url = request.referer
elif request.path != '/login':
return_url = request.url
else:
return_url = '/nodes'
login_name = ''
password = ''
error = ''
if 'form.submitted' in request.POST:
login_name = request.POST['login']
password = request.POST['password']
LOG.debug('Attempting to authenticate login: {0}'.format(login_name))
# Try local first, ldap/pam second (if configured)
LOG.debug('Authenticating against local DB...')
data = db_authenticate(login_name, password)
if data is None and request.registry.settings['arsenal.use_ldap']:
LOG.debug('Authenticating against LDAP...')
connector = get_ldap_connector(request)
data = connector.authenticate(login_name, password)
if data is None and request.registry.settings['arsenal.use_pam']:
LOG.debug('Authenticating against PAM...')
data = pam_authenticate(login_name, password)
if data is not None:
user_name = data[0]
encrypted = signed_serialize(login_name,
request.registry.settings['arsenal.cookie_token'])
headers = remember(request, user_name)
headers.append(('Set-Cookie', 'un=' + str(encrypted) + '; Max-Age=604800; Path=/'))
if 'api.client' in request.POST:
return HTTPOk(headers=headers)
else:
return HTTPFound(request.POST['return_url'], headers=headers)
else:
error = 'Invalid credentials'
request.response.status = 403
if request.authenticated_userid:
if request.path == '/login':
error = 'You are already logged in'
page_title = 'Already Logged In'
else:
error = 'You do not have permission to access this page'
page_title = 'Access Denied'
request.response.status = 403
return {
'au': auth_user,
'error': error,
'layout': site_layout('max'),
'login': login_name,
'page_title': page_title,
'password': password,
'return_url': return_url,
}
| apache-2.0 | 4,483,435,209,964,893,000 | 34.867769 | 95 | 0.645392 | false |
gribozavr/swift | utils/swift_build_support/tests/test_host.py | 48 | 2433 | # test_host.py - Unit tests for swift_build_support.cmake -*-- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import platform
import unittest
import swift_build_support.host as sbs_host
class HostTestCase(unittest.TestCase):
def test_system_memory(self):
# We make sure that we get an integer back. If we get an integer back,
# we know that we at least were able to get some sort of information
# from the system and it could be parsed as an integer. This is just a
# smoke test.
supported_platforms = [('Darwin', 'x86_64')]
mem = sbs_host.system_memory()
if (platform.system(), platform.machine()) not in supported_platforms:
self.assertIsNone(mem)
else:
self.assertIsInstance(mem, int)
def test_lto_link_job_counts(self):
# Make sure that:
#
# 1. we get back a dictionary with two keys in it, the first called
# llvm, the other called swift.
#
# 2. The values associated with these keys is either None (if we do not
# support the platform) or is an int that is reasonable (i.e. <
# 100). The number 100 is just a heuristic number that is appropriate
# currently since LTO uses so much memory. If and when that changes,
# this number should change.
supported_platforms = [('Darwin', 'x86_64')]
reasonable_upper_bound_of_lto_threads = 100
result = sbs_host.max_lto_link_job_counts()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 2)
if (platform.system(), platform.machine()) not in supported_platforms:
self.assertIsNone(result['llvm'])
self.assertIsNone(result['swift'])
return
self.assertIsNotNone(result['llvm'])
self.assertIsNotNone(result['swift'])
self.assertIsInstance(result['llvm'], int)
self.assertIsInstance(result['swift'], int)
self.assertLess(result['llvm'], reasonable_upper_bound_of_lto_threads)
self.assertLess(result['swift'], reasonable_upper_bound_of_lto_threads)
| apache-2.0 | 1,394,844,812,919,164,000 | 38.885246 | 79 | 0.657624 | false |
knorrium/selenium | py/selenium/webdriver/firefox/extension_connection.py | 66 | 2846 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import time
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common import utils
from selenium.webdriver.remote.command import Command
from selenium.webdriver.remote.remote_connection import RemoteConnection
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
LOGGER = logging.getLogger(__name__)
PORT = 0 #
HOST = None
_URL = ""
class ExtensionConnection(RemoteConnection):
def __init__(self, host, firefox_profile, firefox_binary=None, timeout=30):
self.profile = firefox_profile
self.binary = firefox_binary
HOST = host
if self.binary is None:
self.binary = FirefoxBinary()
if HOST is None:
HOST = "127.0.0.1"
PORT = utils.free_port()
self.profile.port = PORT
self.profile.update_preferences()
self.profile.add_extension()
self.binary.launch_browser(self.profile)
_URL = "http://%s:%d/hub" % (HOST, PORT)
RemoteConnection.__init__(
self, _URL, keep_alive=True)
def quit(self, sessionId=None):
self.execute(Command.QUIT, {'sessionId':sessionId})
while self.is_connectable():
LOGGER.info("waiting to quit")
time.sleep(1)
def connect(self):
"""Connects to the extension and retrieves the session id."""
return self.execute(Command.NEW_SESSION,
{'desiredCapabilities': DesiredCapabilities.FIREFOX})
    def connect_and_quit(self):
        """Connects to a running browser and quits immediately."""
        self._request('%s/extensions/firefox/quit' % _URL)
    def is_connectable(self):
        """Tries to connect to the extension but does not retrieve context."""
        utils.is_connectable(self.profile.port)
class ExtensionConnectionError(Exception):
"""An internal error occurred int the extension.
Might be caused by bad input or bugs in webdriver
"""
pass
| apache-2.0 | 3,949,414,792,794,885,000 | 33.707317 | 81 | 0.686578 | false |
arichar6/veusz | veusz/widgets/nonorthpoint.py | 1 | 10913 | # Copyright (C) 2010 Jeremy S. Sanders
# Email: Jeremy Sanders <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
"""Non orthogonal point plotting."""
from __future__ import division
import numpy as N
from ..compat import czip
from .. import qtall as qt4
from .. import document
from .. import datasets
from .. import setting
from .. import utils
from . import pickable
from .nonorthgraph import NonOrthGraph, FillBrush
from .widget import Widget
from .point import MarkerFillBrush
def _(text, disambiguation=None, context='NonOrthPoint'):
"""Translate text."""
return qt4.QCoreApplication.translate(context, text, disambiguation)
class NonOrthPoint(Widget):
'''Widget for plotting points in a non-orthogonal plot.'''
typename = 'nonorthpoint'
allowusercreation = True
description = _('Plot points on a graph with non-orthogonal axes')
@classmethod
def addSettings(klass, s):
'''Settings for widget.'''
Widget.addSettings(s)
s.add( setting.DatasetExtended(
'data1', 'x',
descr=_('Dataset containing 1st dataset, list of values '
'or expression'),
usertext=_('Dataset 1')) )
s.add( setting.DatasetExtended(
'data2', 'y',
descr=_('Dataset containing 2nd dataset, list of values '
'or expression'),
usertext=_('Dataset 2')) )
s.add( setting.DatasetOrStr(
'labels', '',
descr=_('Dataset or string to label points'),
usertext=_('Labels')) )
s.add( setting.DatasetExtended(
'scalePoints', '',
descr = _('Scale size of plotted markers by this dataset, '
' list of values or expression'),
usertext=_('Scale markers')) )
s.add( setting.MarkerColor('Color') )
s.add( setting.Color('color',
'auto',
descr = _('Master color'),
usertext = _('Color'),
formatting=True), 0 )
s.add( setting.DistancePt('markerSize',
'3pt',
descr = _('Size of marker to plot'),
usertext=_('Marker size'), formatting=True), 0 )
s.add( setting.Marker('marker',
'circle',
descr = _('Type of marker to plot'),
usertext=_('Marker'), formatting=True), 0 )
s.add( setting.Line('PlotLine',
descr = _('Plot line settings'),
usertext = _('Plot line')),
pixmap = 'settings_plotline' )
s.PlotLine.get('color').newDefault( setting.Reference('../color') )
s.add( setting.MarkerLine('MarkerLine',
descr = _('Line around the marker settings'),
usertext = _('Marker border')),
pixmap = 'settings_plotmarkerline' )
s.add( MarkerFillBrush('MarkerFill',
descr = _('Marker fill settings'),
usertext = _('Marker fill')),
pixmap = 'settings_plotmarkerfill' )
s.add( FillBrush('Fill1',
descr = _('Fill settings (1)'),
usertext = _('Area fill 1')),
pixmap = 'settings_plotfillbelow' )
s.add( FillBrush('Fill2',
descr = _('Fill settings (2)'),
usertext = _('Area fill 2')),
pixmap = 'settings_plotfillbelow' )
s.add( setting.PointLabel('Label',
descr = _('Label settings'),
usertext=_('Label')),
pixmap = 'settings_axislabel' )
@classmethod
def allowedParentTypes(klass):
return (NonOrthGraph,)
@property
def userdescription(self):
return _("data1='%s', data2='%s'") % (
self.settings.data1, self.settings.data2)
def updateDataRanges(self, inrange):
'''Extend inrange to range of data.'''
d1 = self.settings.get('data1').getData(self.document)
if d1:
inrange[0] = min( N.nanmin(d1.data), inrange[0] )
inrange[1] = max( N.nanmax(d1.data), inrange[1] )
d2 = self.settings.get('data2').getData(self.document)
if d2:
inrange[2] = min( N.nanmin(d2.data), inrange[2] )
inrange[3] = max( N.nanmax(d2.data), inrange[3] )
def pickPoint(self, x0, y0, bounds, distance = 'radial'):
p = pickable.DiscretePickable(self, 'data1', 'data2',
lambda v1, v2: self.parent.graphToPlotCoords(v1, v2))
return p.pickPoint(x0, y0, bounds, distance)
def pickIndex(self, oldindex, direction, bounds):
p = pickable.DiscretePickable(self, 'data1', 'data2',
lambda v1, v2: self.parent.graphToPlotCoords(v1, v2))
return p.pickIndex(oldindex, direction, bounds)
def drawLabels(self, painter, xplotter, yplotter,
textvals, markersize):
"""Draw labels for the points.
This is copied from the xy (point) widget class, so it
        probably should somehow be shared.
FIXME: sane automatic placement of labels
"""
s = self.settings
lab = s.get('Label')
        # work out offset and alignment
deltax = markersize*1.5*{'left':-1, 'centre':0, 'right':1}[lab.posnHorz]
deltay = markersize*1.5*{'top':-1, 'centre':0, 'bottom':1}[lab.posnVert]
alignhorz = {'left':1, 'centre':0, 'right':-1}[lab.posnHorz]
alignvert = {'top':-1, 'centre':0, 'bottom':1}[lab.posnVert]
        # make font and pen
textpen = lab.makeQPen(painter)
painter.setPen(textpen)
font = lab.makeQFont(painter)
angle = lab.angle
# iterate over each point and plot each label
for x, y, t in czip(xplotter+deltax, yplotter+deltay,
textvals):
utils.Renderer(
painter, font, x, y, t,
alignhorz, alignvert, angle,
doc=self.document).render()
def getColorbarParameters(self):
"""Return parameters for colorbar."""
s = self.settings
c = s.Color
return (c.min, c.max, c.scaling, s.MarkerFill.colorMap, 0,
s.MarkerFill.colorMapInvert)
def autoColor(self, painter, dataindex=0):
"""Automatic color for plotting."""
return painter.docColorAuto(
painter.helper.autoColorIndex((self, dataindex)))
def draw(self, parentposn, phelper, outerbounds=None):
'''Plot the data on a plotter.'''
posn = self.computeBounds(parentposn, phelper)
s = self.settings
d = self.document
# exit if hidden
if s.hide:
return
d1 = s.get('data1').getData(d)
d2 = s.get('data2').getData(d)
dscale = s.get('scalePoints').getData(d)
colorpoints = s.Color.get('points').getData(d)
text = s.get('labels').getData(d, checknull=True)
if not d1 or not d2:
return
x1, y1, x2, y2 = posn
cliprect = qt4.QRectF( qt4.QPointF(x1, y1), qt4.QPointF(x2, y2) )
painter = phelper.painter(self, posn)
with painter:
self.parent.setClip(painter, posn)
# split parts separated by NaNs
for v1, v2, scalings, cvals, textitems in datasets.generateValidDatasetParts(
[d1, d2, dscale, colorpoints, text]):
# convert data (chopping down length)
v1d, v2d = v1.data, v2.data
minlen = min(v1d.shape[0], v2d.shape[0])
v1d, v2d = v1d[:minlen], v2d[:minlen]
px, py = self.parent.graphToPlotCoords(v1d, v2d)
# do fill1 (if any)
if not s.Fill1.hide:
self.parent.drawFillPts(painter, s.Fill1, cliprect, px, py)
# do fill2
if not s.Fill2.hide:
self.parent.drawFillPts(painter, s.Fill2, cliprect, px, py)
# plot line
if not s.PlotLine.hide:
painter.setBrush( qt4.QBrush() )
painter.setPen(s.PlotLine.makeQPen(painter))
pts = qt4.QPolygonF()
utils.addNumpyToPolygonF(pts, px, py)
utils.plotClippedPolyline(painter, cliprect, pts)
# plot markers
markersize = s.get('markerSize').convert(painter)
if not s.MarkerLine.hide or not s.MarkerFill.hide:
pscale = colorvals = cmap = None
if scalings:
pscale = scalings.data
                # color points individually
if cvals and not s.MarkerFill.hide:
colorvals = utils.applyScaling(
cvals.data, s.Color.scaling,
s.Color.min, s.Color.max)
cmap = self.document.evaluate.getColormap(
s.MarkerFill.colorMap, s.MarkerFill.colorMapInvert)
painter.setBrush(s.MarkerFill.makeQBrushWHide(painter))
painter.setPen(s.MarkerLine.makeQPenWHide(painter))
utils.plotMarkers(painter, px, py, s.marker, markersize,
scaling=pscale, clip=cliprect,
cmap=cmap, colorvals=colorvals,
scaleline=s.MarkerLine.scaleLine)
# finally plot any labels
if textitems and not s.Label.hide:
self.drawLabels(painter, px, py, textitems, markersize)
# allow the factory to instantiate plotter
document.thefactory.register( NonOrthPoint )
| gpl-2.0 | 4,645,380,856,243,699,000 | 39.720149 | 89 | 0.535691 | false |
Sumith1896/sympy | sympy/polys/heuristicgcd.py | 86 | 3818 | """Heuristic polynomial GCD algorithm (HEUGCD). """
from __future__ import print_function, division
from sympy.core.compatibility import range
from .polyerrors import HeuristicGCDFailed
HEU_GCD_MAX = 6
def heugcd(f, g):
"""
Heuristic polynomial GCD in ``Z[X]``.
Given univariate polynomials ``f`` and ``g`` in ``Z[X]``, returns
their GCD and cofactors, i.e. polynomials ``h``, ``cff`` and ``cfg``
such that::
h = gcd(f, g), cff = quo(f, h) and cfg = quo(g, h)
The algorithm is purely heuristic which means it may fail to compute
the GCD. This will be signaled by raising an exception. In this case
you will need to switch to another GCD method.
The algorithm computes the polynomial GCD by evaluating polynomials
``f`` and ``g`` at certain points and computing (fast) integer GCD
of those evaluations. The polynomial GCD is recovered from the integer
    image by interpolation. The evaluation process reduces f and g variable
by variable into a large integer. The final step is to verify if the
interpolated polynomial is the correct GCD. This gives cofactors of
the input polynomials as a side effect.
Examples
========
>>> from sympy.polys.heuristicgcd import heugcd
>>> from sympy.polys import ring, ZZ
>>> R, x,y, = ring("x,y", ZZ)
>>> f = x**2 + 2*x*y + y**2
>>> g = x**2 + x*y
>>> h, cff, cfg = heugcd(f, g)
>>> h, cff, cfg
(x + y, x + y, x)
>>> cff*h == f
True
>>> cfg*h == g
True
References
==========
1. [Liao95]_
"""
assert f.ring == g.ring and f.ring.domain.is_ZZ
ring = f.ring
x0 = ring.gens[0]
domain = ring.domain
gcd, f, g = f.extract_ground(g)
f_norm = f.max_norm()
g_norm = g.max_norm()
B = domain(2*min(f_norm, g_norm) + 29)
x = max(min(B, 99*domain.sqrt(B)),
2*min(f_norm // abs(f.LC),
g_norm // abs(g.LC)) + 2)
for i in range(0, HEU_GCD_MAX):
ff = f.evaluate(x0, x)
gg = g.evaluate(x0, x)
if ff and gg:
if ring.ngens == 1:
h, cff, cfg = domain.cofactors(ff, gg)
else:
h, cff, cfg = heugcd(ff, gg)
h = _gcd_interpolate(h, x, ring)
h = h.primitive()[1]
cff_, r = f.div(h)
if not r:
cfg_, r = g.div(h)
if not r:
h = h.mul_ground(gcd)
return h, cff_, cfg_
cff = _gcd_interpolate(cff, x, ring)
h, r = f.div(cff)
if not r:
cfg_, r = g.div(h)
if not r:
h = h.mul_ground(gcd)
return h, cff, cfg_
cfg = _gcd_interpolate(cfg, x, ring)
h, r = g.div(cfg)
if not r:
cff_, r = f.div(h)
if not r:
h = h.mul_ground(gcd)
return h, cff_, cfg
x = 73794*x * domain.sqrt(domain.sqrt(x)) // 27011
raise HeuristicGCDFailed('no luck')
def _gcd_interpolate(h, x, ring):
"""Interpolate polynomial GCD from integer GCD. """
f, i = ring.zero, 0
# TODO: don't expose poly repr implementation details
if ring.ngens == 1:
while h:
g = h % x
if g > x // 2: g -= x
h = (h - g) // x
# f += X**i*g
if g:
f[(i,)] = g
i += 1
else:
while h:
g = h.trunc_ground(x)
h = (h - g).quo_ground(x)
# f += X**i*g
if g:
for monom, coeff in g.iterterms():
f[(i,) + monom] = coeff
i += 1
if f.LC < 0:
return -f
else:
return f
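# Illustrative usage sketch (an editorial addition, not part of SymPy): the
# heuristic may give up with ``HeuristicGCDFailed``, in which case the caller
# is expected to fall back to a deterministic GCD routine.  ``fallback_gcd``
# below is a hypothetical placeholder for such a routine.
#
#   try:
#       h, cff, cfg = heugcd(f, g)
#   except HeuristicGCDFailed:
#       h, cff, cfg = fallback_gcd(f, g)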
| bsd-3-clause | 3,592,611,716,500,382,000 | 24.284768 | 74 | 0.490309 | false |
CarlosCondor/pelisalacarta-xbmc-plus | lib/atom/auth.py | 26 | 1123 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = '[email protected] (Jeff Scudder)'
import base64
class BasicAuth(object):
"""Sets the Authorization header as defined in RFC1945"""
def __init__(self, user_id, password):
self.basic_cookie = base64.encodestring(
'%s:%s' % (user_id, password)).strip()
def modify_request(self, http_request):
http_request.headers['Authorization'] = 'Basic %s' % self.basic_cookie
ModifyRequest = modify_request
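# Minimal usage sketch (an editorial addition, not part of the original
# module).  ``http_request`` is assumed to be any object exposing a mutable
# ``headers`` dict, which is all modify_request() relies on.
#
#   auth = BasicAuth('user', 'password')
#   auth.modify_request(http_request)
#   # http_request.headers['Authorization'] now holds 'Basic dXNlcjpwYXNzd29yZA=='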
| gpl-3.0 | -1,688,085,138,080,248,000 | 29.351351 | 76 | 0.705254 | false |
Thraxis/SickRage | lib/unidecode/x081.py | 252 | 4673 | data = (
'Cheng ', # 0x00
'Tiao ', # 0x01
'Zhi ', # 0x02
'Cui ', # 0x03
'Mei ', # 0x04
'Xie ', # 0x05
'Cui ', # 0x06
'Xie ', # 0x07
'Mo ', # 0x08
'Mai ', # 0x09
'Ji ', # 0x0a
'Obiyaakasu ', # 0x0b
'[?] ', # 0x0c
'Kuai ', # 0x0d
'Sa ', # 0x0e
'Zang ', # 0x0f
'Qi ', # 0x10
'Nao ', # 0x11
'Mi ', # 0x12
'Nong ', # 0x13
'Luan ', # 0x14
'Wan ', # 0x15
'Bo ', # 0x16
'Wen ', # 0x17
'Guan ', # 0x18
'Qiu ', # 0x19
'Jiao ', # 0x1a
'Jing ', # 0x1b
'Rou ', # 0x1c
'Heng ', # 0x1d
'Cuo ', # 0x1e
'Lie ', # 0x1f
'Shan ', # 0x20
'Ting ', # 0x21
'Mei ', # 0x22
'Chun ', # 0x23
'Shen ', # 0x24
'Xie ', # 0x25
'De ', # 0x26
'Zui ', # 0x27
'Cu ', # 0x28
'Xiu ', # 0x29
'Xin ', # 0x2a
'Tuo ', # 0x2b
'Pao ', # 0x2c
'Cheng ', # 0x2d
'Nei ', # 0x2e
'Fu ', # 0x2f
'Dou ', # 0x30
'Tuo ', # 0x31
'Niao ', # 0x32
'Noy ', # 0x33
'Pi ', # 0x34
'Gu ', # 0x35
'Gua ', # 0x36
'Li ', # 0x37
'Lian ', # 0x38
'Zhang ', # 0x39
'Cui ', # 0x3a
'Jie ', # 0x3b
'Liang ', # 0x3c
'Zhou ', # 0x3d
'Pi ', # 0x3e
'Biao ', # 0x3f
'Lun ', # 0x40
'Pian ', # 0x41
'Guo ', # 0x42
'Kui ', # 0x43
'Chui ', # 0x44
'Dan ', # 0x45
'Tian ', # 0x46
'Nei ', # 0x47
'Jing ', # 0x48
'Jie ', # 0x49
'La ', # 0x4a
'Yi ', # 0x4b
'An ', # 0x4c
'Ren ', # 0x4d
'Shen ', # 0x4e
'Chuo ', # 0x4f
'Fu ', # 0x50
'Fu ', # 0x51
'Ju ', # 0x52
'Fei ', # 0x53
'Qiang ', # 0x54
'Wan ', # 0x55
'Dong ', # 0x56
'Pi ', # 0x57
'Guo ', # 0x58
'Zong ', # 0x59
'Ding ', # 0x5a
'Wu ', # 0x5b
'Mei ', # 0x5c
'Ruan ', # 0x5d
'Zhuan ', # 0x5e
'Zhi ', # 0x5f
'Cou ', # 0x60
'Gua ', # 0x61
'Ou ', # 0x62
'Di ', # 0x63
'An ', # 0x64
'Xing ', # 0x65
'Nao ', # 0x66
'Yu ', # 0x67
'Chuan ', # 0x68
'Nan ', # 0x69
'Yun ', # 0x6a
'Zhong ', # 0x6b
'Rou ', # 0x6c
'E ', # 0x6d
'Sai ', # 0x6e
'Tu ', # 0x6f
'Yao ', # 0x70
'Jian ', # 0x71
'Wei ', # 0x72
'Jiao ', # 0x73
'Yu ', # 0x74
'Jia ', # 0x75
'Duan ', # 0x76
'Bi ', # 0x77
'Chang ', # 0x78
'Fu ', # 0x79
'Xian ', # 0x7a
'Ni ', # 0x7b
'Mian ', # 0x7c
'Wa ', # 0x7d
'Teng ', # 0x7e
'Tui ', # 0x7f
'Bang ', # 0x80
'Qian ', # 0x81
'Lu ', # 0x82
'Wa ', # 0x83
'Sou ', # 0x84
'Tang ', # 0x85
'Su ', # 0x86
'Zhui ', # 0x87
'Ge ', # 0x88
'Yi ', # 0x89
'Bo ', # 0x8a
'Liao ', # 0x8b
'Ji ', # 0x8c
'Pi ', # 0x8d
'Xie ', # 0x8e
'Gao ', # 0x8f
'Lu ', # 0x90
'Bin ', # 0x91
'Ou ', # 0x92
'Chang ', # 0x93
'Lu ', # 0x94
'Guo ', # 0x95
'Pang ', # 0x96
'Chuai ', # 0x97
'Piao ', # 0x98
'Jiang ', # 0x99
'Fu ', # 0x9a
'Tang ', # 0x9b
'Mo ', # 0x9c
'Xi ', # 0x9d
'Zhuan ', # 0x9e
'Lu ', # 0x9f
'Jiao ', # 0xa0
'Ying ', # 0xa1
'Lu ', # 0xa2
'Zhi ', # 0xa3
'Tara ', # 0xa4
'Chun ', # 0xa5
'Lian ', # 0xa6
'Tong ', # 0xa7
'Peng ', # 0xa8
'Ni ', # 0xa9
'Zha ', # 0xaa
'Liao ', # 0xab
'Cui ', # 0xac
'Gui ', # 0xad
'Xiao ', # 0xae
'Teng ', # 0xaf
'Fan ', # 0xb0
'Zhi ', # 0xb1
'Jiao ', # 0xb2
'Shan ', # 0xb3
'Wu ', # 0xb4
'Cui ', # 0xb5
'Run ', # 0xb6
'Xiang ', # 0xb7
'Sui ', # 0xb8
'Fen ', # 0xb9
'Ying ', # 0xba
'Tan ', # 0xbb
'Zhua ', # 0xbc
'Dan ', # 0xbd
'Kuai ', # 0xbe
'Nong ', # 0xbf
'Tun ', # 0xc0
'Lian ', # 0xc1
'Bi ', # 0xc2
'Yong ', # 0xc3
'Jue ', # 0xc4
'Chu ', # 0xc5
'Yi ', # 0xc6
'Juan ', # 0xc7
'La ', # 0xc8
'Lian ', # 0xc9
'Sao ', # 0xca
'Tun ', # 0xcb
'Gu ', # 0xcc
'Qi ', # 0xcd
'Cui ', # 0xce
'Bin ', # 0xcf
'Xun ', # 0xd0
'Ru ', # 0xd1
'Huo ', # 0xd2
'Zang ', # 0xd3
'Xian ', # 0xd4
'Biao ', # 0xd5
'Xing ', # 0xd6
'Kuan ', # 0xd7
'La ', # 0xd8
'Yan ', # 0xd9
'Lu ', # 0xda
'Huo ', # 0xdb
'Zang ', # 0xdc
'Luo ', # 0xdd
'Qu ', # 0xde
'Zang ', # 0xdf
'Luan ', # 0xe0
'Ni ', # 0xe1
'Zang ', # 0xe2
'Chen ', # 0xe3
'Qian ', # 0xe4
'Wo ', # 0xe5
'Guang ', # 0xe6
'Zang ', # 0xe7
'Lin ', # 0xe8
'Guang ', # 0xe9
'Zi ', # 0xea
'Jiao ', # 0xeb
'Nie ', # 0xec
'Chou ', # 0xed
'Ji ', # 0xee
'Gao ', # 0xef
'Chou ', # 0xf0
'Mian ', # 0xf1
'Nie ', # 0xf2
'Zhi ', # 0xf3
'Zhi ', # 0xf4
'Ge ', # 0xf5
'Jian ', # 0xf6
'Die ', # 0xf7
'Zhi ', # 0xf8
'Xiu ', # 0xf9
'Tai ', # 0xfa
'Zhen ', # 0xfb
'Jiu ', # 0xfc
'Xian ', # 0xfd
'Yu ', # 0xfe
'Cha ', # 0xff
)
| gpl-3.0 | 102,784,568,937,478,900 | 17.112403 | 24 | 0.395249 | false |
fkolacek/FIT-VUT | bp-revok/python/lib/python2.7/email/test/test_email_torture.py | 150 | 3669 | # Copyright (C) 2002-2004 Python Software Foundation
#
# A torture test of the email package. This should not be run as part of the
# standard Python test suite since it requires several meg of email messages
# collected in the wild. These source messages are not checked into the
# Python distro, but are available as part of the standalone email package at
# http://sf.net/projects/mimelib
import sys
import os
import unittest
from cStringIO import StringIO
from types import ListType
from email.test.test_email import TestEmailBase
from test.test_support import TestSkipped, run_unittest
import email
from email import __file__ as testfile
from email.iterators import _structure
def openfile(filename):
from os.path import join, dirname, abspath
path = abspath(join(dirname(testfile), os.pardir, 'moredata', filename))
return open(path, 'r')
# Prevent this test from running in the Python distro
try:
openfile('crispin-torture.txt')
except IOError:
raise TestSkipped
class TortureBase(TestEmailBase):
def _msgobj(self, filename):
fp = openfile(filename)
try:
msg = email.message_from_file(fp)
finally:
fp.close()
return msg
class TestCrispinTorture(TortureBase):
# Mark Crispin's torture test from the SquirrelMail project
def test_mondo_message(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
msg = self._msgobj('crispin-torture.txt')
payload = msg.get_payload()
eq(type(payload), ListType)
eq(len(payload), 12)
eq(msg.preamble, None)
eq(msg.epilogue, '\n')
# Probably the best way to verify the message is parsed correctly is to
# dump its structure and compare it against the known structure.
fp = StringIO()
_structure(msg, fp=fp)
neq(fp.getvalue(), """\
multipart/mixed
text/plain
message/rfc822
multipart/alternative
text/plain
multipart/mixed
text/richtext
application/andrew-inset
message/rfc822
audio/basic
audio/basic
image/pbm
message/rfc822
multipart/mixed
multipart/mixed
text/plain
audio/x-sun
multipart/mixed
image/gif
image/gif
application/x-be2
application/atomicmail
audio/x-sun
message/rfc822
multipart/mixed
text/plain
image/pgm
text/plain
message/rfc822
multipart/mixed
text/plain
image/pbm
message/rfc822
application/postscript
image/gif
message/rfc822
multipart/mixed
audio/basic
audio/basic
message/rfc822
multipart/mixed
application/postscript
text/plain
message/rfc822
multipart/mixed
text/plain
multipart/parallel
image/gif
audio/basic
application/atomicmail
message/rfc822
audio/x-sun
""")
def _testclasses():
mod = sys.modules[__name__]
return [getattr(mod, name) for name in dir(mod) if name.startswith('Test')]
def suite():
suite = unittest.TestSuite()
for testclass in _testclasses():
suite.addTest(unittest.makeSuite(testclass))
return suite
def test_main():
for testclass in _testclasses():
run_unittest(testclass)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| apache-2.0 | 455,763,016,855,514,800 | 25.207143 | 79 | 0.60725 | false |
nickhdamico/py | lib/cherrypy/wsgiserver/ssl_builtin.py | 56 | 3242 | """A library for integrating Python's builtin ``ssl`` library with CherryPy.
The ssl module must be importable for SSL functionality.
To use this module, set ``CherryPyWSGIServer.ssl_adapter`` to an instance of
``BuiltinSSLAdapter``.
"""
try:
import ssl
except ImportError:
ssl = None
try:
from _pyio import DEFAULT_BUFFER_SIZE
except ImportError:
try:
from io import DEFAULT_BUFFER_SIZE
except ImportError:
DEFAULT_BUFFER_SIZE = -1
import sys
from cherrypy import wsgiserver
class BuiltinSSLAdapter(wsgiserver.SSLAdapter):
"""A wrapper for integrating Python's builtin ssl module with CherryPy."""
certificate = None
"""The filename of the server SSL certificate."""
private_key = None
"""The filename of the server's private key file."""
def __init__(self, certificate, private_key, certificate_chain=None):
if ssl is None:
raise ImportError("You must install the ssl module to use HTTPS.")
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
def bind(self, sock):
"""Wrap and return the given socket."""
return sock
def wrap(self, sock):
"""Wrap and return the given socket, plus WSGI environ entries."""
try:
s = ssl.wrap_socket(sock, do_handshake_on_connect=True,
server_side=True, certfile=self.certificate,
keyfile=self.private_key,
ssl_version=ssl.PROTOCOL_SSLv23)
except ssl.SSLError:
e = sys.exc_info()[1]
if e.errno == ssl.SSL_ERROR_EOF:
# This is almost certainly due to the cherrypy engine
# 'pinging' the socket to assert it's connectable;
# the 'ping' isn't SSL.
return None, {}
elif e.errno == ssl.SSL_ERROR_SSL:
if e.args[1].endswith('http request'):
# The client is speaking HTTP to an HTTPS server.
raise wsgiserver.NoSSLError
elif e.args[1].endswith('unknown protocol'):
# The client is speaking some non-HTTP protocol.
# Drop the conn.
return None, {}
raise
return s, self.get_environ(s)
# TODO: fill this out more with mod ssl env
def get_environ(self, sock):
"""Create WSGI environ entries to be merged into each request."""
cipher = sock.cipher()
ssl_environ = {
"wsgi.url_scheme": "https",
"HTTPS": "on",
'SSL_PROTOCOL': cipher[1],
'SSL_CIPHER': cipher[0]
# SSL_VERSION_INTERFACE string The mod_ssl program version
# SSL_VERSION_LIBRARY string The OpenSSL program version
}
return ssl_environ
if sys.version_info >= (3, 0):
def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
return wsgiserver.CP_makefile(sock, mode, bufsize)
else:
def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
return wsgiserver.CP_fileobject(sock, mode, bufsize)
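# Minimal usage sketch (editorial addition), following the module docstring;
# ``wsgi_app`` is assumed to be your WSGI application callable.
#
#   from cherrypy import wsgiserver
#   server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 8443), wsgi_app)
#   server.ssl_adapter = BuiltinSSLAdapter('server.crt', 'server.key')
#   server.start()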
| gpl-3.0 | 5,481,705,362,562,528,000 | 34.23913 | 78 | 0.589143 | false |
appapantula/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause | 5,731,666,377,884,670,000 | 36.005263 | 79 | 0.625089 | false |
redhat-openstack/neutron | neutron/tests/unit/ryu/test_ryu_db.py | 9 | 2313 | # Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import operator
from neutron.db import api as db
from neutron.plugins.ryu.common import config # noqa
from neutron.plugins.ryu.db import api_v2 as db_api_v2
from neutron.tests.unit import test_db_plugin as test_plugin
class RyuDBTest(test_plugin.NeutronDbPluginV2TestCase):
@staticmethod
def _tunnel_key_sort(key_list):
key_list.sort(key=operator.attrgetter('tunnel_key'))
return [(key.network_id, key.tunnel_key) for key in key_list]
def test_key_allocation(self):
tunnel_key = db_api_v2.TunnelKey()
session = db.get_session()
with contextlib.nested(self.network('network-0'),
self.network('network-1')
) as (network_0, network_1):
network_id0 = network_0['network']['id']
key0 = tunnel_key.allocate(session, network_id0)
network_id1 = network_1['network']['id']
key1 = tunnel_key.allocate(session, network_id1)
key_list = tunnel_key.all_list()
self.assertEqual(len(key_list), 2)
expected_list = [(network_id0, key0), (network_id1, key1)]
self.assertEqual(self._tunnel_key_sort(key_list),
expected_list)
tunnel_key.delete(session, network_id0)
key_list = tunnel_key.all_list()
self.assertEqual(self._tunnel_key_sort(key_list),
[(network_id1, key1)])
tunnel_key.delete(session, network_id1)
self.assertEqual(tunnel_key.all_list(), [])
| apache-2.0 | 5,614,313,625,237,225,000 | 41.833333 | 78 | 0.619109 | false |
whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/twisted/python/urlpath.py | 3 | 9084 | # -*- test-case-name: twisted.python.test.test_urlpath -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
L{URLPath}, a representation of a URL.
"""
from __future__ import division, absolute_import
from twisted.python.compat import (
nativeString, unicode, urllib_parse as urlparse, urlunquote, urlquote
)
from hyperlink import URL as _URL
_allascii = b"".join([chr(x).encode('ascii') for x in range(1, 128)])
def _rereconstituter(name):
"""
    Attribute declaration to preserve mutability on L{URLPath}.
@param name: a public attribute name
@type name: native L{str}
@return: a descriptor which retrieves the private version of the attribute
on get and calls rerealize on set.
"""
privateName = nativeString("_") + name
return property(
lambda self: getattr(self, privateName),
lambda self, value: (setattr(self, privateName,
value if isinstance(value, bytes)
else value.encode("charmap")) or
self._reconstitute())
)
class URLPath(object):
"""
A representation of a URL.
@ivar scheme: The scheme of the URL (e.g. 'http').
@type scheme: L{bytes}
@ivar netloc: The network location ("host").
@type netloc: L{bytes}
@ivar path: The path on the network location.
@type path: L{bytes}
@ivar query: The query argument (the portion after ? in the URL).
@type query: L{bytes}
@ivar fragment: The page fragment (the portion after # in the URL).
@type fragment: L{bytes}
"""
def __init__(self, scheme=b'', netloc=b'localhost', path=b'',
query=b'', fragment=b''):
self._scheme = scheme or b'http'
self._netloc = netloc
self._path = path or b'/'
self._query = query
self._fragment = fragment
self._reconstitute()
def _reconstitute(self):
"""
Reconstitute this L{URLPath} from all its given attributes.
"""
urltext = urlquote(
urlparse.urlunsplit((self._scheme, self._netloc,
self._path, self._query, self._fragment)),
safe=_allascii
)
self._url = _URL.fromText(urltext.encode("ascii").decode("ascii"))
scheme = _rereconstituter("scheme")
netloc = _rereconstituter("netloc")
path = _rereconstituter("path")
query = _rereconstituter("query")
fragment = _rereconstituter("fragment")
@classmethod
def _fromURL(cls, urlInstance):
"""
Reconstruct all the public instance variables of this L{URLPath} from
its underlying L{_URL}.
@param urlInstance: the object to base this L{URLPath} on.
@type urlInstance: L{_URL}
@return: a new L{URLPath}
"""
self = cls.__new__(cls)
self._url = urlInstance.replace(path=urlInstance.path or [u""])
self._scheme = self._url.scheme.encode("ascii")
self._netloc = self._url.authority().encode("ascii")
self._path = (_URL(path=self._url.path,
rooted=True).asURI().asText()
.encode("ascii"))
self._query = (_URL(query=self._url.query).asURI().asText()
.encode("ascii"))[1:]
self._fragment = self._url.fragment.encode("ascii")
return self
def pathList(self, unquote=False, copy=True):
"""
Split this URL's path into its components.
@param unquote: whether to remove %-encoding from the returned strings.
@param copy: (ignored, do not use)
@return: The components of C{self.path}
@rtype: L{list} of L{bytes}
"""
segments = self._url.path
mapper = lambda x: x.encode("ascii")
if unquote:
mapper = (lambda x, m=mapper: m(urlunquote(x)))
return [b''] + [mapper(segment) for segment in segments]
@classmethod
def fromString(klass, url):
"""
Make a L{URLPath} from a L{str} or L{unicode}.
@param url: A L{str} representation of a URL.
@type url: L{str} or L{unicode}.
@return: a new L{URLPath} derived from the given string.
@rtype: L{URLPath}
"""
if not isinstance(url, (str, unicode)):
raise ValueError("'url' must be a str or unicode")
if isinstance(url, bytes):
# On Python 2, accepting 'str' (for compatibility) means we might
# get 'bytes'. On py3, this will not work with bytes due to the
# check above.
return klass.fromBytes(url)
return klass._fromURL(_URL.fromText(url))
@classmethod
def fromBytes(klass, url):
"""
Make a L{URLPath} from a L{bytes}.
@param url: A L{bytes} representation of a URL.
@type url: L{bytes}
@return: a new L{URLPath} derived from the given L{bytes}.
@rtype: L{URLPath}
@since: 15.4
"""
if not isinstance(url, bytes):
raise ValueError("'url' must be bytes")
quoted = urlquote(url, safe=_allascii)
if isinstance(quoted, bytes):
# This will only be bytes on python 2, where we can transform it
# into unicode. On python 3, urlquote always returns str.
quoted = quoted.decode("ascii")
return klass.fromString(quoted)
@classmethod
def fromRequest(klass, request):
"""
Make a L{URLPath} from a L{twisted.web.http.Request}.
@param request: A L{twisted.web.http.Request} to make the L{URLPath}
from.
@return: a new L{URLPath} derived from the given request.
@rtype: L{URLPath}
"""
return klass.fromBytes(request.prePathURL())
def _mod(self, newURL, keepQuery):
"""
Return a modified copy of C{self} using C{newURL}, keeping the query
string if C{keepQuery} is C{True}.
@param newURL: a L{URL} to derive a new L{URLPath} from
@type newURL: L{URL}
@param keepQuery: if C{True}, preserve the query parameters from
C{self} on the new L{URLPath}; if C{False}, give the new L{URLPath}
no query parameters.
@type keepQuery: L{bool}
@return: a new L{URLPath}
"""
return self._fromURL(newURL.replace(
fragment=u'', query=self._url.query if keepQuery else ()
))
def sibling(self, path, keepQuery=False):
"""
Get the sibling of the current L{URLPath}. A sibling is a file which
is in the same directory as the current file.
@param path: The path of the sibling.
@type path: L{bytes}
@param keepQuery: Whether to keep the query parameters on the returned
L{URLPath}.
        @type keepQuery: L{bool}
@return: a new L{URLPath}
"""
return self._mod(self._url.sibling(path.decode("ascii")), keepQuery)
def child(self, path, keepQuery=False):
"""
Get the child of this L{URLPath}.
@param path: The path of the child.
@type path: L{bytes}
@param keepQuery: Whether to keep the query parameters on the returned
L{URLPath}.
        @type keepQuery: L{bool}
@return: a new L{URLPath}
"""
return self._mod(self._url.child(path.decode("ascii")), keepQuery)
def parent(self, keepQuery=False):
"""
Get the parent directory of this L{URLPath}.
@param keepQuery: Whether to keep the query parameters on the returned
L{URLPath}.
        @type keepQuery: L{bool}
@return: a new L{URLPath}
"""
return self._mod(self._url.click(u".."), keepQuery)
def here(self, keepQuery=False):
"""
Get the current directory of this L{URLPath}.
@param keepQuery: Whether to keep the query parameters on the returned
L{URLPath}.
        @type keepQuery: L{bool}
@return: a new L{URLPath}
"""
return self._mod(self._url.click(u"."), keepQuery)
def click(self, st):
"""
Return a path which is the URL where a browser would presumably take
you if you clicked on a link with an HREF as given.
@param st: A relative URL, to be interpreted relative to C{self} as the
base URL.
@type st: L{bytes}
@return: a new L{URLPath}
"""
return self._fromURL(self._url.click(st.decode("ascii")))
def __str__(self):
"""
The L{str} of a L{URLPath} is its URL text.
"""
return nativeString(self._url.asURI().asText())
def __repr__(self):
"""
The L{repr} of a L{URLPath} is an eval-able expression which will
construct a similar L{URLPath}.
"""
return ('URLPath(scheme=%r, netloc=%r, path=%r, query=%r, fragment=%r)'
% (self.scheme, self.netloc, self.path, self.query,
self.fragment))
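# A short usage sketch (editorial addition, not part of the original module);
# the URLs in the comments show the expected textual form of each result.
#
#   path = URLPath.fromString("http://example.com/foo/bar?a=1")
#   path.child(b"baz")      # http://example.com/foo/bar/baz
#   path.sibling(b"quux")   # http://example.com/foo/quux
#   path.parent()           # http://example.com/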
| mit | 270,919,456,766,223,170 | 29.897959 | 79 | 0.574637 | false |
dpac-vlsi/SynchroTrace | src/dev/x86/Cmos.py | 11 | 2041 | # Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.proxy import *
from Device import BasicPioDevice
from X86IntPin import X86IntSourcePin
class Cmos(BasicPioDevice):
type = 'Cmos'
cxx_class='X86ISA::Cmos'
time = Param.Time('01/01/2012',
"System time to use ('Now' for actual time)")
pio_latency = Param.Latency('1ns', "Programmed IO latency in simticks")
int_pin = Param.X86IntSourcePin(X86IntSourcePin(),
'Pin to signal RTC alarm interrupts to')
| bsd-3-clause | -6,585,606,045,570,089,000 | 48.780488 | 75 | 0.771681 | false |
awkspace/ansible | test/runner/lib/integration/__init__.py | 11 | 7030 | """Ansible integration test infrastructure."""
from __future__ import absolute_import, print_function
import contextlib
import os
import shutil
import tempfile
from lib.target import (
analyze_integration_target_dependencies,
walk_integration_targets,
)
from lib.config import (
NetworkIntegrationConfig,
PosixIntegrationConfig,
WindowsIntegrationConfig,
)
from lib.util import (
ApplicationError,
display,
make_dirs,
)
from lib.cache import (
CommonCache,
)
def generate_dependency_map(integration_targets):
"""
:type integration_targets: list[IntegrationTarget]
:rtype: dict[str, set[IntegrationTarget]]
"""
targets_dict = dict((target.name, target) for target in integration_targets)
target_dependencies = analyze_integration_target_dependencies(integration_targets)
dependency_map = {}
invalid_targets = set()
for dependency, dependents in target_dependencies.items():
dependency_target = targets_dict.get(dependency)
if not dependency_target:
invalid_targets.add(dependency)
continue
for dependent in dependents:
if dependent not in dependency_map:
dependency_map[dependent] = set()
dependency_map[dependent].add(dependency_target)
if invalid_targets:
raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets)))
return dependency_map
def get_files_needed(target_dependencies):
"""
:type target_dependencies: list[IntegrationTarget]
:rtype: list[str]
"""
files_needed = []
for target_dependency in target_dependencies:
files_needed += target_dependency.needs_file
files_needed = sorted(set(files_needed))
invalid_paths = [path for path in files_needed if not os.path.isfile(path)]
if invalid_paths:
raise ApplicationError('Invalid "needs/file/*" aliases:\n%s' % '\n'.join(invalid_paths))
return files_needed
@contextlib.contextmanager
def integration_test_environment(args, target, inventory_path):
"""
:type args: IntegrationConfig
:type target: IntegrationTarget
:type inventory_path: str
"""
vars_file = 'integration_config.yml'
if args.no_temp_workdir or 'no/temp_workdir/' in target.aliases:
display.warning('Disabling the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
integration_dir = 'test/integration'
ansible_config = os.path.join(integration_dir, '%s.cfg' % args.command)
inventory_name = os.path.relpath(inventory_path, integration_dir)
if '/' in inventory_name:
inventory_name = inventory_path
yield IntegrationEnvironment(integration_dir, inventory_name, ansible_config, vars_file)
return
root_temp_dir = os.path.expanduser('~/.ansible/test/tmp')
prefix = '%s-' % target.name
suffix = u'-\u00c5\u00d1\u015a\u00cc\u03b2\u0141\u00c8'
if args.no_temp_unicode or 'no/temp_unicode/' in target.aliases:
display.warning('Disabling unicode in the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
suffix = '-ansible'
if isinstance('', bytes):
suffix = suffix.encode('utf-8')
if args.explain:
temp_dir = os.path.join(root_temp_dir, '%stemp%s' % (prefix, suffix))
else:
make_dirs(root_temp_dir)
temp_dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
try:
display.info('Preparing temporary directory: %s' % temp_dir, verbosity=2)
inventory_names = {
PosixIntegrationConfig: 'inventory',
WindowsIntegrationConfig: 'inventory.winrm',
NetworkIntegrationConfig: 'inventory.networking',
}
inventory_name = inventory_names[type(args)]
cache = IntegrationCache(args)
target_dependencies = sorted([target] + list(cache.dependency_map.get(target.name, set())))
files_needed = get_files_needed(target_dependencies)
integration_dir = os.path.join(temp_dir, 'test/integration')
ansible_config = os.path.join(integration_dir, '%s.cfg' % args.command)
file_copies = [
('test/integration/%s.cfg' % args.command, ansible_config),
('test/integration/integration_config.yml', os.path.join(integration_dir, vars_file)),
(inventory_path, os.path.join(integration_dir, inventory_name)),
]
file_copies += [(path, os.path.join(temp_dir, path)) for path in files_needed]
directory_copies = [
(os.path.join('test/integration/targets', target.name), os.path.join(integration_dir, 'targets', target.name)) for target in target_dependencies
]
inventory_dir = os.path.dirname(inventory_path)
host_vars_dir = os.path.join(inventory_dir, 'host_vars')
group_vars_dir = os.path.join(inventory_dir, 'group_vars')
if os.path.isdir(host_vars_dir):
directory_copies.append((host_vars_dir, os.path.join(integration_dir, os.path.basename(host_vars_dir))))
if os.path.isdir(group_vars_dir):
directory_copies.append((group_vars_dir, os.path.join(integration_dir, os.path.basename(group_vars_dir))))
directory_copies = sorted(set(directory_copies))
file_copies = sorted(set(file_copies))
if not args.explain:
make_dirs(integration_dir)
for dir_src, dir_dst in directory_copies:
display.info('Copying %s/ to %s/' % (dir_src, dir_dst), verbosity=2)
if not args.explain:
shutil.copytree(dir_src, dir_dst, symlinks=True)
for file_src, file_dst in file_copies:
display.info('Copying %s to %s' % (file_src, file_dst), verbosity=2)
if not args.explain:
make_dirs(os.path.dirname(file_dst))
shutil.copy2(file_src, file_dst)
yield IntegrationEnvironment(integration_dir, inventory_name, ansible_config, vars_file)
finally:
if not args.explain:
shutil.rmtree(temp_dir)
class IntegrationEnvironment(object):
"""Details about the integration environment."""
def __init__(self, integration_dir, inventory_path, ansible_config, vars_file):
self.integration_dir = integration_dir
self.inventory_path = inventory_path
self.ansible_config = ansible_config
self.vars_file = vars_file
class IntegrationCache(CommonCache):
"""Integration cache."""
@property
def integration_targets(self):
"""
:rtype: list[IntegrationTarget]
"""
return self.get('integration_targets', lambda: list(walk_integration_targets()))
@property
def dependency_map(self):
"""
:rtype: dict[str, set[IntegrationTarget]]
"""
return self.get('dependency_map', lambda: generate_dependency_map(self.integration_targets))
| gpl-3.0 | -1,022,464,492,585,938,700 | 31.850467 | 156 | 0.655903 | false |
cocrawler/cocrawler | scripts/aiohttp-fetch.py | 1 | 2534 | '''
Fetches some urls using aiohttp. Also serves as a minimum example of using aiohttp.
Good examples:
https://www.enterprisecarshare.com/robots.txt -- 302 redir lacking Location: raises RuntimeError
'''
import sys
from traceback import print_exc
import asyncio
import aiohttp
import aiohttp.connector
async def main(urls):
connector = aiohttp.connector.TCPConnector(use_dns_cache=True)
session = aiohttp.ClientSession(connector=connector)
for url in urls:
if not url.startswith('http'):
url = 'http://' + url
print(url, '\n')
try:
response = await session.get(url, allow_redirects=True)
except aiohttp.client_exceptions.ClientConnectorError as e:
print('saw connect error for', url, ':', e, file=sys.stderr)
continue
except Exception as e:
print('Saw an exception thrown by session.get:')
print_exc()
print('')
continue
#print('dns:')
#for k, v in connector.cached_hosts.items():
# print(' ', k) # or k[0]?
# for rec in v:
# print(' ', rec.get('host'))
print('')
if str(response.url) != url:
print('final url:', str(response.url))
print('')
print('final request headers:')
for k, v in response.request_info.headers.items():
print(k+':', v)
print('')
if response.history:
print('response history: response and headers:')
for h in response.history:
print(' ', repr(h))
print('')
print('response history urls:')
response_urls = [str(h.url) for h in response.history]
response_urls.append(str(response.url))
if response_urls:
print(' ', '\n '.join(response_urls))
print('')
print('response headers:')
for k, v in response.raw_headers:
line = k+b': '+v
print(' ', line.decode(errors='ignore'))
print('')
try:
text = await response.text(errors='ignore')
#print(text)
pass
except Exception:
print_exc()
await session.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(main(sys.argv[1:]))
# vodoo recommended by advanced aiohttp docs for graceful shutdown
# https://github.com/aio-libs/aiohttp/issues/1925
loop.run_until_complete(asyncio.sleep(0.250))
loop.close()
| apache-2.0 | -435,381,970,558,361,100 | 27.47191 | 96 | 0.56472 | false |
nysan/yocto-autobuilder | lib/python2.6/site-packages/Twisted-11.0.0-py2.6-linux-x86_64.egg/twisted/internet/base.py | 18 | 41263 | # -*- test-case-name: twisted.test.test_internet -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Very basic functionality for a Reactor implementation.
"""
import socket # needed only for sync-dns
from zope.interface import implements, classImplements
import sys
import warnings
from heapq import heappush, heappop, heapify
import traceback
from twisted.python.compat import set
from twisted.python.util import unsignedID
from twisted.internet.interfaces import IReactorCore, IReactorTime, IReactorThreads
from twisted.internet.interfaces import IResolverSimple, IReactorPluggableResolver
from twisted.internet.interfaces import IConnector, IDelayedCall
from twisted.internet import fdesc, main, error, abstract, defer, threads
from twisted.python import log, failure, reflect
from twisted.python.runtime import seconds as runtimeSeconds, platform
from twisted.internet.defer import Deferred, DeferredList
from twisted.persisted import styles
# This import is for side-effects! Even if you don't see any code using it
# in this module, don't delete it.
from twisted.python import threadable
class DelayedCall(styles.Ephemeral):
implements(IDelayedCall)
# enable .debug to record creator call stack, and it will be logged if
# an exception occurs while the function is being run
debug = False
_str = None
def __init__(self, time, func, args, kw, cancel, reset,
seconds=runtimeSeconds):
"""
@param time: Seconds from the epoch at which to call C{func}.
@param func: The callable to call.
@param args: The positional arguments to pass to the callable.
@param kw: The keyword arguments to pass to the callable.
@param cancel: A callable which will be called with this
DelayedCall before cancellation.
@param reset: A callable which will be called with this
DelayedCall after changing this DelayedCall's scheduled
execution time. The callable should adjust any necessary
scheduling details to ensure this DelayedCall is invoked
at the new appropriate time.
@param seconds: If provided, a no-argument callable which will be
used to determine the current time any time that information is
needed.
"""
self.time, self.func, self.args, self.kw = time, func, args, kw
self.resetter = reset
self.canceller = cancel
self.seconds = seconds
self.cancelled = self.called = 0
self.delayed_time = 0
if self.debug:
self.creator = traceback.format_stack()[:-2]
def getTime(self):
"""Return the time at which this call will fire
@rtype: C{float}
@return: The number of seconds after the epoch at which this call is
scheduled to be made.
"""
return self.time + self.delayed_time
def cancel(self):
"""Unschedule this call
@raise AlreadyCancelled: Raised if this call has already been
unscheduled.
@raise AlreadyCalled: Raised if this call has already been made.
"""
if self.cancelled:
raise error.AlreadyCancelled
elif self.called:
raise error.AlreadyCalled
else:
self.canceller(self)
self.cancelled = 1
if self.debug:
self._str = str(self)
del self.func, self.args, self.kw
def reset(self, secondsFromNow):
"""Reschedule this call for a different time
@type secondsFromNow: C{float}
@param secondsFromNow: The number of seconds from the time of the
C{reset} call at which this call will be scheduled.
@raise AlreadyCancelled: Raised if this call has been cancelled.
@raise AlreadyCalled: Raised if this call has already been made.
"""
if self.cancelled:
raise error.AlreadyCancelled
elif self.called:
raise error.AlreadyCalled
else:
newTime = self.seconds() + secondsFromNow
if newTime < self.time:
self.delayed_time = 0
self.time = newTime
self.resetter(self)
else:
self.delayed_time = newTime - self.time
def delay(self, secondsLater):
"""Reschedule this call for a later time
@type secondsLater: C{float}
@param secondsLater: The number of seconds after the originally
scheduled time for which to reschedule this call.
@raise AlreadyCancelled: Raised if this call has been cancelled.
@raise AlreadyCalled: Raised if this call has already been made.
"""
if self.cancelled:
raise error.AlreadyCancelled
elif self.called:
raise error.AlreadyCalled
else:
self.delayed_time += secondsLater
if self.delayed_time < 0:
self.activate_delay()
self.resetter(self)
def activate_delay(self):
self.time += self.delayed_time
self.delayed_time = 0
def active(self):
"""Determine whether this call is still pending
@rtype: C{bool}
@return: True if this call has not yet been made or cancelled,
False otherwise.
"""
return not (self.cancelled or self.called)
def __le__(self, other):
"""
Implement C{<=} operator between two L{DelayedCall} instances.
Comparison is based on the C{time} attribute (unadjusted by the
delayed time).
"""
return self.time <= other.time
def __lt__(self, other):
"""
Implement C{<} operator between two L{DelayedCall} instances.
Comparison is based on the C{time} attribute (unadjusted by the
delayed time).
"""
return self.time < other.time
def __str__(self):
if self._str is not None:
return self._str
if hasattr(self, 'func'):
if hasattr(self.func, 'func_name'):
func = self.func.func_name
if hasattr(self.func, 'im_class'):
func = self.func.im_class.__name__ + '.' + func
else:
func = reflect.safe_repr(self.func)
else:
func = None
now = self.seconds()
L = ["<DelayedCall 0x%x [%ss] called=%s cancelled=%s" % (
unsignedID(self), self.time - now, self.called,
self.cancelled)]
if func is not None:
L.extend((" ", func, "("))
if self.args:
L.append(", ".join([reflect.safe_repr(e) for e in self.args]))
if self.kw:
L.append(", ")
if self.kw:
L.append(", ".join(['%s=%s' % (k, reflect.safe_repr(v)) for (k, v) in self.kw.iteritems()]))
L.append(")")
if self.debug:
L.append("\n\ntraceback at creation: \n\n%s" % (' '.join(self.creator)))
L.append('>')
return "".join(L)
class ThreadedResolver(object):
"""
L{ThreadedResolver} uses a reactor, a threadpool, and
L{socket.gethostbyname} to perform name lookups without blocking the
    reactor thread. It also supports timeouts independently from whatever
timeout logic L{socket.gethostbyname} might have.
    @ivar reactor: The reactor whose threadpool will be used to call
        L{socket.gethostbyname} and to whose I/O thread the result will be
        delivered.
"""
implements(IResolverSimple)
def __init__(self, reactor):
self.reactor = reactor
self._runningQueries = {}
def _fail(self, name, err):
err = error.DNSLookupError("address %r not found: %s" % (name, err))
return failure.Failure(err)
def _cleanup(self, name, lookupDeferred):
userDeferred, cancelCall = self._runningQueries[lookupDeferred]
del self._runningQueries[lookupDeferred]
userDeferred.errback(self._fail(name, "timeout error"))
def _checkTimeout(self, result, name, lookupDeferred):
try:
userDeferred, cancelCall = self._runningQueries[lookupDeferred]
except KeyError:
pass
else:
del self._runningQueries[lookupDeferred]
cancelCall.cancel()
if isinstance(result, failure.Failure):
userDeferred.errback(self._fail(name, result.getErrorMessage()))
else:
userDeferred.callback(result)
def getHostByName(self, name, timeout = (1, 3, 11, 45)):
"""
See L{twisted.internet.interfaces.IResolverSimple.getHostByName}.
Note that the elements of C{timeout} are summed and the result is used
as a timeout for the lookup. Any intermediate timeout or retry logic
is left up to the platform via L{socket.gethostbyname}.
"""
if timeout:
timeoutDelay = sum(timeout)
else:
timeoutDelay = 60
userDeferred = defer.Deferred()
lookupDeferred = threads.deferToThreadPool(
self.reactor, self.reactor.getThreadPool(),
socket.gethostbyname, name)
cancelCall = self.reactor.callLater(
timeoutDelay, self._cleanup, name, lookupDeferred)
self._runningQueries[lookupDeferred] = (userDeferred, cancelCall)
lookupDeferred.addBoth(self._checkTimeout, name, lookupDeferred)
return userDeferred
class BlockingResolver:
implements(IResolverSimple)
def getHostByName(self, name, timeout = (1, 3, 11, 45)):
try:
address = socket.gethostbyname(name)
except socket.error:
msg = "address %r not found" % (name,)
err = error.DNSLookupError(msg)
return defer.fail(err)
else:
return defer.succeed(address)
class _ThreePhaseEvent(object):
"""
Collection of callables (with arguments) which can be invoked as a group in
a particular order.
This provides the underlying implementation for the reactor's system event
triggers. An instance of this class tracks triggers for all phases of a
single type of event.
@ivar before: A list of the before-phase triggers containing three-tuples
of a callable, a tuple of positional arguments, and a dict of keyword
arguments
@ivar finishedBefore: A list of the before-phase triggers which have
already been executed. This is only populated in the C{'BEFORE'} state.
@ivar during: A list of the during-phase triggers containing three-tuples
of a callable, a tuple of positional arguments, and a dict of keyword
arguments
@ivar after: A list of the after-phase triggers containing three-tuples
of a callable, a tuple of positional arguments, and a dict of keyword
arguments
@ivar state: A string indicating what is currently going on with this
object. One of C{'BASE'} (for when nothing in particular is happening;
this is the initial value), C{'BEFORE'} (when the before-phase triggers
are in the process of being executed).
"""
def __init__(self):
self.before = []
self.during = []
self.after = []
self.state = 'BASE'
def addTrigger(self, phase, callable, *args, **kwargs):
"""
        Add a trigger to the indicated phase.
@param phase: One of C{'before'}, C{'during'}, or C{'after'}.
@param callable: An object to be called when this event is triggered.
@param *args: Positional arguments to pass to C{callable}.
@param **kwargs: Keyword arguments to pass to C{callable}.
@return: An opaque handle which may be passed to L{removeTrigger} to
reverse the effects of calling this method.
"""
if phase not in ('before', 'during', 'after'):
raise KeyError("invalid phase")
getattr(self, phase).append((callable, args, kwargs))
return phase, callable, args, kwargs
def removeTrigger(self, handle):
"""
Remove a previously added trigger callable.
@param handle: An object previously returned by L{addTrigger}. The
trigger added by that call will be removed.
@raise ValueError: If the trigger associated with C{handle} has already
been removed or if C{handle} is not a valid handle.
"""
return getattr(self, 'removeTrigger_' + self.state)(handle)
def removeTrigger_BASE(self, handle):
"""
Just try to remove the trigger.
@see: removeTrigger
"""
try:
phase, callable, args, kwargs = handle
except (TypeError, ValueError):
raise ValueError("invalid trigger handle")
else:
if phase not in ('before', 'during', 'after'):
raise KeyError("invalid phase")
getattr(self, phase).remove((callable, args, kwargs))
def removeTrigger_BEFORE(self, handle):
"""
Remove the trigger if it has yet to be executed, otherwise emit a
warning that in the future an exception will be raised when removing an
already-executed trigger.
@see: removeTrigger
"""
phase, callable, args, kwargs = handle
if phase != 'before':
return self.removeTrigger_BASE(handle)
if (callable, args, kwargs) in self.finishedBefore:
warnings.warn(
"Removing already-fired system event triggers will raise an "
"exception in a future version of Twisted.",
category=DeprecationWarning,
stacklevel=3)
else:
self.removeTrigger_BASE(handle)
def fireEvent(self):
"""
Call the triggers added to this event.
"""
self.state = 'BEFORE'
self.finishedBefore = []
beforeResults = []
while self.before:
callable, args, kwargs = self.before.pop(0)
self.finishedBefore.append((callable, args, kwargs))
try:
result = callable(*args, **kwargs)
except:
log.err()
else:
if isinstance(result, Deferred):
beforeResults.append(result)
DeferredList(beforeResults).addCallback(self._continueFiring)
def _continueFiring(self, ignored):
"""
Call the during and after phase triggers for this event.
"""
self.state = 'BASE'
self.finishedBefore = []
for phase in self.during, self.after:
while phase:
callable, args, kwargs = phase.pop(0)
try:
callable(*args, **kwargs)
except:
log.err()
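# Illustrative sketch (added comment, not part of the original module) of how
# the three-phase event machinery above is driven; `evt` and `mark` are
# hypothetical names:
#
#   evt = _ThreePhaseEvent()
#   handle = evt.addTrigger('before', mark, 'before')
#   evt.addTrigger('during', mark, 'during')
#   evt.addTrigger('after', mark, 'after')
#   evt.fireEvent()   # runs the before-phase triggers (waiting on any
#                     # Deferreds they return), then the during- and
#                     # after-phase triggers in order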
class ReactorBase(object):
"""
Default base class for Reactors.
@type _stopped: C{bool}
@ivar _stopped: A flag which is true between paired calls to C{reactor.run}
and C{reactor.stop}. This should be replaced with an explicit state
machine.
@type _justStopped: C{bool}
@ivar _justStopped: A flag which is true between the time C{reactor.stop}
is called and the time the shutdown system event is fired. This is
used to determine whether that event should be fired after each
iteration through the mainloop. This should be replaced with an
explicit state machine.
@type _started: C{bool}
@ivar _started: A flag which is true from the time C{reactor.run} is called
until the time C{reactor.run} returns. This is used to prevent calls
to C{reactor.run} on a running reactor. This should be replaced with
an explicit state machine.
@ivar running: See L{IReactorCore.running}
"""
implements(IReactorCore, IReactorTime, IReactorPluggableResolver)
_stopped = True
installed = False
usingThreads = False
resolver = BlockingResolver()
__name__ = "twisted.internet.reactor"
def __init__(self):
self.threadCallQueue = []
self._eventTriggers = {}
self._pendingTimedCalls = []
self._newTimedCalls = []
self._cancellations = 0
self.running = False
self._started = False
self._justStopped = False
self._startedBefore = False
# reactor internal readers, e.g. the waker.
self._internalReaders = set()
self.waker = None
# Arrange for the running attribute to change to True at the right time
# and let a subclass possibly do other things at that time (eg install
# signal handlers).
self.addSystemEventTrigger(
'during', 'startup', self._reallyStartRunning)
self.addSystemEventTrigger('during', 'shutdown', self.crash)
self.addSystemEventTrigger('during', 'shutdown', self.disconnectAll)
if platform.supportsThreads():
self._initThreads()
self.installWaker()
# override in subclasses
_lock = None
def installWaker(self):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement installWaker")
def installResolver(self, resolver):
assert IResolverSimple.providedBy(resolver)
oldResolver = self.resolver
self.resolver = resolver
return oldResolver
def wakeUp(self):
"""
Wake up the event loop.
"""
if self.waker:
self.waker.wakeUp()
# if the waker isn't installed, the reactor isn't running, and
# therefore doesn't need to be woken up
def doIteration(self, delay):
"""
Do one iteration over the readers and writers which have been added.
"""
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement doIteration")
def addReader(self, reader):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement addReader")
def addWriter(self, writer):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement addWriter")
def removeReader(self, reader):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement removeReader")
def removeWriter(self, writer):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement removeWriter")
def removeAll(self):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement removeAll")
def getReaders(self):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement getReaders")
def getWriters(self):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement getWriters")
def resolve(self, name, timeout = (1, 3, 11, 45)):
"""Return a Deferred that will resolve a hostname.
"""
if not name:
# XXX - This is *less than* '::', and will screw up IPv6 servers
return defer.succeed('0.0.0.0')
if abstract.isIPAddress(name):
return defer.succeed(name)
return self.resolver.getHostByName(name, timeout)
# Installation.
# IReactorCore
def stop(self):
"""
See twisted.internet.interfaces.IReactorCore.stop.
"""
if self._stopped:
raise error.ReactorNotRunning(
"Can't stop reactor that isn't running.")
self._stopped = True
self._justStopped = True
self._startedBefore = True
def crash(self):
"""
See twisted.internet.interfaces.IReactorCore.crash.
Reset reactor state tracking attributes and re-initialize certain
state-transition helpers which were set up in C{__init__} but later
destroyed (through use).
"""
self._started = False
self.running = False
self.addSystemEventTrigger(
'during', 'startup', self._reallyStartRunning)
def sigInt(self, *args):
"""Handle a SIGINT interrupt.
"""
log.msg("Received SIGINT, shutting down.")
self.callFromThread(self.stop)
def sigBreak(self, *args):
"""Handle a SIGBREAK interrupt.
"""
log.msg("Received SIGBREAK, shutting down.")
self.callFromThread(self.stop)
def sigTerm(self, *args):
"""Handle a SIGTERM interrupt.
"""
log.msg("Received SIGTERM, shutting down.")
self.callFromThread(self.stop)
def disconnectAll(self):
"""Disconnect every reader, and writer in the system.
"""
selectables = self.removeAll()
for reader in selectables:
log.callWithLogger(reader,
reader.connectionLost,
failure.Failure(main.CONNECTION_LOST))
def iterate(self, delay=0):
"""See twisted.internet.interfaces.IReactorCore.iterate.
"""
self.runUntilCurrent()
self.doIteration(delay)
def fireSystemEvent(self, eventType):
"""See twisted.internet.interfaces.IReactorCore.fireSystemEvent.
"""
event = self._eventTriggers.get(eventType)
if event is not None:
event.fireEvent()
def addSystemEventTrigger(self, _phase, _eventType, _f, *args, **kw):
"""See twisted.internet.interfaces.IReactorCore.addSystemEventTrigger.
"""
assert callable(_f), "%s is not callable" % _f
if _eventType not in self._eventTriggers:
self._eventTriggers[_eventType] = _ThreePhaseEvent()
return (_eventType, self._eventTriggers[_eventType].addTrigger(
_phase, _f, *args, **kw))
def removeSystemEventTrigger(self, triggerID):
"""See twisted.internet.interfaces.IReactorCore.removeSystemEventTrigger.
"""
eventType, handle = triggerID
self._eventTriggers[eventType].removeTrigger(handle)
def callWhenRunning(self, _callable, *args, **kw):
"""See twisted.internet.interfaces.IReactorCore.callWhenRunning.
"""
if self.running:
_callable(*args, **kw)
else:
return self.addSystemEventTrigger('after', 'startup',
_callable, *args, **kw)
def startRunning(self):
"""
Method called when reactor starts: do some initialization and fire
startup events.
Don't call this directly, call reactor.run() instead: it should take
care of calling this.
This method is somewhat misnamed. The reactor will not necessarily be
in the running state by the time this method returns. The only
guarantee is that it will be on its way to the running state.
"""
if self._started:
raise error.ReactorAlreadyRunning()
if self._startedBefore:
raise error.ReactorNotRestartable()
self._started = True
self._stopped = False
threadable.registerAsIOThread()
self.fireSystemEvent('startup')
def _reallyStartRunning(self):
"""
Method called to transition to the running state. This should happen
in the I{during startup} event trigger phase.
"""
self.running = True
# IReactorTime
seconds = staticmethod(runtimeSeconds)
def callLater(self, _seconds, _f, *args, **kw):
"""See twisted.internet.interfaces.IReactorTime.callLater.
"""
assert callable(_f), "%s is not callable" % _f
assert sys.maxint >= _seconds >= 0, \
"%s is not greater than or equal to 0 seconds" % (_seconds,)
tple = DelayedCall(self.seconds() + _seconds, _f, args, kw,
self._cancelCallLater,
self._moveCallLaterSooner,
seconds=self.seconds)
self._newTimedCalls.append(tple)
return tple
def _moveCallLaterSooner(self, tple):
# Linear time find: slow.
heap = self._pendingTimedCalls
try:
pos = heap.index(tple)
# Move elt up the heap until it rests at the right place.
elt = heap[pos]
while pos != 0:
parent = (pos-1) // 2
if heap[parent] <= elt:
break
# move parent down
heap[pos] = heap[parent]
pos = parent
heap[pos] = elt
except ValueError:
# element was not found in heap - oh well...
pass
def _cancelCallLater(self, tple):
self._cancellations+=1
def getDelayedCalls(self):
"""Return all the outstanding delayed calls in the system.
They are returned in no particular order.
This method is not efficient -- it is really only meant for
test cases."""
return [x for x in (self._pendingTimedCalls + self._newTimedCalls) if not x.cancelled]
def _insertNewDelayedCalls(self):
for call in self._newTimedCalls:
if call.cancelled:
self._cancellations-=1
else:
call.activate_delay()
heappush(self._pendingTimedCalls, call)
self._newTimedCalls = []
def timeout(self):
# insert new delayed calls to make sure to include them in timeout value
self._insertNewDelayedCalls()
if not self._pendingTimedCalls:
return None
return max(0, self._pendingTimedCalls[0].time - self.seconds())
def runUntilCurrent(self):
"""Run all pending timed calls.
"""
if self.threadCallQueue:
# Keep track of how many calls we actually make, as we're
# making them, in case another call is added to the queue
# while we're in this loop.
count = 0
total = len(self.threadCallQueue)
for (f, a, kw) in self.threadCallQueue:
try:
f(*a, **kw)
except:
log.err()
count += 1
if count == total:
break
del self.threadCallQueue[:count]
if self.threadCallQueue:
self.wakeUp()
# insert new delayed calls now
self._insertNewDelayedCalls()
now = self.seconds()
while self._pendingTimedCalls and (self._pendingTimedCalls[0].time <= now):
call = heappop(self._pendingTimedCalls)
if call.cancelled:
self._cancellations-=1
continue
if call.delayed_time > 0:
call.activate_delay()
heappush(self._pendingTimedCalls, call)
continue
try:
call.called = 1
call.func(*call.args, **call.kw)
except:
log.deferr()
if hasattr(call, "creator"):
e = "\n"
e += " C: previous exception occurred in " + \
"a DelayedCall created here:\n"
e += " C:"
e += "".join(call.creator).rstrip().replace("\n","\n C:")
e += "\n"
log.msg(e)
if (self._cancellations > 50 and
self._cancellations > len(self._pendingTimedCalls) >> 1):
self._cancellations = 0
self._pendingTimedCalls = [x for x in self._pendingTimedCalls
if not x.cancelled]
heapify(self._pendingTimedCalls)
if self._justStopped:
self._justStopped = False
self.fireSystemEvent("shutdown")
# IReactorProcess
def _checkProcessArgs(self, args, env):
"""
Check for valid arguments and environment to spawnProcess.
@return: A two element tuple giving values to use when creating the
process. The first element of the tuple is a C{list} of C{str}
giving the values for argv of the child process. The second element
of the tuple is either C{None} if C{env} was C{None} or a C{dict}
mapping C{str} environment keys to C{str} environment values.
"""
# Any unicode string which Python would successfully implicitly
# encode to a byte string would have worked before these explicit
# checks were added. Anything which would have failed with a
# UnicodeEncodeError during that implicit encoding step would have
# raised an exception in the child process and that would have been
# a pain in the butt to debug.
#
# So, we will explicitly attempt the same encoding which Python
# would implicitly do later. If it fails, we will report an error
# without ever spawning a child process. If it succeeds, we'll save
# the result so that Python doesn't need to do it implicitly later.
#
# For any unicode which we can actually encode, we'll also issue a
# deprecation warning, because no one should be passing unicode here
# anyway.
#
# -exarkun
defaultEncoding = sys.getdefaultencoding()
# Common check function
def argChecker(arg):
"""
Return either a str or None. If the given value is not
allowable for some reason, None is returned. Otherwise, a
possibly different object which should be used in place of arg
is returned. This forces unicode encoding to happen now, rather
than implicitly later.
"""
if isinstance(arg, unicode):
try:
arg = arg.encode(defaultEncoding)
except UnicodeEncodeError:
return None
warnings.warn(
"Argument strings and environment keys/values passed to "
"reactor.spawnProcess should be str, not unicode.",
category=DeprecationWarning,
stacklevel=4)
if isinstance(arg, str) and '\0' not in arg:
return arg
return None
# Make a few tests to check input validity
if not isinstance(args, (tuple, list)):
raise TypeError("Arguments must be a tuple or list")
outputArgs = []
for arg in args:
arg = argChecker(arg)
if arg is None:
raise TypeError("Arguments contain a non-string value")
else:
outputArgs.append(arg)
outputEnv = None
if env is not None:
outputEnv = {}
for key, val in env.iteritems():
key = argChecker(key)
if key is None:
raise TypeError("Environment contains a non-string key")
val = argChecker(val)
if val is None:
raise TypeError("Environment contains a non-string value")
outputEnv[key] = val
return outputArgs, outputEnv
# IReactorThreads
if platform.supportsThreads():
threadpool = None
# ID of the trigger starting the threadpool
_threadpoolStartupID = None
# ID of the trigger stopping the threadpool
threadpoolShutdownID = None
def _initThreads(self):
self.usingThreads = True
self.resolver = ThreadedResolver(self)
def callFromThread(self, f, *args, **kw):
"""
See L{twisted.internet.interfaces.IReactorThreads.callFromThread}.
"""
assert callable(f), "%s is not callable" % (f,)
# lists are thread-safe in CPython, but not in Jython
# this is probably a bug in Jython, but until fixed this code
# won't work in Jython.
self.threadCallQueue.append((f, args, kw))
self.wakeUp()
def _initThreadPool(self):
"""
Create the threadpool accessible with callFromThread.
"""
from twisted.python import threadpool
self.threadpool = threadpool.ThreadPool(
0, 10, 'twisted.internet.reactor')
self._threadpoolStartupID = self.callWhenRunning(
self.threadpool.start)
self.threadpoolShutdownID = self.addSystemEventTrigger(
'during', 'shutdown', self._stopThreadPool)
def _uninstallHandler(self):
pass
def _stopThreadPool(self):
"""
Stop the reactor threadpool. This method is only valid if there
is currently a threadpool (created by L{_initThreadPool}). It
is not intended to be called directly; instead, it will be
called by a shutdown trigger created in L{_initThreadPool}.
"""
triggers = [self._threadpoolStartupID, self.threadpoolShutdownID]
for trigger in filter(None, triggers):
try:
self.removeSystemEventTrigger(trigger)
except ValueError:
pass
self._threadpoolStartupID = None
self.threadpoolShutdownID = None
self.threadpool.stop()
self.threadpool = None
def getThreadPool(self):
"""
See L{twisted.internet.interfaces.IReactorThreads.getThreadPool}.
"""
if self.threadpool is None:
self._initThreadPool()
return self.threadpool
def callInThread(self, _callable, *args, **kwargs):
"""
See L{twisted.internet.interfaces.IReactorThreads.callInThread}.
"""
self.getThreadPool().callInThread(_callable, *args, **kwargs)
def suggestThreadPoolSize(self, size):
"""
See L{twisted.internet.interfaces.IReactorThreads.suggestThreadPoolSize}.
"""
self.getThreadPool().adjustPoolsize(maxthreads=size)
else:
# This is for signal handlers.
def callFromThread(self, f, *args, **kw):
assert callable(f), "%s is not callable" % (f,)
# See comment in the other callFromThread implementation.
self.threadCallQueue.append((f, args, kw))
if platform.supportsThreads():
classImplements(ReactorBase, IReactorThreads)
class BaseConnector(styles.Ephemeral):
"""Basic implementation of connector.
State can be: "connecting", "connected", "disconnected"
"""
implements(IConnector)
timeoutID = None
factoryStarted = 0
def __init__(self, factory, timeout, reactor):
self.state = "disconnected"
self.reactor = reactor
self.factory = factory
self.timeout = timeout
def disconnect(self):
"""Disconnect whatever our state is."""
if self.state == 'connecting':
self.stopConnecting()
elif self.state == 'connected':
self.transport.loseConnection()
def connect(self):
"""Start connection to remote server."""
if self.state != "disconnected":
raise RuntimeError, "can't connect in this state"
self.state = "connecting"
if not self.factoryStarted:
self.factory.doStart()
self.factoryStarted = 1
self.transport = transport = self._makeTransport()
if self.timeout is not None:
self.timeoutID = self.reactor.callLater(self.timeout, transport.failIfNotConnected, error.TimeoutError())
self.factory.startedConnecting(self)
def stopConnecting(self):
"""Stop attempting to connect."""
if self.state != "connecting":
            raise error.NotConnectingError("we're not trying to connect")
self.state = "disconnected"
self.transport.failIfNotConnected(error.UserError())
del self.transport
def cancelTimeout(self):
if self.timeoutID is not None:
try:
self.timeoutID.cancel()
except ValueError:
pass
del self.timeoutID
def buildProtocol(self, addr):
self.state = "connected"
self.cancelTimeout()
return self.factory.buildProtocol(addr)
def connectionFailed(self, reason):
self.cancelTimeout()
self.transport = None
self.state = "disconnected"
self.factory.clientConnectionFailed(self, reason)
if self.state == "disconnected":
# factory hasn't called our connect() method
self.factory.doStop()
self.factoryStarted = 0
def connectionLost(self, reason):
self.state = "disconnected"
self.factory.clientConnectionLost(self, reason)
if self.state == "disconnected":
# factory hasn't called our connect() method
self.factory.doStop()
self.factoryStarted = 0
def getDestination(self):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement "
"getDestination")
class BasePort(abstract.FileDescriptor):
"""Basic implementation of a ListeningPort.
Note: This does not actually implement IListeningPort.
"""
addressFamily = None
socketType = None
def createInternetSocket(self):
s = socket.socket(self.addressFamily, self.socketType)
s.setblocking(0)
fdesc._setCloseOnExec(s.fileno())
return s
def doWrite(self):
"""Raises a RuntimeError"""
raise RuntimeError, "doWrite called on a %s" % reflect.qual(self.__class__)
class _SignalReactorMixin(object):
"""
Private mixin to manage signals: it installs signal handlers at start time,
and define run method.
It can only be used mixed in with L{ReactorBase}, and has to be defined
first in the inheritance (so that method resolution order finds
startRunning first).
@type _installSignalHandlers: C{bool}
@ivar _installSignalHandlers: A flag which indicates whether any signal
handlers will be installed during startup. This includes handlers for
SIGCHLD to monitor child processes, and SIGINT, SIGTERM, and SIGBREAK
to stop the reactor.
"""
_installSignalHandlers = False
def _handleSignals(self):
"""
Install the signal handlers for the Twisted event loop.
"""
try:
import signal
except ImportError:
log.msg("Warning: signal module unavailable -- "
"not installing signal handlers.")
return
if signal.getsignal(signal.SIGINT) == signal.default_int_handler:
# only handle if there isn't already a handler, e.g. for Pdb.
signal.signal(signal.SIGINT, self.sigInt)
signal.signal(signal.SIGTERM, self.sigTerm)
# Catch Ctrl-Break in windows
if hasattr(signal, "SIGBREAK"):
signal.signal(signal.SIGBREAK, self.sigBreak)
def startRunning(self, installSignalHandlers=True):
"""
Extend the base implementation in order to remember whether signal
handlers should be installed later.
@type installSignalHandlers: C{bool}
@param installSignalHandlers: A flag which, if set, indicates that
handlers for a number of (implementation-defined) signals should be
installed during startup.
"""
self._installSignalHandlers = installSignalHandlers
ReactorBase.startRunning(self)
def _reallyStartRunning(self):
"""
Extend the base implementation by also installing signal handlers, if
C{self._installSignalHandlers} is true.
"""
ReactorBase._reallyStartRunning(self)
if self._installSignalHandlers:
# Make sure this happens before after-startup events, since the
# expectation of after-startup is that the reactor is fully
# initialized. Don't do it right away for historical reasons
# (perhaps some before-startup triggers don't want there to be a
# custom SIGCHLD handler so that they can run child processes with
# some blocking api).
self._handleSignals()
def run(self, installSignalHandlers=True):
self.startRunning(installSignalHandlers=installSignalHandlers)
self.mainLoop()
def mainLoop(self):
while self._started:
try:
while self._started:
# Advance simulation time in delayed event
# processors.
self.runUntilCurrent()
t2 = self.timeout()
t = self.running and t2
self.doIteration(t)
except:
log.msg("Unexpected error in main loop.")
log.err()
else:
log.msg('Main loop terminated.')
__all__ = []
| gpl-2.0 | 7,070,139,759,940,670,000 | 33.879966 | 117 | 0.59683 | false |
ryansnowboarder/zulip | zerver/lib/test_runner.py | 2 | 3482 | from __future__ import print_function
from django.test.runner import DiscoverRunner
from zerver.lib.cache import bounce_key_prefix_for_testing
from zerver.views.messages import get_sqlalchemy_connection
import os
import time
import traceback
import unittest
def slow(expected_run_time, slowness_reason):
'''
    This is a decorator that annotates a test as being "known
    to be slow." The decorator will set expected_run_time and slowness_reason
    as attributes of the function. Other code can use this annotation
as needed, e.g. to exclude these tests in "fast" mode.
'''
def decorator(f):
f.expected_run_time = expected_run_time
f.slowness_reason = slowness_reason
return f
return decorator
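# Usage sketch (added comment; the test name and timing below are made up):
#
#   @slow(0.4, "sends many messages")
#   def test_bulk_send(self):
#       ...
#
# run_test() below consults this annotation and skips such tests when the
# FAST_TESTS_ONLY environment variable is set.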
def is_known_slow_test(test_method):
return hasattr(test_method, 'slowness_reason')
def full_test_name(test):
test_module = test.__module__
test_class = test.__class__.__name__
test_method = test._testMethodName
return '%s.%s.%s' % (test_module, test_class, test_method)
def get_test_method(test):
return getattr(test, test._testMethodName)
def enforce_timely_test_completion(test_method, test_name, delay):
if hasattr(test_method, 'expected_run_time'):
# Allow for tests to run 50% slower than normal due
# to random variations.
max_delay = 1.5 * test_method.expected_run_time
else:
max_delay = 0.180 # seconds
# Further adjustments for slow laptops:
max_delay = max_delay * 3
if delay > max_delay:
print('Test is TOO slow: %s (%.3f s)' % (test_name, delay))
def fast_tests_only():
return "FAST_TESTS_ONLY" in os.environ
def run_test(test):
failed = False
test_method = get_test_method(test)
if fast_tests_only() and is_known_slow_test(test_method):
return failed
test_name = full_test_name(test)
bounce_key_prefix_for_testing(test_name)
print('Running', test_name)
if not hasattr(test, "_pre_setup"):
print("somehow the test doesn't have _pre_setup; it may be an import fail.")
print("Here's a debugger. Good luck!")
import pdb; pdb.set_trace()
test._pre_setup()
start_time = time.time()
test.setUp()
try:
test_method()
except unittest.SkipTest:
pass
except Exception:
failed = True
traceback.print_exc()
test.tearDown()
delay = time.time() - start_time
enforce_timely_test_completion(test_method, test_name, delay)
test._post_teardown()
return failed
class Runner(DiscoverRunner):
def __init__(self, *args, **kwargs):
DiscoverRunner.__init__(self, *args, **kwargs)
def run_suite(self, suite, fatal_errors=None):
failed = False
for test in suite:
if run_test(test):
failed = True
if fatal_errors:
return failed
return failed
def run_tests(self, test_labels, extra_tests=None, **kwargs):
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
# We have to do the next line to avoid flaky scenarios where we
# run a single test and getting an SA connection causes data from
# a Django connection to be rolled back mid-test.
get_sqlalchemy_connection()
failed = self.run_suite(suite, fatal_errors=kwargs.get('fatal_errors'))
self.teardown_test_environment()
return failed
print()
| apache-2.0 | 9,037,814,933,979,107,000 | 29.278261 | 84 | 0.645032 | false |
rvalyi/OpenUpgrade | openerp/addons/base/tests/test_misc.py | 393 | 1111 | import unittest2
from openerp.tools import misc
class test_countingstream(unittest2.TestCase):
def test_empty_stream(self):
s = misc.CountingStream(iter([]))
self.assertEqual(s.index, -1)
self.assertIsNone(next(s, None))
self.assertEqual(s.index, 0)
def test_single(self):
s = misc.CountingStream(xrange(1))
self.assertEqual(s.index, -1)
self.assertEqual(next(s, None), 0)
self.assertIsNone(next(s, None))
self.assertEqual(s.index, 1)
def test_full(self):
s = misc.CountingStream(xrange(42))
for _ in s:
pass
self.assertEqual(s.index, 42)
def test_repeated(self):
""" Once the CountingStream has stopped iterating, the index should not
increase anymore (the internal state should not be allowed to change)
"""
s = misc.CountingStream(iter([]))
self.assertIsNone(next(s, None))
self.assertEqual(s.index, 0)
self.assertIsNone(next(s, None))
self.assertEqual(s.index, 0)
if __name__ == '__main__':
unittest2.main()
| agpl-3.0 | -1,779,371,649,994,160,000 | 29.027027 | 79 | 0.612061 | false |
caveman-dick/ansible | lib/ansible/modules/packaging/os/dpkg_selections.py | 29 | 2142 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dpkg_selections
short_description: Dpkg package selection selections
description:
- Change dpkg package selection state via --get-selections and --set-selections.
version_added: "2.0"
author: Brian Brazil <[email protected]>
options:
name:
description:
- Name of the package
required: true
selection:
description:
- The selection state to set the package to.
choices: [ 'install', 'hold', 'deinstall', 'purge' ]
required: true
notes:
- This module won't cause any packages to be installed/removed/purged, use the C(apt) module for that.
'''
EXAMPLES = '''
# Prevent python from being upgraded.
- dpkg_selections:
name: python
selection: hold
'''
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
selection=dict(choices=['install', 'hold', 'deinstall', 'purge'])
),
supports_check_mode=True,
)
dpkg = module.get_bin_path('dpkg', True)
name = module.params['name']
selection = module.params['selection']
# Get current settings.
rc, out, err = module.run_command([dpkg, '--get-selections', name], check_rc=True)
if not out:
current = 'not present'
else:
current = out.split()[1]
changed = current != selection
if module.check_mode or not changed:
module.exit_json(changed=changed, before=current, after=selection)
module.run_command([dpkg, '--set-selections'], data="%s %s" % (name, selection), check_rc=True)
module.exit_json(changed=changed, before=current, after=selection)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | -8,024,273,377,798,781,000 | 26.818182 | 106 | 0.630719 | false |
adamkh/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/__init__.py | 309 | 1692 | # urllib3/__init__.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov ([email protected])'
__license__ = 'MIT'
__version__ = 'dev'
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util import make_headers, get_host
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added an stderr logging handler to logger: %s' % __name__)
return handler
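# Usage sketch (added comment, not part of upstream urllib3):
#
#   import logging, urllib3
#   urllib3.add_stderr_logger(logging.DEBUG)  # echo urllib3 activity to stderr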
# ... Clean up.
del NullHandler
| lgpl-2.1 | 3,474,151,781,714,833,400 | 28.172414 | 84 | 0.71513 | false |
kant/inasafe | safe/metadata/test/test_hazard_metadata.py | 8 | 1391 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**Exception Classes.**
Custom exception classes for the IS application.
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '12/10/2014'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
from unittest import TestCase
from safe.common.utilities import unique_filename
from safe.metadata import HazardLayerMetadata
class TestHazardMetadata(TestCase):
def test_standard_properties(self):
metadata = HazardLayerMetadata(unique_filename())
with self.assertRaises(KeyError):
metadata.get_property('non_existing_key')
# from BaseMetadata
metadata.get_property('email')
# from HazardLayerMetadata
metadata.get_property('hazard')
metadata.get_property('hazard_category')
metadata.get_property('continuous_hazard_unit')
metadata.get_property('vector_hazard_classification')
metadata.get_property('raster_hazard_classification')
| gpl-3.0 | -8,358,617,968,047,260,000 | 30.613636 | 78 | 0.705248 | false |
Moriadry/tensorflow | tensorflow/contrib/metrics/python/metrics/classification_test.py | 125 | 4623 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics.classification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.metrics.python.metrics import classification
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ClassificationTest(test.TestCase):
def testAccuracy1D(self):
with self.test_session() as session:
pred = array_ops.placeholder(dtypes.int32, shape=[None])
labels = array_ops.placeholder(dtypes.int32, shape=[None])
acc = classification.accuracy(pred, labels)
result = session.run(acc,
feed_dict={pred: [1, 0, 1, 0],
labels: [1, 1, 0, 0]})
self.assertEqual(result, 0.5)
def testAccuracy1DBool(self):
with self.test_session() as session:
pred = array_ops.placeholder(dtypes.bool, shape=[None])
labels = array_ops.placeholder(dtypes.bool, shape=[None])
acc = classification.accuracy(pred, labels)
result = session.run(acc,
feed_dict={pred: [1, 0, 1, 0],
labels: [1, 1, 0, 0]})
self.assertEqual(result, 0.5)
def testAccuracy1DInt64(self):
with self.test_session() as session:
pred = array_ops.placeholder(dtypes.int64, shape=[None])
labels = array_ops.placeholder(dtypes.int64, shape=[None])
acc = classification.accuracy(pred, labels)
result = session.run(acc,
feed_dict={pred: [1, 0, 1, 0],
labels: [1, 1, 0, 0]})
self.assertEqual(result, 0.5)
def testAccuracy1DString(self):
with self.test_session() as session:
pred = array_ops.placeholder(dtypes.string, shape=[None])
labels = array_ops.placeholder(dtypes.string, shape=[None])
acc = classification.accuracy(pred, labels)
result = session.run(
acc,
feed_dict={pred: ['a', 'b', 'a', 'c'],
labels: ['a', 'c', 'b', 'c']})
self.assertEqual(result, 0.5)
def testAccuracyDtypeMismatch(self):
with self.assertRaises(ValueError):
pred = array_ops.placeholder(dtypes.int32, shape=[None])
labels = array_ops.placeholder(dtypes.int64, shape=[None])
classification.accuracy(pred, labels)
def testAccuracyFloatLabels(self):
with self.assertRaises(ValueError):
pred = array_ops.placeholder(dtypes.int32, shape=[None])
labels = array_ops.placeholder(dtypes.float32, shape=[None])
classification.accuracy(pred, labels)
def testAccuracy1DWeighted(self):
with self.test_session() as session:
pred = array_ops.placeholder(dtypes.int32, shape=[None])
labels = array_ops.placeholder(dtypes.int32, shape=[None])
weights = array_ops.placeholder(dtypes.float32, shape=[None])
acc = classification.accuracy(pred, labels)
result = session.run(acc,
feed_dict={
pred: [1, 0, 1, 1],
labels: [1, 1, 0, 1],
weights: [3.0, 1.0, 2.0, 0.0]
})
self.assertEqual(result, 0.5)
def testAccuracy1DWeightedBroadcast(self):
with self.test_session() as session:
pred = array_ops.placeholder(dtypes.int32, shape=[None])
labels = array_ops.placeholder(dtypes.int32, shape=[None])
weights = array_ops.placeholder(dtypes.float32, shape=[])
acc = classification.accuracy(pred, labels)
result = session.run(acc,
feed_dict={
pred: [1, 0, 1, 0],
labels: [1, 1, 0, 0],
weights: 3.0,
})
self.assertEqual(result, 0.5)
if __name__ == '__main__':
test.main()
| apache-2.0 | -700,139,666,390,057,700 | 40.276786 | 80 | 0.598745 | false |
Distrotech/clamav | contrib/phishing/regex_opt.py | 14 | 1170 | #!/usr/bin/env python
def strlen(a,b):
if len(a)<len(b):
return -1;
elif len(a)>len(b):
return 1;
else:
return 0;
def getcommon_prefix(a,b):
if a==b:
return b;
if a[:-1]==b[:-1]:
return a[:-1];
else:
return ""
fil = file("iana_tld.h")
left = fil.read().split("(")
out=[]
for i in range(1,len(left)):
right = left[i].split(")")
regex_split = right[0].split("|")
regex_split.sort()
regex_split.sort(strlen)
prefix=''
prefixlen=0;
c_map=''
list=[]
for val in regex_split:
if val[:prefixlen] == prefix:
if len(val) == (prefixlen+1):
c_map = c_map+val[prefixlen]
else:
if len(c_map)>1:
c_map = "["+c_map+"]"
if len(prefix+c_map)>0:
list.append(prefix+c_map)
prefix = val[:-1]
prefixlen=len(prefix)
c_map=val[prefixlen]
else:
if len(c_map)>1:
c_map = "["+c_map+"]"
list.append(prefix+c_map)
prefix = getcommon_prefix(prefix,val)
if len(prefix)==0:
prefix=val[:-1]
prefixlen=len(prefix)
c_map=val[prefixlen]
if i==1:
left0=left[0]
else:
left0=""
out.append(left0)
out.append("(")
out.append("|".join(list))
out.append(")")
out.append(right[1])
print "".join(out)
| gpl-2.0 | 4,974,141,919,124,099,000 | 18.180328 | 41 | 0.581197 | false |
shakamunyi/tensorflow | tensorflow/python/ops/tensor_array_grad.py | 71 | 9083 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in tensor_array_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import tensor_array_ops
# TODO(b/31222613): These ops may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable("TensorArray")
ops.NotDifferentiable("TensorArrayGrad")
ops.NotDifferentiable("TensorArraySize")
ops.NotDifferentiable("TensorArrayClose")
ops.NotDifferentiable("TensorArrayV2")
ops.NotDifferentiable("TensorArrayGradV2")
ops.NotDifferentiable("TensorArraySizeV2")
ops.NotDifferentiable("TensorArrayCloseV2")
ops.NotDifferentiable("TensorArrayV3")
ops.NotDifferentiable("TensorArrayGradV3")
ops.NotDifferentiable("TensorArraySizeV3")
ops.NotDifferentiable("TensorArrayCloseV3")
def _GetGradSource(op_or_tensor):
"""Identify which call to tf.gradients created this gradient op or tensor.
TensorArray gradient calls use an accumulator TensorArray object. If
multiple gradients are calculated and run in the same session, the multiple
  gradient nodes may accidentally flow through the same accumulator TensorArray.
This double counting breaks the TensorArray gradient flow.
The solution is to identify which gradient call this particular
TensorArray*Grad is being called in, by looking at the input gradient
tensor's name, and create or lookup an accumulator gradient TensorArray
associated with this specific call. This solves any confusion and ensures
different gradients from the same forward graph get their own accumulators.
This function creates the unique label associated with the tf.gradients call
that is used to create the gradient TensorArray.
Args:
op_or_tensor: `Tensor` or `Operation` which is an input to a
TensorArray*Grad call.
Returns:
A python string, the unique label associated with this particular
gradients calculation.
Raises:
ValueError: If not called within a gradients calculation.
"""
name_tokens = op_or_tensor.name.split("/")
grad_pos = [i for i, x in enumerate(name_tokens) if x.startswith("gradients")]
if not grad_pos:
raise ValueError(
"Expected op/tensor name to start with gradients (excluding scope)"
", got: %s" % op_or_tensor.name)
return "/".join(name_tokens[:grad_pos[-1] + 1])
@ops.RegisterGradient("TensorArrayRead")
@ops.RegisterGradient("TensorArrayReadV2")
@ops.RegisterGradient("TensorArrayReadV3")
def _TensorArrayReadGrad(op, grad):
"""Gradient for TensorArrayRead.
Args:
op: Forward TensorArrayRead op.
grad: Gradient `Tensor` to TensorArrayRead.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
# the case of dynamic sized TensorArrays. When creating the gradient
# TensorArray, the final size of the forward array must be known.
# For this we need to wait until it has been created by depending on
# the input flow of the original op.
handle = op.inputs[0]
index = op.inputs[1]
flow = op.inputs[2]
dtype = op.get_attr("dtype")
grad_source = _GetGradSource(grad)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
w_g = g.write(index, grad)
return [None, None, w_g.flow]
@ops.RegisterGradient("TensorArrayWrite")
@ops.RegisterGradient("TensorArrayWriteV2")
@ops.RegisterGradient("TensorArrayWriteV3")
def _TensorArrayWriteGrad(op, flow):
"""Gradient for TensorArrayWrite.
Args:
op: Forward TensorArrayWrite op.
flow: Gradient `Tensor` flow to TensorArrayWrite.
Returns:
A grad `Tensor`, the gradient created in an upstream ReadGrad or PackGrad.
"""
# handle is the output store_handle of TensorArrayReadGrad or
# the handle output of TensorArrayWriteGrad. we must use this one.
handle = op.inputs[0]
index = op.inputs[1]
dtype = op.get_attr("T")
grad_source = _GetGradSource(flow)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
grad = g.read(index)
return [None, None, grad, flow]
@ops.RegisterGradient("TensorArrayGather")
@ops.RegisterGradient("TensorArrayGatherV2")
@ops.RegisterGradient("TensorArrayGatherV3")
def _TensorArrayGatherGrad(op, grad):
"""Gradient for TensorArrayGather.
Args:
op: Forward TensorArrayGather op.
grad: Gradient `Tensor` to TensorArrayGather.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
# the case of dynamic sized TensorArrays. When creating the gradient
# TensorArray, the final size of the forward array must be known.
# For this we need to wait until it has been created by depending on
# the input flow of the original op.
handle = op.inputs[0]
indices = op.inputs[1]
flow = op.inputs[2]
dtype = op.get_attr("dtype")
grad_source = _GetGradSource(grad)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
u_g = g.scatter(indices, grad)
return [None, None, u_g.flow]
@ops.RegisterGradient("TensorArrayScatter")
@ops.RegisterGradient("TensorArrayScatterV2")
@ops.RegisterGradient("TensorArrayScatterV3")
def _TensorArrayScatterGrad(op, flow):
"""Gradient for TensorArrayScatter.
Args:
op: Forward TensorArrayScatter op.
flow: Gradient `Tensor` flow to TensorArrayScatter.
Returns:
A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
"""
handle = op.inputs[0]
indices = op.inputs[1]
dtype = op.get_attr("T")
grad_source = _GetGradSource(flow)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
grad = g.gather(indices)
return [None, None, grad, flow]
@ops.RegisterGradient("TensorArrayConcat")
@ops.RegisterGradient("TensorArrayConcatV2")
@ops.RegisterGradient("TensorArrayConcatV3")
def _TensorArrayConcatGrad(op, grad, unused_lengths_grad):
"""Gradient for TensorArrayConcat.
Args:
op: Forward TensorArrayConcat op.
grad: Gradient `Tensor` to TensorArrayConcat.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
# the case of dynamic sized TensorArrays. When creating the gradient
# TensorArray, the final size of the forward array must be known.
# For this we need to wait until it has been created by depending on
# the input flow of the original op.
handle = op.inputs[0]
flow = op.inputs[1]
lengths = op.outputs[1]
dtype = op.get_attr("dtype")
grad_source = _GetGradSource(grad)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
u_g = g.split(grad, lengths=lengths)
# handle, flow_in
return [None, u_g.flow]
@ops.RegisterGradient("TensorArraySplit")
@ops.RegisterGradient("TensorArraySplitV2")
@ops.RegisterGradient("TensorArraySplitV3")
def _TensorArraySplitGrad(op, flow):
"""Gradient for TensorArraySplit.
Args:
op: Forward TensorArraySplit op.
flow: Gradient `Tensor` flow to TensorArraySplit.
Returns:
A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
"""
handle = op.inputs[0]
dtype = op.get_attr("T")
grad_source = _GetGradSource(flow)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
grad = g.concat()
# handle, value, lengths, flow_in
return [None, grad, None, flow]
| apache-2.0 | -3,300,407,450,813,267,000 | 36.378601 | 80 | 0.716283 | false |
latenitefilms/hammerspoon | scripts/docs/bin/build_docs.py | 2 | 29809 | #!/usr/bin/env -S -P/usr/bin:${PATH} python3
# -*- coding: utf-8 -*-
"""Hammerspoon API Documentation Builder"""
import argparse
import json
import os
import pprint
import sqlite3
import sys
import re
DEBUG = False
FAIL_ON_WARN = True
HAS_WARNED = False
LINT_MODE = False
LINTS = []
CHUNK_FILE = 0
CHUNK_LINE = 1
CHUNK_SIGN = 2
CHUNK_TYPE = 3
CHUNK_DESC = 4
TYPE_NAMES = ["Deprecated", "Command", "Constant", "Variable", "Function",
"Constructor", "Field", "Method"]
SECTION_NAMES = ["Parameters", "Returns", "Notes", "Examples"]
TYPE_DESC = {
"Constant": "Useful values which cannot be changed",
"Variable": "Configurable values",
"Function": "API calls offered directly by the extension",
"Method": "API calls which can only be made on an object returned by a constructor",
"Constructor": "API calls which return an object, typically one that offers API methods",
"Command": "External shell commands",
"Field": "Variables which can only be accessed from an object returned by a constructor",
"Deprecated": "API features which will be removed in an future release"
}
LINKS = [
{"name": "Website", "url": "https://www.hammerspoon.org/"},
{"name": "GitHub page",
"url": "https://github.com/Hammerspoon/hammerspoon"},
{"name": "Getting Started Guide",
"url": "https://www.hammerspoon.org/go/"},
{"name": "Spoon Plugin Documentation",
"url": "https://github.com/Hammerspoon/hammerspoon/blob/master/SPOONS.md"},
{"name": "Official Spoon repository",
"url": "https://www.hammerspoon.org/Spoons"},
{"name": "IRC channel",
"url": "irc://irc.libera.chat/#hammerspoon"},
{"name": "Mailing list",
"url": "https://groups.google.com/forum/#!forum/hammerspoon/"},
{"name": "LuaSkin API docs",
"url": "https://www.hammerspoon.org/docs/LuaSkin/"}
]
ARGUMENTS = None
def dbg(msg):
"""Print a debug message"""
if DEBUG:
print("DEBUG: %s" % msg)
def warn(msg):
"""Print a warning message"""
global HAS_WARNED
print("WARN: %s" % msg)
HAS_WARNED = True
def err(msg):
"""Print an error message"""
print("ERROR: %s" % msg)
sys.exit(1)
def find_code_files(path):
"""Find all of the code files under a path"""
code_files = []
for dirpath, _, files in os.walk(path):
dbg("Entering: %s" % dirpath)
for filename in files:
if filename.endswith(".m") or filename.endswith(".lua"):
dbg(" Found file: %s/%s" % (dirpath, filename))
code_files.append("%s/%s" % (dirpath, filename))
return code_files
def extract_docstrings(filename):
"""Find all of the docstrings in a file"""
docstrings = []
is_in_chunk = False
chunk = None
i = 0
with open(filename, "r") as filedata:
for raw_line in filedata.readlines():
i += 1
line = raw_line.strip('\n')
if line.startswith("----") or line.startswith("////"):
dbg("Skipping %s:%d - too many comment chars" % (filename, i))
continue
if line.startswith("---") or line.startswith("///"):
# We're in a chunk of docstrings
if not is_in_chunk:
# This is a new chunk
is_in_chunk = True
chunk = []
# Store the file and line number
chunk.append(filename)
chunk.append("%d" % i)
# Append the line to the current chunk
line = line.strip("/-")
if len(line) > 0 and line[0] == ' ':
line = line[1:]
chunk.append(line)
else:
# We hit a line that isn't a docstring. If we were previously
# processing docstrings, we just exited a chunk of docs, so
# store it and reset for the next chunk.
if is_in_chunk and chunk:
docstrings.append(chunk)
is_in_chunk = False
chunk = None
return docstrings
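# Shape of a chunk returned above (illustrative values; the indices match the
# CHUNK_* constants defined near the top of this file):
#
#   ["extensions/alert/alert.lua",     # CHUNK_FILE
#    "42",                             # CHUNK_LINE
#    "hs.alert.show(str, [seconds])",  # CHUNK_SIGN
#    "Function",                       # CHUNK_TYPE
#    "Shows a message on the screen",  # CHUNK_DESC
#    ...]                              # remaining description/section lines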
def find_module_for_item(modules, item):
"""Find the matching module for a given item"""
dbg("find_module_for_item: Searching for: %s" % item)
module = None
# We need a shortcut here for root level items
if not ARGUMENTS.standalone and item.count('.') == 1:
dbg("find_module_for_item: Using root-level shortcut")
module = "hs"
# Methods are very easy to shortcut
if item.count(':') == 1:
dbg("find_module_for_item: Using method shortcut")
module = item.split(':')[0]
if not module:
matches = []
for mod in modules:
if item.startswith(mod):
matches.append(mod)
matches.sort()
dbg("find_module_for_item: Found options: %s" % matches)
try:
module = matches[-1]
except IndexError:
err("Unable to find module for: %s" % item)
dbg("find_module_for_item: Found: %s" % module)
return module
def find_itemname_from_signature(signature):
"""Find the name of an item, from a full signature"""
return ''.join(re.split(r"[\(\[\s]", signature)[0])
def remove_method_from_itemname(itemname):
"""Return an itemname without any method name in it"""
return itemname.split(':')[0]
def find_basename_from_itemname(itemname):
"""Find the base name of an item, from its full name"""
# (where "base name" means the function/method/variable/etc name
splitchar = '.'
if ':' in itemname:
splitchar = ':'
return itemname.split(splitchar)[-1].split(' ')[0]
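# Worked examples for the three helpers above (illustrative signature):
#   find_itemname_from_signature("hs.window:frame() -> hs.geometry rect")
#       -> "hs.window:frame"     (everything before the first "(", "[" or space)
#   remove_method_from_itemname("hs.window:frame") -> "hs.window"
#   find_basename_from_itemname("hs.window:frame") -> "frame"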
def get_section_from_chunk(chunk, sectionname):
"""Extract a named section of a chunk"""
section = []
in_section = False
for line in chunk:
if line == sectionname:
in_section = True
continue
if in_section:
if line == "":
# We've reached the end of the section
break
else:
section.append(line)
return section
def strip_sections_from_chunk(chunk):
"""Remove the Parameters/Returns/Notes/Examples sections from a chunk"""
stripped_chunk = []
in_section = False
for line in chunk:
if line[:-1] in SECTION_NAMES:
# We hit a section
in_section = True
continue
elif line == "":
# We hit the end of a section
in_section = False
continue
else:
if not in_section:
stripped_chunk.append(line)
return stripped_chunk
def process_docstrings(docstrings):
"""Process the docstrings into a proper structure"""
docs = {}
# First we'll find all of the modules and prepare the docs structure
for chunk in docstrings:
if chunk[2].startswith("==="):
# This is a module definition
modulename = chunk[CHUNK_SIGN].strip("= ")
dbg("process_docstrings: Module: %s at %s:%s" % (
modulename,
chunk[CHUNK_FILE],
chunk[CHUNK_LINE]))
docs[modulename] = {}
docs[modulename]["header"] = chunk
docs[modulename]["items"] = {}
# Now we'll get all of the item definitions
for chunk in docstrings:
if not chunk[2].startswith("==="):
# This is an item definition
itemname = find_itemname_from_signature(chunk[CHUNK_SIGN])
dbg("process_docstrings: Found item: %s at %s:%s" % (
itemname,
chunk[CHUNK_FILE],
chunk[CHUNK_LINE]))
modulename = find_module_for_item(list(docs.keys()), itemname)
dbg("process_docstrings: Assigning item to module: %s" %
modulename)
docs[modulename]["items"][itemname] = chunk
return docs
def process_module(modulename, raw_module):
"""Process the docstrings for a module"""
dbg("Processing module: %s" % modulename)
dbg("Header: %s" % raw_module["header"][CHUNK_DESC])
module = {}
module["name"] = modulename
module["type"] = "Module"
module["desc"] = raw_module["header"][CHUNK_DESC]
module["doc"] = '\n'.join(raw_module["header"][CHUNK_DESC:])
module["stripped_doc"] = '\n'.join(raw_module["header"][CHUNK_DESC + 1:])
module["submodules"] = []
module["items"] = [] # Deprecated
module["Function"] = []
module["Method"] = []
module["Constructor"] = []
module["Constant"] = []
module["Variable"] = []
module["Command"] = []
module["Field"] = []
# NOTE: I don't like having the deprecated type, I think we should revist
# this later and find another way to annotate deprecations
module["Deprecated"] = []
for itemname in raw_module["items"]:
dbg(" Processing item: %s" % itemname)
chunk = raw_module["items"][itemname]
if chunk[CHUNK_TYPE] not in TYPE_NAMES:
err("UNKNOWN TYPE: %s (%s)" % (chunk[CHUNK_TYPE],
pprint.pformat(chunk)))
basename = find_basename_from_itemname(itemname)
item = {}
item["name"] = basename
item["signature"] = chunk[CHUNK_SIGN]
item["def"] = chunk[CHUNK_SIGN] # Deprecated
item["type"] = chunk[CHUNK_TYPE]
item["desc"] = chunk[CHUNK_DESC]
item["doc"] = '\n'.join(chunk[CHUNK_DESC:])
item["file"] = chunk[CHUNK_FILE]
item["lineno"] = chunk[CHUNK_LINE]
for section in ["Parameters", "Returns", "Notes", "Examples"]:
if section + ':' in chunk:
item[section.lower()] = get_section_from_chunk(chunk,
section + ':')
item["stripped_doc"] = '\n'.join(strip_sections_from_chunk(chunk[CHUNK_DESC + 1:]))
module[item["type"]].append(item)
module["items"].append(item) # Deprecated
dbg(" %s" % pprint.pformat(item).replace('\n', "\n "))
# The rest of this code is only for functions/constructors/methods
if item["type"] not in ["Function", "Constructor", "Method"]:
continue
def is_actual_parameter(some_text):
return some_text.startswith(" * ")
try:
if item['desc'].startswith("Alias for [`"):
item["parameters"] = []
item["returns"] = []
item["notes"] = []
pass
else:
sig_without_return = item["signature"].split("->")[0]
sig_params = re.sub(r".*\((.*)\).*", r"\1", sig_without_return)
sig_param_arr = re.split(r',|\|', sig_params)
sig_arg_count = len(sig_param_arr)
# Check if there are more than a single line of description at the top of the function
params_index = chunk[CHUNK_DESC:].index("Parameters:")
desc_section = [x for x in chunk[CHUNK_DESC:][0:params_index] if x != '']
if len(desc_section) > 1:
message = "Function description should be a single line. Other content may belong in Notes: %s" % sig_without_return
warn(message)
LINTS.append({
"file": item["file"],
"line": int(item["lineno"]),
"title": "Docstring function/method/constructor description should not be multiline",
"message": message,
"annotation_level": "failure"
})
# Clean up Parameters
clean_params = []
numlines = len(item["parameters"])
try:
for i in range(0, numlines):
line = item["parameters"][i]
if line.startswith(" * "):
# This is the start of a new parameter, add it to clean_params
clean_params.append(line.rstrip())
elif line.startswith(" * ") or line.startswith(" * "):
if line.startswith(" * "):
# Sub-lists should start with two spaces in GitHub Flavoured Markdown, so add in the missing space in this item
line = " " + line
# This is a sub-parameter of the previous parameter, add it to that string in clean_params
prev_clean_line = clean_params[-1]
prev_clean_line += '\n' + line.rstrip()
clean_params[-1] = prev_clean_line
else:
# This should have been on the line before
prev_clean_line = clean_params[-1]
prev_clean_line += ' ' + line.strip()
clean_params[-1] = prev_clean_line
except:
message = "PARAMETERS FORMAT ISSUE: Unable to parse Parameters for: %s" % sig_without_return
warn(message)
LINTS.append({
"file": item["file"],
"line": int(item["lineno"]),
"title": "Docstring function/method/constructor parameter parsing error",
"message": message,
"annotation_level": "failure"
})
item["parameters"] = clean_params
# Check the number of parameters in the signature matches the number in Parameters
parameter_count = len(item["parameters"])
if parameter_count != sig_arg_count:
message = "SIGNATURE/PARAMETER COUNT MISMATCH: '%s' says %d parameters ('%s'), but Parameters section has %d entries:\n%s\n" % (sig_without_return, sig_arg_count, ','.join(sig_param_arr), parameter_count, '\n'.join(item["parameters"]))
warn(message)
LINTS.append({
"file": item["file"],
"line": int(item["lineno"]),
"title": "Docstring signature/parameter mismatch",
"message": message,
"annotation_level": "failure"
})
# Check if we have zero items for Returns.
# This is a lint error in Hammerspoon, but in Standalone (ie Spoons) we'll let it slide and assume they meant to have no returns
if "returns" not in item:
item["returns"] = []
if len(item["returns"]) == 0 and not ARGUMENTS.standalone:
message = "RETURN COUNT ERROR: '%s' does not specify a return value" % (sig_without_return)
warn(message)
LINTS.append({
"file": item["file"],
"line": int(item["lineno"]),
"title": "Docstring missing return value",
"message": message,
"annotation_level": "failure"
})
# Having validated the Returns, we will now remove any "None" ones
if len(item["returns"]) == 1 and item["returns"][0] == "* None":
item["returns"] = []
# Check if we have zero items for Notes
if "notes" not in item:
item["notes"] = []
# Check if we have zero items for Examples
if "examples" not in item:
item["examples"] = []
except:
message = "Unable to parse parameters for %s\n%s\n" % (item["signature"], sys.exc_info()[1])
warn(message)
LINTS.append({
"file": item["file"],
"line": int(item["lineno"]),
"title": "Docstring Parameters parse failure",
"message": message,
"annotation_level": "failure"
})
if FAIL_ON_WARN:
sys.exit(1)
return module
def strip_paragraph(text):
"""Strip <p> from the start of a string, and </p>\n from the end"""
text = text.replace("<p>", "")
text = text.replace("</p>\n", "")
return text
def process_markdown(data):
"""Pre-render GitHub-flavoured Markdown, and syntax-highlight code"""
import mistune
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import html
class HighlightRenderer(mistune.Renderer):
def block_code(self, code, lang):
if not lang:
return '\n<pre><code>%s</code></pre>\n' % \
mistune.escape(code)
lexer = get_lexer_by_name(lang, stripall=True)
formatter = html.HtmlFormatter()
return highlight(code, lexer, formatter)
md = mistune.Markdown(renderer=HighlightRenderer())
for i in range(0, len(data)):
module = data[i]
module["desc_gfm"] = md(module["desc"])
module["doc_gfm"] = md(module["doc"])
for item_type in TYPE_NAMES:
items = module[item_type]
for j in range(0, len(items)):
item = items[j]
dbg("Preparing template data for: %s" % item["def"])
item["def_gfm"] = strip_paragraph(md(item["def"]))
item["doc_gfm"] = md(item["doc"])
if item_type in ["Function", "Constructor", "Method"]:
item["parameters_gfm"] = md('\n'.join(item["parameters"]))
item["returns_gfm"] = md('\n'.join(item["returns"]))
item["notes_gfm"] = md('\n'.join(item["notes"]))
items[j] = item
# Now do the same for the deprecated 'items' list
for j in range(0, len(module["items"])):
item = module["items"][j]
item["def_gfm"] = strip_paragraph(md(item["def"]))
item["doc_gfm"] = md(item["doc"])
module["items"][j] = item
data[i] = module
return data
def do_processing(directories):
"""Run all processing steps for one or more directories"""
raw_docstrings = []
codefiles = []
processed_docstrings = []
module_tree = {}
for directory in directories:
codefiles += find_code_files(directory)
if len(codefiles) == 0:
err("No .m/.lua files found")
for filename in codefiles:
raw_docstrings += extract_docstrings(filename)
if len(raw_docstrings) == 0:
err("No docstrings found")
docs = process_docstrings(raw_docstrings)
if len(docs) == 0:
err("No modules found")
for module in docs:
dbg("Processing: %s" % module)
module_docs = process_module(module, docs[module])
module_docs["items"].sort(key=lambda item: item["name"].lower())
for item_type in TYPE_NAMES:
module_docs[item_type].sort(key=lambda item: item["name"].lower())
processed_docstrings.append(module_docs)
# Add this module to our module tree
module_parts = module.split('.')
cursor = module_tree
for part in module_parts:
if part not in cursor:
cursor[part] = {}
cursor = cursor[part]
# Iterate over the modules, consulting the module tree, to find their
# submodules
# (Note that this is done as a separate step after the above loop, to
# ensure that we know about all possible modules by this point)
i = 0
for module in processed_docstrings:
dbg("Finding submodules for: %s" % module["name"])
module_parts = module["name"].split('.')
cursor = module_tree
for part in module_parts:
cursor = cursor[part]
# cursor now points at this module, so now we can check for subs
for sub in list(cursor.keys()):
processed_docstrings[i]["submodules"].append(sub)
processed_docstrings[i]["submodules"].sort()
i += 1
processed_docstrings.sort(key=lambda module: module["name"].lower())
return processed_docstrings
def write_annotations(filepath, data):
"""Write out a JSON file with our linter errors"""
with open(filepath, "wb") as jsonfile:
jsonfile.write(json.dumps(data, indent=2,
separators=(',', ': '),
ensure_ascii=False).encode('utf-8'))
def write_json(filepath, data):
"""Write out a JSON version of the docs"""
with open(filepath, "wb") as jsonfile:
jsonfile.write(json.dumps(data, sort_keys=True, indent=2,
separators=(',', ': '),
ensure_ascii=False).encode('utf-8'))
def write_json_index(filepath, data):
"""Write out a JSON index of the docs"""
index = []
for item in data:
entry = {}
entry["name"] = item["name"]
entry["desc"] = item["desc"]
entry["type"] = item["type"]
index.append(entry)
for subtype in TYPE_NAMES:
for subitem in item[subtype]:
entry = {}
entry["name"] = subitem["name"]
entry["module"] = item["name"]
entry["desc"] = subitem["desc"]
entry["type"] = subitem["type"]
index.append(entry)
with open(filepath, "wb") as jsonfile:
jsonfile.write(json.dumps(index, sort_keys=True, indent=2,
separators=(',', ': '),
ensure_ascii=False).encode('utf-8'))
def write_sql(filepath, data):
"""Write out an SQLite DB of docs metadata, for Dash"""
db = sqlite3.connect(filepath)
cur = db.cursor()
try:
cur.execute("DROP TABLE searchIndex;")
except sqlite3.OperationalError:
# This table won't have existed in a blank database
pass
cur.execute("CREATE TABLE searchIndex(id INTEGER PRIMARY KEY, name TEXT, "
"type TEXT, path TEXT);")
cur.execute("CREATE UNIQUE INDEX anchor ON searchIndex (name, type, "
"path);")
for module in data:
cur.execute("INSERT INTO searchIndex VALUES(NULL, '%(modname)s', "
"'Module', '%(modname)s.html');" %
{"modname": module["name"]})
for item in module["items"]:
try:
cur.execute("INSERT INTO searchIndex VALUES(NULL, "
"'%(modname)s.%(itemname)s', "
"'%(itemtype)s', '%(modname)s.html#%(itemname)s');" %
{"modname": module["name"], "itemname": item["name"],
"itemtype": item["type"]})
except:
err("DB Insert failed on %s:%s(%s)" % (module["name"], item["name"], item["type"]))
db.commit()
cur.execute("VACUUM;")
def write_templated_output(output_dir, template_dir, title, data, extension):
"""Write out a templated version of the docs"""
from jinja2 import Environment
jinja = Environment(trim_blocks=True, lstrip_blocks=True)
# Make sure we have a valid output_dir
if not os.path.isdir(output_dir):
try:
os.makedirs(output_dir)
except Exception as error:
err("Output directory is not a directory, "
"and/or can't be created: %s" % error)
    # Prepare for writing index.<extension>
try:
outfile = open(output_dir + "/index." + extension, "wb")
except Exception as error:
err("Unable to create %s: %s" % (output_dir + "/index." + extension,
error))
# Prepare for reading index.j2.<extension>
try:
tmplfile = open(template_dir + "/index.j2." + extension, "r")
except Exception as error:
err("Unable to open index.j2.%s: %s" % (extension, error))
if extension == "html":
# Re-process the doc data to convert Markdown to HTML
data = process_markdown(data)
# Write out the data as a file, for later debugging
write_json(output_dir + "/templated_docs.json", data)
# Render and write index.<extension>
template = jinja.from_string(tmplfile.read())
render = template.render(data=data, links=LINKS, title=title)
outfile.write(render.encode("utf-8"))
outfile.close()
tmplfile.close()
dbg("Wrote index." + extension)
# Render and write module docs
try:
tmplfile = open(template_dir + "/module.j2." + extension, "r")
template = jinja.from_string(tmplfile.read())
except Exception as error:
err("Unable to open module.j2.%s: %s" % (extension, error))
for module in data:
with open("%s/%s.%s" % (output_dir,
module["name"],
extension), "wb") as docfile:
render = template.render(module=module,
type_order=TYPE_NAMES,
type_desc=TYPE_DESC)
docfile.write(render.encode("utf-8"))
dbg("Wrote %s.%s" % (module["name"], extension))
tmplfile.close()
def write_html(output_dir, template_dir, title, data):
"""Write out an HTML version of the docs"""
write_templated_output(output_dir, template_dir, title, data, "html")
def write_markdown(output_dir, template_dir, title, data):
"""Write out a Markdown version of the docs"""
write_templated_output(output_dir, template_dir, title, data, "md")
def main():
"""Main entrypoint"""
global DEBUG
global ARGUMENTS
parser = argparse.ArgumentParser()
commands = parser.add_argument_group("Commands")
commands.add_argument("-v", "--validate", action="store_true",
dest="validate", default=False,
help="Ensure all docstrings are valid")
commands.add_argument("-j", "--json", action="store_true",
dest="json", default=False,
help="Output docs.json")
commands.add_argument("-s", "--sql", action="store_true",
dest="sql", default=False,
help="Output docs.sqlite")
commands.add_argument("-t", "--html", action="store_true",
dest="html", default=False,
help="Output HTML docs")
commands.add_argument("-m", "--markdown", action="store_true",
dest="markdown", default=False,
help="Output Markdown docs")
parser.add_argument("-n", "--standalone",
help="Process a single module only",
action="store_true", default=False,
dest="standalone")
parser.add_argument("-d", "--debug", help="Enable debugging output",
action="store_true", default=False,
dest="debug")
parser.add_argument("-e", "--templates", action="store",
help="Directory of HTML templates",
dest="template_dir", default="scripts/docs/templates")
parser.add_argument("-o", "--output_dir", action="store",
dest="output_dir", default="build/",
help="Directory to write outputs to")
parser.add_argument("-i", "--title", action="store",
dest="title", default="Hammerspoon",
help="Title for the index page")
parser.add_argument("-l", "--lint", action="store_true",
dest="lint_mode", default=False,
help="Run in Lint mode. No docs will be built")
parser.add_argument("DIRS", nargs=argparse.REMAINDER,
help="Directories to search")
arguments, leftovers = parser.parse_known_args()
if arguments.debug:
DEBUG = True
dbg("Arguments: %s" % arguments)
if not arguments.validate and \
not arguments.json and \
not arguments.sql and \
not arguments.html and \
not arguments.markdown and \
not arguments.lint_mode:
parser.print_help()
err("At least one of validate/json/sql/html/markdown is required.")
if len(arguments.DIRS) == 0:
parser.print_help()
err("At least one directory is required. See DIRS")
# Store global copy of our arguments
ARGUMENTS = arguments
if arguments.lint_mode:
global LINT_MODE
global FAIL_ON_WARN
LINT_MODE = True
FAIL_ON_WARN = False
results = do_processing(arguments.DIRS)
if arguments.validate:
# If we got this far, we already processed the docs, and validated them
pass
if arguments.lint_mode:
write_annotations(arguments.output_dir + "/annotations.json", LINTS)
if arguments.json:
write_json(arguments.output_dir + "/docs.json", results)
write_json_index(arguments.output_dir + "/docs_index.json", results)
if arguments.sql:
write_sql(arguments.output_dir + "/docs.sqlite", results)
if arguments.html:
write_html(arguments.output_dir + "/html/",
arguments.template_dir,
arguments.title, results)
if arguments.markdown:
write_markdown(arguments.output_dir + "/markdown/",
arguments.template_dir,
arguments.title, results)
if __name__ == "__main__":
main()
if FAIL_ON_WARN and HAS_WARNED:
sys.exit(1)
| mit | -953,439,886,044,061,000 | 37.364221 | 255 | 0.538025 | false |
TeachAtTUM/edx-platform | common/djangoapps/track/backends/logger.py | 26 | 1508 | """Event tracker backend that saves events to a python logger."""
from __future__ import absolute_import
import json
import logging
from django.conf import settings
from track.backends import BaseBackend
from track.utils import DateTimeJSONEncoder
log = logging.getLogger('track.backends.logger')
application_log = logging.getLogger('track.backends.application_log') # pylint: disable=invalid-name
class LoggerBackend(BaseBackend):
"""Event tracker backend that uses a python logger.
Events are logged to the INFO level as JSON strings.
"""
def __init__(self, name, **kwargs):
"""Event tracker backend that uses a python logger.
:Parameters:
- `name`: identifier of the logger, which should have
been configured using the default python mechanisms.
"""
super(LoggerBackend, self).__init__(**kwargs)
self.event_logger = logging.getLogger(name)
def send(self, event):
try:
event_str = json.dumps(event, cls=DateTimeJSONEncoder)
except UnicodeDecodeError:
application_log.exception(
"UnicodeDecodeError Event_data: %r", event
)
raise
        # TODO: remove truncation of the serialized event, either at a
        # higher level during the emission of the event, or by
        # providing warnings when the events exceed a certain size.
event_str = event_str[:settings.TRACK_MAX_EVENT]
self.event_logger.info(event_str)
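# Minimal usage sketch (illustrative, not part of the backend; the logger name
# and event payload below are made up):
#
#   import logging
#   logging.basicConfig(level=logging.INFO)
#   backend = LoggerBackend(name='tracking')
#   backend.send({'event_type': 'play_video', 'time': datetime.datetime.utcnow()})
#
# The event is serialized with DateTimeJSONEncoder, truncated to
# settings.TRACK_MAX_EVENT characters and emitted at INFO level.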
| agpl-3.0 | -3,983,498,073,852,096,500 | 29.16 | 101 | 0.666446 | false |
morlandi/django-permissions | permissions/models.py | 1 | 6596 | # django imports
from django.db import models
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
# permissions imports
import permissions.utils
class Permission(models.Model):
"""A permission which can be granted to users/groups and objects.
**Attributes:**
name
The unique name of the permission. This is displayed to users.
codename
        The unique codename of the permission. This is used internally to
        identify a permission.
content_types
The content types for which the permission is active. This can be
used to display only reasonable permissions for an object.
"""
name = models.CharField(_(u"Name"), max_length=100, unique=True)
codename = models.CharField(_(u"Codename"), max_length=100, unique=True)
content_types = models.ManyToManyField(ContentType, verbose_name=_(u"Content Types"), blank=True, related_name="content_types")
class Meta:
app_label = "permissions"
def __unicode__(self):
return "%s (%s)" % (self.name, self.codename)
class ObjectPermission(models.Model):
"""Grants permission for a role and an content object (optional).
**Attributes:**
role
The role for which the permission is granted.
permission
The permission which is granted.
content
The object for which the permission is granted.
"""
role = models.ForeignKey("Role", verbose_name=_(u"Role"), blank=True, null=True)
permission = models.ForeignKey(Permission, verbose_name=_(u"Permission"))
content_type = models.ForeignKey(ContentType, verbose_name=_(u"Content type"))
content_id = models.PositiveIntegerField(verbose_name=_(u"Content id"))
content = GenericForeignKey(ct_field="content_type", fk_field="content_id")
class Meta:
app_label = "permissions"
def __unicode__(self):
return "%s / %s / %s - %s" % (self.permission.name, self.role, self.content_type, self.content_id)
class ObjectPermissionInheritanceBlock(models.Model):
"""Blocks the inheritance for specific permission and object.
**Attributes:**
permission
The permission for which inheritance is blocked.
content
The object for which the inheritance is blocked.
"""
permission = models.ForeignKey(Permission, verbose_name=_(u"Permission"))
content_type = models.ForeignKey(ContentType, verbose_name=_(u"Content type"))
content_id = models.PositiveIntegerField(verbose_name=_(u"Content id"))
content = GenericForeignKey(ct_field="content_type", fk_field="content_id")
class Meta:
app_label = "permissions"
def __unicode__(self):
return "%s / %s - %s" % (self.permission, self.content_type, self.content_id)
class Role(models.Model):
"""A role gets permissions to do something. Principals (users and groups)
can only get permissions via roles.
**Attributes:**
name
The unique name of the role
"""
name = models.CharField(max_length=100, unique=True)
class Meta:
app_label = "permissions"
ordering = ("name", )
def __unicode__(self):
return self.name
def add_principal(self, principal, content=None):
"""Addes the given principal (user or group) ot the Role.
"""
return permissions.utils.add_role(principal, self)
def get_groups(self, content=None):
"""Returns all groups which has this role assigned. If content is given
it returns also the local roles.
"""
if content:
ctype = ContentType.objects.get_for_model(content)
prrs = PrincipalRoleRelation.objects.filter(role=self,
content_id__in=(None, content.id),
content_type__in=(None, ctype)).exclude(group=None)
else:
prrs = PrincipalRoleRelation.objects.filter(role=self,
content_id=None, content_type=None).exclude(group=None)
return [prr.group for prr in prrs]
def get_users(self, content=None):
"""Returns all users which has this role assigned. If content is given
it returns also the local roles.
"""
if content:
ctype = ContentType.objects.get_for_model(content)
prrs = PrincipalRoleRelation.objects.filter(role=self,
content_id__in=(None, content.id),
content_type__in=(None, ctype)).exclude(user=None)
else:
prrs = PrincipalRoleRelation.objects.filter(role=self,
content_id=None, content_type=None).exclude(user=None)
return [prr.user for prr in prrs]
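    # Illustrative sketch (not part of the model; names below are made up):
    #
    #   editors = Role.objects.create(name="Editor")
    #   editors.add_principal(some_user)    # global role, via permissions.utils.add_role
    #   editors.get_users()                 # -> [some_user]
    #   editors.get_users(content=page)     # also honours local roles for ``page``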
class PrincipalRoleRelation(models.Model):
"""A role given to a principal (user or group). If a content object is
given this is a local role, i.e. the principal has this role only for this
content object. Otherwise it is a global role, i.e. the principal has
this role generally.
user
A user instance. Either a user xor a group needs to be given.
group
A group instance. Either a user xor a group needs to be given.
role
The role which is given to the principal for content.
content
The content object which gets the local role (optional).
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_(u"User"), blank=True, null=True)
group = models.ForeignKey(Group, verbose_name=_(u"Group"), blank=True, null=True)
role = models.ForeignKey(Role, verbose_name=_(u"Role"))
content_type = models.ForeignKey(ContentType, verbose_name=_(u"Content type"), blank=True, null=True)
content_id = models.PositiveIntegerField(verbose_name=_(u"Content id"), blank=True, null=True)
content = GenericForeignKey(ct_field="content_type", fk_field="content_id")
class Meta:
app_label = "permissions"
def __unicode__(self):
if self.user:
principal = self.user.username
else:
principal = self.group
return "%s - %s" % (principal, self.role)
def get_principal(self):
"""Returns the principal.
"""
return self.user or self.group
def set_principal(self, principal):
"""Sets the principal.
"""
        # ``User`` is not imported in this module (it only imports Group and
        # relies on settings.AUTH_USER_MODEL), so dispatch on Group instead.
        if isinstance(principal, Group):
            self.group = principal
        else:
            self.user = principal
principal = property(get_principal, set_principal)
| bsd-3-clause | 5,021,914,450,039,645,000 | 33.176166 | 131 | 0.656155 | false |
seberg/numpy | benchmarks/benchmarks/bench_reduce.py | 11 | 1723 | from .common import Benchmark, TYPES1, get_squares
import numpy as np
class AddReduce(Benchmark):
def setup(self):
self.squares = get_squares().values()
def time_axis_0(self):
[np.add.reduce(a, axis=0) for a in self.squares]
def time_axis_1(self):
[np.add.reduce(a, axis=1) for a in self.squares]
class AddReduceSeparate(Benchmark):
params = [[0, 1], TYPES1]
param_names = ['axis', 'type']
def setup(self, axis, typename):
self.a = get_squares()[typename]
def time_reduce(self, axis, typename):
np.add.reduce(self.a, axis=axis)
class AnyAll(Benchmark):
def setup(self):
# avoid np.zeros's lazy allocation that would
# cause page faults during benchmark
self.zeros = np.full(100000, 0, bool)
self.ones = np.full(100000, 1, bool)
def time_all_fast(self):
self.zeros.all()
def time_all_slow(self):
self.ones.all()
def time_any_fast(self):
self.ones.any()
def time_any_slow(self):
self.zeros.any()
class MinMax(Benchmark):
params = [np.float32, np.float64, np.intp]
param_names = ['dtype']
def setup(self, dtype):
self.d = np.ones(20000, dtype=dtype)
def time_min(self, dtype):
np.min(self.d)
def time_max(self, dtype):
np.max(self.d)
class ArgMax(Benchmark):
params = [np.float32, bool]
param_names = ['dtype']
def setup(self, dtype):
self.d = np.zeros(200000, dtype=dtype)
def time_argmax(self, dtype):
np.argmax(self.d)
class SmallReduction(Benchmark):
def setup(self):
self.d = np.ones(100, dtype=np.float32)
def time_small(self):
np.sum(self.d)
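# These classes are picked up by airspeed velocity; an illustrative invocation
# (exact flags depend on the asv configuration shipped with the benchmarks):
#   asv run --bench bench_reduce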
| bsd-3-clause | 2,891,568,500,984,076,000 | 21.671053 | 56 | 0.604759 | false |
hopeall/odoo | openerp/tools/mail.py | 125 | 29474 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2012-TODAY OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import cgi
import logging
import lxml.html
import lxml.html.clean as clean
import random
import re
import socket
import threading
import time
from email.utils import getaddresses
import openerp
from openerp.loglevels import ustr
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# HTML Sanitizer
#----------------------------------------------------------
tags_to_kill = ["script", "head", "meta", "title", "link", "style", "frame", "iframe", "base", "object", "embed"]
tags_to_remove = ['html', 'body', 'font']
# allow new semantic HTML5 tags
allowed_tags = clean.defs.tags | frozenset('article section header footer hgroup nav aside figure main'.split() + [etree.Comment])
safe_attrs = clean.defs.safe_attrs | frozenset(
['style',
'data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-type', 'data-oe-expression', 'data-oe-translate', 'data-oe-nodeid',
'data-snippet-id', 'data-publish', 'data-id', 'data-res_id', 'data-member_id', 'data-view-id'
])
def html_sanitize(src, silent=True, strict=False, strip_style=False):
if not src:
return src
src = ustr(src, errors='replace')
logger = logging.getLogger(__name__ + '.html_sanitize')
# html encode email tags
part = re.compile(r"(<(([^a<>]|a[^<>\s])[^<>]*)@[^<>]+>)", re.IGNORECASE | re.DOTALL)
src = part.sub(lambda m: cgi.escape(m.group(1)), src)
# html encode mako tags <% ... %> to decode them later and keep them alive, otherwise they are stripped by the cleaner
src = src.replace('<%', cgi.escape('<%'))
src = src.replace('%>', cgi.escape('%>'))
kwargs = {
'page_structure': True,
'style': strip_style, # True = remove style tags/attrs
'forms': True, # remove form tags
'remove_unknown_tags': False,
'allow_tags': allowed_tags,
'comments': False,
'processing_instructions': False
}
if etree.LXML_VERSION >= (2, 3, 1):
# kill_tags attribute has been added in version 2.3.1
kwargs.update({
'kill_tags': tags_to_kill,
'remove_tags': tags_to_remove,
})
else:
kwargs['remove_tags'] = tags_to_kill + tags_to_remove
if strict:
if etree.LXML_VERSION >= (3, 1, 0):
# lxml < 3.1.0 does not allow to specify safe_attrs. We keep all attributes in order to keep "style"
kwargs.update({
'safe_attrs_only': True,
'safe_attrs': safe_attrs,
})
else:
kwargs['safe_attrs_only'] = False # keep oe-data attributes + style
        kwargs['frames'] = False  # do not remove frames (embedded video in CMS blogs)
try:
# some corner cases make the parser crash (such as <SCRIPT/XSS SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT> in test_mail)
cleaner = clean.Cleaner(**kwargs)
cleaned = cleaner.clean_html(src)
# MAKO compatibility: $, { and } inside quotes are escaped, preventing correct mako execution
cleaned = cleaned.replace('%24', '$')
cleaned = cleaned.replace('%7B', '{')
cleaned = cleaned.replace('%7D', '}')
cleaned = cleaned.replace('%20', ' ')
cleaned = cleaned.replace('%5B', '[')
cleaned = cleaned.replace('%5D', ']')
        cleaned = cleaned.replace('&lt;%', '<%')
        cleaned = cleaned.replace('%&gt;', '%>')
except etree.ParserError, e:
if 'empty' in str(e):
return ""
if not silent:
raise
logger.warning('ParserError obtained when sanitizing %r', src, exc_info=True)
cleaned = '<p>ParserError when sanitizing</p>'
except Exception:
if not silent:
raise
logger.warning('unknown error obtained when sanitizing %r', src, exc_info=True)
cleaned = '<p>Unknown error when sanitizing</p>'
# this is ugly, but lxml/etree tostring want to put everything in a 'div' that breaks the editor -> remove that
if cleaned.startswith('<div>') and cleaned.endswith('</div>'):
cleaned = cleaned[5:-6]
return cleaned
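# Illustrative behaviour (indicative only; the exact output can vary with the
# installed lxml version, see the LXML_VERSION checks above):
#   html_sanitize('<div>hi <script>alert(1)</script></div>')  ->  '<div>hi </div>'
#   html_sanitize('Contact <[email protected]>')  -> the pseudo-tag is
#   HTML-escaped and kept as text instead of being stripped as an unknown element.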
#----------------------------------------------------------
# HTML Cleaner
#----------------------------------------------------------
def html_email_clean(html, remove=False, shorten=False, max_length=300, expand_options=None,
protect_sections=False):
""" html_email_clean: clean the html by doing the following steps:
- try to strip email quotes, by removing blockquotes or having some client-
specific heuristics
- try to strip signatures
- shorten the html to a maximum number of characters if requested
    Some specific use cases:
    - MsOffice: ``div.style = border-top:solid;`` delimits the beginning of
      a quote; MsOffice is detected by finding WordSection1 or MsoNormal
    - Hotmail: ``hr.stopSpelling`` delimits the beginning of a quote; Hotmail
      is detected by finding ``SkyDrivePlaceholder``
:param string html: sanitized html; tags like html or head should not
be present in the html string. This method therefore
takes as input html code coming from a sanitized source,
like fields.html.
:param boolean remove: remove the html code that is unwanted; otherwise it
is only flagged and tagged
    :param boolean shorten: shorten the html; any content beyond ``max_length``
                            will be flagged for removal
:param int max_length: if shortening, maximum number of characters before
shortening
    :param dict expand_options: options for the read more link when shortening
                                the content. The supported keys are the following:
- oe_expand_container_tag: class applied to the
container of the whole read more link
- oe_expand_container_class: class applied to the
link container (default: oe_mail_expand)
- oe_expand_container_content: content of the
container (default: ...)
- oe_expand_separator_node: optional separator, like
adding ... <br /><br /> <a ...>read more</a> (default: void)
- oe_expand_a_href: href of the read more link itself
(default: #)
- oe_expand_a_class: class applied to the <a> containing
the link itself (default: oe_mail_expand)
- oe_expand_a_content: content of the <a> (default: read more)
The formatted read more link is the following:
<cont_tag class="oe_expand_container_class">
oe_expand_container_content
if expand_options.get('oe_expand_separator_node'):
<oe_expand_separator_node/>
<a href="oe_expand_a_href" class="oe_expand_a_class">
oe_expand_a_content
</a>
</span>
"""
def _replace_matching_regex(regex, source, replace=''):
""" Replace all matching expressions in source by replace """
if not source:
return source
dest = ''
idx = 0
for item in re.finditer(regex, source):
dest += source[idx:item.start()] + replace
idx = item.end()
dest += source[idx:]
return dest
def _create_node(tag, text, tail=None, attrs={}):
new_node = etree.Element(tag)
new_node.text = text
new_node.tail = tail
for key, val in attrs.iteritems():
new_node.set(key, val)
return new_node
def _insert_new_node(node, index, new_node_tag, new_node_text, new_node_tail=None, new_node_attrs={}):
new_node = _create_node(new_node_tag, new_node_text, new_node_tail, new_node_attrs)
node.insert(index, new_node)
return new_node
def _tag_matching_regex_in_text(regex, node, new_node_tag='span', new_node_attrs={}):
text = node.text or ''
if not re.search(regex, text):
return
cur_node = node
node.text = ''
idx, iteration = 0, 0
for item in re.finditer(regex, text):
if iteration == 0:
cur_node.text = text[idx:item.start()]
else:
_insert_new_node(node, (iteration - 1) * 2 + 1, new_node_tag, text[idx:item.start()])
new_node = _insert_new_node(node, iteration * 2, new_node_tag, text[item.start():item.end()], None, new_node_attrs)
cur_node = new_node
idx = item.end()
iteration += 1
new_node = _insert_new_node(node, -1, new_node_tag, text[idx:] + (cur_node.tail or ''), None, {})
def _truncate_node(node, position, simplify_whitespaces=True):
""" Truncate a node text at a given position. This algorithm will shorten
at the end of the word whose ending character exceeds position.
:param bool simplify_whitespaces: whether to try to count all successive
whitespaces as one character. This
option should not be True when trying
to keep 'pre' consistency.
"""
if node.text is None:
node.text = ''
truncate_idx = -1
if simplify_whitespaces:
cur_char_nbr = 0
word = None
node_words = node.text.strip(' \t\r\n').split()
for word in node_words:
cur_char_nbr += len(word)
if cur_char_nbr >= position:
break
if word:
truncate_idx = node.text.find(word) + len(word)
else:
truncate_idx = position
if truncate_idx == -1 or truncate_idx > len(node.text):
truncate_idx = len(node.text)
# compose new text bits
innertext = node.text[0:truncate_idx]
outertext = node.text[truncate_idx:]
node.text = innertext
# create <span> ... <a href="#">read more</a></span> node
read_more_node = _create_node(
expand_options.get('oe_expand_container_tag', 'span'),
expand_options.get('oe_expand_container_content', ' ... '),
None,
{'class': expand_options.get('oe_expand_container_class', 'oe_mail_expand')}
)
if expand_options.get('oe_expand_separator_node'):
read_more_separator_node = _create_node(
expand_options.get('oe_expand_separator_node'),
'',
None,
{}
)
read_more_node.append(read_more_separator_node)
read_more_link_node = _create_node(
'a',
expand_options.get('oe_expand_a_content', _('read more')),
None,
{
'href': expand_options.get('oe_expand_a_href', '#'),
'class': expand_options.get('oe_expand_a_class', 'oe_mail_expand'),
}
)
read_more_node.append(read_more_link_node)
# create outertext node
overtext_node = _create_node('span', outertext)
# tag node
overtext_node.set('in_overlength', '1')
# add newly created nodes in dom
node.append(read_more_node)
node.append(overtext_node)
if expand_options is None:
expand_options = {}
if not html or not isinstance(html, basestring):
return html
html = ustr(html)
# Pre processing
# ------------------------------------------------------------
# TDE TODO: --- MAIL ORIGINAL ---: '[\-]{4,}([^\-]*)[\-]{4,}'
# html: remove encoding attribute inside tags
doctype = re.compile(r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)', re.IGNORECASE | re.DOTALL)
html = doctype.sub(r"", html)
# html: ClEditor seems to love using <div><br /><div> -> replace with <br />
br_div_tags = re.compile(r'(<div>\s*<br\s*\/>\s*<\/div>)', re.IGNORECASE)
html = _replace_matching_regex(br_div_tags, html, '<br />')
# form a tree
root = lxml.html.fromstring(html)
if not len(root) and root.text is None and root.tail is None:
html = '<div>%s</div>' % html
root = lxml.html.fromstring(html)
quote_tags = re.compile(r'(\n(>)+[^\n\r]*)')
signature = re.compile(r'([-]{2,}[\s]?[\r\n]{1,2}[\s\S]+)')
for node in root.iter():
# remove all tails and replace them by a span element, because managing text and tails can be a pain in the ass
if node.tail:
tail_node = _create_node('span', node.tail)
node.tail = None
node.addnext(tail_node)
# form node and tag text-based quotes and signature
_tag_matching_regex_in_text(quote_tags, node, 'span', {'text_quote': '1'})
_tag_matching_regex_in_text(signature, node, 'span', {'text_signature': '1'})
# Processing
# ------------------------------------------------------------
# tree: tag nodes
# signature_begin = False # try dynamic signature recognition
quote_begin = False
overlength = False
overlength_section_id = None
overlength_section_count = 0
cur_char_nbr = 0
for node in root.iter():
# comments do not need processing
# note: bug in node.get(value, default) for HtmlComments, default never returned
if node.tag == etree.Comment:
continue
# do not take into account multiple spaces that are displayed as max 1 space in html
node_text = ' '.join((node.text and node.text.strip(' \t\r\n') or '').split())
# root: try to tag the client used to write the html
if 'WordSection1' in node.get('class', '') or 'MsoNormal' in node.get('class', ''):
root.set('msoffice', '1')
if 'SkyDrivePlaceholder' in node.get('class', '') or 'SkyDrivePlaceholder' in node.get('id', ''):
root.set('hotmail', '1')
# protect sections by tagging section limits and blocks contained inside sections, using an increasing id to re-find them later
if node.tag == 'section':
overlength_section_count += 1
node.set('section_closure', str(overlength_section_count))
if node.getparent() is not None and (node.getparent().get('section_closure') or node.getparent().get('section_inner')):
node.set('section_inner', str(overlength_section_count))
# state of the parsing: flag quotes and tails to remove
if quote_begin:
node.set('in_quote', '1')
node.set('tail_remove', '1')
# state of the parsing: flag when being in over-length content, depending on section content if defined (only when having protect_sections)
if overlength:
if not overlength_section_id or int(node.get('section_inner', overlength_section_count + 1)) > overlength_section_count:
node.set('in_overlength', '1')
node.set('tail_remove', '1')
# find quote in msoffice / hotmail / blockquote / text quote and signatures
if root.get('msoffice') and node.tag == 'div' and 'border-top:solid' in node.get('style', ''):
quote_begin = True
node.set('in_quote', '1')
node.set('tail_remove', '1')
if root.get('hotmail') and node.tag == 'hr' and ('stopSpelling' in node.get('class', '') or 'stopSpelling' in node.get('id', '')):
quote_begin = True
node.set('in_quote', '1')
node.set('tail_remove', '1')
if node.tag == 'blockquote' or node.get('text_quote') or node.get('text_signature'):
# here no quote_begin because we want to be able to remove some quoted
# text without removing all the remaining context
node.set('in_quote', '1')
if node.getparent() is not None and node.getparent().get('in_quote'):
# inside a block of removed text but not in quote_begin (see above)
node.set('in_quote', '1')
# shorten:
# if protect section:
# 1/ find the first parent not being inside a section
# 2/ add the read more link
# else:
# 1/ truncate the text at the next available space
# 2/ create a 'read more' node, next to current node
# 3/ add the truncated text in a new node, next to 'read more' node
node_text = (node.text or '').strip().strip('\n').strip()
if shorten and not overlength and cur_char_nbr + len(node_text) > max_length:
node_to_truncate = node
while node_to_truncate.getparent() is not None:
if node_to_truncate.get('in_quote'):
node_to_truncate = node_to_truncate.getparent()
elif protect_sections and (node_to_truncate.getparent().get('section_inner') or node_to_truncate.getparent().get('section_closure')):
node_to_truncate = node_to_truncate.getparent()
overlength_section_id = node_to_truncate.get('section_closure')
else:
break
overlength = True
node_to_truncate.set('truncate', '1')
if node_to_truncate == node:
node_to_truncate.set('truncate_position', str(max_length - cur_char_nbr))
else:
node_to_truncate.set('truncate_position', str(len(node.text or '')))
cur_char_nbr += len(node_text)
# Tree modification
# ------------------------------------------------------------
for node in root.iter():
if node.get('truncate'):
_truncate_node(node, int(node.get('truncate_position', '0')), node.tag != 'pre')
# Post processing
# ------------------------------------------------------------
to_remove = []
for node in root.iter():
if node.get('in_quote') or node.get('in_overlength'):
# copy the node tail into parent text
if node.tail and not node.get('tail_remove'):
parent = node.getparent()
parent.tail = node.tail + (parent.tail or '')
to_remove.append(node)
if node.get('tail_remove'):
node.tail = ''
# clean node
for attribute_name in ['in_quote', 'tail_remove', 'in_overlength', 'msoffice', 'hotmail', 'truncate', 'truncate_position']:
node.attrib.pop(attribute_name, None)
for node in to_remove:
if remove:
node.getparent().remove(node)
else:
if not expand_options.get('oe_expand_a_class', 'oe_mail_expand') in node.get('class', ''): # trick: read more link should be displayed even if it's in overlength
node_class = node.get('class', '') + ' oe_mail_cleaned'
node.set('class', node_class)
# html: \n that were tail of elements have been encapsulated into <span> -> back to \n
html = etree.tostring(root, pretty_print=False)
linebreaks = re.compile(r'<span[^>]*>([\s]*[\r\n]+[\s]*)<\/span>', re.IGNORECASE | re.DOTALL)
html = _replace_matching_regex(linebreaks, html, '\n')
return html
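# Illustrative usage (indicative only; the href below is just an example value):
#   html_email_clean(body_html, remove=True)
#       strips quoted replies and signatures from ``body_html``
#   html_email_clean(body_html, shorten=True, max_length=300,
#                    expand_options={'oe_expand_a_href': '/mail/read'})
#       keeps roughly the first 300 characters and appends a "read more" link
#       built from ``expand_options``.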
#----------------------------------------------------------
# HTML/Text management
#----------------------------------------------------------
def html2plaintext(html, body_id=None, encoding='utf-8'):
""" From an HTML text, convert the HTML to plain text.
If @param body_id is provided then this is the tag where the
body (not necessarily <body>) starts.
"""
## (c) Fry-IT, www.fry-it.com, 2007
## <[email protected]>
## download here: http://www.peterbe.com/plog/html2plaintext
html = ustr(html)
tree = etree.fromstring(html, parser=etree.HTMLParser())
if body_id is not None:
source = tree.xpath('//*[@id=%s]' % (body_id,))
else:
source = tree.xpath('//body')
if len(source):
tree = source[0]
url_index = []
i = 0
for link in tree.findall('.//a'):
url = link.get('href')
if url:
i += 1
link.tag = 'span'
link.text = '%s [%s]' % (link.text, i)
url_index.append(url)
html = ustr(etree.tostring(tree, encoding=encoding))
    # \r char is converted into &#13;, must remove it
    html = html.replace('&#13;', '')
html = html.replace('<strong>', '*').replace('</strong>', '*')
html = html.replace('<b>', '*').replace('</b>', '*')
html = html.replace('<h3>', '*').replace('</h3>', '*')
html = html.replace('<h2>', '**').replace('</h2>', '**')
html = html.replace('<h1>', '**').replace('</h1>', '**')
html = html.replace('<em>', '/').replace('</em>', '/')
html = html.replace('<tr>', '\n')
html = html.replace('</p>', '\n')
html = re.sub('<br\s*/?>', '\n', html)
html = re.sub('<.*?>', ' ', html)
html = html.replace(' ' * 2, ' ')
    html = html.replace('&gt;', '>')
    html = html.replace('&lt;', '<')
    html = html.replace('&amp;', '&')
# strip all lines
html = '\n'.join([x.strip() for x in html.splitlines()])
html = html.replace('\n' * 2, '\n')
for i, url in enumerate(url_index):
if i == 0:
html += '\n\n'
html += ustr('[%s] %s\n') % (i + 1, url)
return html
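# Illustrative behaviour (indicative only): block tags are mapped to lightweight
# markers (e.g. <h1>..</h1> -> **..**), links are rewritten as "text [n]" and an
# URL index is appended at the end:
#   html2plaintext(u'<p>See <a href="http://example.com">this</a></p>')
#   -> roughly u'See this [1]\n\n[1] http://example.com'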
def plaintext2html(text, container_tag=False):
""" Convert plaintext into html. Content of the text is escaped to manage
html entities, using cgi.escape().
- all \n,\r are replaced by <br />
- enclose content into <p>
- 2 or more consecutive <br /> are considered as paragraph breaks
:param string container_tag: container of the html; by default the
content is embedded into a <div>
"""
text = cgi.escape(ustr(text))
# 1. replace \n and \r
text = text.replace('\n', '<br/>')
text = text.replace('\r', '<br/>')
# 2-3: form paragraphs
idx = 0
final = '<p>'
br_tags = re.compile(r'(([<]\s*[bB][rR]\s*\/?[>]\s*){2,})')
for item in re.finditer(br_tags, text):
final += text[idx:item.start()] + '</p><p>'
idx = item.end()
final += text[idx:] + '</p>'
# 4. container
if container_tag:
final = '<%s>%s</%s>' % (container_tag, final, container_tag)
return ustr(final)
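# Illustrative example:
#   plaintext2html("a & b\n\nnew paragraph", container_tag='div')
#   -> '<div><p>a &amp; b</p><p>new paragraph</p></div>'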
def append_content_to_html(html, content, plaintext=True, preserve=False, container_tag=False):
""" Append extra content at the end of an HTML snippet, trying
to locate the end of the HTML document (</body>, </html>, or
EOF), and converting the provided content in html unless ``plaintext``
is False.
Content conversion can be done in two ways:
- wrapping it into a pre (preserve=True)
- use plaintext2html (preserve=False, using container_tag to wrap the
whole content)
A side-effect of this method is to coerce all HTML tags to
lowercase in ``html``, and strip enclosing <html> or <body> tags in
content if ``plaintext`` is False.
:param str html: html tagsoup (doesn't have to be XHTML)
:param str content: extra content to append
:param bool plaintext: whether content is plaintext and should
be wrapped in a <pre/> tag.
:param bool preserve: if content is plaintext, wrap it into a <pre>
instead of converting it into html
"""
html = ustr(html)
if plaintext and preserve:
content = u'\n<pre>%s</pre>\n' % ustr(content)
elif plaintext:
content = '\n%s\n' % plaintext2html(content, container_tag)
else:
content = re.sub(r'(?i)(</?(?:html|body|head|!\s*DOCTYPE)[^>]*>)', '', content)
content = u'\n%s\n' % ustr(content)
# Force all tags to lowercase
html = re.sub(r'(</?)\W*(\w+)([ >])',
lambda m: '%s%s%s' % (m.group(1), m.group(2).lower(), m.group(3)), html)
insert_location = html.find('</body>')
if insert_location == -1:
insert_location = html.find('</html>')
if insert_location == -1:
return '%s%s' % (html, content)
return '%s%s%s' % (html[:insert_location], content, html[insert_location:])
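# Illustrative example (indicative only):
#   append_content_to_html('<html><body><p>Hi</p></body></html>', 'Bye')
#   -> '<html><body><p>Hi</p>\n<p>Bye</p>\n</body></html>'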
#----------------------------------------------------------
# Emails
#----------------------------------------------------------
# matches any email in a body of text
email_re = re.compile(r"""([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,6})""", re.VERBOSE)
# matches a string containing only one email
single_email_re = re.compile(r"""^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,6}$""", re.VERBOSE)
res_re = re.compile(r"\[([0-9]+)\]", re.UNICODE)
command_re = re.compile("^Set-([a-z]+) *: *(.+)$", re.I + re.UNICODE)
# Updated in 7.0 to match the model name as well
# Typical form of references is <timestamp-openerp-record_id-model_name@domain>
# group(1) = the record ID ; group(2) = the model (if any) ; group(3) = the domain
reference_re = re.compile("<.*-open(?:object|erp)-(\\d+)(?:-([\w.]+))?[^>]*@([^>]*)>", re.UNICODE)
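# e.g. (illustrative message id) reference_re.search(
#     '<1380703451.123456-openerp-42-res.partner@myhost>').groups()
# -> ('42', 'res.partner', 'myhost')   # record id, model, domain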
def generate_tracking_message_id(res_id):
"""Returns a string that can be used in the Message-ID RFC822 header field
Used to track the replies related to a given object thanks to the "In-Reply-To"
or "References" fields that Mail User Agents will set.
"""
try:
rnd = random.SystemRandom().random()
except NotImplementedError:
rnd = random.random()
rndstr = ("%.15f" % rnd)[2:]
return "<%.15f.%s-openerp-%s@%s>" % (time.time(), rndstr, res_id, socket.gethostname())
def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
attachments=None, message_id=None, references=None, openobject_id=False, debug=False, subtype='plain', headers=None,
smtp_server=None, smtp_port=None, ssl=False, smtp_user=None, smtp_password=None, cr=None, uid=None):
"""Low-level function for sending an email (deprecated).
:deprecate: since OpenERP 6.1, please use ir.mail_server.send_email() instead.
:param email_from: A string used to fill the `From` header, if falsy,
config['email_from'] is used instead. Also used for
the `Reply-To` header if `reply_to` is not provided
:param email_to: a sequence of addresses to send the mail to.
"""
# If not cr, get cr from current thread database
local_cr = None
if not cr:
db_name = getattr(threading.currentThread(), 'dbname', None)
if db_name:
local_cr = cr = openerp.registry(db_name).cursor()
else:
raise Exception("No database cursor found, please pass one explicitly")
# Send Email
try:
mail_server_pool = openerp.registry(cr.dbname)['ir.mail_server']
res = False
# Pack Message into MIME Object
email_msg = mail_server_pool.build_email(email_from, email_to, subject, body, email_cc, email_bcc, reply_to,
attachments, message_id, references, openobject_id, subtype, headers=headers)
res = mail_server_pool.send_email(cr, uid or 1, email_msg, mail_server_id=None,
smtp_server=smtp_server, smtp_port=smtp_port, smtp_user=smtp_user, smtp_password=smtp_password,
smtp_encryption=('ssl' if ssl else None), smtp_debug=debug)
except Exception:
_logger.exception("tools.email_send failed to deliver email")
return False
finally:
if local_cr:
cr.close()
return res
def email_split(text):
""" Return a list of the email addresses found in ``text`` """
if not text:
return []
return [addr[1] for addr in getaddresses([text])
# getaddresses() returns '' when email parsing fails, and
# sometimes returns emails without at least '@'. The '@'
# is strictly required in RFC2822's `addr-spec`.
if addr[1]
if '@' in addr[1]]
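# Illustrative example:
#   email_split('"Raoul" <[email protected]>, [email protected]')
#   -> ['[email protected]', '[email protected]']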
| agpl-3.0 | 6,417,726,045,833,026,000 | 42.600592 | 174 | 0.554082 | false |
brkrishna/freelance | univs/archives/snc_edu.py | 1 | 3105 | # -- coding: utf-8 --
#-------------------------------------------------------------------------------
# Name: snc_edu
# Purpose: St. Norbert College
#
# Author: Ramakrishna
#
# Dated: 07/Apr/2016
# Copyright: (c) Ramakrishna 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
import requests, re, os, csv
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from lxml import html
import socks, socket
from collections import OrderedDict
from queue import Queue
from threading import Thread
#socks.setdefaultproxy(proxy_type=socks.PROXY_TYPE_SOCKS5, addr="127.0.0.1", port=9150)
#socket.socket = socks.socksocket
url = 'http://www.snc.edu/cgi-bin/people/search.cgi'
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0'}
#service_args = ['--proxy=127.0.0.1:9150','--proxy-type=socks5',]
def search(term):
	d = None  # ensure the name exists for the finally clause even if Remote() raises
	try:
server_url = "http://%s:%s/wd/hub" % ('127.0.0.1', '4444')
dc = DesiredCapabilities.HTMLUNIT
d = webdriver.Remote(server_url, dc)
d.get(url)
d.find_element_by_name('str').clear()
d.find_element_by_name('str').send_keys(term.replace("\n", ""))
d.find_element_by_name('sbutton').click()
tree = html.fromstring(d.page_source.encode("utf-8"))
trs = tree.xpath("//table[@style='border-collapse: collapse']//tr")
count = len(trs)
records = []
for i in range(3, count):
rec = "$$$".join(trs[i].xpath("./td[1]//text()[normalize-space()]")).replace("\r\n", "").replace(" ", "").strip()
if 'Student' not in rec:
continue
row = OrderedDict()
try:
row['name'] = rec[:rec.find("Student")].replace("$$$", "").strip()
except:
continue
try:
row['email'] = rec[rec.find("Student$$$")+10:].replace("$$$", "")
except:
pass
records.append(row)
if len(records) > 0:
file_exists = os.path.isfile('snc_edu.csv')
with open('snc_edu.csv', 'a', newline='', encoding='utf-8') as outfile:
fp = csv.DictWriter(outfile, records[0].keys())
if not file_exists:
fp.writeheader()
fp.writerows(records)
with open('snc_terms', 'a') as f:
f.write(term + "\n")
except Exception as e:
print(e.__doc__)
print(e.args)
return None
finally:
		if d:
			d.quit()  # release the remote browser session instead of just dropping the reference
class Worker(Thread):
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
term = self.queue.get()
search(term)
self.queue.task_done()
def main():
try:
terms = set(open('terms.txt').readlines())
if os.path.isfile('snc_terms'):
finished_terms = set(open('snc_terms').readlines())
terms -= finished_terms
terms = list(terms)
queue = Queue()
for x in range(16):
worker = Worker(queue)
worker.daemon = True
worker.start()
terms_count = len(terms)
for i in range(0, terms_count):
queue.put(terms[i])
queue.join()
except Exception as e:
print(e.__doc__)
print(e.args)
if __name__ == '__main__':
main()
| gpl-2.0 | -7,487,602,610,980,704,000 | 24.661157 | 117 | 0.601932 | false |
Bforartists/scons | scons-local/SCons/Tool/BitKeeper.py | 7 | 2433 | """SCons.Tool.BitKeeper.py
Tool-specific initialization for the BitKeeper source code control
system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/BitKeeper.py 2014/07/05 09:42:21 garyo"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
"""Add a Builder factory function and construction variables for
BitKeeper to an Environment."""
def BitKeeperFactory(env=env):
""" """
import SCons.Warnings as W
W.warn(W.DeprecatedSourceCodeWarning, """The BitKeeper() factory is deprecated and there is no replacement.""")
act = SCons.Action.Action("$BITKEEPERCOM", "$BITKEEPERCOMSTR")
return SCons.Builder.Builder(action = act, env = env)
#setattr(env, 'BitKeeper', BitKeeperFactory)
env.BitKeeper = BitKeeperFactory
env['BITKEEPER'] = 'bk'
env['BITKEEPERGET'] = '$BITKEEPER get'
env['BITKEEPERGETFLAGS'] = SCons.Util.CLVar('')
env['BITKEEPERCOM'] = '$BITKEEPERGET $BITKEEPERGETFLAGS $TARGET'
def exists(env):
return env.Detect('bk')
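# Illustrative SConstruct usage (the factory itself warns that it is deprecated):
#   env = Environment(tools=['BitKeeper'])
#   env.SourceCode('.', env.BitKeeper())   # fetch missing sources with "bk get"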
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 415,066,700,374,629,700 | 35.313433 | 119 | 0.732429 | false |
stanta/darfchain | darfchain_docker/tests/web/test_blocks.py | 3 | 5828 | import pytest
from bigchaindb.models import Transaction
BLOCKS_ENDPOINT = '/api/v1/blocks/'
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_block_endpoint(b, client):
tx = Transaction.create([b.me], [([b.me], 1)])
tx = tx.sign([b.me_private])
block = b.create_block([tx])
b.write_block(block)
res = client.get(BLOCKS_ENDPOINT + block.id)
assert block.to_dict() == res.json
assert res.status_code == 200
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_block_returns_404_if_not_found(client):
res = client.get(BLOCKS_ENDPOINT + '123')
assert res.status_code == 404
res = client.get(BLOCKS_ENDPOINT + '123/')
assert res.status_code == 404
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_blocks_by_txid_endpoint(b, client):
tx = Transaction.create([b.me], [([b.me], 1)])
tx = tx.sign([b.me_private])
tx2 = Transaction.create([b.me], [([b.me], 10)])
tx2 = tx2.sign([b.me_private])
block_invalid = b.create_block([tx])
b.write_block(block_invalid)
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=' + tx.id)
# test if block is retrieved as undecided
assert res.status_code == 200
assert block_invalid.id in res.json
assert len(res.json) == 1
# vote the block invalid
vote = b.vote(block_invalid.id, b.get_last_voted_block().id, False)
b.write_vote(vote)
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=' + tx.id)
# test if block is retrieved as invalid
assert res.status_code == 200
assert block_invalid.id in res.json
assert len(res.json) == 1
# create a new block containing the same tx (and tx2 to avoid block id collision)
block_valid = b.create_block([tx, tx2])
b.write_block(block_valid)
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=' + tx.id)
# test if block is retrieved as undecided
assert res.status_code == 200
assert block_valid.id in res.json
assert len(res.json) == 2
# vote the block valid
vote = b.vote(block_valid.id, block_invalid.id, True)
b.write_vote(vote)
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=' + tx.id)
# test if block is retrieved as valid
assert res.status_code == 200
assert block_valid.id in res.json
assert len(res.json) == 2
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_blocks_by_txid_and_status_endpoint(b, client):
from bigchaindb import Bigchain
tx = Transaction.create([b.me], [([b.me], 1)])
tx = tx.sign([b.me_private])
tx2 = Transaction.create([b.me], [([b.me], 10)])
tx2 = tx2.sign([b.me_private])
block_invalid = b.create_block([tx])
b.write_block(block_invalid)
# create a new block containing the same tx (and tx2 to avoid block id collision)
block_valid = b.create_block([tx, tx2])
b.write_block(block_valid)
res = client.get('{}?transaction_id={}&status={}'.format(BLOCKS_ENDPOINT, tx.id, Bigchain.BLOCK_INVALID))
# test if no blocks are retrieved as invalid
assert res.status_code == 200
assert len(res.json) == 0
res = client.get('{}?transaction_id={}&status={}'.format(BLOCKS_ENDPOINT, tx.id, Bigchain.BLOCK_UNDECIDED))
# test if both blocks are retrieved as undecided
assert res.status_code == 200
assert block_valid.id in res.json
assert block_invalid.id in res.json
assert len(res.json) == 2
res = client.get('{}?transaction_id={}&status={}'.format(BLOCKS_ENDPOINT, tx.id, Bigchain.BLOCK_VALID))
# test if no blocks are retrieved as valid
assert res.status_code == 200
assert len(res.json) == 0
# vote one of the blocks invalid
vote = b.vote(block_invalid.id, b.get_last_voted_block().id, False)
b.write_vote(vote)
# vote the other block valid
vote = b.vote(block_valid.id, block_invalid.id, True)
b.write_vote(vote)
res = client.get('{}?transaction_id={}&status={}'.format(BLOCKS_ENDPOINT, tx.id, Bigchain.BLOCK_INVALID))
# test if the invalid block is retrieved as invalid
assert res.status_code == 200
assert block_invalid.id in res.json
assert len(res.json) == 1
res = client.get('{}?transaction_id={}&status={}'.format(BLOCKS_ENDPOINT, tx.id, Bigchain.BLOCK_UNDECIDED))
# test if no blocks are retrieved as undecided
assert res.status_code == 200
assert len(res.json) == 0
res = client.get('{}?transaction_id={}&status={}'.format(BLOCKS_ENDPOINT, tx.id, Bigchain.BLOCK_VALID))
# test if the valid block is retrieved as valid
assert res.status_code == 200
assert block_valid.id in res.json
assert len(res.json) == 1
@pytest.mark.bdb
def test_get_blocks_by_txid_endpoint_returns_empty_list_not_found(client):
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=')
assert res.status_code == 200
assert len(res.json) == 0
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=123')
assert res.status_code == 200
assert len(res.json) == 0
@pytest.mark.bdb
def test_get_blocks_by_txid_endpoint_returns_400_bad_query_params(client):
res = client.get(BLOCKS_ENDPOINT)
assert res.status_code == 400
res = client.get(BLOCKS_ENDPOINT + '?ts_id=123')
assert res.status_code == 400
assert res.json == {
'message': {
'transaction_id': 'Missing required parameter in the JSON body or the post body or the query string'
}
}
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=123&foo=123')
assert res.status_code == 400
assert res.json == {
'message': 'Unknown arguments: foo'
}
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=123&status=123')
assert res.status_code == 400
assert res.json == {
'message': {
'status': '123 is not a valid choice'
}
}
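# Query shapes exercised above (illustrative comment sketch, not part of the
# test suite; the endpoint prefix comes from BLOCKS_ENDPOINT):
#   GET /blocks?transaction_id=<txid>                 -> ids of blocks containing the tx
#   GET /blocks?transaction_id=<txid>&status=<status> -> same list, filtered by vote status
# where <status> is one of Bigchain.BLOCK_VALID, BLOCK_INVALID or BLOCK_UNDECIDED.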
| gpl-3.0 | -8,554,732,350,599,750,000 | 31.741573 | 112 | 0.654084 | false |
javilonas/NCam | cross/android-toolchain/lib/python2.7/hmac.py | 253 | 4531 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
trans_5C = "".join ([chr (x ^ 0x5C) for x in xrange(256)])
trans_36 = "".join ([chr (x ^ 0x36) for x in xrange(256)])
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
# A unique object passed by HMAC.copy() to the HMAC constructor, in order
# that the latter return very quickly. HMAC("") in contrast is quite
# expensive.
_secret_backdoor_key = []
class HMAC:
"""RFC 2104 HMAC class. Also complies with RFC 4231.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
blocksize = 64 # 512-bit HMAC; can be changed in subclasses.
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. *OR*
A hashlib constructor returning a new hash object.
Defaults to hashlib.md5.
"""
if key is _secret_backdoor_key: # cheap
return
if digestmod is None:
import hashlib
digestmod = hashlib.md5
if hasattr(digestmod, '__call__'):
self.digest_cons = digestmod
else:
self.digest_cons = lambda d='': digestmod.new(d)
self.outer = self.digest_cons()
self.inner = self.digest_cons()
self.digest_size = self.inner.digest_size
if hasattr(self.inner, 'block_size'):
blocksize = self.inner.block_size
if blocksize < 16:
# Very low blocksize, most likely a legacy value like
# Lib/sha.py and Lib/md5.py have.
_warnings.warn('block_size of %d seems too small; using our '
'default of %d.' % (blocksize, self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
else:
_warnings.warn('No block_size attribute on given digest object; '
'Assuming %d.' % (self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
if len(key) > blocksize:
key = self.digest_cons(key).digest()
key = key + chr(0) * (blocksize - len(key))
self.outer.update(key.translate(trans_5C))
self.inner.update(key.translate(trans_36))
if msg is not None:
self.update(msg)
## def clear(self):
## raise NotImplementedError, "clear() method not available in HMAC."
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
other = self.__class__(_secret_backdoor_key)
other.digest_cons = self.digest_cons
other.digest_size = self.digest_size
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def _current(self):
"""Return a hash object for the current state.
To be used only internally with digest() and hexdigest().
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self._current()
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
h = self._current()
return h.hexdigest()
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
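# Minimal usage sketch (illustrative comment, not part of the module; the key,
# message and digestmod values below are assumptions):
#
#   import hashlib
#   mac = new("secret-key", "the message", hashlib.sha256)
#   mac.hexdigest()   # hex-encoded HMAC-SHA256 tag for "the message"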
| gpl-3.0 | -7,128,551,305,846,672,000 | 33.067669 | 78 | 0.594791 | false |
shawnadelic/shuup | shuup/customer_group_pricing/admin_form_part.py | 2 | 4175 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django import forms
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from shuup.admin.form_part import FormPart, TemplatedFormDef
from shuup.core.models import ContactGroup, Shop
from shuup.customer_group_pricing.models import CgpPrice
class CustomerGroupPricingForm(forms.Form):
def __init__(self, **kwargs):
self.product = kwargs.pop("product", None)
super(CustomerGroupPricingForm, self).__init__(**kwargs)
self.shops = []
self.groups = []
if self.product:
self._build_fields()
def _build_fields(self):
self.shops = list(Shop.objects.all())
self.groups = list(ContactGroup.objects.filter(
Q(show_pricing=True) |
Q(
id__in=CgpPrice.objects.filter(product=self.product)
.values_list("group_id", flat=True).distinct()
)
))
prices_by_shop_and_group = dict(
((shop_id or 0, group_id or 0), price)
for (shop_id, group_id, price)
in CgpPrice.objects.filter(product=self.product)
.values_list("shop_id", "group_id", "price_value")
)
for group in self.groups:
for shop in self.shops:
shop_group_id_tuple = self._get_id_tuple(shop, group)
name = self._get_field_name(shop_group_id_tuple)
price = prices_by_shop_and_group.get(shop_group_id_tuple)
price_field = forms.DecimalField(
min_value=0, initial=price,
label=(_("Price (%(shop)s/%(group)s)") %
{"shop": shop, "group": group}),
required=False
)
self.fields[name] = price_field
def _get_id_tuple(self, shop, group):
return (
shop.id if shop else 0,
group.id if group else 0
)
def _get_field_name(self, id_tuple):
return "s_%d_g_%d" % id_tuple
def _process_single_save(self, shop, group):
shop_group_id_tuple = self._get_id_tuple(shop, group)
name = self._get_field_name(shop_group_id_tuple)
value = self.cleaned_data.get(name)
clear = (value is None or value < 0)
if clear:
CgpPrice.objects.filter(product=self.product, group=group, shop=shop).delete()
else:
(spp, created) = CgpPrice.objects.get_or_create(
product=self.product, group=group, shop=shop,
defaults={'price_value': value})
if not created:
spp.price_value = value
spp.save()
def save(self):
if not self.has_changed(): # No changes, so no need to do anything.
# (This is required because `.full_clean()` would create an empty `.cleaned_data`,
# but short-circuits out if `has_changed()` returns false.
            #  That, in turn, would cause `self.cleaned_data.get(name)` in `_process_single_save`
# to return Nones, clearing out all prices. Oops.)
return
for group in self.groups:
for shop in self.shops:
self._process_single_save(shop, group)
def get_shop_group_field(self, shop, group):
shop_group_id_tuple = self._get_id_tuple(shop, group)
name = self._get_field_name(shop_group_id_tuple)
return self[name]
class CustomerGroupPricingFormPart(FormPart):
priority = 10
def get_form_defs(self):
yield TemplatedFormDef(
name="customer_group_pricing",
form_class=CustomerGroupPricingForm,
template_name="shuup/admin/customer_group_pricing/form_part.jinja",
required=False,
kwargs={"product": self.object}
)
def form_valid(self, form):
form["customer_group_pricing"].save()
| agpl-3.0 | 852,682,496,811,656,600 | 36.276786 | 97 | 0.582754 | false |
tejasnikumbh/ThesisCode | lib/python2.7/site-packages/numpy/core/tests/test_arrayprint.py | 69 | 6858 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.testing import *
from numpy.compat import sixu
class TestArrayRepr(object):
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
assert_equal(repr(x), 'array([ nan, inf])')
class TestComplexArray(TestCase):
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
dtypes = [np.complex64, np.cdouble, np.clongdouble]
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
wanted = [
'[ 0.+0.j]', '[ 0.+0.j]', '[ 0.0+0.0j]',
'[ 0.+1.j]', '[ 0.+1.j]', '[ 0.0+1.0j]',
'[ 0.-1.j]', '[ 0.-1.j]', '[ 0.0-1.0j]',
'[ 0.+infj]', '[ 0.+infj]', '[ 0.0+infj]',
'[ 0.-infj]', '[ 0.-infj]', '[ 0.0-infj]',
'[ 0.+nanj]', '[ 0.+nanj]', '[ 0.0+nanj]',
'[ 1.+0.j]', '[ 1.+0.j]', '[ 1.0+0.0j]',
'[ 1.+1.j]', '[ 1.+1.j]', '[ 1.0+1.0j]',
'[ 1.-1.j]', '[ 1.-1.j]', '[ 1.0-1.0j]',
'[ 1.+infj]', '[ 1.+infj]', '[ 1.0+infj]',
'[ 1.-infj]', '[ 1.-infj]', '[ 1.0-infj]',
'[ 1.+nanj]', '[ 1.+nanj]', '[ 1.0+nanj]',
'[-1.+0.j]', '[-1.+0.j]', '[-1.0+0.0j]',
'[-1.+1.j]', '[-1.+1.j]', '[-1.0+1.0j]',
'[-1.-1.j]', '[-1.-1.j]', '[-1.0-1.0j]',
'[-1.+infj]', '[-1.+infj]', '[-1.0+infj]',
'[-1.-infj]', '[-1.-infj]', '[-1.0-infj]',
'[-1.+nanj]', '[-1.+nanj]', '[-1.0+nanj]',
'[ inf+0.j]', '[ inf+0.j]', '[ inf+0.0j]',
'[ inf+1.j]', '[ inf+1.j]', '[ inf+1.0j]',
'[ inf-1.j]', '[ inf-1.j]', '[ inf-1.0j]',
'[ inf+infj]', '[ inf+infj]', '[ inf+infj]',
'[ inf-infj]', '[ inf-infj]', '[ inf-infj]',
'[ inf+nanj]', '[ inf+nanj]', '[ inf+nanj]',
'[-inf+0.j]', '[-inf+0.j]', '[-inf+0.0j]',
'[-inf+1.j]', '[-inf+1.j]', '[-inf+1.0j]',
'[-inf-1.j]', '[-inf-1.j]', '[-inf-1.0j]',
'[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
'[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
'[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
'[ nan+0.j]', '[ nan+0.j]', '[ nan+0.0j]',
'[ nan+1.j]', '[ nan+1.j]', '[ nan+1.0j]',
'[ nan-1.j]', '[ nan-1.j]', '[ nan-1.0j]',
'[ nan+infj]', '[ nan+infj]', '[ nan+infj]',
'[ nan-infj]', '[ nan-infj]', '[ nan-infj]',
'[ nan+nanj]', '[ nan+nanj]', '[ nan+nanj]']
for res, val in zip(actual, wanted):
assert_(res == val)
class TestArray2String(TestCase):
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
assert_(np.array2string(a) == '[0 1 2]')
assert_(np.array2string(a, max_line_width=4) == '[0 1\n 2]')
def test_style_keyword(self):
"""This should only apply to 0-D arrays. See #1218."""
stylestr = np.array2string(np.array(1.5),
style=lambda x: "Value in 0-D array: " + str(x))
assert_(stylestr == 'Value in 0-D array: 1.5')
def test_format_function(self):
"""Test custom format function for each element in array."""
def _format_function(x):
if np.abs(x) < 1:
return '.'
elif np.abs(x) < 2:
return 'o'
else:
return 'O'
x = np.arange(3)
if sys.version_info[0] >= 3:
x_hex = "[0x0 0x1 0x2]"
x_oct = "[0o0 0o1 0o2]"
else:
x_hex = "[0x0L 0x1L 0x2L]"
x_oct = "[0L 01L 02L]"
assert_(np.array2string(x, formatter={'all':_format_function}) == \
"[. o O]")
assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==\
"[. o O]")
assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) == \
"[0.0000 1.0000 2.0000]")
assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}), \
x_hex)
assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}), \
x_oct)
x = np.arange(3.)
assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) == \
"[0.00 1.00 2.00]")
assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) == \
"[0.00 1.00 2.00]")
s = np.array(['abc', 'def'])
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == \
'[abcabc defdef]')
class TestPrintOptions:
"""Test getting and setting global print options."""
def setUp(self):
self.oldopts = np.get_printoptions()
def tearDown(self):
np.set_printoptions(**self.oldopts)
def test_basic(self):
x = np.array([1.5, 0, 1.234567890])
assert_equal(repr(x), "array([ 1.5 , 0. , 1.23456789])")
np.set_printoptions(precision=4)
assert_equal(repr(x), "array([ 1.5 , 0. , 1.2346])")
def test_formatter(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
def test_formatter_reset(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'all':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'int':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int_kind':None})
assert_equal(repr(x), "array([0, 1, 2])")
x = np.arange(3.)
np.set_printoptions(formatter={'float':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
np.set_printoptions(formatter={'float_kind':None})
assert_equal(repr(x), "array([ 0., 1., 2.])")
def test_unicode_object_array():
import sys
if sys.version_info[0] >= 3:
expected = "array(['é'], dtype=object)"
else:
expected = "array([u'\\xe9'], dtype=object)"
x = np.array([sixu('\xe9')], dtype=object)
assert_equal(repr(x), expected)
if __name__ == "__main__":
run_module_suite()
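# Behavior illustrated above, in short (comment sketch, not part of the suite):
#   np.array2string(np.arange(3), formatter={'all': lambda x: str(x - 1)})  # -> '[-1 0 1]'
#   np.set_printoptions(precision=4)  # floats now repr with 4 digits, as in TestPrintOptions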
| mit | -3,257,617,382,865,122,000 | 40.05988 | 86 | 0.452384 | false |
jyr/opentumblr | simplejson/decoder.py | 317 | 12404 | """Implementation of JSONDecoder
"""
import re
import sys
import struct
from simplejson.scanner import make_scanner
try:
from simplejson._speedups import scanstring as c_scanstring
except ImportError:
c_scanstring = None
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
# Note that this function is called from _speedups
lineno, colno = linecol(doc, pos)
if end is None:
#fmt = '{0}: line {1} column {2} (char {3})'
#return fmt.format(msg, lineno, colno, pos)
fmt = '%s: line %d column %d (char %d)'
return fmt % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
#fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
#return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character %r at" % (terminator,)
#msg = "Invalid control character {0!r} at".format(terminator)
raise ValueError(errmsg(msg, s, end))
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
msg = "Invalid \\escape: " + repr(esc)
raise ValueError(errmsg(msg, s, end))
end += 1
else:
# Unicode escape sequence
esc = s[end + 1:end + 5]
next_end = end + 5
if len(esc) != 4:
msg = "Invalid \\uXXXX escape"
raise ValueError(errmsg(msg, s, end))
uni = int(esc, 16)
# Check for surrogate pair on UCS-4 systems
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise ValueError(errmsg(msg, s, end))
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise ValueError(errmsg(msg, s, end))
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
char = unichr(uni)
end = next_end
# Append the unescaped character
_append(char)
return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
pairs = {}
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
return pairs, end + 1
elif nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True):
"""``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
        Note that currently only encodings that are a superset of ASCII work;
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers
are encountered.
"""
self.encoding = encoding
self.object_hook = object_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.scan_once = make_scanner(self)
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, idx=0):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
try:
obj, end = self.scan_once(s, idx)
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
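# Usage sketch (illustrative comment, not part of the library):
#
#   from simplejson.decoder import JSONDecoder
#   decoder = JSONDecoder()
#   decoder.decode('{"name": "spam", "count": 3}')   # -> {u'name': u'spam', u'count': 3}
#   decoder.raw_decode('[1, 2, 3] trailing text')    # -> ([1, 2, 3], 9)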
| mit | -5,047,558,435,357,355,000 | 34.039548 | 108 | 0.524589 | false |
metacloud/python-keystoneclient | keystoneclient/tests/v3/test_projects.py | 3 | 2261 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import httpretty
from keystoneclient.tests.v3 import utils
from keystoneclient.v3 import projects
class ProjectTests(utils.TestCase, utils.CrudTests):
def setUp(self):
super(ProjectTests, self).setUp()
self.key = 'project'
self.collection_key = 'projects'
self.model = projects.Project
self.manager = self.client.projects
def new_ref(self, **kwargs):
kwargs = super(ProjectTests, self).new_ref(**kwargs)
kwargs.setdefault('domain_id', uuid.uuid4().hex)
kwargs.setdefault('enabled', True)
kwargs.setdefault('name', uuid.uuid4().hex)
return kwargs
@httpretty.activate
def test_list_projects_for_user(self):
ref_list = [self.new_ref(), self.new_ref()]
user_id = uuid.uuid4().hex
self.stub_entity(httpretty.GET,
['users', user_id, self.collection_key],
entity=ref_list)
returned_list = self.manager.list(user=user_id)
self.assertEqual(len(ref_list), len(returned_list))
[self.assertIsInstance(r, self.model) for r in returned_list]
@httpretty.activate
def test_list_projects_for_domain(self):
ref_list = [self.new_ref(), self.new_ref()]
domain_id = uuid.uuid4().hex
self.stub_entity(httpretty.GET, [self.collection_key],
entity=ref_list)
returned_list = self.manager.list(domain=domain_id)
self.assertEqual(len(ref_list), len(returned_list))
[self.assertIsInstance(r, self.model) for r in returned_list]
self.assertEqual(httpretty.last_request().querystring,
{'domain_id': [domain_id]})
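# Rough mapping of the manager calls exercised above (sketch, not part of the
# tests; `keystone` stands for an authenticated keystoneclient.v3.client.Client):
#   keystone.projects.list(user=user_id)      # GET /users/<user_id>/projects
#   keystone.projects.list(domain=domain_id)  # GET /projects?domain_id=<domain_id>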
| apache-2.0 | 3,828,234,561,975,815,700 | 35.467742 | 75 | 0.653251 | false |
aYukiSekiguchi/ACCESS-Chromium | tools/valgrind/asan/chrome_tests.py | 1 | 17088 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Runs various chrome tests through asan_test.py.
Most of this code is copied from ../valgrind/chrome_tests.py.
TODO(glider): put common functions to a standalone module.
'''
import glob
import logging
import optparse
import os
import stat
import sys
import logging_utils
import path_utils
import common
import asan_test
class TestNotFound(Exception): pass
def Dir2IsNewer(dir1, dir2):
if dir2 is None or not os.path.isdir(dir2):
return False
if dir1 is None or not os.path.isdir(dir1):
return True
return os.stat(dir2)[stat.ST_MTIME] > os.stat(dir1)[stat.ST_MTIME]
def FindNewestDir(dirs):
newest_dir = None
for dir in dirs:
if Dir2IsNewer(newest_dir, dir):
newest_dir = dir
return newest_dir
def File2IsNewer(file1, file2):
if file2 is None or not os.path.isfile(file2):
return False
if file1 is None or not os.path.isfile(file1):
return True
return os.stat(file2)[stat.ST_MTIME] > os.stat(file1)[stat.ST_MTIME]
def FindDirContainingNewestFile(dirs, file):
"""Searches for the directory containing the newest copy of |file|.
Args:
dirs: A list of paths to the directories to search among.
file: A string containing the file name to search.
Returns:
    The string representing the directory containing the newest copy of
|file|.
Raises:
IOError: |file| was not found.
"""
newest_dir = None
newest_file = None
for dir in dirs:
the_file = os.path.join(dir, file)
if File2IsNewer(newest_file, the_file):
newest_dir = dir
newest_file = the_file
if newest_dir is None:
raise IOError("cannot find file %s anywhere, have you built it?" % file)
return newest_dir
class ChromeTests(object):
'''This class is derived from the chrome_tests.py file in ../purify/.
'''
def __init__(self, options, args, test):
# The known list of tests.
# Recognise the original abbreviations as well as full executable names.
self._test_list = {
"base": self.TestBase, "base_unittests": self.TestBase,
"browser": self.TestBrowser, "browser_tests": self.TestBrowser,
"crypto": self.TestCrypto, "crypto_unittests": self.TestCrypto,
"googleurl": self.TestGURL, "googleurl_unittests": self.TestGURL,
"content": self.TestContent, "content_unittests": self.TestContent,
"courgette": self.TestCourgette,
"courgette_unittests": self.TestCourgette,
"ipc": self.TestIpc, "ipc_tests": self.TestIpc,
"layout": self.TestLayout, "layout_tests": self.TestLayout,
"media": self.TestMedia, "media_unittests": self.TestMedia,
"net": self.TestNet, "net_unittests": self.TestNet,
"printing": self.TestPrinting, "printing_unittests": self.TestPrinting,
"remoting": self.TestRemoting, "remoting_unittests": self.TestRemoting,
"startup": self.TestStartup, "startup_tests": self.TestStartup,
"sync": self.TestSync, "sync_unit_tests": self.TestSync,
"test_shell": self.TestTestShell, "test_shell_tests": self.TestTestShell,
"ui": self.TestUI, "ui_tests": self.TestUI,
"unit": self.TestUnit, "unit_tests": self.TestUnit,
"views": self.TestViews, "views_unittests": self.TestViews,
"sql": self.TestSql, "sql_unittests": self.TestSql,
"ui_unit": self.TestUIUnit, "ui_unittests": self.TestUIUnit,
"gfx": self.TestGfx, "gfx_unittests": self.TestGfx,
}
if test not in self._test_list:
raise TestNotFound("Unknown test: %s" % test)
self._options = options
self._args = args
self._test = test
script_dir = path_utils.ScriptDir()
# Compute the top of the tree (the "source dir") from the script dir (where
# this script lives). We assume that the script dir is in tools/asan/
# relative to the top of the tree.
self._source_dir = os.path.dirname(os.path.dirname(script_dir))
# Since this path is used for string matching, make sure it's always
# an absolute Unix-style path.
self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
asan_test_script = os.path.join(script_dir, "asan_test.py")
self._command_preamble = [asan_test_script]
def _DefaultCommand(self, module, exe=None, asan_test_args=None):
'''Generates the default command array that most tests will use.
Args:
module: The module name (corresponds to the dir in src/ where the test
data resides).
exe: The executable name.
asan_test_args: additional arguments to append to the command line.
Returns:
A string with the command to run the test.
'''
if not self._options.build_dir:
dirs = [
os.path.join(self._source_dir, "xcodebuild", "Debug"),
os.path.join(self._source_dir, "out", "Debug"),
]
if exe:
self._options.build_dir = FindDirContainingNewestFile(dirs, exe)
else:
self._options.build_dir = FindNewestDir(dirs)
cmd = list(self._command_preamble)
if asan_test_args != None:
for arg in asan_test_args:
cmd.append(arg)
if exe:
cmd.append(os.path.join(self._options.build_dir, exe))
    # Show elapsed time so we can find the slowpokes.
cmd.append("--gtest_print_time")
if self._options.gtest_repeat:
cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
return cmd
def Suppressions(self):
'''Builds the list of available suppressions files.'''
ret = []
directory = path_utils.ScriptDir()
suppression_file = os.path.join(directory, "suppressions.txt")
if os.path.exists(suppression_file):
ret.append(suppression_file)
suppression_file = os.path.join(directory, "suppressions_linux.txt")
if os.path.exists(suppression_file):
ret.append(suppression_file)
return ret
def Run(self):
'''Runs the test specified by command-line argument --test.'''
logging.info("running test %s" % (self._test))
return self._test_list[self._test]()
def _ReadGtestFilterFile(self, name, cmd):
'''Reads files which contain lists of tests to filter out with
--gtest_filter and appends the command-line option to |cmd|.
Args:
name: the test executable name.
cmd: the test running command line to be modified.
'''
filters = []
directory = path_utils.ScriptDir()
gtest_filter_files = [
os.path.join(directory, name + ".gtest-asan.txt"),
# TODO(glider): Linux vs. CrOS?
]
logging.info("Reading gtest exclude filter files:")
for filename in gtest_filter_files:
# strip the leading absolute path (may be very long on the bot)
# and the following / or \.
readable_filename = filename.replace(self._source_dir, "")[1:]
if not os.path.exists(filename):
logging.info(" \"%s\" - not found" % readable_filename)
continue
logging.info(" \"%s\" - OK" % readable_filename)
f = open(filename, 'r')
for line in f.readlines():
if line.startswith("#") or line.startswith("//") or line.isspace():
continue
line = line.rstrip()
filters.append(line)
gtest_filter = self._options.gtest_filter
if len(filters):
if gtest_filter:
gtest_filter += ":"
if gtest_filter.find("-") < 0:
gtest_filter += "-"
else:
gtest_filter = "-"
gtest_filter += ":".join(filters)
if gtest_filter:
cmd.append("--gtest_filter=%s" % gtest_filter)
def SimpleTest(self, module, name, asan_test_args=None, cmd_args=None):
'''Builds the command line and runs the specified test.
Args:
module: The module name (corresponds to the dir in src/ where the test
data resides).
name: The executable name.
asan_test_args: Additional command line args for asan.
cmd_args: Additional command line args for the test.
'''
cmd = self._DefaultCommand(module, name, asan_test_args)
supp = self.Suppressions()
self._ReadGtestFilterFile(name, cmd)
if cmd_args:
cmd.extend(["--"])
cmd.extend(cmd_args)
# Sets LD_LIBRARY_PATH to the build folder so external libraries can be
# loaded.
if (os.getenv("LD_LIBRARY_PATH")):
os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
self._options.build_dir))
else:
os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
return asan_test.RunTool(cmd, supp, module)
def TestBase(self):
return self.SimpleTest("base", "base_unittests")
def TestBrowser(self):
return self.SimpleTest("chrome", "browser_tests")
def TestCrypto(self):
return self.SimpleTest("crypto", "crypto_unittests")
def TestGURL(self):
return self.SimpleTest("chrome", "googleurl_unittests")
def TestContent(self):
return self.SimpleTest("content", "content_unittests")
def TestCourgette(self):
return self.SimpleTest("courgette", "courgette_unittests")
def TestMedia(self):
return self.SimpleTest("chrome", "media_unittests")
def TestPrinting(self):
return self.SimpleTest("chrome", "printing_unittests")
def TestRemoting(self):
return self.SimpleTest("chrome", "remoting_unittests")
def TestSync(self):
return self.SimpleTest("chrome", "sync_unit_tests")
def TestIpc(self):
return self.SimpleTest("ipc", "ipc_tests")
def TestNet(self):
return self.SimpleTest("net", "net_unittests")
def TestStartup(self):
# We don't need the performance results, we're just looking for pointer
# errors, so set number of iterations down to the minimum.
os.putenv("STARTUP_TESTS_NUMCYCLES", "1")
logging.info("export STARTUP_TESTS_NUMCYCLES=1");
return self.SimpleTest("chrome", "startup_tests")
def TestTestShell(self):
return self.SimpleTest("webkit", "test_shell_tests")
def TestUnit(self):
return self.SimpleTest("chrome", "unit_tests")
def TestViews(self):
return self.SimpleTest("views", "views_unittests")
def TestSql(self):
return self.SimpleTest("chrome", "sql_unittests")
def TestUIUnit(self):
return self.SimpleTest("chrome", "ui_unittests")
def TestGfx(self):
return self.SimpleTest("chrome", "gfx_unittests")
def TestUI(self):
return self.SimpleTest("chrome", "ui_tests",
cmd_args=[
"--ui-test-action-timeout=80000",
"--ui-test-action-max-timeout=180000"])
def TestLayoutChunk(self, chunk_num, chunk_size):
'''Runs tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size).
Wrap around to beginning of list at end. If chunk_size is zero, run all
tests in the list once. If a text file is given as argument, it is used as
the list of tests.
'''
# Build the ginormous commandline in 'cmd'.
# It's going to be roughly
# python asan_test.py ... python run_webkit_tests.py ...
# but we'll use the --indirect flag to asan_test.py
# to avoid asaning python.
# Start by building the asan_test.py commandline.
cmd = self._DefaultCommand("webkit")
# Now build script_cmd, the run_webkits_tests.py commandline
# Store each chunk in its own directory so that we can find the data later
chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
test_shell = os.path.join(self._options.build_dir, "test_shell")
out_dir = os.path.join(path_utils.ScriptDir(), "latest")
out_dir = os.path.join(out_dir, chunk_dir)
if os.path.exists(out_dir):
old_files = glob.glob(os.path.join(out_dir, "*.txt"))
for f in old_files:
os.remove(f)
else:
os.makedirs(out_dir)
script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
"run_webkit_tests.py")
script_cmd = ["python", script, "--run-singly", "-v",
"--noshow-results", "--time-out-ms=200000",
"--nocheck-sys-deps"]
# Pass build mode to run_webkit_tests.py. We aren't passed it directly,
# so parse it out of build_dir. run_webkit_tests.py can only handle
# the two values "Release" and "Debug".
# TODO(Hercules): unify how all our scripts pass around build mode
# (--mode / --target / --build_dir / --debug)
if self._options.build_dir.endswith("Debug"):
script_cmd.append("--debug");
if (chunk_size > 0):
script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
if len(self._args):
# if the arg is a txt file, then treat it as a list of tests
if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
script_cmd.append("--test-list=%s" % self._args[0])
else:
script_cmd.extend(self._args)
self._ReadGtestFilterFile("layout", script_cmd)
# Now run script_cmd with the wrapper in cmd
cmd.extend(["--"])
cmd.extend(script_cmd)
supp = self.Suppressions()
return asan_test.RunTool(cmd, supp, "layout")
def TestLayout(self):
'''Runs the layout tests.'''
# A "chunk file" is maintained in the local directory so that each test
# runs a slice of the layout tests of size chunk_size that increments with
# each run. Since tests can be added and removed from the layout tests at
# any time, this is not going to give exact coverage, but it will allow us
# to continuously run small slices of the layout tests under purify rather
# than having to run all of them in one shot.
chunk_size = self._options.num_tests
if (chunk_size == 0):
return self.TestLayoutChunk(0, 0)
chunk_num = 0
chunk_file = os.path.join("asan_layout_chunk.txt")
logging.info("Reading state from " + chunk_file)
try:
f = open(chunk_file)
if f:
str = f.read()
if len(str):
chunk_num = int(str)
# This should be enough so that we have a couple of complete runs
# of test data stored in the archive (although note that when we loop
# that we almost guaranteed won't be at the end of the test list)
if chunk_num > 10000:
chunk_num = 0
f.close()
except IOError, (errno, strerror):
logging.error("error reading from file %s (%d, %s)" % (chunk_file,
errno, strerror))
ret = self.TestLayoutChunk(chunk_num, chunk_size)
# Wait until after the test runs to completion to write out the new chunk
# number. This way, if the bot is killed, we'll start running again from
# the current chunk rather than skipping it.
logging.info("Saving state to " + chunk_file)
try:
f = open(chunk_file, "w")
chunk_num += 1
f.write("%d" % chunk_num)
f.close()
except IOError, (errno, strerror):
logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
strerror))
# Since we're running small chunks of the layout tests, it's important to
# mark the ones that have errors in them. These won't be visible in the
# summary list for long, but will be useful for someone reviewing this bot.
return ret
def main():
if not sys.platform.startswith(('linux', 'darwin')):
logging.error("AddressSanitizer works only on Linux and Mac OS "
"at the moment.")
return 1
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
"[-t <test> ...]")
parser.disable_interspersed_args()
parser.add_option("-b", "--build_dir",
help="the location of the output of the compiler output")
parser.add_option("-t", "--test", action="append",
help="which test to run")
parser.add_option("", "--gtest_filter",
help="additional arguments to --gtest_filter")
parser.add_option("", "--gtest_repeat",
help="argument for --gtest_repeat")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output - enable debug log messages")
# My machine can do about 120 layout tests/hour in release mode.
# Let's do 30 minutes worth per run.
# The CPU is mostly idle, so perhaps we can raise this when
# we figure out how to run them more efficiently.
parser.add_option("-n", "--num_tests", default=60, type="int",
help="for layout tests: # of subtests per run. 0 for all.")
options, args = parser.parse_args()
if options.verbose:
logging_utils.config_root(logging.DEBUG)
else:
logging_utils.config_root()
if not options.test or not len(options.test):
parser.error("--test not specified")
for t in options.test:
tests = ChromeTests(options, args, t)
ret = tests.Run()
if ret:
return ret
return 0
if __name__ == "__main__":
sys.exit(main())
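# Example invocation (paths and test names are illustrative): run the base and
# net unit test binaries from an out/Debug build under AddressSanitizer:
#
#   tools/valgrind/asan/chrome_tests.py -b out/Debug -t base -t net --gtest_repeat=2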
| bsd-3-clause | -2,208,929,664,391,057,000 | 35.907127 | 80 | 0.637406 | false |
tangjonathan/HKQuiz | node_modules/pryjs/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/filters/__init__.py | 59 | 11588 | # -*- coding: utf-8 -*-
"""
pygments.filters
~~~~~~~~~~~~~~~~
Module containing filter lookup functions and default
filters.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
get_choice_opt, ClassNotFound, OptionError, text_type, string_types
from pygments.plugin import find_plugin_filters
def find_filter_class(filtername):
"""
Lookup a filter by name. Return None if not found.
"""
if filtername in FILTERS:
return FILTERS[filtername]
for name, cls in find_plugin_filters():
if name == filtername:
return cls
return None
def get_filter_by_name(filtername, **options):
"""
Return an instantiated filter. Options are passed to the filter
initializer if wanted. Raise a ClassNotFound if not found.
"""
cls = find_filter_class(filtername)
if cls:
return cls(**options)
else:
raise ClassNotFound('filter %r not found' % filtername)
def get_all_filters():
"""
Return a generator of all filter names.
"""
for name in FILTERS:
yield name
for name, _ in find_plugin_filters():
yield name
def _replace_special(ttype, value, regex, specialttype,
replacefunc=lambda x: x):
last = 0
for match in regex.finditer(value):
start, end = match.start(), match.end()
if start != last:
yield ttype, value[last:start]
yield specialttype, replacefunc(value[start:end])
last = end
if last != len(value):
yield ttype, value[last:]
class CodeTagFilter(Filter):
"""
Highlight special code tags in comments and docstrings.
Options accepted:
`codetags` : list of strings
A list of strings that are flagged as code tags. The default is to
highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
tags = get_list_opt(options, 'codetags',
['XXX', 'TODO', 'BUG', 'NOTE'])
self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
re.escape(tag) for tag in tags if tag
]))
def filter(self, lexer, stream):
regex = self.tag_re
for ttype, value in stream:
if ttype in String.Doc or \
ttype in Comment and \
ttype not in Comment.Preproc:
for sttype, svalue in _replace_special(ttype, value, regex,
Comment.Special):
yield sttype, svalue
else:
yield ttype, value
class KeywordCaseFilter(Filter):
"""
Convert keywords to lowercase or uppercase or capitalize them, which
means first letter uppercase, rest lowercase.
This can be useful e.g. if you highlight Pascal code and want to adapt the
code to your styleguide.
Options accepted:
`case` : string
The casing to convert keywords to. Must be one of ``'lower'``,
``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
case = get_choice_opt(options, 'case', ['lower', 'upper', 'capitalize'], 'lower')
self.convert = getattr(text_type, case)
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Keyword:
yield ttype, self.convert(value)
else:
yield ttype, value
class NameHighlightFilter(Filter):
"""
Highlight a normal Name (and Name.*) token with a different token type.
Example::
filter = NameHighlightFilter(
names=['foo', 'bar', 'baz'],
tokentype=Name.Function,
)
This would highlight the names "foo", "bar" and "baz"
as functions. `Name.Function` is the default token type.
Options accepted:
`names` : list of strings
A list of names that should be given the different token type.
There is no default.
`tokentype` : TokenType or string
A token type or a string containing a token type name that is
used for highlighting the strings in `names`. The default is
`Name.Function`.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.names = set(get_list_opt(options, 'names', []))
tokentype = options.get('tokentype')
if tokentype:
self.tokentype = string_to_tokentype(tokentype)
else:
self.tokentype = Name.Function
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Name and value in self.names:
yield self.tokentype, value
else:
yield ttype, value
class ErrorToken(Exception):
pass
class RaiseOnErrorTokenFilter(Filter):
"""
Raise an exception when the lexer generates an error token.
Options accepted:
`excclass` : Exception class
The exception class to raise.
The default is `pygments.filters.ErrorToken`.
.. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.exception = options.get('excclass', ErrorToken)
try:
# issubclass() will raise TypeError if first argument is not a class
if not issubclass(self.exception, Exception):
raise TypeError
except TypeError:
raise OptionError('excclass option is not an exception class')
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype is Error:
raise self.exception(value)
yield ttype, value
class VisibleWhitespaceFilter(Filter):
"""
Convert tabs, newlines and/or spaces to visible characters.
Options accepted:
`spaces` : string or bool
If this is a one-character string, spaces will be replaces by this string.
If it is another true value, spaces will be replaced by ``·`` (unicode
MIDDLE DOT). If it is a false value, spaces will not be replaced. The
default is ``False``.
`tabs` : string or bool
The same as for `spaces`, but the default replacement character is ``»``
(unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
is ``False``. Note: this will not work if the `tabsize` option for the
lexer is nonzero, as tabs will already have been expanded then.
`tabsize` : int
If tabs are to be replaced by this filter (see the `tabs` option), this
is the total number of characters that a tab should be expanded to.
The default is ``8``.
`newlines` : string or bool
The same as for `spaces`, but the default replacement character is ``¶``
(unicode PILCROW SIGN). The default value is ``False``.
`wstokentype` : bool
If true, give whitespace the special `Whitespace` token type. This allows
styling the visible whitespace differently (e.g. greyed out), but it can
disrupt background colors. The default is ``True``.
.. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
for name, default in [('spaces', u'·'),
('tabs', u'»'),
('newlines', u'¶')]:
opt = options.get(name, False)
if isinstance(opt, string_types) and len(opt) == 1:
setattr(self, name, opt)
else:
setattr(self, name, (opt and default or ''))
tabsize = get_int_opt(options, 'tabsize', 8)
if self.tabs:
self.tabs += ' '*(tabsize-1)
if self.newlines:
self.newlines += '\n'
self.wstt = get_bool_opt(options, 'wstokentype', True)
def filter(self, lexer, stream):
if self.wstt:
spaces = self.spaces or ' '
tabs = self.tabs or '\t'
newlines = self.newlines or '\n'
regex = re.compile(r'\s')
def replacefunc(wschar):
if wschar == ' ':
return spaces
elif wschar == '\t':
return tabs
elif wschar == '\n':
return newlines
return wschar
for ttype, value in stream:
for sttype, svalue in _replace_special(ttype, value, regex,
Whitespace, replacefunc):
yield sttype, svalue
else:
spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
# simpler processing
for ttype, value in stream:
if spaces:
value = value.replace(' ', spaces)
if tabs:
value = value.replace('\t', tabs)
if newlines:
value = value.replace('\n', newlines)
yield ttype, value
class GobbleFilter(Filter):
"""
Gobbles source code lines (eats initial characters).
This filter drops the first ``n`` characters off every line of code. This
may be useful when the source code fed to the lexer is indented by a fixed
amount of space that isn't desired in the output.
Options accepted:
`n` : int
The number of characters to gobble.
.. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.n = get_int_opt(options, 'n', 0)
def gobble(self, value, left):
if left < len(value):
return value[left:], 0
else:
return '', left - len(value)
def filter(self, lexer, stream):
n = self.n
left = n # How many characters left to gobble.
for ttype, value in stream:
# Remove ``left`` tokens from first line, ``n`` from all others.
parts = value.split('\n')
(parts[0], left) = self.gobble(parts[0], left)
for i in range(1, len(parts)):
(parts[i], left) = self.gobble(parts[i], n)
value = '\n'.join(parts)
if value != '':
yield ttype, value
class TokenMergeFilter(Filter):
"""
Merges consecutive tokens with the same token type in the output stream of a
lexer.
.. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
def filter(self, lexer, stream):
current_type = None
current_value = None
for ttype, value in stream:
if ttype is current_type:
current_value += value
else:
if current_type is not None:
yield current_type, current_value
current_type = ttype
current_value = value
if current_type is not None:
yield current_type, current_value
FILTERS = {
'codetagify': CodeTagFilter,
'keywordcase': KeywordCaseFilter,
'highlight': NameHighlightFilter,
'raiseonerror': RaiseOnErrorTokenFilter,
'whitespace': VisibleWhitespaceFilter,
'gobble': GobbleFilter,
'tokenmerge': TokenMergeFilter,
}
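# Usage sketch (illustrative comment, not part of this module): filters are
# normally attached to a lexer before highlighting, either as instances or by
# registered name, e.g.
#
#   from pygments.lexers import PythonLexer
#   lexer = PythonLexer()
#   lexer.add_filter(KeywordCaseFilter(case='upper'))
#   lexer.add_filter('codetagify', codetags=['TODO', 'FIXME'])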
| mit | 1,021,806,336,150,879,900 | 31.351955 | 89 | 0.572267 | false |
marc-sensenich/ansible | lib/ansible/modules/network/fortios/fortios_firewall_vip46.py | 7 | 15899 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_vip46
short_description: Configure IPv4 to IPv6 virtual IPs in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
      allowing the user to set and modify firewall feature and vip46 category.
      Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
            - FortiOS or FortiGate IP address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
firewall_vip46:
description:
- Configure IPv4 to IPv6 virtual IPs.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
arp-reply:
description:
- Enable ARP reply.
choices:
- disable
- enable
color:
description:
- Color of icon on the GUI.
comment:
description:
- Comment.
extip:
description:
- Start-external-IP [-end-external-IP].
extport:
description:
- External service port.
id:
description:
- Custom defined id.
ldb-method:
description:
- Load balance method.
choices:
- static
- round-robin
- weighted
- least-session
- least-rtt
- first-alive
mappedip:
description:
- Start-mapped-IP [-end mapped-IP].
mappedport:
description:
- Mapped service port.
monitor:
description:
- Health monitors.
suboptions:
name:
description:
- Health monitor name. Source firewall.ldb-monitor.name.
required: true
name:
description:
- VIP46 name.
required: true
portforward:
description:
- Enable port forwarding.
choices:
- disable
- enable
protocol:
description:
- Mapped port protocol.
choices:
- tcp
- udp
realservers:
description:
- Real servers.
suboptions:
client-ip:
description:
- Restrict server to a client IP in this range.
healthcheck:
description:
- Per server health check.
choices:
- disable
- enable
- vip
holddown-interval:
description:
- Hold down interval.
id:
description:
- Real server ID.
required: true
ip:
description:
- Mapped server IPv6.
max-connections:
description:
- Maximum number of connections allowed to server.
monitor:
description:
- Health monitors. Source firewall.ldb-monitor.name.
port:
description:
- Mapped server port.
status:
description:
- Server administrative status.
choices:
- active
- standby
- disable
weight:
description:
- weight
server-type:
description:
- Server type.
choices:
- http
- tcp
- udp
- ip
src-filter:
description:
- Source IP filter (x.x.x.x/x).
suboptions:
range:
description:
- Src-filter range.
required: true
type:
description:
- "VIP type: static NAT or server load balance."
choices:
- static-nat
- server-load-balance
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPv4 to IPv6 virtual IPs.
fortios_firewall_vip46:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
firewall_vip46:
state: "present"
arp-reply: "disable"
color: "4"
comment: "Comment."
extip: "<your_own_value>"
extport: "<your_own_value>"
id: "8"
ldb-method: "static"
mappedip: "<your_own_value>"
mappedport: "<your_own_value>"
monitor:
-
name: "default_name_13 (source firewall.ldb-monitor.name)"
name: "default_name_14"
portforward: "disable"
protocol: "tcp"
realservers:
-
client-ip: "<your_own_value>"
healthcheck: "disable"
holddown-interval: "20"
id: "21"
ip: "<your_own_value>"
max-connections: "23"
monitor: "<your_own_value> (source firewall.ldb-monitor.name)"
port: "25"
status: "active"
weight: "27"
server-type: "http"
src-filter:
-
range: "<your_own_value>"
type: "static-nat"
uuid: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_firewall_vip46_data(json):
option_list = ['arp-reply', 'color', 'comment',
'extip', 'extport', 'id',
'ldb-method', 'mappedip', 'mappedport',
'monitor', 'name', 'portforward',
'protocol', 'realservers', 'server-type',
'src-filter', 'type', 'uuid']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def firewall_vip46(data, fos):
vdom = data['vdom']
firewall_vip46_data = data['firewall_vip46']
filtered_data = filter_firewall_vip46_data(firewall_vip46_data)
if firewall_vip46_data['state'] == "present":
return fos.set('firewall',
'vip46',
data=filtered_data,
vdom=vdom)
elif firewall_vip46_data['state'] == "absent":
return fos.delete('firewall',
'vip46',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_firewall(data, fos):
login(data)
methodlist = ['firewall_vip46']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": "False"},
"firewall_vip46": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"arp-reply": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"color": {"required": False, "type": "int"},
"comment": {"required": False, "type": "str"},
"extip": {"required": False, "type": "str"},
"extport": {"required": False, "type": "str"},
"id": {"required": False, "type": "int"},
"ldb-method": {"required": False, "type": "str",
"choices": ["static", "round-robin", "weighted",
"least-session", "least-rtt", "first-alive"]},
"mappedip": {"required": False, "type": "str"},
"mappedport": {"required": False, "type": "str"},
"monitor": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"name": {"required": True, "type": "str"},
"portforward": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"protocol": {"required": False, "type": "str",
"choices": ["tcp", "udp"]},
"realservers": {"required": False, "type": "list",
"options": {
"client-ip": {"required": False, "type": "str"},
"healthcheck": {"required": False, "type": "str",
"choices": ["disable", "enable", "vip"]},
"holddown-interval": {"required": False, "type": "int"},
"id": {"required": True, "type": "int"},
"ip": {"required": False, "type": "str"},
"max-connections": {"required": False, "type": "int"},
"monitor": {"required": False, "type": "str"},
"port": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["active", "standby", "disable"]},
"weight": {"required": False, "type": "int"}
}},
"server-type": {"required": False, "type": "str",
"choices": ["http", "tcp", "udp",
"ip"]},
"src-filter": {"required": False, "type": "list",
"options": {
"range": {"required": True, "type": "str"}
}},
"type": {"required": False, "type": "str",
"choices": ["static-nat", "server-load-balance"]},
"uuid": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_firewall(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,122,283,307,252,485,000 | 32.899787 | 109 | 0.469526 | false |
naousse/odoo | openerp/report/int_to_text.py | 442 | 2641 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
unites = {
0: '', 1:'un', 2:'deux', 3:'trois', 4:'quatre', 5:'cinq', 6:'six', 7:'sept', 8:'huit', 9:'neuf',
10:'dix', 11:'onze', 12:'douze', 13:'treize', 14:'quatorze', 15:'quinze', 16:'seize',
21:'vingt et un', 31:'trente et un', 41:'quarante et un', 51:'cinquante et un', 61:'soixante et un',
71:'septante et un', 91:'nonante et un', 80:'quatre-vingts'
}
dizaine = {
1: 'dix', 2:'vingt', 3:'trente',4:'quarante', 5:'cinquante', 6:'soixante', 7:'septante', 8:'quatre-vingt', 9:'nonante'
}
centaine = {
0:'', 1: 'cent', 2:'deux cent', 3:'trois cent',4:'quatre cent', 5:'cinq cent', 6:'six cent', 7:'sept cent', 8:'huit cent', 9:'neuf cent'
}
mille = {
0:'', 1:'mille'
}
def _100_to_text(chiffre):
if chiffre in unites:
return unites[chiffre]
else:
if chiffre%10>0:
return dizaine[chiffre / 10]+'-'+unites[chiffre % 10]
else:
return dizaine[chiffre / 10]
def _1000_to_text(chiffre):
d = _100_to_text(chiffre % 100)
d2 = chiffre/100
if d2>0 and d:
return centaine[d2]+' '+d
elif d2>1 and not d:
return centaine[d2]+'s'
else:
return centaine[d2] or d
def _10000_to_text(chiffre):
if chiffre==0:
return 'zero'
part1 = _1000_to_text(chiffre % 1000)
part2 = mille.get(chiffre / 1000, _1000_to_text(chiffre / 1000)+' mille')
if part2 and part1:
part1 = ' '+part1
return part2+part1
def int_to_text(i):
return _10000_to_text(i)
if __name__=='__main__':
for i in range(1,999999,139):
print int_to_text(i)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -2,550,260,462,241,915,000 | 33.298701 | 140 | 0.579705 | false |
tboyce021/home-assistant | tests/components/calendar/test_init.py | 21 | 1455 | """The tests for the calendar component."""
from datetime import timedelta
from homeassistant.bootstrap import async_setup_component
import homeassistant.util.dt as dt_util
async def test_events_http_api(hass, hass_client):
"""Test the calendar demo view."""
await async_setup_component(hass, "calendar", {"calendar": {"platform": "demo"}})
await hass.async_block_till_done()
client = await hass_client()
response = await client.get("/api/calendars/calendar.calendar_2")
assert response.status == 400
start = dt_util.now()
end = start + timedelta(days=1)
response = await client.get(
"/api/calendars/calendar.calendar_1?start={}&end={}".format(
start.isoformat(), end.isoformat()
)
)
assert response.status == 200
events = await response.json()
assert events[0]["summary"] == "Future Event"
assert events[0]["title"] == "Future Event"
async def test_calendars_http_api(hass, hass_client):
"""Test the calendar demo view."""
await async_setup_component(hass, "calendar", {"calendar": {"platform": "demo"}})
await hass.async_block_till_done()
client = await hass_client()
response = await client.get("/api/calendars")
assert response.status == 200
data = await response.json()
assert data == [
{"entity_id": "calendar.calendar_1", "name": "Calendar 1"},
{"entity_id": "calendar.calendar_2", "name": "Calendar 2"},
]
| apache-2.0 | -496,533,088,957,177,540 | 36.307692 | 85 | 0.651546 | false |
nuclear-wizard/moose | python/mooseutils/VectorPostprocessorReader.py | 6 | 5970 | #* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import glob
import pandas
import bisect
from .MooseDataFrame import MooseDataFrame
from . import message
class VectorPostprocessorReader(object):
"""
A Reader for MOOSE VectorPostprocessor data.
Args:
pattern[str]: A pattern of files (for use with glob) for loading.
MOOSE outputs VectorPostprocessor data in separate files for each timestep, using the timestep as
a prefix. For example: file_000.csv, file_001.csv, etc.
Therefore, a pattern acceptable for use with the python glob package must be supplied. For the
above files, "file_*.csv" should be supplied.
This object manages the loading and unloading of data and should always be in a valid state,
regardless of the existence of a file. It will also append new data and remove old/deleted data
on subsequent calls to "update()".
"""
def __init__(self, pattern, run_start_time=0):
self._pattern = pattern
self._timedata = MooseDataFrame(self._pattern.replace('*', 'time'),
run_start_time=None,
index='timestep')
self._frames = dict()
self._time = -1
self._index = None
self._run_start_time = run_start_time
self.update()
@property
def data(self):
return self._frames.get(self._index, pandas.DataFrame())
@property
def filename(self):
if self._frames:
return self._frames[self._index].filename
def __getitem__(self, keys):
"""
Operator[] returns the data for the current time.
Args:
keys[str|list]: The key(s) to return.
"""
return self._frames[self._index][keys]
def __bool__(self):
"""
Allows this object to be used in boolean cases.
Example:
data = VectorPostprocessorReader('files_*.csv')
if not data:
print 'No data found!'
"""
return self._index in self._frames
def __contains__(self, variable):
"""
Returns true if the variable exists in the data structure.
"""
return variable in self._frames[self._index]
def times(self):
"""
Returns the list of available time indices contained in the data.
"""
return sorted(self._frames.keys())
def clear(self):
"""
Remove all data.
"""
self._frames = dict()
self._index = None
self._time = None
def variables(self):
"""
Return a list of postprocessor variable names listed in the reader.
"""
if self._index is not None:
return self._frames[self._index].data.columns.tolist()
def update(self, time=None):
"""
Update data by adding/removing files.
time[float]: The time at which the data should be returned.
"""
# Update the time
if time is not None:
self._time = time
# Update the time data file
self._timedata.update()
# The list of files from the supplied pattern
last_modified = 0.0
self._frames = dict()
for fname in sorted(glob.glob(self._pattern)):
if fname.endswith('LATEST') or fname.endswith('FINAL') or (fname == self._timedata.filename):
continue
idx = self._timeHelper(fname)
mdf = self._frames.get(idx, None)
if mdf is None:
mdf = MooseDataFrame(fname, run_start_time=self._run_start_time, update=False,
peacock_index=True)
self._frames[idx] = mdf
if (mdf.modified < last_modified):
self._frames.pop(idx)
elif mdf.filesize == 0:
self._frames.pop(idx)
else:
last_modified = mdf.modified
# Clear the data if empty
if self._frames:
self.__updateCurrentIndex()
df = self._frames.get(self._index, None)
if df is not None:
return df.update()
def repr(self):
"""
Return components for building script.
Returns:
(output, imports) The necessary script and include statements to re-create data load.
"""
imports = ['import mooseutils']
output = ['\n# Read VectorPostprocessor Data']
output += ['data = mooseutils.VectorPostprocessorReader({})'.format(repr(self._pattern))]
return output, imports
def _timeHelper(self, filename):
"""
Determine the time index. (protected)
"""
idx = filename.rfind('_') + 1
tstep = int(filename[idx:-4])
if not self._timedata:
return tstep
else:
try:
return self._timedata['time'].loc[tstep]
except Exception:
return tstep
def __updateCurrentIndex(self):
"""
Helper for setting the current key for the supplied time.
"""
if not self._frames:
index = None
# Return the latest time
elif self._time == -1:
index = self.times()[-1]
# Return the specified time
elif self._time in self._frames:
index = self._time
# Find nearest time
else:
times = self.times()
n = len(times)
idx = bisect.bisect_right(times, self._time) - 1
if idx < 0:
idx = 0
elif idx > n:
idx = -1
index = times[idx]
self._index = index
| lgpl-2.1 | -6,740,007,926,348,591,000 | 29.151515 | 105 | 0.557956 | false |
Russell-IO/ansible | test/units/modules/network/f5/test_bigip_pool_member.py | 17 | 11584 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_pool_member import ModuleParameters
from library.modules.bigip_pool_member import ApiParameters
from library.modules.bigip_pool_member import NodeApiParameters
from library.modules.bigip_pool_member import ModuleManager
from library.modules.bigip_pool_member import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_pool_member import ModuleParameters
from ansible.modules.network.f5.bigip_pool_member import ApiParameters
from ansible.modules.network.f5.bigip_pool_member import NodeApiParameters
from ansible.modules.network.f5.bigip_pool_member import ModuleManager
from ansible.modules.network.f5.bigip_pool_member import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
pool='my-pool',
address='1.2.3.4',
fqdn='fqdn.foo.bar',
name='my-name',
port=2345,
connection_limit=100,
description='this is a description',
rate_limit=70,
ratio=20,
preserve_node=False,
priority_group=10,
state='present',
partition='Common',
fqdn_auto_populate=False,
reuse_nodes=False,
# Deprecated params
# TODO(Remove in 2.7)
session_state='disabled',
monitor_state='disabled',
)
p = ModuleParameters(params=args)
assert p.name == 'my-name'
def test_api_parameters(self):
args = load_fixture('load_net_node_with_fqdn.json')
p = ApiParameters(params=args)
assert p.state == 'present'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_reuse_node_with_name(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
name='my-name',
port=2345,
state='present',
partition='Common',
reuse_nodes=True,
password='password',
server='localhost',
user='admin'
))
current_node = NodeApiParameters(params=load_fixture('load_net_node_with_fqdn.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
mm.read_current_node_from_device = Mock(return_value=current_node)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is True
assert results['fqdn'] == 'foo.bar.com'
assert results['state'] == 'present'
def test_create_reuse_node_with_ipv4_address(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
name='7.3.67.8',
port=2345,
state='present',
partition='Common',
reuse_nodes=True,
password='password',
server='localhost',
user='admin'
))
current_node = NodeApiParameters(params=load_fixture('load_net_node_with_ipv4_address.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
mm.read_current_node_from_device = Mock(return_value=current_node)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is False
assert results['address'] == '7.3.67.8'
assert results['state'] == 'present'
def test_create_reuse_node_with_fqdn_auto_populate(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
name='my-name',
port=2345,
state='present',
partition='Common',
reuse_nodes=True,
fqdn_auto_populate=False,
password='password',
server='localhost',
user='admin'
))
current_node = NodeApiParameters(params=load_fixture('load_net_node_with_fqdn.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
mm.read_current_node_from_device = Mock(return_value=current_node)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is True
assert results['fqdn'] == 'foo.bar.com'
assert results['state'] == 'present'
class TestLegacyManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_name_is_hostname_with_session_and_monitor_enabled(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
name='my-name',
port=2345,
state='present',
session_state='enabled',
monitor_state='enabled',
partition='Common',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is False
assert results['fqdn'] == 'my-name'
assert results['state'] == 'present'
def test_create_name_is_address_with_session_and_monitor_enabled(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
name='10.10.10.10',
port=2345,
state='present',
session_state='enabled',
monitor_state='enabled',
partition='Common',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is False
assert results['address'] == '10.10.10.10'
assert results['state'] == 'present'
def test_create_name_is_address_with_session_disabled_and_monitor_enabled(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
name='10.10.10.10',
port=2345,
state='present',
monitor_state='enabled',
session_state='disabled',
partition='Common',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is False
assert results['address'] == '10.10.10.10'
assert results['state'] == 'disabled'
def test_create_name_is_address_with_session_and_monitor_disabled(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
name='10.10.10.10',
port=2345,
state='present',
monitor_state='disabled',
session_state='disabled',
partition='Common',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is False
assert results['address'] == '10.10.10.10'
assert results['state'] == 'forced_offline'
| gpl-3.0 | 4,317,162,278,305,698,000 | 33.272189 | 101 | 0.610756 | false |
eckardm/archivematica | src/MCPClient/lib/clientScripts/generateDIPFromAIPGenerateDIP.py | 1 | 2639 | #!/usr/bin/env python2
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage archivematicaClientScript
# @author Joseph Perry <[email protected]>
import os
import sys
import shutil
import django
django.setup()
# dashboard
from main.models import Job, SIP
# archivematicaCommon
from custom_handlers import get_script_logger
from databaseFunctions import createSIP
if __name__ == '__main__':
logger = get_script_logger("archivematica.mcp.client.generateDIPFromAIPGenerateDIP")
# COPY THE METS FILE
# Move the DIP Directory
fauxUUID = sys.argv[1]
unitPath = sys.argv[2]
date = sys.argv[3]
basename = os.path.basename(unitPath[:-1])
uuidLen = 36
originalSIPName = basename[:-(uuidLen+1)*2]
originalSIPUUID = basename[:-(uuidLen+1)][-uuidLen:]
METSPath = os.path.join(unitPath, "metadata/submissionDocumentation/data/", "METS.%s.xml" % (originalSIPUUID))
if not os.path.isfile(METSPath):
print >>sys.stderr, "Mets file not found: ", METSPath
exit(-1)
# move mets to DIP
src = METSPath
dst = os.path.join(unitPath, "DIP", os.path.basename(METSPath))
shutil.move(src, dst)
# Move DIP
src = os.path.join(unitPath, "DIP")
dst = os.path.join("/var/archivematica/sharedDirectory/watchedDirectories/uploadDIP/", originalSIPName + "-" + originalSIPUUID)
shutil.move(src, dst)
try:
SIP.objects.get(uuid=originalSIPUUID)
except SIP.DoesNotExist:
# otherwise doesn't appear in dashboard
createSIP(unitPath, UUID=originalSIPUUID)
Job.objects.create(jobtype="Hack to make DIP Jobs appear",
directory=unitPath,
sip_id=originalSIPUUID,
currentstep="Completed successfully",
unittype="unitSIP",
microservicegroup="Upload DIP")
| agpl-3.0 | 6,851,512,321,589,091,000 | 33.723684 | 133 | 0.685108 | false |
angyukai/boulderactive2016-landing-page | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py | 1509 | 17165 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
to fallback to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions, python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
  Only versions 8-14 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
| mit | 7,857,204,932,087,653,000 | 37.747178 | 80 | 0.556889 | false |
razor-x/scipy-data_fitting | scipy_data_fitting/model.py | 1 | 7085 | import functools
import sympy
class Model:
"""
A model organizes symbols, expressions and replacements rules by name.
Example:
#!python
>>> model = Model()
>>> model.add_symbols('y', 'x', 'm', 'b')
        >>> y, x, m, b = model.get_symbols('y', 'x', 'm', 'b')
>>> model.expressions['line'] = y
>>> model.replacements['slope_intercept'] = (y, m * x + b)
        >>> line = model.replace('line', 'slope_intercept')
        >>> line
        m * x + b
>>> function = model.lambdify(line, ('m', 'x', 'b'))
>>> function(1, 2, 3)
5
"""
def __init__(self, name=None):
self.name = name
"""
The identifier name for this object.
"""
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def expressions(self):
"""
Dictionary to store SymPy expressions by name.
"""
if not hasattr(self, '_expressions'): self._expressions = {}
return self._expressions
@expressions.setter
def expressions(self, value):
self._expressions = value
@property
def replacements(self):
"""
Dictionary to store replacement rules by name.
Each value is a tuple of SymPy expressions: `(expression, replacement)`.
"""
if not hasattr(self, '_replacements'): self._replacements = {}
return self._replacements
@replacements.setter
def replacements(self, value):
self._replacements = value
@property
def replacement_groups(self):
"""
Dictionary to store a sequence of replacements by name.
Each value is a list of names that will be looked up
in `scipy_data_fitting.Model.replacements`.
When used to make substitutions, replacements will be applied
one at a time in the order given.
"""
if not hasattr(self, '_replacement_groups'): self._replacement_groups = {}
return self._replacement_groups
@replacement_groups.setter
def replacement_groups(self, value):
self._replacement_groups = value
@property
def symbols(self):
"""
Dictionary to store symbols by name.
Add symbols directly, or with `scipy_data_fitting.Model.add_symbol`
and `scipy_data_fitting.Model.add_symbols`.
"""
if not hasattr(self, '_symbols'): self._symbols = {}
return self._symbols
@symbols.setter
def symbols(self, value):
self._symbols = value
def symbol(self, name):
"""
Function to provide a shorthand for `self.symbols[name]`.
"""
return self.symbols[name]
def add_symbol(self, name, string=None):
"""
Add a symbol with key `name` to `scipy_data_fitting.Model.symbols`.
Optionally, specify an alternative `string` to pass to [`sympy.Symbol`][1],
otherwise `name` is used.
[1]: http://docs.sympy.org/dev/modules/core.html#id4
"""
if not string: string = name
self.symbols[name] = sympy.Symbol(string)
def add_symbols(self, *names):
"""
Pass any number of strings to add symbols to `scipy_data_fitting.Model.symbols`
using `scipy_data_fitting.Model.add_symbol`.
Example:
#!python
>>> model.add_symbols('x', 'y', 'z')
"""
for name in names:
self.add_symbol(name)
def get_symbols(self, *symbols):
"""
A tuple of symbols by name.
Example:
#!python
>>> x, y, z = model.get_symbols('x', 'y', 'z')
"""
return ( self.symbol(s) for s in symbols )
def replace(self, expression, replacements):
"""
All purpose method to reduce an expression by applying
successive replacement rules.
`expression` is either a SymPy expression
or a key in `scipy_data_fitting.Model.expressions`.
`replacements` can be any of the following,
or a list of any combination of the following:
- A replacement tuple as in `scipy_data_fitting.Model.replacements`.
- The name of a replacement in `scipy_data_fitting.Model.replacements`.
- The name of a replacement group in `scipy_data_fitting.Model.replacement_groups`.
Examples:
#!python
>>> model.replace(x + y, (x, z))
z + y
>>> model.replace('expression', (x, z))
>>> model.replace('expression', 'replacement')
>>> model.replace('expression', ['replacement_1', 'replacement_2'])
>>> model.replace('expression', ['replacement', 'group'])
"""
# When expression is a string,
# get the expressions from self.expressions.
if isinstance(expression, str):
expression = self.expressions[expression]
# Allow for replacements to be empty.
if not replacements:
return expression
# Allow replacements to be a string.
if isinstance(replacements, str):
if replacements in self.replacements:
return self.replace(expression, self.replacements[replacements])
elif replacements in self.replacement_groups:
return self.replace(expression, self.replacement_groups[replacements])
# When replacements is a list of strings or tuples,
# Use reduce to make all the replacements.
if all(isinstance(item, str) for item in replacements) \
or all(isinstance(item, tuple) for item in replacements):
return functools.reduce(self.replace, replacements, expression)
# Otherwise make the replacement.
return expression.replace(*replacements)
def lambdify(self, expression, symbols, **kwargs):
"""
Converts a SymPy expression into a function using [`sympy.lambdify`][1].
`expression` can be a SymPy expression or the name of an expression
in `scipy_data_fitting.Model.expressions`.
`symbols` can be any of the following,
or a list of any combination of the following:
- A SymPy symbol.
- The name of a symbol in `scipy_data_fitting.Model.symbols`.
Additional keyword arguments are passed to [`sympy.lambdify`][1].
[1]: http://docs.sympy.org/latest/modules/utilities/lambdify.html#sympy.utilities.lambdify.lambdify
"""
if isinstance(expression, str):
expression = self.expressions[expression]
if hasattr(symbols, '__iter__'):
variables = []
for s in symbols:
if isinstance(s, str):
variables.append(self.symbol(s))
else:
variables.append(s)
else:
if isinstance(symbols, str):
variables = (self.symbol(symbols), )
else:
variables = (symbols, )
return sympy.lambdify(tuple(variables), expression, **kwargs)
| mit | -7,549,078,446,927,727,000 | 31.351598 | 107 | 0.584333 | false |
simonkuang/grpc | tools/run_tests/performance/massage_qps_stats.py | 25 | 28679 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Autogenerated by tools/codegen/core/gen_stats_data.py
import massage_qps_stats_helpers
def massage_qps_stats(scenario_result):
for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
if "coreStats" in stats:
# Get rid of the "coreStats" element and replace it by statistics
# that correspond to columns in the bigquery schema.
core_stats = stats["coreStats"]
del stats["coreStats"]
stats[
"core_client_calls_created"] = massage_qps_stats_helpers.counter(
core_stats, "client_calls_created")
stats[
"core_server_calls_created"] = massage_qps_stats_helpers.counter(
core_stats, "server_calls_created")
stats["core_cqs_created"] = massage_qps_stats_helpers.counter(
core_stats, "cqs_created")
stats[
"core_client_channels_created"] = massage_qps_stats_helpers.counter(
core_stats, "client_channels_created")
stats[
"core_client_subchannels_created"] = massage_qps_stats_helpers.counter(
core_stats, "client_subchannels_created")
stats[
"core_server_channels_created"] = massage_qps_stats_helpers.counter(
core_stats, "server_channels_created")
stats["core_syscall_poll"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_poll")
stats["core_syscall_wait"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_wait")
stats["core_pollset_kick"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick")
stats[
"core_pollset_kicked_without_poller"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kicked_without_poller")
stats[
"core_pollset_kicked_again"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kicked_again")
stats[
"core_pollset_kick_wakeup_fd"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_wakeup_fd")
stats[
"core_pollset_kick_wakeup_cv"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_wakeup_cv")
stats[
"core_pollset_kick_own_thread"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_own_thread")
stats["core_syscall_epoll_ctl"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_epoll_ctl")
stats[
"core_pollset_fd_cache_hits"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_fd_cache_hits")
stats[
"core_histogram_slow_lookups"] = massage_qps_stats_helpers.counter(
core_stats, "histogram_slow_lookups")
stats["core_syscall_write"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_write")
stats["core_syscall_read"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_read")
stats[
"core_tcp_backup_pollers_created"] = massage_qps_stats_helpers.counter(
core_stats, "tcp_backup_pollers_created")
stats[
"core_tcp_backup_poller_polls"] = massage_qps_stats_helpers.counter(
core_stats, "tcp_backup_poller_polls")
stats["core_http2_op_batches"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_batches")
stats["core_http2_op_cancel"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_cancel")
stats[
"core_http2_op_send_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_initial_metadata")
stats[
"core_http2_op_send_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_message")
stats[
"core_http2_op_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_trailing_metadata")
stats[
"core_http2_op_recv_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_initial_metadata")
stats[
"core_http2_op_recv_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_message")
stats[
"core_http2_op_recv_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_trailing_metadata")
stats[
"core_http2_settings_writes"] = massage_qps_stats_helpers.counter(
core_stats, "http2_settings_writes")
stats["core_http2_pings_sent"] = massage_qps_stats_helpers.counter(
core_stats, "http2_pings_sent")
stats[
"core_http2_writes_begun"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_begun")
stats[
"core_http2_writes_offloaded"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_offloaded")
stats[
"core_http2_writes_continued"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_continued")
stats[
"core_http2_partial_writes"] = massage_qps_stats_helpers.counter(
core_stats, "http2_partial_writes")
stats[
"core_http2_initiate_write_due_to_initial_write"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_initial_write")
stats[
"core_http2_initiate_write_due_to_start_new_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_start_new_stream")
stats[
"core_http2_initiate_write_due_to_send_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_send_message")
stats[
"core_http2_initiate_write_due_to_send_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_send_initial_metadata")
stats[
"core_http2_initiate_write_due_to_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_send_trailing_metadata")
stats[
"core_http2_initiate_write_due_to_retry_send_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_retry_send_ping")
stats[
"core_http2_initiate_write_due_to_continue_pings"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_continue_pings")
stats[
"core_http2_initiate_write_due_to_goaway_sent"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_goaway_sent")
stats[
"core_http2_initiate_write_due_to_rst_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_rst_stream")
stats[
"core_http2_initiate_write_due_to_close_from_api"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_close_from_api")
stats[
"core_http2_initiate_write_due_to_stream_flow_control"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_stream_flow_control")
stats[
"core_http2_initiate_write_due_to_transport_flow_control"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_transport_flow_control")
stats[
"core_http2_initiate_write_due_to_send_settings"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_send_settings")
stats[
"core_http2_initiate_write_due_to_bdp_estimator_ping"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_bdp_estimator_ping")
stats[
"core_http2_initiate_write_due_to_flow_control_unstalled_by_setting"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_flow_control_unstalled_by_setting"
)
stats[
"core_http2_initiate_write_due_to_flow_control_unstalled_by_update"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_flow_control_unstalled_by_update"
)
stats[
"core_http2_initiate_write_due_to_application_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_application_ping")
stats[
"core_http2_initiate_write_due_to_keepalive_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_keepalive_ping")
stats[
"core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_transport_flow_control_unstalled"
)
stats[
"core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_ping_response")
stats[
"core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_force_rst_stream")
stats[
"core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(
core_stats, "http2_spurious_writes_begun")
stats[
"core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_indexed")
stats[
"core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_incidx")
stats[
"core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_incidx_v")
stats[
"core_hpack_recv_lithdr_notidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_notidx")
stats[
"core_hpack_recv_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_notidx_v")
stats[
"core_hpack_recv_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_nvridx")
stats[
"core_hpack_recv_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_nvridx_v")
stats[
"core_hpack_recv_uncompressed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_uncompressed")
stats[
"core_hpack_recv_huffman"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_huffman")
stats["core_hpack_recv_binary"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_binary")
stats[
"core_hpack_recv_binary_base64"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_binary_base64")
stats[
"core_hpack_send_indexed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_indexed")
stats[
"core_hpack_send_lithdr_incidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_incidx")
stats[
"core_hpack_send_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_incidx_v")
stats[
"core_hpack_send_lithdr_notidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_notidx")
stats[
"core_hpack_send_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_notidx_v")
stats[
"core_hpack_send_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_nvridx")
stats[
"core_hpack_send_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_nvridx_v")
stats[
"core_hpack_send_uncompressed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_uncompressed")
stats[
"core_hpack_send_huffman"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_huffman")
stats["core_hpack_send_binary"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_binary")
stats[
"core_hpack_send_binary_base64"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_binary_base64")
stats[
"core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_initiated")
stats[
"core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_scheduled_items")
stats[
"core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_scheduled_final_items")
stats[
"core_combiner_locks_offloaded"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_offloaded")
stats[
"core_call_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_locks_initiated")
stats[
"core_call_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_locks_scheduled_items")
stats[
"core_call_combiner_set_notify_on_cancel"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_set_notify_on_cancel")
stats[
"core_call_combiner_cancelled"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_cancelled")
stats[
"core_executor_scheduled_short_items"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_short_items")
stats[
"core_executor_scheduled_long_items"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_long_items")
stats[
"core_executor_scheduled_to_self"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_to_self")
stats[
"core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "executor_wakeup_initiated")
stats[
"core_executor_queue_drained"] = massage_qps_stats_helpers.counter(
core_stats, "executor_queue_drained")
stats[
"core_executor_push_retries"] = massage_qps_stats_helpers.counter(
core_stats, "executor_push_retries")
stats[
"core_server_requested_calls"] = massage_qps_stats_helpers.counter(
core_stats, "server_requested_calls")
stats[
"core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(
core_stats, "server_slowpath_requests_queued")
stats[
"core_cq_ev_queue_trylock_failures"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_trylock_failures")
stats[
"core_cq_ev_queue_trylock_successes"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_trylock_successes")
stats[
"core_cq_ev_queue_transient_pop_failures"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_transient_pop_failures")
h = massage_qps_stats_helpers.histogram(core_stats,
"call_initial_size")
stats["core_call_initial_size"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_call_initial_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_call_initial_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_call_initial_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"poll_events_returned")
stats["core_poll_events_returned"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_poll_events_returned_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_poll_events_returned_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_poll_events_returned_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_poll_events_returned_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_write_size")
stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
stats["core_tcp_write_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_write_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_write_iov_size")
stats["core_tcp_write_iov_size"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_tcp_write_iov_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_write_iov_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_write_iov_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
stats["core_tcp_read_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_read_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_read_offer")
stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
stats["core_tcp_read_offer_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_read_offer_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_read_offer_iov_size")
stats["core_tcp_read_offer_iov_size"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_tcp_read_offer_iov_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_read_offer_iov_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_read_offer_iov_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_read_offer_iov_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"http2_send_message_size")
stats["core_http2_send_message_size"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_message_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_message_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_message_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_message_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_initial_metadata_per_write")
stats["core_http2_send_initial_metadata_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_initial_metadata_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_message_per_write")
stats["core_http2_send_message_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_message_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_message_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_message_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_message_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_trailing_metadata_per_write")
stats["core_http2_send_trailing_metadata_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats[
"core_http2_send_trailing_metadata_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_flowctl_per_write")
stats["core_http2_send_flowctl_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_flowctl_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"server_cqs_checked")
stats["core_server_cqs_checked"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_server_cqs_checked_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_server_cqs_checked_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_server_cqs_checked_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
| apache-2.0 | -198,645,942,087,565,700 | 55.123288 | 122 | 0.54308 | false |
adrienbrault/home-assistant | tests/components/netatmo/test_media_source.py | 13 | 2552 | """Test Local Media Source."""
import ast
import pytest
from homeassistant.components import media_source
from homeassistant.components.media_source import const
from homeassistant.components.media_source.models import PlayMedia
from homeassistant.components.netatmo import DATA_CAMERAS, DATA_EVENTS, DOMAIN
from homeassistant.setup import async_setup_component
from tests.common import load_fixture
async def test_async_browse_media(hass):
"""Test browse media."""
assert await async_setup_component(hass, DOMAIN, {})
    # Prepare cached Netatmo event data
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_EVENTS] = ast.literal_eval(
load_fixture("netatmo/events.txt")
)
hass.data[DOMAIN][DATA_CAMERAS] = {
"12:34:56:78:90:ab": "MyCamera",
"12:34:56:78:90:ac": "MyOutdoorCamera",
}
assert await async_setup_component(hass, const.DOMAIN, {})
await hass.async_block_till_done()
# Test camera not exists
with pytest.raises(media_source.BrowseError) as excinfo:
await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{DOMAIN}/events/98:76:54:32:10:ff"
)
assert str(excinfo.value) == "Camera does not exist."
# Test browse event
with pytest.raises(media_source.BrowseError) as excinfo:
await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{DOMAIN}/events/12:34:56:78:90:ab/12345"
)
assert str(excinfo.value) == "Event does not exist."
# Test invalid base
with pytest.raises(media_source.BrowseError) as excinfo:
await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{DOMAIN}/invalid/base"
)
assert str(excinfo.value) == "Unknown source directory."
# Test successful listing
media = await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{DOMAIN}/events/"
)
# Test successful events listing
media = await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{DOMAIN}/events/12:34:56:78:90:ab"
)
# Test successful event listing
media = await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{DOMAIN}/events/12:34:56:78:90:ab/1599152672"
)
assert media
# Test successful event resolve
media = await media_source.async_resolve_media(
hass, f"{const.URI_SCHEME}{DOMAIN}/events/12:34:56:78:90:ab/1599152672"
)
assert media == PlayMedia(
url="http:///files/high/index.m3u8", mime_type="application/x-mpegURL"
)
| mit | 5,315,828,239,952,103,000 | 32.578947 | 79 | 0.675157 | false |
Zhongqilong/mykbengineer | kbe/src/lib/python/PCbuild/build_ssl.py | 30 | 9208 | # Script for building the _ssl and _hashlib modules for Windows.
# Uses Perl to setup the OpenSSL environment correctly
# and build OpenSSL, then invokes a simple nmake session
# for the actual _ssl.pyd and _hashlib.pyd DLLs.
# THEORETICALLY, you can:
# * Unpack the latest SSL release one level above your main Python source
# directory. It is likely you will already find the zlib library and
# any other external packages there.
# * Install ActivePerl and ensure it is somewhere on your path.
# * Run this script from the PCBuild directory.
#
# it should configure and build SSL, then build the _ssl and _hashlib
# Python extensions without intervention.
# Modified by Christian Heimes
# Now this script supports pre-generated makefiles and assembly files.
# Developers don't need an installation of Perl anymore to build Python. An
# svn checkout from our svn repository is enough.
#
# In order to create the files in the case of an update, you still need Perl.
# Run build_ssl in this order:
# python.exe build_ssl.py Release x64
# python.exe build_ssl.py Release Win32
import os, sys, re, shutil
import subprocess
# Find all "foo.exe" files on the PATH.
def find_all_on_path(filename, extras = None):
entries = os.environ["PATH"].split(os.pathsep)
ret = []
for p in entries:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
if extras:
for p in extras:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
return ret
# Find a suitable Perl installation for OpenSSL.
# cygwin perl does *not* work. ActivePerl does.
# Being a Perl dummy, the simplest way I can check is if the "Win32" package
# is available.
def find_working_perl(perls):
for perl in perls:
try:
subprocess.check_output([perl, "-e", "use Win32;"])
except subprocess.CalledProcessError:
continue
else:
return perl
if perls:
print("The following perl interpreters were found:")
for p in perls:
print(" ", p)
print(" None of these versions appear suitable for building OpenSSL")
else:
print("NO perl interpreters were found on this machine at all!")
print(" Please install ActivePerl and ensure it appears on your path")
# Fetch SSL directory from VC properties
def get_ssl_dir():
propfile = (os.path.join(os.path.dirname(__file__), 'pyproject.props'))
with open(propfile, encoding='utf-8-sig') as f:
m = re.search('openssl-([^<]+)<', f.read())
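        # For illustration (the version number below is hypothetical): if
        # pyproject.props contains "openssl-1.0.1e<", then m.group(1) is
        # "1.0.1e" and this function returns ..\..\openssl-1.0.1e.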
return "..\..\openssl-"+m.group(1)
def create_makefile64(makefile, m32):
"""Create and fix makefile for 64bit
Replace 32 with 64bit directories
"""
if not os.path.isfile(m32):
return
with open(m32) as fin:
with open(makefile, 'w') as fout:
for line in fin:
line = line.replace("=tmp32", "=tmp64")
line = line.replace("=out32", "=out64")
line = line.replace("=inc32", "=inc64")
# force 64 bit machine
line = line.replace("MKLIB=lib", "MKLIB=lib /MACHINE:X64")
line = line.replace("LFLAGS=", "LFLAGS=/MACHINE:X64 ")
# don't link against the lib on 64bit systems
line = line.replace("bufferoverflowu.lib", "")
fout.write(line)
os.unlink(m32)
def fix_makefile(makefile):
"""Fix some stuff in all makefiles
"""
if not os.path.isfile(makefile):
return
with open(makefile) as fin:
lines = fin.readlines()
with open(makefile, 'w') as fout:
for line in lines:
if line.startswith("PERL="):
continue
if line.startswith("CP="):
line = "CP=copy\n"
if line.startswith("MKDIR="):
line = "MKDIR=mkdir\n"
if line.startswith("CFLAG="):
line = line.strip()
for algo in ("RC5", "MDC2", "IDEA"):
noalgo = " -DOPENSSL_NO_%s" % algo
if noalgo not in line:
line = line + noalgo
line = line + '\n'
fout.write(line)
def run_configure(configure, do_script):
print("perl Configure "+configure+" no-idea no-mdc2")
os.system("perl Configure "+configure+" no-idea no-mdc2")
print(do_script)
os.system(do_script)
def cmp(f1, f2):
bufsize = 1024 * 8
with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
return True
def copy(src, dst):
if os.path.isfile(dst) and cmp(src, dst):
return
shutil.copy(src, dst)
def main():
build_all = "-a" in sys.argv
if sys.argv[1] == "Release":
debug = False
elif sys.argv[1] == "Debug":
debug = True
else:
raise ValueError(str(sys.argv))
if sys.argv[2] == "Win32":
arch = "x86"
configure = "VC-WIN32"
do_script = "ms\\do_nasm"
makefile="ms\\nt.mak"
m32 = makefile
dirsuffix = "32"
elif sys.argv[2] == "x64":
arch="amd64"
configure = "VC-WIN64A"
do_script = "ms\\do_win64a"
makefile = "ms\\nt64.mak"
m32 = makefile.replace('64', '')
dirsuffix = "64"
#os.environ["VSEXTCOMP_USECL"] = "MS_OPTERON"
else:
raise ValueError(str(sys.argv))
make_flags = ""
if build_all:
make_flags = "-a"
# perl should be on the path, but we also look in "\perl" and "c:\\perl"
# as "well known" locations
perls = find_all_on_path("perl.exe", ["\\perl\\bin", "C:\\perl\\bin"])
perl = find_working_perl(perls)
if perl:
print("Found a working perl at '%s'" % (perl,))
else:
print("No Perl installation was found. Existing Makefiles are used.")
sys.stdout.flush()
# Look for SSL 2 levels up from pcbuild - ie, same place zlib etc all live.
ssl_dir = get_ssl_dir()
if ssl_dir is None:
sys.exit(1)
old_cd = os.getcwd()
try:
os.chdir(ssl_dir)
        # rebuild makefile when we do the roll over from 32 to 64 bit build
if arch == "amd64" and os.path.isfile(m32) and not os.path.isfile(makefile):
os.unlink(m32)
# If the ssl makefiles do not exist, we invoke Perl to generate them.
# Due to a bug in this script, the makefile sometimes ended up empty
# Force a regeneration if it is.
if not os.path.isfile(makefile) or os.path.getsize(makefile)==0:
if perl is None:
print("Perl is required to build the makefiles!")
sys.exit(1)
print("Creating the makefiles...")
sys.stdout.flush()
# Put our working Perl at the front of our path
os.environ["PATH"] = os.path.dirname(perl) + \
os.pathsep + \
os.environ["PATH"]
run_configure(configure, do_script)
if debug:
print("OpenSSL debug builds aren't supported.")
#if arch=="x86" and debug:
# # the do_masm script in openssl doesn't generate a debug
# # build makefile so we generate it here:
# os.system("perl util\mk1mf.pl debug "+configure+" >"+makefile)
if arch == "amd64":
create_makefile64(makefile, m32)
fix_makefile(makefile)
copy(r"crypto\buildinf.h", r"crypto\buildinf_%s.h" % arch)
copy(r"crypto\opensslconf.h", r"crypto\opensslconf_%s.h" % arch)
# If the assembler files don't exist in tmpXX, copy them there
if perl is None and os.path.exists("asm"+dirsuffix):
if not os.path.exists("tmp"+dirsuffix):
os.mkdir("tmp"+dirsuffix)
for f in os.listdir("asm"+dirsuffix):
if not f.endswith(".asm"): continue
if os.path.isfile(r"tmp%s\%s" % (dirsuffix, f)): continue
shutil.copy(r"asm%s\%s" % (dirsuffix, f), "tmp"+dirsuffix)
# Now run make.
if arch == "amd64":
rc = os.system("nasm -f win64 -DNEAR -Ox -g ms\\uptable.asm")
if rc:
print("nasm assembler has failed.")
sys.exit(rc)
copy(r"crypto\buildinf_%s.h" % arch, r"crypto\buildinf.h")
copy(r"crypto\opensslconf_%s.h" % arch, r"crypto\opensslconf.h")
#makeCommand = "nmake /nologo PERL=\"%s\" -f \"%s\"" %(perl, makefile)
makeCommand = "nmake /nologo -f \"%s\"" % makefile
print("Executing ssl makefiles:", makeCommand)
sys.stdout.flush()
rc = os.system(makeCommand)
if rc:
print("Executing "+makefile+" failed")
print(rc)
sys.exit(rc)
finally:
os.chdir(old_cd)
sys.exit(rc)
if __name__=='__main__':
main()
| lgpl-3.0 | -6,075,392,182,660,927,000 | 35.395257 | 84 | 0.571242 | false |
hehongliang/tensorflow | tensorflow/python/distribute/shared_variable_creator.py | 45 | 3938 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility to re-use variables created on first device on subsequent devices."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
_VARIABLE_UNIQUIFYING_REGEX = re.compile(r"_\d/")
_VARIABLE_UNIQUIFYING_REGEX_AT_END = re.compile(r"_\d$")
def _canonicalize_variable_name(name):
# If no name is specified, uses default name "Variable".
if name is None:
return "Variable"
# Replace all instances of "_<num>/" with "/"
name = _VARIABLE_UNIQUIFYING_REGEX.sub("/", name)
# Replace any instances of "_<num>" at the end of the string with ""
name = _VARIABLE_UNIQUIFYING_REGEX_AT_END.sub("", name)
return name
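# Illustrative behaviour of the helper above (the variable names are
# hypothetical examples, not taken from the original module):
#   _canonicalize_variable_name(None)             -> "Variable"
#   _canonicalize_variable_name("dense_1/kernel") -> "dense/kernel"
#   _canonicalize_variable_name("dense/kernel_1") -> "dense/kernel"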
def make_fn(shared_variable_store, device_id):
"""Construct the variable creator function for device `device_id`.
Constructs custom variable creator functions for the given device.
On first device (device_id == 0), it creates the variable using the
`next_creator`, and stores it in the provided `shared_variable_store`.
On all other devices (device_id > 0), it tries to re-use the variable
already created with the same name. If no such variable exists, it throws an
error.
Additionally, we de-uniquify variable names before checking for matches. This
helps re-use variables which are intended to be the same but have different
  names due to variable uniquification happening upstream. Since this means we
  may have multiple variables with the same canonical name, we store
them in a list per canonical name and return them in the same order as well.
Args:
shared_variable_store: A dictionary that we will use to store variables
created on the first device, and re-used by creators for other devices.
device_id: Integer index of the device whose creator should be
constructed.
Returns:
An appropriate creator function based on device_id.
"""
variable_scope_access_index = {}
assert isinstance(device_id, int)
def create_new_variable(next_creator, *args, **kwargs):
"""Create the variable using `next_creator` and store it."""
canonical_name = _canonicalize_variable_name(kwargs.get("name"))
v = next_creator(*args, **kwargs)
if canonical_name not in shared_variable_store:
shared_variable_store[canonical_name] = []
shared_variable_store[canonical_name].append(v)
return v
def reuse_variable(next_creator, *args, **kwargs):
"""Re-use existing variable from store with same name (in order)."""
del next_creator, args
name = kwargs.get("name")
canonical_name = _canonicalize_variable_name(name)
try:
variable_index = variable_scope_access_index.get(canonical_name, 0)
v = shared_variable_store[canonical_name][variable_index]
# TODO(priyag): Make this variable re-use more robust by adding checks
# that the requested shape and dtype match the existing variable.
variable_scope_access_index[canonical_name] = variable_index + 1
return v
except (KeyError, IndexError):
raise RuntimeError(
"Tried to create variable {} with mismatching name on device {}".
format(name, device_id))
if device_id == 0:
return create_new_variable
else:
return reuse_variable
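# A minimal usage sketch (not part of the original module; `num_devices` and
# `shared_store` are assumed names): device 0's creator makes variables and
# records them in the shared store, while creators for devices > 0 re-use
# them by canonical name.
#
#   shared_store = {}
#   creator_fns = [make_fn(shared_store, d) for d in range(num_devices)]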
| apache-2.0 | -323,219,449,857,299,100 | 39.597938 | 80 | 0.706196 | false |
photoninger/ansible | lib/ansible/modules/monitoring/circonus_annotation.py | 89 | 7162 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2014-2015, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: circonus_annotation
short_description: create an annotation in circonus
description:
    - Create an annotation event with a given category, title and description. Optionally, start, stop or duration can be provided.
author: "Nick Harring (@NickatEpic)"
version_added: 2.0
requirements:
- requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2)
notes:
- Check mode isn't supported.
options:
api_key:
description:
- Circonus API key
required: true
category:
description:
- Annotation Category
required: true
description:
description:
- Description of annotation
required: true
title:
description:
- Title of annotation
required: true
start:
description:
- Unix timestamp of event start
default: I(now)
stop:
description:
- Unix timestamp of event end
default: I(now) + I(duration)
duration:
description:
- Duration in seconds of annotation
default: 0
'''
EXAMPLES = '''
# Create a simple annotation event with a source, defaults to start and end time of now
- circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
title: App Config Change
description: This is a detailed description of the config change
category: This category groups like annotations
# Create an annotation with a duration of 5 minutes and a default start time of now
- circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
title: App Config Change
description: This is a detailed description of the config change
category: This category groups like annotations
duration: 300
# Create an annotation with a start and stop time
- circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
title: App Config Change
description: This is a detailed description of the config change
category: This category groups like annotations
    start: 1395940006
    stop: 1395954407
'''
RETURN = '''
annotation:
description: details about the created annotation
returned: success
type: complex
contains:
_cid:
description: annotation identifier
returned: success
type: string
sample: /annotation/100000
_created:
description: creation timestamp
returned: success
type: int
sample: 1502236928
_last_modified:
description: last modification timestamp
returned: success
type: int
sample: 1502236928
_last_modified_by:
description: last modified by
returned: success
type: string
sample: /user/1000
category:
description: category of the created annotation
returned: success
type: string
sample: alerts
title:
description: title of the created annotation
returned: success
type: string
sample: WARNING
description:
description: description of the created annotation
returned: success
type: string
sample: Host is down.
    start:
      description: timestamp when the annotation starts
      returned: success
      type: int
      sample: 1395940006
    stop:
      description: timestamp when the annotation ends
      returned: success
      type: int
      sample: 1395954407
rel_metrics:
description: Array of metrics related to this annotation, each metrics is a string.
returned: success
type: list
sample:
- 54321_kbps
'''
import json
import time
import traceback
from distutils.version import LooseVersion
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_native
def check_requests_dep(module):
"""Check if an adequate requests version is available"""
if not HAS_REQUESTS:
module.fail_json(msg='requests is required for this module')
else:
required_version = '2.0.0' if PY3 else '1.0.0'
if LooseVersion(requests.__version__) < LooseVersion(required_version):
module.fail_json(msg="'requests' library version should be >= %s, found: %s." % (required_version, requests.__version__))
def post_annotation(annotation, api_key):
''' Takes annotation dict and api_key string'''
base_url = 'https://api.circonus.com/v2'
    annotate_post_endpoint = '/annotation'
    resp = requests.post(base_url + annotate_post_endpoint,
headers=build_headers(api_key), data=json.dumps(annotation))
resp.raise_for_status()
return resp
def create_annotation(module):
''' Takes ansible module object '''
annotation = {}
duration = module.params['duration']
if module.params['start'] is not None:
start = module.params['start']
else:
start = int(time.time())
if module.params['stop'] is not None:
stop = module.params['stop']
else:
stop = int(time.time()) + duration
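        # Note: when only "start" is supplied, "stop" still defaults to the
        # current time plus "duration", not "start" plus "duration".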
annotation['start'] = start
annotation['stop'] = stop
annotation['category'] = module.params['category']
annotation['description'] = module.params['description']
annotation['title'] = module.params['title']
return annotation
def build_headers(api_token):
'''Takes api token, returns headers with it included.'''
headers = {'X-Circonus-App-Name': 'ansible',
'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token,
'Accept': 'application/json'}
return headers
def main():
'''Main function, dispatches logic'''
module = AnsibleModule(
argument_spec=dict(
start=dict(type='int'),
stop=dict(type='int'),
category=dict(required=True),
title=dict(required=True),
description=dict(required=True),
duration=dict(default=0, type='int'),
api_key=dict(required=True, no_log=True)
)
)
check_requests_dep(module)
annotation = create_annotation(module)
try:
resp = post_annotation(annotation, module.params['api_key'])
except requests.exceptions.RequestException as e:
module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc())
module.exit_json(changed=True, annotation=resp.json())
if __name__ == '__main__':
main()
| gpl-3.0 | -4,246,948,575,721,383,400 | 30.275109 | 133 | 0.623569 | false |
jlowin/airflow | scripts/perf/scheduler_ops_metrics.py | 30 | 6536 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import logging
import pandas as pd
import sys
from airflow import configuration, settings
from airflow.jobs import SchedulerJob
from airflow.models import DagBag, DagModel, DagRun, TaskInstance
from airflow.utils.state import State
SUBDIR = 'scripts/perf/dags'
DAG_IDS = ['perf_dag_1', 'perf_dag_2']
MAX_RUNTIME_SECS = 6
class SchedulerMetricsJob(SchedulerJob):
"""
This class extends SchedulerJob to instrument the execution performance of
task instances contained in each DAG. We want to know if any DAG
is starved of resources, and this will be reflected in the stats printed
out at the end of the test run. The following metrics will be instrumented
for each task instance (dag_id, task_id, execution_date) tuple:
    1. Queuing delay - time from starting the executor until the task
       instance is added to the executor queue.
    2. Start delay - time from starting the executor until the task instance
       starts execution.
    3. Land time - time from starting the executor until the task instance
       completes.
    4. Duration - time taken to execute the task instance.
The DAGs implement bash operators that call the system wait command. This
is representative of typical operators run on Airflow - queries that are
run on remote systems and spend the majority of their time on I/O wait.
To Run:
$ python scripts/perf/scheduler_ops_metrics.py
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerMetricsJob'
}
def print_stats(self):
"""
Print operational metrics for the scheduler test.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
        successful_tis = [ti for ti in tis if ti.state == State.SUCCESS]
ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date,
(ti.queued_dttm - self.start_date).total_seconds(),
(ti.start_date - self.start_date).total_seconds(),
(ti.end_date - self.start_date).total_seconds(),
ti.duration) for ti in successful_tis]
ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id',
'execution_date',
'queue_delay',
'start_delay', 'land_time',
'duration'])
print('Performance Results')
print('###################')
for dag_id in DAG_IDS:
print('DAG {}'.format(dag_id))
print(ti_perf_df[ti_perf_df['dag_id'] == dag_id])
print('###################')
if len(tis) > len(successful_tis):
print("WARNING!! The following task instances haven't completed")
print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state)
for ti in filter(lambda x: x.state != State.SUCCESS, tis)],
columns=['dag_id', 'task_id', 'execution_date', 'state']))
session.commit()
def heartbeat(self):
"""
Override the scheduler heartbeat to determine when the test is complete
"""
super(SchedulerMetricsJob, self).heartbeat()
session = settings.Session()
# Get all the relevant task instances
TI = TaskInstance
successful_tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.filter(TI.state.in_([State.SUCCESS]))
.all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
        # the tasks in perf_dag_1 and perf_dag_2 have a daily schedule interval.
num_task_instances = sum([(datetime.today() - task.start_date).days
for dag in dags for task in dag.tasks])
if (len(successful_tis) == num_task_instances or
(datetime.now()-self.start_date).total_seconds() >
MAX_RUNTIME_SECS):
if (len(successful_tis) == num_task_instances):
self.logger.info("All tasks processed! Printing stats.")
else:
self.logger.info("Test timeout reached. "
"Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
sys.exit()
def clear_dag_runs():
"""
Remove any existing DAG runs for the perf test DAGs.
"""
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id.in_(DAG_IDS),
).all()
for dr in drs:
logging.info('Deleting DagRun :: {}'.format(dr))
session.delete(dr)
def clear_dag_task_instances():
"""
Remove any existing task instances for the perf test DAGs.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
for ti in tis:
logging.info('Deleting TaskInstance :: {}'.format(ti))
session.delete(ti)
session.commit()
def set_dags_paused_state(is_paused):
"""
Toggle the pause state of the DAGs in the test.
"""
session = settings.Session()
dms = session.query(DagModel).filter(
DagModel.dag_id.in_(DAG_IDS))
for dm in dms:
logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused))
dm.is_paused = is_paused
session.commit()
def main():
configuration.load_test_config()
set_dags_paused_state(False)
clear_dag_runs()
clear_dag_task_instances()
job = SchedulerMetricsJob(dag_ids=DAG_IDS, subdir=SUBDIR)
job.run()
if __name__ == "__main__":
main()
| apache-2.0 | -126,184,396,995,116,620 | 33.951872 | 84 | 0.588586 | false |
eminence/Minecraft-Overviewer | overviewer_core/cache.py | 6 | 5470 | # This file is part of the Minecraft Overviewer.
#
# Minecraft Overviewer is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# Minecraft Overviewer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the Overviewer. If not, see <http://www.gnu.org/licenses/>.
"""This module has supporting functions for the caching logic used in world.py.
Each cache class should implement the standard container type interface
(__getitem__ and __setitem__), as well as provide a "hits" and "misses"
attribute.
"""
import functools
import logging
import cPickle
class LRUCache(object):
"""A simple, generic, in-memory LRU cache that implements the standard
python container interface.
An ordered dict type would simplify this implementation a bit, but we want
Python 2.6 compatibility and the standard library ordereddict was added in
2.7. It's probably okay because this implementation can be tuned for
exactly what we need and nothing more.
This implementation keeps a linked-list of cache keys and values, ordered
in least-recently-used order. A dictionary maps keys to linked-list nodes.
On cache hit, the link is moved to the end of the list. On cache miss, the
  first item of the list is evicted. All operations have constant time
  complexity on average (dict lookups are worst-case O(n)).
"""
class _LinkNode(object):
__slots__ = ['left', 'right', 'key', 'value']
def __init__(self,l=None,r=None,k=None,v=None):
self.left = l
self.right = r
self.key = k
self.value = v
def __init__(self, size=100, destructor=None):
"""Initialize a new LRU cache with the given size.
destructor, if given, is a callable that is called upon an item being
evicted from the cache. It takes one argument, the value stored in the
cache.
"""
self.cache = {}
# Two sentinel nodes at the ends of the linked list simplify boundary
# conditions in the code below.
self.listhead = LRUCache._LinkNode()
self.listtail = LRUCache._LinkNode()
self.listhead.right = self.listtail
self.listtail.left = self.listhead
self.hits = 0
self.misses = 0
self.size = size
self.destructor = destructor
# Initialize an empty cache of the same size for worker processes
def __getstate__(self):
return self.size
def __setstate__(self, size):
self.__init__(size)
def __getitem__(self, key):
try:
link = self.cache[key]
except KeyError:
self.misses += 1
raise
# Disconnect the link from where it is
link.left.right = link.right
link.right.left = link.left
# Insert the link at the end of the list
tail = self.listtail
link.left = tail.left
link.right = tail
tail.left.right = link
tail.left = link
self.hits += 1
return link.value
def __setitem__(self, key, value):
cache = self.cache
if key in cache:
# Shortcut this case
cache[key].value = value
return
if len(cache) >= self.size:
# Evict a node
link = self.listhead.right
del cache[link.key]
link.left.right = link.right
link.right.left = link.left
d = self.destructor
if d:
d(link.value)
del link
# The node doesn't exist already, and we have room for it. Let's do this.
tail = self.listtail
link = LRUCache._LinkNode(tail.left, tail,key,value)
tail.left.right = link
tail.left = link
cache[key] = link
def __delitem__(self, key):
# Used to flush the cache of this key
cache = self.cache
link = cache[key]
del cache[key]
link.left.right = link.right
link.right.left = link.left
# Call the destructor
d = self.destructor
if d:
d(link.value)
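# A minimal usage sketch for the cache above (illustrative only; the tile
# values are hypothetical, not part of Overviewer itself):
#
#   cache = LRUCache(size=2, destructor=lambda v: v.close())
#   cache[(0, 0)] = tile_a
#   cache[(0, 1)] = tile_b
#   _ = cache[(0, 0)]        # (0, 0) becomes the most recently used entry
#   cache[(1, 0)] = tile_c   # evicts (0, 1) and calls its destructor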
# memcached is an option, but unless your IO costs are really high, it just
# ends up adding overhead and isn't worth it.
try:
import memcache
except ImportError:
class Memcached(object):
def __init__(*args):
raise ImportError("No module 'memcache' found. Please install python-memcached")
else:
class Memcached(object):
def __init__(self, conn='127.0.0.1:11211'):
self.conn = conn
self.mc = memcache.Client([conn], debug=0, pickler=cPickle.Pickler, unpickler=cPickle.Unpickler)
def __getstate__(self):
return self.conn
def __setstate__(self, conn):
self.__init__(conn)
def __getitem__(self, key):
v = self.mc.get(key)
if not v:
raise KeyError()
return v
def __setitem__(self, key, value):
self.mc.set(key, value)
| gpl-3.0 | -1,024,106,037,131,848,300 | 31.754491 | 108 | 0.610603 | false |
vladimir-ipatov/ganeti | lib/workerpool.py | 7 | 18694 | #
#
# Copyright (C) 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Base classes for worker pools.
"""
import logging
import threading
import heapq
import itertools
from ganeti import compat
from ganeti import errors
_TERMINATE = object()
_DEFAULT_PRIORITY = 0
class DeferTask(Exception):
"""Special exception class to defer a task.
This class can be raised by L{BaseWorker.RunTask} to defer the execution of a
task. Optionally, the priority of the task can be changed.
"""
def __init__(self, priority=None):
"""Initializes this class.
@type priority: number
@param priority: New task priority (None means no change)
"""
Exception.__init__(self)
self.priority = priority
class NoSuchTask(Exception):
"""Exception raised when a task can't be found.
"""
class BaseWorker(threading.Thread, object):
"""Base worker class for worker pools.
Users of a worker pool must override RunTask in a subclass.
"""
# pylint: disable=W0212
def __init__(self, pool, worker_id):
"""Constructor for BaseWorker thread.
@param pool: the parent worker pool
@param worker_id: identifier for this worker
"""
super(BaseWorker, self).__init__(name=worker_id)
self.pool = pool
self._worker_id = worker_id
self._current_task = None
assert self.getName() == worker_id
def ShouldTerminate(self):
"""Returns whether this worker should terminate.
Should only be called from within L{RunTask}.
"""
self.pool._lock.acquire()
try:
assert self._HasRunningTaskUnlocked()
return self.pool._ShouldWorkerTerminateUnlocked(self)
finally:
self.pool._lock.release()
def GetCurrentPriority(self):
"""Returns the priority of the current task.
Should only be called from within L{RunTask}.
"""
self.pool._lock.acquire()
try:
assert self._HasRunningTaskUnlocked()
(priority, _, _, _) = self._current_task
return priority
finally:
self.pool._lock.release()
def SetTaskName(self, taskname):
"""Sets the name of the current task.
Should only be called from within L{RunTask}.
@type taskname: string
@param taskname: Task's name
"""
if taskname:
name = "%s/%s" % (self._worker_id, taskname)
else:
name = self._worker_id
# Set thread name
self.setName(name)
def _HasRunningTaskUnlocked(self):
"""Returns whether this worker is currently running a task.
"""
return (self._current_task is not None)
def _GetCurrentOrderAndTaskId(self):
"""Returns the order and task ID of the current task.
Should only be called from within L{RunTask}.
"""
self.pool._lock.acquire()
try:
assert self._HasRunningTaskUnlocked()
(_, order_id, task_id, _) = self._current_task
return (order_id, task_id)
finally:
self.pool._lock.release()
def run(self):
"""Main thread function.
Waits for new tasks to show up in the queue.
"""
pool = self.pool
while True:
assert self._current_task is None
defer = None
try:
# Wait on lock to be told either to terminate or to do a task
pool._lock.acquire()
try:
task = pool._WaitForTaskUnlocked(self)
if task is _TERMINATE:
# Told to terminate
break
if task is None:
# Spurious notification, ignore
continue
self._current_task = task
# No longer needed, dispose of reference
del task
assert self._HasRunningTaskUnlocked()
finally:
pool._lock.release()
(priority, _, _, args) = self._current_task
try:
# Run the actual task
assert defer is None
logging.debug("Starting task %r, priority %s", args, priority)
assert self.getName() == self._worker_id
try:
self.RunTask(*args) # pylint: disable=W0142
finally:
self.SetTaskName(None)
logging.debug("Done with task %r, priority %s", args, priority)
except DeferTask, err:
defer = err
if defer.priority is None:
# Use same priority
defer.priority = priority
logging.debug("Deferring task %r, new priority %s",
args, defer.priority)
assert self._HasRunningTaskUnlocked()
except: # pylint: disable=W0702
logging.exception("Caught unhandled exception")
assert self._HasRunningTaskUnlocked()
finally:
# Notify pool
pool._lock.acquire()
try:
if defer:
assert self._current_task
# Schedule again for later run
(_, _, task_id, args) = self._current_task
pool._AddTaskUnlocked(args, defer.priority, task_id)
if self._current_task:
self._current_task = None
pool._worker_to_pool.notifyAll()
finally:
pool._lock.release()
assert not self._HasRunningTaskUnlocked()
logging.debug("Terminates")
def RunTask(self, *args):
"""Function called to start a task.
This needs to be implemented by child classes.
"""
raise NotImplementedError()
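# A minimal sketch (not part of this module) of the contract described above;
# "ExampleWorker" and its task arguments are hypothetical:
#
#   class ExampleWorker(BaseWorker):
#     def RunTask(self, message):
#       self.SetTaskName("example")
#       logging.debug("Processing %s", message)
#
#   pool = WorkerPool("Example", 4, ExampleWorker)
#   pool.AddTask(("hello", ))
#   pool.Quiesce()
#   pool.TerminateWorkers()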
class WorkerPool(object):
"""Worker pool with a queue.
This class is thread-safe.
Tasks are guaranteed to be started in the order in which they're
added to the pool. Due to the nature of threading, they're not
guaranteed to finish in the same order.
@type _tasks: list of tuples
@ivar _tasks: Each tuple has the format (priority, order ID, task ID,
arguments). Priority and order ID are numeric and essentially control the
sort order. The order ID is an increasing number denoting the order in
which tasks are added to the queue. The task ID is controlled by user of
workerpool, see L{AddTask} for details. The task arguments are C{None} for
abandoned tasks, otherwise a sequence of arguments to be passed to
L{BaseWorker.RunTask}). The list must fulfill the heap property (for use by
the C{heapq} module).
@type _taskdata: dict; (task IDs as keys, tuples as values)
@ivar _taskdata: Mapping from task IDs to entries in L{_tasks}
"""
def __init__(self, name, num_workers, worker_class):
"""Constructor for worker pool.
@param num_workers: number of workers to be started
(dynamic resizing is not yet implemented)
@param worker_class: the class to be instantiated for workers;
should derive from L{BaseWorker}
"""
# Some of these variables are accessed by BaseWorker
self._lock = threading.Lock()
self._pool_to_pool = threading.Condition(self._lock)
self._pool_to_worker = threading.Condition(self._lock)
self._worker_to_pool = threading.Condition(self._lock)
self._worker_class = worker_class
self._name = name
self._last_worker_id = 0
self._workers = []
self._quiescing = False
self._active = True
# Terminating workers
self._termworkers = []
# Queued tasks
self._counter = itertools.count()
self._tasks = []
self._taskdata = {}
# Start workers
self.Resize(num_workers)
# TODO: Implement dynamic resizing?
def _WaitWhileQuiescingUnlocked(self):
"""Wait until the worker pool has finished quiescing.
"""
while self._quiescing:
self._pool_to_pool.wait()
def _AddTaskUnlocked(self, args, priority, task_id):
"""Adds a task to the internal queue.
@type args: sequence
@param args: Arguments passed to L{BaseWorker.RunTask}
@type priority: number
@param priority: Task priority
@param task_id: Task ID
"""
assert isinstance(args, (tuple, list)), "Arguments must be a sequence"
assert isinstance(priority, (int, long)), "Priority must be numeric"
assert task_id is None or isinstance(task_id, (int, long)), \
"Task ID must be numeric or None"
task = [priority, self._counter.next(), task_id, args]
if task_id is not None:
assert task_id not in self._taskdata
# Keep a reference to change priority later if necessary
self._taskdata[task_id] = task
# A counter is used to ensure elements are processed in their incoming
# order. For processing they're sorted by priority and then counter.
heapq.heappush(self._tasks, task)
# Notify a waiting worker
self._pool_to_worker.notify()
def AddTask(self, args, priority=_DEFAULT_PRIORITY, task_id=None):
"""Adds a task to the queue.
@type args: sequence
@param args: arguments passed to L{BaseWorker.RunTask}
@type priority: number
@param priority: Task priority
@param task_id: Task ID
@note: The task ID can be essentially anything that can be used as a
dictionary key. Callers, however, must ensure a task ID is unique while a
task is in the pool or while it might return to the pool due to deferring
using L{DeferTask}.
"""
self._lock.acquire()
try:
self._WaitWhileQuiescingUnlocked()
self._AddTaskUnlocked(args, priority, task_id)
finally:
self._lock.release()
def AddManyTasks(self, tasks, priority=_DEFAULT_PRIORITY, task_id=None):
"""Add a list of tasks to the queue.
@type tasks: list of tuples
@param tasks: list of args passed to L{BaseWorker.RunTask}
@type priority: number or list of numbers
@param priority: Priority for all added tasks or a list with the priority
for each task
@type task_id: list
@param task_id: List with the ID for each task
@note: See L{AddTask} for a note on task IDs.
"""
assert compat.all(isinstance(task, (tuple, list)) for task in tasks), \
"Each task must be a sequence"
assert (isinstance(priority, (int, long)) or
compat.all(isinstance(prio, (int, long)) for prio in priority)), \
"Priority must be numeric or be a list of numeric values"
assert task_id is None or isinstance(task_id, (tuple, list)), \
"Task IDs must be in a sequence"
if isinstance(priority, (int, long)):
priority = [priority] * len(tasks)
elif len(priority) != len(tasks):
raise errors.ProgrammerError("Number of priorities (%s) doesn't match"
" number of tasks (%s)" %
(len(priority), len(tasks)))
if task_id is None:
task_id = [None] * len(tasks)
elif len(task_id) != len(tasks):
raise errors.ProgrammerError("Number of task IDs (%s) doesn't match"
" number of tasks (%s)" %
(len(task_id), len(tasks)))
self._lock.acquire()
try:
self._WaitWhileQuiescingUnlocked()
assert compat.all(isinstance(prio, (int, long)) for prio in priority)
assert len(tasks) == len(priority)
assert len(tasks) == len(task_id)
for (args, prio, tid) in zip(tasks, priority, task_id):
self._AddTaskUnlocked(args, prio, tid)
finally:
self._lock.release()
def ChangeTaskPriority(self, task_id, priority):
"""Changes a task's priority.
@param task_id: Task ID
@type priority: number
@param priority: New task priority
@raise NoSuchTask: When the task referred by C{task_id} can not be found
(it may never have existed, may have already been processed, or is
currently running)
"""
assert isinstance(priority, (int, long)), "Priority must be numeric"
self._lock.acquire()
try:
logging.debug("About to change priority of task %s to %s",
task_id, priority)
# Find old task
oldtask = self._taskdata.get(task_id, None)
if oldtask is None:
msg = "Task '%s' was not found" % task_id
logging.debug(msg)
raise NoSuchTask(msg)
# Prepare new task
newtask = [priority] + oldtask[1:]
# Mark old entry as abandoned (this doesn't change the sort order and
# therefore doesn't invalidate the heap property of L{self._tasks}).
# See also <http://docs.python.org/library/heapq.html#priority-queue-
# implementation-notes>.
oldtask[-1] = None
# Change reference to new task entry and forget the old one
assert task_id is not None
self._taskdata[task_id] = newtask
# Add a new task with the old number and arguments
heapq.heappush(self._tasks, newtask)
# Notify a waiting worker
self._pool_to_worker.notify()
finally:
self._lock.release()
def SetActive(self, active):
"""Enable/disable processing of tasks.
This is different from L{Quiesce} in the sense that this function just
changes an internal flag and doesn't wait for the queue to be empty. Tasks
already being processed continue normally, but no new tasks will be
started. New tasks can still be added.
@type active: bool
@param active: Whether tasks should be processed
"""
self._lock.acquire()
try:
self._active = active
if active:
# Tell all workers to continue processing
self._pool_to_worker.notifyAll()
finally:
self._lock.release()
def _WaitForTaskUnlocked(self, worker):
"""Waits for a task for a worker.
@type worker: L{BaseWorker}
@param worker: Worker thread
"""
while True:
if self._ShouldWorkerTerminateUnlocked(worker):
return _TERMINATE
# If there's a pending task, return it immediately
if self._active and self._tasks:
# Get task from queue and tell pool about it
try:
task = heapq.heappop(self._tasks)
finally:
self._worker_to_pool.notifyAll()
(_, _, task_id, args) = task
# If the priority was changed, "args" is None
if args is None:
# Try again
logging.debug("Found abandoned task (%r)", task)
continue
# Delete reference
if task_id is not None:
del self._taskdata[task_id]
return task
logging.debug("Waiting for tasks")
# wait() releases the lock and sleeps until notified
self._pool_to_worker.wait()
logging.debug("Notified while waiting")
def _ShouldWorkerTerminateUnlocked(self, worker):
"""Returns whether a worker should terminate.
"""
return (worker in self._termworkers)
def _HasRunningTasksUnlocked(self):
"""Checks whether there's a task running in a worker.
"""
for worker in self._workers + self._termworkers:
if worker._HasRunningTaskUnlocked(): # pylint: disable=W0212
return True
return False
def HasRunningTasks(self):
"""Checks whether there's at least one task running.
"""
self._lock.acquire()
try:
return self._HasRunningTasksUnlocked()
finally:
self._lock.release()
def Quiesce(self):
"""Waits until the task queue is empty.
"""
self._lock.acquire()
try:
self._quiescing = True
# Wait while there are tasks pending or running
while self._tasks or self._HasRunningTasksUnlocked():
self._worker_to_pool.wait()
finally:
self._quiescing = False
# Make sure AddTasks continues in case it was waiting
self._pool_to_pool.notifyAll()
self._lock.release()
def _NewWorkerIdUnlocked(self):
"""Return an identifier for a new worker.
"""
self._last_worker_id += 1
return "%s%d" % (self._name, self._last_worker_id)
def _ResizeUnlocked(self, num_workers):
"""Changes the number of workers.
"""
assert num_workers >= 0, "num_workers must be >= 0"
logging.debug("Resizing to %s workers", num_workers)
current_count = len(self._workers)
if current_count == num_workers:
# Nothing to do
pass
elif current_count > num_workers:
if num_workers == 0:
# Create copy of list to iterate over while lock isn't held.
termworkers = self._workers[:]
del self._workers[:]
else:
# TODO: Implement partial downsizing
raise NotImplementedError()
#termworkers = ...
self._termworkers += termworkers
# Notify workers that something has changed
self._pool_to_worker.notifyAll()
# Join all terminating workers
self._lock.release()
try:
for worker in termworkers:
logging.debug("Waiting for thread %s", worker.getName())
worker.join()
finally:
self._lock.acquire()
# Remove terminated threads. This could be done in a more efficient way
# (del self._termworkers[:]), but checking worker.isAlive() makes sure we
# don't leave zombie threads around.
for worker in termworkers:
assert worker in self._termworkers, ("Worker not in list of"
" terminating workers")
if not worker.isAlive():
self._termworkers.remove(worker)
assert not self._termworkers, "Zombie worker detected"
elif current_count < num_workers:
# Create (num_workers - current_count) new workers
for _ in range(num_workers - current_count):
worker = self._worker_class(self, self._NewWorkerIdUnlocked())
self._workers.append(worker)
worker.start()
def Resize(self, num_workers):
"""Changes the number of workers in the pool.
@param num_workers: the new number of workers
"""
self._lock.acquire()
try:
return self._ResizeUnlocked(num_workers)
finally:
self._lock.release()
def TerminateWorkers(self):
"""Terminate all worker threads.
Unstarted tasks will be ignored.
"""
logging.debug("Terminating all workers")
self._lock.acquire()
try:
self._ResizeUnlocked(0)
if self._tasks:
logging.debug("There are %s tasks left", len(self._tasks))
finally:
self._lock.release()
logging.debug("All workers terminated")
| gpl-2.0 | -5,054,415,737,917,408,000 | 27.76 | 79 | 0.63507 | false |
thiagomg/experiments | math/question.py | 4 | 2911 | import sys
#compatibility
try: input = raw_input
except NameError: pass
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
colors = {
'GREEN': bcolors.OKGREEN,
'BLUE': bcolors.OKBLUE,
'MAGENTA': bcolors.HEADER,
'PURPLE': bcolors.HEADER,
'YELLOW': bcolors.WARNING,
'RED': bcolors.FAIL,
'NONE': bcolors.ENDC
}
attribs = {
'BOLD' : bcolors.BOLD,
'UNDERLINE': bcolors.UNDERLINE,
}
exit_cond = lambda x: x in {'q', 'quit', 'leave', 'exit'}
def set_exit_cond(condition):
global exit_cond
exit_cond = condition
def get_char(s, char_list):
while( True ):
string = input(s)
if exit_cond(string):
return None
if string in char_list:
return string
def get_number(s, max_val=None):
while( True ):
try:
string = input(s)
if exit_cond(string):
return None
val = int(string)
if max_val is None or val <= max_val:
return val
except:
print ('Not a number. Try again')
def get_string(s):
string = input(s)
if exit_cond(string):
return None
return string
def get_word(s):
string = input(s)
if exit_cond(string):
return False
return True
def ask_addition_question(m, n):
for i in range(1, 4):
result = get_number(str(m) + ' + ' + str(n) + ' = ')
        if result is None:
return -1
if result == (m+n):
print ('Correct !')
return 1
else:
            print ('Wrong. Try again!')
return 0
def ask_multiplication_question(m, n):
for i in range(1, 4):
result = get_number(str(m) + ' x ' + str(n) + ' = ')
        if result is None:
return -1
if result == (m*n):
print ('Correct !')
return 1
else:
            print ('Wrong. Try again!')
return 0
def ask_subtraction_question(m, n):
for i in range(1, 4):
if m < n:
m, n = n, m
result = get_number(str(m) + ' - ' + str(n) + ' = ')
        if result is None:
return -1
if result == (m-n):
print ('Correct !')
return 1
else:
            print ('Wrong. Try again!')
return 0
def ask_word_question(word):
return get_word(' ' + word + ' ')
def write(text, color=None, *attrib):
prefix = ''
    suffix = ''
    if color is not None:
        prefix += colors[color.upper()]
    for at in attrib:
        prefix += attribs[at.upper()]
    if len(prefix) > 0:
        suffix = colors['NONE']
    print (prefix + text + suffix)
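# Example usage of write() (illustrative, not part of the original script):
#   write('Correct !', 'green', 'bold')
#   write('Try again', 'red')
#   write('plain text')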
| mit | -6,450,787,443,298,298,000 | 21.05303 | 60 | 0.486087 | false |
info-labs/owlbot | owlbot/tests/test_secrets.py | 1 | 4149 | """Test the secrets module.
As most of the functions in secrets are thin wrappers around functions
defined elsewhere, we don't need to test them exhaustively.
"""
from ..pep506 import secrets
import unittest
import string
# For Python 2/3 compatibility.
try:
unicode
except NameError:
# Python 3.
unicode = str
# === Unit tests ===
class Compare_Digest_Tests(unittest.TestCase):
"""Test secrets.compare_digest function."""
def test_equal(self):
# Test compare_digest functionality with equal strings.
for s in ("a", "bcd", "xyz123"):
a = s*100
b = s*100
self.assertTrue(secrets.compare_digest(a, b))
def test_unequal(self):
# Test compare_digest functionality with unequal strings.
self.assertFalse(secrets.compare_digest("abc", "abcd"))
for s in ("x", "mn", "a1b2c3"):
a = s*100 + "q"
b = s*100 + "k"
self.assertFalse(secrets.compare_digest(a, b))
def test_bad_types(self):
# Test that compare_digest raises with mixed types.
a = "abcde" # str in Python3, bytes in Python2.
a = a.encode('ascii')
assert isinstance(a, bytes)
b = a.decode('ascii')
assert isinstance(b, unicode)
self.assertRaises(TypeError, secrets.compare_digest, a, b)
self.assertRaises(TypeError, secrets.compare_digest, b, a)
def test_bool(self):
# Test that compare_digest returns a bool.
self.assertTrue(isinstance(secrets.compare_digest("abc", "abc"), bool))
self.assertTrue(isinstance(secrets.compare_digest("abc", "xyz"), bool))
class Random_Tests(unittest.TestCase):
"""Test wrappers around SystemRandom methods."""
def test_randbits(self):
# Test randbits.
errmsg = "randbits(%d) returned %d"
for numbits in (3, 12, 30):
for i in range(6):
n = secrets.randbits(numbits)
self.assertTrue(0 <= n < 2**numbits, errmsg % (numbits, n))
def test_choice(self):
# Test choice.
items = [1, 2, 4, 8, 16, 32, 64]
for i in range(10):
self.assertTrue(secrets.choice(items) in items)
def test_randbelow(self):
# Test randbelow.
errmsg = "randbelow(%d) returned %d"
for i in range(2, 10):
n = secrets.randbelow(i)
self.assertTrue(n in range(i), errmsg % (i, n))
self.assertRaises(ValueError, secrets.randbelow, 0)
class Token_Tests(unittest.TestCase):
"""Test token functions."""
def test_token_defaults(self):
# Test that token_* functions handle default size correctly.
for func in (secrets.token_bytes, secrets.token_hex,
secrets.token_urlsafe):
name = func.__name__
try:
func()
except TypeError:
self.fail("%s cannot be called with no argument" % name)
try:
func(None)
except TypeError:
self.fail("%s cannot be called with None" % name)
size = secrets.DEFAULT_ENTROPY
self.assertEqual(len(secrets.token_bytes(None)), size)
self.assertEqual(len(secrets.token_hex(None)), 2*size)
def test_token_bytes(self):
# Test token_bytes.
self.assertTrue(isinstance(secrets.token_bytes(11), bytes))
for n in (1, 8, 17, 100):
self.assertEqual(len(secrets.token_bytes(n)), n)
def test_token_hex(self):
# Test token_hex.
self.assertTrue(isinstance(secrets.token_hex(7), unicode))
for n in (1, 12, 25, 90):
s = secrets.token_hex(n)
self.assertEqual(len(s), 2*n)
self.assertTrue(all(c in string.hexdigits for c in s))
def test_token_urlsafe(self):
# Test token_urlsafe.
self.assertTrue(isinstance(secrets.token_urlsafe(9), unicode))
legal = string.ascii_letters + string.digits + '-_'
for n in (1, 11, 28, 76):
self.assertTrue(all(c in legal for c in secrets.token_urlsafe(n)))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -8,754,500,925,913,074,000 | 32.192 | 79 | 0.58954 | false |
vijaylbais/boto | boto/rds/statusinfo.py | 180 | 2011 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class StatusInfo(object):
"""
Describes a status message.
"""
def __init__(self, status_type=None, normal=None, status=None, message=None):
self.status_type = status_type
self.normal = normal
self.status = status
self.message = message
def __repr__(self):
return 'StatusInfo:%s' % self.message
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'StatusType':
self.status_type = value
elif name == 'Normal':
if value.lower() == 'true':
self.normal = True
else:
self.normal = False
elif name == 'Status':
self.status = value
elif name == 'Message':
self.message = value
else:
setattr(self, name, value)
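# Illustrative note (not part of the original module, and the exact driver is
# an assumption): boto's SAX response handler calls startElement()/endElement()
# on this object for each XML tag, so a response fragment like
#   <StatusType>read replication</StatusType>
#   <Normal>true</Normal>
#   <Status>replicating</Status>
# would leave status_type == 'read replication', normal == True and
# status == 'replicating' on the StatusInfo instance.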
| mit | 8,234,118,086,401,378,000 | 36.240741 | 81 | 0.665341 | false |
nE0sIghT/pcsx2 | 3rdparty/wxwidgets3.0/src/msw/wince/clean_vcp.py | 45 | 1324 | '''
This script will delete dependencies from *.vcp files.
After using this script, the next time you try to save the project,
you will have to wait until 'Visual Tools' rebuilds all dependencies,
and this process might take a HUGE amount of time.
Author : Viktor Voroshylo
'''
__version__='$Revision$'[11:-2]
import sys
if len(sys.argv) != 2 :
print "Usage: %s project_file.vcp" % sys.argv[0]
sys.exit(0)
vsp_filename = sys.argv[1]
exclude_line = 0
resultLines = []
vsp_file = open(vsp_filename, "r")
empty_if_start = -1
line = vsp_file.readline()
while line :
skip_line = 0
if exclude_line :
if not line.endswith("\\\n") : exclude_line = 0
skip_line = 1
elif line.startswith("DEP_CPP_") or line.startswith("NODEP_CPP_") :
exclude_line = 1
skip_line = 1
elif empty_if_start != -1 :
if line == "!ENDIF \n" :
resultLines = resultLines[:empty_if_start]
empty_if_start = -1
skip_line = 1
elif line != "\n" and not line.startswith("!ELSEIF ") :
empty_if_start = -1
elif line.startswith("!IF ") :
empty_if_start = len(resultLines)
if not skip_line :
resultLines.append(line)
line = vsp_file.readline()
open(vsp_filename, "w").write("".join(resultLines))
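# Illustrative note (not part of the original script): any line starting with
# "DEP_CPP_" or "NODEP_CPP_" is dropped together with all of its
# backslash-continued follow-up lines, and an "!IF ..." block that is left
# with no real content as a result is removed as well.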
| gpl-2.0 | 6,558,898,435,123,163,000 | 26.583333 | 71 | 0.60423 | false |
alaski/nova | nova/tests/unit/objects/test_hv_spec.py | 46 | 2308 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import vm_mode
from nova import objects
from nova.tests.unit.objects import test_objects
spec_dict = {
'arch': arch.I686,
'hv_type': hv_type.KVM,
'vm_mode': vm_mode.HVM
}
spec_list = [
arch.I686,
hv_type.KVM,
vm_mode.HVM
]
spec_dict_vz = {
'arch': arch.I686,
'hv_type': hv_type.VIRTUOZZO,
'vm_mode': vm_mode.HVM
}
spec_dict_parallels = {
'arch': arch.I686,
'hv_type': hv_type.PARALLELS,
'vm_mode': vm_mode.HVM
}
class _TestHVSpecObject(object):
def test_hv_spec_from_list(self):
spec_obj = objects.HVSpec.from_list(spec_list)
self.compare_obj(spec_obj, spec_dict)
def test_hv_spec_to_list(self):
spec_obj = objects.HVSpec()
spec_obj.arch = arch.I686
spec_obj.hv_type = hv_type.KVM
spec_obj.vm_mode = vm_mode.HVM
spec = spec_obj.to_list()
self.assertEqual(spec_list, spec)
def test_hv_spec_obj_make_compatible(self):
spec_dict_vz_copy = spec_dict_vz.copy()
# check 1.1->1.0 compatibility
objects.HVSpec().obj_make_compatible(spec_dict_vz_copy, '1.0')
self.assertEqual(spec_dict_parallels, spec_dict_vz_copy)
# check that nothing changed
objects.HVSpec().obj_make_compatible(spec_dict_vz_copy, '1.1')
self.assertEqual(spec_dict_parallels, spec_dict_vz_copy)
class TestHVSpecObject(test_objects._LocalTest,
_TestHVSpecObject):
pass
class TestRemoteHVSpecObject(test_objects._RemoteTest,
_TestHVSpecObject):
pass
| apache-2.0 | 5,418,905,810,517,459,000 | 27.493827 | 78 | 0.656846 | false |
snbueno/blivet | blivet/devices/disk.py | 2 | 21440 | # devices/disk.py
# Classes to represent various types of disk-like devices.
#
# Copyright (C) 2009-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <[email protected]>
#
import os
from gi.repository import BlockDev as blockdev
from .. import errors
from .. import util
from ..flags import flags
from ..storage_log import log_method_call
from .. import udev
from ..size import Size
from ..fcoe import fcoe
import logging
log = logging.getLogger("blivet")
from .storage import StorageDevice
from .container import ContainerDevice
from .network import NetworkStorageDevice
from .dm import DMDevice
class DiskDevice(StorageDevice):
""" A local/generic disk.
This is not the only kind of device that is treated as a disk. More
useful than checking isinstance(device, DiskDevice) is checking
device.isDisk.
"""
_type = "disk"
_partitionable = True
_isDisk = True
def __init__(self, name, fmt=None,
size=None, major=None, minor=None, sysfsPath='',
parents=None, serial=None, vendor="", model="", bus="",
exists=True):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword fmt: this device's formatting
:type fmt: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword uuid: universally unique identifier (device -- not fs)
:type uuid: str
:keyword sysfsPath: sysfs device path
:type sysfsPath: str
:keyword removable: whether or not this is a removable device
:type removable: bool
:keyword serial: the ID_SERIAL_RAW, ID_SERIAL or ID_SERIAL_SHORT for
this device (which one is available)
:type serial: str
:keyword vendor: the manufacturer of this Device
:type vendor: str
:keyword model: manufacturer's device model string
:type model: str
:keyword bus: the interconnect this device uses
:type bus: str
DiskDevices always exist.
"""
StorageDevice.__init__(self, name, fmt=fmt, size=size,
major=major, minor=minor, exists=exists,
sysfsPath=sysfsPath, parents=parents,
serial=serial, model=model,
vendor=vendor, bus=bus)
def __repr__(self):
s = StorageDevice.__repr__(self)
s += (" removable = %(removable)s partedDevice = %(partedDevice)r" %
{"removable": self.removable, "partedDevice": self.partedDevice})
return s
@property
def mediaPresent(self):
if flags.testing:
return True
if not self.partedDevice:
return False
# Some drivers (cpqarray <blegh>) make block device nodes for
# controllers with no disks attached and then report a 0 size,
# treat this as no media present
return Size(self.partedDevice.getLength(unit="B")) != Size(0)
@property
def description(self):
return self.model
@property
def size(self):
""" The disk's size """
return super(DiskDevice, self).size
def _preDestroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
if not self.mediaPresent:
raise errors.DeviceError("cannot destroy disk with no media", self.name)
StorageDevice._preDestroy(self)
class DiskFile(DiskDevice):
""" This is a file that we will pretend is a disk.
This is intended only for testing purposes. The benefit of this class
is that you can instantiate a disk-like device with a working disklabel
class as a non-root user. It is not known how the system will behave if
partitions are committed to one of these disks.
"""
_devDir = ""
def __init__(self, name, fmt=None,
size=None, major=None, minor=None, sysfsPath='',
parents=None, serial=None, vendor="", model="", bus="",
exists=True):
"""
:param str name: the full path to the backing regular file
:keyword :class:`~.formats.DeviceFormat` fmt: the device's format
"""
_name = os.path.basename(name)
self._devDir = os.path.dirname(name)
super(DiskFile, self).__init__(_name, fmt=fmt, size=size,
major=major, minor=minor, sysfsPath=sysfsPath,
parents=parents, serial=serial, vendor=vendor,
model=model, bus=bus, exists=exists)
#
# Regular files do not have sysfs entries.
#
@property
def sysfsPath(self):
return ""
@sysfsPath.setter
def sysfsPath(self, value):
pass
def updateSysfsPath(self):
pass
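# Illustrative sketch (not part of the original module; path and size are
# made-up examples): a DiskFile is typically backed by a sparse regular file,
#
#     util.run_program(["truncate", "-s", "10G", "/tmp/fake-disk.img"])
#     fake_disk = DiskFile("/tmp/fake-disk.img", size=Size("10 GiB"))
#
# which allows disklabel code to be exercised without root privileges, as the
# class docstring notes.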
class DMRaidArrayDevice(DMDevice, ContainerDevice):
""" A dmraid (device-mapper RAID) device """
_type = "dm-raid array"
_packages = ["dmraid"]
_partitionable = True
_isDisk = True
_formatClassName = property(lambda s: "dmraidmember")
_formatUUIDAttr = property(lambda s: None)
def __init__(self, name, fmt=None,
size=None, parents=None, sysfsPath=''):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword fmt: this device's formatting
:type fmt: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword sysfsPath: sysfs device path
:type sysfsPath: str
DMRaidArrayDevices always exist. Blivet cannot create or destroy
them.
"""
super(DMRaidArrayDevice, self).__init__(name, fmt=fmt, size=size,
parents=parents, exists=True,
sysfsPath=sysfsPath)
@property
def devices(self):
""" Return a list of this array's member device instances. """
return self.parents
def deactivate(self):
""" Deactivate the raid set. """
log_method_call(self, self.name, status=self.status)
# This call already checks if the set is not active.
blockdev.dm_deactivate_raid_set(self.name)
def activate(self):
""" Activate the raid set. """
log_method_call(self, self.name, status=self.status)
# This call already checks if the set is active.
blockdev.dm_activate_raid_set(self.name)
udev.settle()
def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
self.activate()
def teardown(self, recursive=None):
""" Close, or tear down, a device. """
log_method_call(self, self.name, status=self.status,
controllable=self.controllable)
if not self._preTeardown(recursive=recursive):
return
log.debug("not tearing down dmraid device %s", self.name)
def _add(self, member):
raise NotImplementedError()
def _remove(self, member):
raise NotImplementedError()
@property
def description(self):
return "BIOS RAID set (%s)" % blockdev.dm_get_raid_set_type(self.name)
@property
def model(self):
return self.description
def dracutSetupArgs(self):
return set(["rd.dm.uuid=%s" % self.name])
class MultipathDevice(DMDevice):
""" A multipath device """
_type = "dm-multipath"
_packages = ["device-mapper-multipath"]
_partitionable = True
_isDisk = True
def __init__(self, name, fmt=None, size=None, serial=None,
parents=None, sysfsPath=''):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword fmt: this device's formatting
:type fmt: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword sysfsPath: sysfs device path
:type sysfsPath: str
:keyword serial: the device's serial number
:type serial: str
MultipathDevices always exist. Blivet cannot create or destroy
them.
"""
DMDevice.__init__(self, name, fmt=fmt, size=size,
parents=parents, sysfsPath=sysfsPath,
exists=True)
self.identity = serial
self.config = {
'wwid' : self.identity,
'mode' : '0600',
'uid' : '0',
'gid' : '0',
}
@property
def wwid(self):
identity = self.identity
ret = []
while identity:
ret.append(identity[:2])
identity = identity[2:]
return ":".join(ret)
@property
def model(self):
if not self.parents:
return ""
return self.parents[0].model
@property
def vendor(self):
if not self.parents:
return ""
return self.parents[0].vendor
@property
def description(self):
return "WWID %s" % (self.wwid,)
def addParent(self, parent):
""" Add a parent device to the mpath. """
log_method_call(self, self.name, status=self.status)
if self.status:
self.teardown()
self.parents.append(parent)
self.setup()
else:
self.parents.append(parent)
def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
udev.settle()
rc = util.run_program(["multipath", self.name])
if rc:
raise errors.MPathError("multipath activation failed for '%s'" %
self.name, hardware_fault=True)
def _postSetup(self):
StorageDevice._postSetup(self)
self.setupPartitions()
udev.settle()
class iScsiDiskDevice(DiskDevice, NetworkStorageDevice):
""" An iSCSI disk. """
_type = "iscsi"
_packages = ["iscsi-initiator-utils", "dracut-network"]
def __init__(self, device, **kwargs):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword exists: does this device exist?
:type exists: bool
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword format: this device's formatting
:type format: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword node: ???
:type node: str
:keyword ibft: use iBFT
:type ibft: bool
:keyword nic: name of NIC to use
:type nic: str
:keyword initiator: initiator name
:type initiator: str
:keyword fw_name: qla4xxx partial offload
:keyword fw_address: qla4xxx partial offload
:keyword fw_port: qla4xxx partial offload
"""
self.node = kwargs.pop("node")
self.ibft = kwargs.pop("ibft")
self.nic = kwargs.pop("nic")
self.initiator = kwargs.pop("initiator")
if self.node is None:
# qla4xxx partial offload
name = kwargs.pop("fw_name")
address = kwargs.pop("fw_address")
port = kwargs.pop("fw_port")
DiskDevice.__init__(self, device, **kwargs)
NetworkStorageDevice.__init__(self,
host_address=address,
nic=self.nic)
log.debug("created new iscsi disk %s %s:%s using fw initiator %s",
name, address, port, self.initiator)
else:
DiskDevice.__init__(self, device, **kwargs)
NetworkStorageDevice.__init__(self, host_address=self.node.address,
nic=self.nic)
log.debug("created new iscsi disk %s %s:%d via %s:%s", self.node.name,
self.node.address,
self.node.port,
self.node.iface,
self.nic)
def dracutSetupArgs(self):
if self.ibft:
return set(["iscsi_firmware"])
# qla4xxx partial offload
if self.node is None:
return set()
address = self.node.address
# surround ipv6 addresses with []
if ":" in address:
address = "[%s]" % address
netroot="netroot=iscsi:"
auth = self.node.getAuth()
if auth:
netroot += "%s:%s" % (auth.username, auth.password)
if len(auth.reverse_username) or len(auth.reverse_password):
netroot += ":%s:%s" % (auth.reverse_username,
auth.reverse_password)
iface_spec = ""
if self.nic != "default":
iface_spec = ":%s:%s" % (self.node.iface, self.nic)
netroot += "@%s::%d%s::%s" % (address,
self.node.port,
iface_spec,
self.node.name)
initiator = "iscsi_initiator=%s" % self.initiator
return set([netroot, initiator])
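# Illustrative sketch (not part of the original module; the address, port and
# IQNs are made-up examples): for a node at 192.0.2.5:3260 named
# iqn.2001-04.com.example:disk0, with no CHAP auth and the default interface,
# dracutSetupArgs() above yields roughly
#     set(["netroot=iscsi:@192.0.2.5::3260::iqn.2001-04.com.example:disk0",
#          "iscsi_initiator=iqn.1994-05.com.example:initiator"])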
class FcoeDiskDevice(DiskDevice, NetworkStorageDevice):
""" An FCoE disk. """
_type = "fcoe"
_packages = ["fcoe-utils", "dracut-network"]
def __init__(self, device, **kwargs):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword exists: does this device exist?
:type exists: bool
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword format: this device's formatting
:type format: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword nic: name of NIC to use
:keyword identifier: ???
"""
self.nic = kwargs.pop("nic")
self.identifier = kwargs.pop("identifier")
DiskDevice.__init__(self, device, **kwargs)
NetworkStorageDevice.__init__(self, nic=self.nic)
log.debug("created new fcoe disk %s (%s) @ %s",
device, self.identifier, self.nic)
def dracutSetupArgs(self):
dcb = True
for nic, dcb, _auto_vlan in fcoe().nics:
if nic == self.nic:
break
else:
return set()
if dcb:
dcbOpt = "dcb"
else:
dcbOpt = "nodcb"
if self.nic in fcoe().added_nics:
return set(["fcoe=%s:%s" % (self.nic, dcbOpt)])
else:
return set(["fcoe=edd:%s" % dcbOpt])
class ZFCPDiskDevice(DiskDevice):
""" A mainframe ZFCP disk. """
_type = "zfcp"
def __init__(self, device, **kwargs):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword exists: does this device exist?
:type exists: bool
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword format: this device's formatting
:type format: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword hba_id: ???
:keyword wwpn: ???
:keyword fcp_lun: ???
"""
self.hba_id = kwargs.pop("hba_id")
self.wwpn = kwargs.pop("wwpn")
self.fcp_lun = kwargs.pop("fcp_lun")
DiskDevice.__init__(self, device, **kwargs)
def __repr__(self):
s = DiskDevice.__repr__(self)
s += (" hba_id = %(hba_id)s wwpn = %(wwpn)s fcp_lun = %(fcp_lun)s" %
{"hba_id": self.hba_id,
"wwpn": self.wwpn,
"fcp_lun": self.fcp_lun})
return s
@property
def description(self):
return "FCP device %(device)s with WWPN %(wwpn)s and LUN %(lun)s" \
% {'device': self.hba_id,
'wwpn': self.wwpn,
'lun': self.fcp_lun}
def dracutSetupArgs(self):
return set(["rd.zfcp=%s,%s,%s" % (self.hba_id, self.wwpn, self.fcp_lun,)])
class DASDDevice(DiskDevice):
""" A mainframe DASD. """
_type = "dasd"
def __init__(self, device, **kwargs):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword exists: does this device exist?
:type exists: bool
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword format: this device's formatting
:type format: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword busid: bus ID
:keyword opts: options
:type opts: dict with option name keys and option value values
"""
self.busid = kwargs.pop('busid')
self.opts = kwargs.pop('opts')
DiskDevice.__init__(self, device, **kwargs)
@property
def description(self):
return "DASD device %s" % self.busid
def getOpts(self):
return ["%s=%s" % (k, v) for k, v in self.opts.items() if v == '1']
def dracutSetupArgs(self):
conf = "/etc/dasd.conf"
line = None
if os.path.isfile(conf):
f = open(conf)
# grab the first line that starts with our busID
for l in f.readlines():
if l.startswith(self.busid):
line = l.rstrip()
break
f.close()
# See if we got a line. If not, grab our getOpts
if not line:
line = self.busid
for devopt in self.getOpts():
line += " %s" % devopt
# Create a translation mapping from dasd.conf format to module format
translate = {'use_diag': 'diag',
'readonly': 'ro',
'erplog': 'erplog',
'failfast': 'failfast'}
# this is a really awkward way of determining if the
# feature found is actually desired (1, not 0), plus
# translating that feature into the actual kernel module
# value
opts = []
parts = line.split()
for chunk in parts[1:]:
try:
feat, val = chunk.split('=')
if int(val):
opts.append(translate[feat])
except (ValueError, KeyError):
# If we don't know what the feature is (feat not in translate
# or if we get a val that doesn't cleanly convert to an int
# we can't do anything with it.
log.warning("failed to parse dasd feature %s", chunk)
if opts:
return set(["rd.dasd=%s(%s)" % (self.busid,
":".join(opts))])
else:
return set(["rd.dasd=%s" % self.busid])
| gpl-2.0 | -2,118,480,112,093,683,500 | 35.033613 | 85 | 0.549953 | false |
WQuanfeng/wagtail | wagtail/contrib/wagtailapi/utils.py | 13 | 1299 | from django.conf import settings
from django.utils.six.moves.urllib.parse import urlparse
from wagtail.wagtailcore.models import Page
class BadRequestError(Exception):
pass
class URLPath(object):
"""
This class represents a URL path that should be converted to a full URL.
It is used when the domain that should be used is not known at the time
the URL was generated. It will get resolved to a full URL during
serialisation in api.py.
One example use case is the documents endpoint adding download URLs into
the JSON. The endpoint does not know the domain name to use at the time so
returns one of these instead.
"""
def __init__(self, path):
self.path = path
class ObjectDetailURL(object):
def __init__(self, model, pk):
self.model = model
self.pk = pk
def get_base_url(request=None):
base_url = getattr(settings, 'WAGTAILAPI_BASE_URL', request.site.root_url if request else None)
if base_url:
# We only want the scheme and netloc
base_url_parsed = urlparse(base_url)
return base_url_parsed.scheme + '://' + base_url_parsed.netloc
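# Illustrative sketch (not part of the original module; the URL is a made-up
# example): with settings.WAGTAILAPI_BASE_URL = 'http://api.example.com/some/path',
# get_base_url() returns 'http://api.example.com' (scheme and netloc only).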
def pages_for_site(site):
pages = Page.objects.public().live()
pages = pages.descendant_of(site.root_page, inclusive=True)
return pages
| bsd-3-clause | 5,236,326,146,387,988,000 | 27.23913 | 99 | 0.689761 | false |
ruiting/opencog | opencog/python/pln_old/examples/deduction/deduction_example.py | 32 | 2889 | """
PLN Deduction Example
Demonstrates how to run the example in deduction_agent.py
when interacting with PLN from a standalone Python environment
for development or testing purposes. The normal use case is to
run the example from the CogServer, for which you should use
deduction_agent.py instead.
"""
from __future__ import print_function
from pprint import pprint
from pln.examples.deduction import deduction_agent
from opencog.atomspace import types, AtomSpace, TruthValue
__author__ = 'Cosmo Harrigan'
# Create an AtomSpace with some sample information, equivalent to the
# information in atomspace_contents.scm
atomspace = AtomSpace()
# Basic concepts
frog = atomspace.add_node(types.ConceptNode, 'Frog', TruthValue(0.01, 100))
intelligent = atomspace.add_node(types.ConceptNode,
'Intelligent',
TruthValue(0.05, 100))
slimy = atomspace.add_node(types.ConceptNode, 'Slimy', TruthValue(0.01, 100))
animal = atomspace.add_node(types.ConceptNode, 'Animal', TruthValue(0.1, 100))
being = atomspace.add_node(types.ConceptNode, 'Being', TruthValue(0.1, 100))
moves = atomspace.add_node(types.PredicateNode, 'Moves', TruthValue(0.1, 100))
# Attributes of frogs
atomspace.add_link(types.InheritanceLink,
[frog, intelligent],
TruthValue(0.2, 100))
atomspace.add_link(types.InheritanceLink, [frog, slimy], TruthValue(0.5, 100))
atomspace.add_link(types.InheritanceLink, [frog, animal], TruthValue(0.9, 100))
# Attributes of animals
atomspace.add_link(types.InheritanceLink,
[animal, being],
TruthValue(0.9, 100))
atomspace.add_link(types.InheritanceLink,
[animal, moves],
TruthValue(0.9, 100))
# Peter is a frog
peter = atomspace.add_node(types.ConceptNode, 'Peter', TruthValue(0.001, 100))
atomspace.add_link(types.InheritanceLink, [peter, frog], TruthValue(0.9, 100))
#print('AtomSpace starting contents:')
#atomspace.print_list()
# Test multiple steps of forward inference on the AtomSpace
deduction_agent = deduction_agent.DeductionAgent()
for i in range(1, 500):
result = deduction_agent.run(atomspace)
output = None
input = None
rule = None
if result is not None:
(rule, input, output) = result
if output is not None:
print("\n---- [Step # {0}] ----".format(i))
print("-- Output:\n{0}".format(output[0]))
print("-- Rule:\n{0}".format(rule))
print("\n-- Input:\n{0}".format(input))
print('--- History:')
history = deduction_agent.get_history()
pprint(history)
with open('pln_log.txt', 'w') as logfile:
all_atoms = atomspace.get_atoms_by_type(t=types.Atom)
print('; Number of atoms in atomspace after inference: %d' %
len(all_atoms), file=logfile)
for atom in all_atoms:
print(atom, file=logfile)
| agpl-3.0 | 3,428,534,164,014,785,500 | 35.1125 | 79 | 0.676359 | false |
kohnle-lernmodule/exeLearningPlus1_04 | twisted/pb/test/test_pb.py | 14 | 42742 |
import gc
import sys, re
from twisted.python import log
#log.startLogging(sys.stderr)
from zope.interface import implements, implementsOnly, implementedBy
from twisted.python import components, failure, reflect
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.internet.main import CONNECTION_LOST
from twisted.application.internet import TCPServer
from twisted.pb import schema, pb, tokens, remoteinterface, referenceable
from twisted.pb.tokens import BananaError, Violation, INT, STRING, OPEN
from twisted.pb.slicer import BananaFailure
from twisted.pb import copyable, broker, call
from twisted.pb.remoteinterface import getRemoteInterface
from twisted.pb.remoteinterface import RemoteInterfaceRegistry
try:
from twisted.pb import crypto
except ImportError:
crypto = None
if crypto and not crypto.available:
crypto = None
from twisted.pb.test.common import HelperTarget, RIHelper, TargetMixin
from twisted.pb.test.common import getRemoteInterfaceName
from twisted.pb.negotiate import eventually, flushEventualQueue
class TestRequest(call.PendingRequest):
def __init__(self, reqID, rref=None):
self.answers = []
call.PendingRequest.__init__(self, reqID, rref)
def complete(self, res):
self.answers.append((True, res))
def fail(self, why):
self.answers.append((False, why))
class TestReferenceUnslicer(unittest.TestCase):
# OPEN(reference), INT(refid), [STR(interfacename), INT(version)]... CLOSE
def setUp(self):
self.broker = broker.Broker()
def newUnslicer(self):
unslicer = referenceable.ReferenceUnslicer()
unslicer.broker = self.broker
unslicer.opener = self.broker.rootUnslicer
return unslicer
def testReject(self):
u = self.newUnslicer()
self.failUnlessRaises(BananaError, u.checkToken, STRING, 10)
u = self.newUnslicer()
self.failUnlessRaises(BananaError, u.checkToken, OPEN, 0)
def testNoInterfaces(self):
u = self.newUnslicer()
u.checkToken(INT, 0)
u.receiveChild(12)
rr1,rr1d = u.receiveClose()
self.failUnless(rr1d is None)
rr2 = self.broker.getTrackerForYourReference(12).getRef()
self.failUnless(rr2)
self.failUnless(isinstance(rr2, referenceable.RemoteReference))
self.failUnlessEqual(rr2.tracker.broker, self.broker)
self.failUnlessEqual(rr2.tracker.clid, 12)
self.failUnlessEqual(rr2.tracker.interfaceName, None)
def testInterfaces(self):
u = self.newUnslicer()
u.checkToken(INT, 0)
u.receiveChild(12)
u.receiveChild("IBar")
rr1,rr1d = u.receiveClose()
self.failUnless(rr1d is None)
rr2 = self.broker.getTrackerForYourReference(12).getRef()
self.failUnless(rr2)
self.failUnlessIdentical(rr1, rr2)
self.failUnless(isinstance(rr2, referenceable.RemoteReference))
self.failUnlessEqual(rr2.tracker.broker, self.broker)
self.failUnlessEqual(rr2.tracker.clid, 12)
self.failUnlessEqual(rr2.tracker.interfaceName, "IBar")
class TestAnswer(unittest.TestCase):
# OPEN(answer), INT(reqID), [answer], CLOSE
def setUp(self):
self.broker = broker.Broker()
def newUnslicer(self):
unslicer = call.AnswerUnslicer()
unslicer.broker = self.broker
unslicer.opener = self.broker.rootUnslicer
unslicer.protocol = self.broker
return unslicer
    def makeRequest(self):
        req = call.PendingRequest(defer.Deferred())
        return req
def testAccept1(self):
req = TestRequest(12)
self.broker.addRequest(req)
u = self.newUnslicer()
u.checkToken(INT, 0)
u.receiveChild(12) # causes broker.getRequest
u.checkToken(STRING, 8)
u.receiveChild("results")
self.failIf(req.answers)
u.receiveClose() # causes broker.gotAnswer
self.failUnlessEqual(req.answers, [(True, "results")])
def testAccept2(self):
req = TestRequest(12)
req.setConstraint(schema.makeConstraint(str))
self.broker.addRequest(req)
u = self.newUnslicer()
u.checkToken(INT, 0)
u.receiveChild(12) # causes broker.getRequest
u.checkToken(STRING, 15)
u.receiveChild("results")
self.failIf(req.answers)
u.receiveClose() # causes broker.gotAnswer
self.failUnlessEqual(req.answers, [(True, "results")])
def testReject1(self):
# answer a non-existent request
req = TestRequest(12)
self.broker.addRequest(req)
u = self.newUnslicer()
u.checkToken(INT, 0)
self.failUnlessRaises(Violation, u.receiveChild, 13)
def testReject2(self):
# answer a request with a result that violates the constraint
req = TestRequest(12)
req.setConstraint(schema.makeConstraint(int))
self.broker.addRequest(req)
u = self.newUnslicer()
u.checkToken(INT, 0)
u.receiveChild(12)
self.failUnlessRaises(Violation, u.checkToken, STRING, 42)
# this does not yet errback the request
self.failIf(req.answers)
# it gets errbacked when banana reports the violation
v = Violation("icky")
v.setLocation("here")
u.reportViolation(BananaFailure(v))
self.failUnlessEqual(len(req.answers), 1)
err = req.answers[0]
self.failIf(err[0])
f = err[1]
self.failUnless(f.check(Violation))
class RIMyTarget(pb.RemoteInterface):
# method constraints can be declared directly:
add1 = schema.RemoteMethodSchema(_response=int, a=int, b=int)
# or through their function definitions:
def add(a=int, b=int): return int
#add = schema.callable(add) # the metaclass makes this unnecessary
# but it could be used for adding options or something
def join(a=str, b=str, c=int): return str
def getName(): return str
disputed = schema.RemoteMethodSchema(_response=int, a=int)
class RIMyTarget2(pb.RemoteInterface):
__remote_name__ = "RIMyTargetInterface2"
sub = schema.RemoteMethodSchema(_response=int, a=int, b=int)
# For some tests, we want the two sides of the connection to disagree about
# the contents of the RemoteInterface they are using. This is remarkably
# difficult to accomplish within a single process. We do it by creating
# something that behaves just barely enough like a RemoteInterface to work.
class FakeTarget(dict):
pass
RIMyTarget3 = FakeTarget()
RIMyTarget3.__remote_name__ = RIMyTarget.__remote_name__
RIMyTarget3['disputed'] = schema.RemoteMethodSchema(_response=int, a=str)
RIMyTarget3['disputed'].name = "disputed"
RIMyTarget3['disputed'].interface = RIMyTarget3
RIMyTarget3['disputed2'] = schema.RemoteMethodSchema(_response=str, a=int)
RIMyTarget3['disputed2'].name = "disputed"
RIMyTarget3['disputed2'].interface = RIMyTarget3
RIMyTarget3['sub'] = schema.RemoteMethodSchema(_response=int, a=int, b=int)
RIMyTarget3['sub'].name = "sub"
RIMyTarget3['sub'].interface = RIMyTarget3
class Target(pb.Referenceable):
implements(RIMyTarget)
def __init__(self, name=None):
self.calls = []
self.name = name
def getMethodSchema(self, methodname):
return None
def remote_add(self, a, b):
self.calls.append((a,b))
return a+b
remote_add1 = remote_add
def remote_getName(self):
return self.name
def remote_disputed(self, a):
return 24
def remote_fail(self):
raise ValueError("you asked me to fail")
class TargetWithoutInterfaces(Target):
# undeclare the RIMyTarget interface
implementsOnly(implementedBy(pb.Referenceable))
class BrokenTarget(pb.Referenceable):
implements(RIMyTarget)
def remote_add(self, a, b):
return "error"
class IFoo(components.Interface):
# non-remote Interface
pass
class Target2(Target):
implementsOnly(IFoo, RIMyTarget2)
class TestInterface(TargetMixin, unittest.TestCase):
def testTypes(self):
self.failUnless(isinstance(RIMyTarget,
remoteinterface.RemoteInterfaceClass))
self.failUnless(isinstance(RIMyTarget2,
remoteinterface.RemoteInterfaceClass))
def testRegister(self):
reg = RemoteInterfaceRegistry
self.failUnlessEqual(reg["RIMyTarget"], RIMyTarget)
self.failUnlessEqual(reg["RIMyTargetInterface2"], RIMyTarget2)
def testDuplicateRegistry(self):
try:
class RIMyTarget(pb.RemoteInterface):
def foo(bar=int): return int
except remoteinterface.DuplicateRemoteInterfaceError:
pass
else:
self.fail("duplicate registration not caught")
def testInterface1(self):
# verify that we extract the right interfaces from a local object.
# also check that the registry stuff works.
self.setupBrokers()
rr, target = self.setupTarget(Target())
iface = getRemoteInterface(target)
self.failUnlessEqual(iface, RIMyTarget)
iname = getRemoteInterfaceName(target)
self.failUnlessEqual(iname, "RIMyTarget")
self.failUnlessIdentical(RemoteInterfaceRegistry["RIMyTarget"],
RIMyTarget)
rr, target = self.setupTarget(Target2())
iname = getRemoteInterfaceName(target)
self.failUnlessEqual(iname, "RIMyTargetInterface2")
self.failUnlessIdentical(\
RemoteInterfaceRegistry["RIMyTargetInterface2"], RIMyTarget2)
def testInterface2(self):
# verify that RemoteInterfaces have the right attributes
t = Target()
iface = getRemoteInterface(t)
self.failUnlessEqual(iface, RIMyTarget)
# 'add' is defined with 'def'
s1 = RIMyTarget['add']
self.failUnless(isinstance(s1, schema.RemoteMethodSchema))
ok, s2 = s1.getArgConstraint("a")
self.failUnless(ok)
self.failUnless(isinstance(s2, schema.IntegerConstraint))
self.failUnless(s2.checkObject(12) == None)
self.failUnlessRaises(schema.Violation, s2.checkObject, "string")
s3 = s1.getResponseConstraint()
self.failUnless(isinstance(s3, schema.IntegerConstraint))
# 'add1' is defined as a class attribute
s1 = RIMyTarget['add1']
self.failUnless(isinstance(s1, schema.RemoteMethodSchema))
ok, s2 = s1.getArgConstraint("a")
self.failUnless(ok)
self.failUnless(isinstance(s2, schema.IntegerConstraint))
self.failUnless(s2.checkObject(12) == None)
self.failUnlessRaises(schema.Violation, s2.checkObject, "string")
s3 = s1.getResponseConstraint()
self.failUnless(isinstance(s3, schema.IntegerConstraint))
s1 = RIMyTarget['join']
self.failUnless(isinstance(s1.getArgConstraint("a")[1],
schema.StringConstraint))
self.failUnless(isinstance(s1.getArgConstraint("c")[1],
schema.IntegerConstraint))
s3 = RIMyTarget['join'].getResponseConstraint()
self.failUnless(isinstance(s3, schema.StringConstraint))
s1 = RIMyTarget['disputed']
self.failUnless(isinstance(s1.getArgConstraint("a")[1],
schema.IntegerConstraint))
s3 = s1.getResponseConstraint()
self.failUnless(isinstance(s3, schema.IntegerConstraint))
def testInterface3(self):
t = TargetWithoutInterfaces()
iface = getRemoteInterface(t)
self.failIf(iface)
class Unsendable:
pass
class TestCall(TargetMixin, unittest.TestCase):
def setUp(self):
TargetMixin.setUp(self)
self.setupBrokers()
def testCall1(self):
# this is done without interfaces
rr, target = self.setupTarget(TargetWithoutInterfaces())
d = rr.callRemote("add", a=1, b=2)
d.addCallback(lambda res: self.failUnlessEqual(res, 3))
d.addCallback(lambda res: self.failUnlessEqual(target.calls, [(1,2)]))
d.addCallback(self._testCall1_1, rr)
return d
testCall1.timeout = 3
def _testCall1_1(self, res, rr):
# the caller still holds the RemoteReference
self.failUnless(self.callingBroker.yourReferenceByCLID.has_key(1))
# release the RemoteReference. This does two things: 1) the
# callingBroker will forget about it. 2) they will send a decref to
# the targetBroker so *they* can forget about it.
del rr # this fires a DecRef
gc.collect() # make sure
# we need to give it a moment to deliver the DecRef message and act
# on it
d = defer.Deferred()
reactor.callLater(0.1, d.callback, None)
d.addCallback(self._testCall1_2)
return d
def _testCall1_2(self, res):
self.failIf(self.callingBroker.yourReferenceByCLID.has_key(1))
self.failIf(self.targetBroker.myReferenceByCLID.has_key(1))
def testFail1(self):
# this is done without interfaces
rr, target = self.setupTarget(TargetWithoutInterfaces())
d = rr.callRemote("fail")
self.failIf(target.calls)
d.addBoth(self._testFail1_1)
return d
testFail1.timeout = 2
def _testFail1_1(self, f):
# f should be a pb.CopiedFailure
self.failUnless(isinstance(f, failure.Failure),
"Hey, we didn't fail: %s" % f)
self.failUnless(f.check(ValueError),
"wrong exception type: %s" % f)
self.failUnlessSubstring("you asked me to fail", f.value)
def testFail2(self):
# this is done without interfaces
rr, target = self.setupTarget(TargetWithoutInterfaces())
d = rr.callRemote("add", a=1, b=2, c=3)
# add() does not take a 'c' argument, so we get a TypeError here
self.failIf(target.calls)
d.addBoth(self._testFail2_1)
return d
testFail2.timeout = 2
def _testFail2_1(self, f):
self.failUnless(isinstance(f, failure.Failure),
"Hey, we didn't fail: %s" % f)
self.failUnless(f.check(TypeError),
"wrong exception type: %s" % f.type)
self.failUnlessSubstring("remote_add() got an unexpected keyword "
"argument 'c'", f.value)
def testFail3(self):
# this is done without interfaces
rr, target = self.setupTarget(TargetWithoutInterfaces())
d = rr.callRemote("bogus", a=1, b=2)
# the target does not have .bogus method, so we get an AttributeError
self.failIf(target.calls)
d.addBoth(self._testFail3_1)
return d
testFail3.timeout = 2
def _testFail3_1(self, f):
self.failUnless(isinstance(f, failure.Failure),
"Hey, we didn't fail: %s" % f)
self.failUnless(f.check(AttributeError),
"wrong exception type: %s" % f.type)
self.failUnlessSubstring("TargetWithoutInterfaces", str(f))
self.failUnlessSubstring(" has no attribute 'remote_bogus'", str(f))
def testCall2(self):
# server end uses an interface this time, but not the client end
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("add", a=3, b=4, _useSchema=False)
# the schema is enforced upon receipt
d.addCallback(lambda res: self.failUnlessEqual(res, 7))
return d
testCall2.timeout = 2
def testCall3(self):
# use interface on both sides
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote('add', 3, 4) # enforces schemas
d.addCallback(lambda res: self.failUnlessEqual(res, 7))
return d
testCall3.timeout = 2
def testCall4(self):
# call through a manually-defined RemoteMethodSchema
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("add", 3, 4, _methodConstraint=RIMyTarget['add1'])
d.addCallback(lambda res: self.failUnlessEqual(res, 7))
return d
testCall4.timeout = 2
def testFailWrongMethodLocal(self):
# the caller knows that this method does not really exist
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("bogus") # RIMyTarget doesn't implement .bogus()
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongMethodLocal_1)
return d
testFailWrongMethodLocal.timeout = 2
def _testFailWrongMethodLocal_1(self, f):
self.failUnless(f.check(Violation))
self.failUnless(re.search(r'RIMyTarget\(.*\) does not offer bogus',
str(f)))
def testFailWrongMethodRemote(self):
# if the target doesn't specify any remote interfaces, then the
# calling side shouldn't try to do any checking. The problem is
# caught on the target side.
rr, target = self.setupTarget(Target(), False)
d = rr.callRemote("bogus") # RIMyTarget doesn't implement .bogus()
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongMethodRemote_1)
return d
testFailWrongMethodRemote.timeout = 2
def _testFailWrongMethodRemote_1(self, f):
self.failUnless(f.check(Violation))
self.failUnlessSubstring("method 'bogus' not defined in RIMyTarget",
str(f))
def testFailWrongMethodRemote2(self):
# call a method which doesn't actually exist. The sender thinks
# they're ok but the recipient catches the violation
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("bogus", _useSchema=False)
# RIMyTarget2 has a 'sub' method, but RIMyTarget (the real interface)
# does not
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongMethodRemote2_1)
d.addCallback(lambda res: self.failIf(target.calls))
return d
testFailWrongMethodRemote2.timeout = 2
def _testFailWrongMethodRemote2_1(self, f):
self.failUnless(f.check(Violation))
self.failUnless(re.search(r'RIMyTarget\(.*\) does not offer bogus',
str(f)))
def testFailWrongArgsLocal1(self):
# we violate the interface (extra arg), and the sender should catch it
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("add", a=1, b=2, c=3)
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongArgsLocal1_1)
d.addCallback(lambda res: self.failIf(target.calls))
return d
testFailWrongArgsLocal1.timeout = 2
def _testFailWrongArgsLocal1_1(self, f):
self.failUnless(f.check(Violation))
self.failUnlessSubstring("unknown argument 'c'", str(f.value))
def testFailWrongArgsLocal2(self):
# we violate the interface (bad arg), and the sender should catch it
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("add", a=1, b="two")
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongArgsLocal2_1)
d.addCallback(lambda res: self.failIf(target.calls))
return d
testFailWrongArgsLocal2.timeout = 2
def _testFailWrongArgsLocal2_1(self, f):
self.failUnless(f.check(Violation))
self.failUnlessSubstring("not a number", str(f.value))
def testFailWrongArgsRemote1(self):
# the sender thinks they're ok but the recipient catches the
# violation
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("add", a=1, b="foo", _useSchema=False)
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongArgsRemote1_1)
d.addCallbacks(lambda res: self.failIf(target.calls))
return d
testFailWrongArgsRemote1.timeout = 2
def _testFailWrongArgsRemote1_1(self, f):
self.failUnless(f.check(Violation))
self.failUnlessSubstring("STRING token rejected by IntegerConstraint",
f.value)
self.failUnlessSubstring("at <RootUnslicer>.<methodcall .add arg[b]>",
f.value)
def testFailWrongReturnRemote(self):
rr, target = self.setupTarget(BrokenTarget(), True)
d = rr.callRemote("add", 3, 4) # violates return constraint
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongReturnRemote_1)
return d
testFailWrongReturnRemote.timeout = 2
def _testFailWrongReturnRemote_1(self, f):
self.failUnless(f.check(Violation))
self.failUnlessSubstring("in outbound method results", f.value)
def testFailWrongReturnLocal(self):
# the target returns a value which violates our _resultConstraint
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("add", a=1, b=2, _resultConstraint=str)
# The target returns an int, which matches the schema they're using,
# so they think they're ok. We've overridden our expectations to
# require a string.
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongReturnLocal_1)
# the method should have been run
d.addCallback(lambda res: self.failUnless(target.calls))
return d
testFailWrongReturnLocal.timeout = 2
def _testFailWrongReturnLocal_1(self, f):
self.failUnless(f.check(Violation))
self.failUnlessSubstring("INT token rejected by StringConstraint",
str(f))
self.failUnlessSubstring("in inbound method results", str(f))
self.failUnlessSubstring("at <RootUnslicer>.Answer(req=0)", str(f))
def testDefer(self):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("defer", obj=12)
d.addCallback(lambda res: self.failUnlessEqual(res, 12))
return d
testDefer.timeout = 2
def testDisconnect1(self):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("hang")
e = RuntimeError("lost connection")
rr.tracker.broker.transport.loseConnection(e)
d.addCallbacks(lambda res: self.fail("should have failed"),
lambda why: why.trap(RuntimeError) and None)
return d
testDisconnect1.timeout = 2
def disconnected(self):
self.lost = 1
def testDisconnect2(self):
rr, target = self.setupTarget(HelperTarget())
self.lost = 0
rr.notifyOnDisconnect(self.disconnected)
rr.tracker.broker.transport.loseConnection(CONNECTION_LOST)
d = eventually()
d.addCallback(lambda res: self.failUnless(self.lost))
return d
def testDisconnect3(self):
rr, target = self.setupTarget(HelperTarget())
self.lost = 0
rr.notifyOnDisconnect(self.disconnected)
rr.dontNotifyOnDisconnect(self.disconnected)
rr.tracker.broker.transport.loseConnection(CONNECTION_LOST)
d = eventually()
d.addCallback(lambda res: self.failIf(self.lost))
return d
def testUnsendable(self):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set", obj=Unsendable())
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testUnsendable_1)
return d
testUnsendable.timeout = 2
def _testUnsendable_1(self, why):
self.failUnless(why.check(Violation))
self.failUnlessSubstring("cannot serialize", why.value.args[0])
class TestReferenceable(TargetMixin, unittest.TestCase):
# test how a Referenceable gets transformed into a RemoteReference as it
# crosses the wire, then verify that it gets transformed back into the
# original Referenceable when it comes back. Also test how shared
# references to the same object are handled.
def setUp(self):
TargetMixin.setUp(self)
self.setupBrokers()
if 0:
print
self.callingBroker.doLog = "TX"
self.targetBroker.doLog = " rx"
def send(self, arg):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set", obj=arg)
d.addCallback(self.failUnless)
d.addCallback(lambda res: target.obj)
return d
def send2(self, arg1, arg2):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set2", obj1=arg1, obj2=arg2)
d.addCallback(self.failUnless)
d.addCallback(lambda res: (target.obj1, target.obj2))
return d
def echo(self, arg):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("echo", obj=arg)
return d
def testRef1(self):
# Referenceables turn into RemoteReferences
r = Target()
d = self.send(r)
d.addCallback(self._testRef1_1, r)
return d
def _testRef1_1(self, res, r):
t = res.tracker
self.failUnless(isinstance(res, referenceable.RemoteReference))
self.failUnlessEqual(t.broker, self.targetBroker)
self.failUnless(type(t.clid) is int)
self.failUnless(self.callingBroker.getMyReferenceByCLID(t.clid) is r)
self.failUnlessEqual(t.interfaceName, 'RIMyTarget')
def testRef2(self):
# sending a Referenceable over the wire multiple times should result
# in equivalent RemoteReferences
r = Target()
d = self.send(r)
d.addCallback(self._testRef2_1, r)
return d
def _testRef2_1(self, res1, r):
d = self.send(r)
d.addCallback(self._testRef2_2, res1)
return d
def _testRef2_2(self, res2, res1):
self.failUnless(res1 == res2)
self.failUnless(res1 is res2) # newpb does this, oldpb didn't
def testRef3(self):
# sending the same Referenceable in multiple arguments should result
# in equivalent RRs
r = Target()
d = self.send2(r, r)
d.addCallback(self._testRef3_1)
return d
def _testRef3_1(self, (res1, res2)):
self.failUnless(res1 == res2)
self.failUnless(res1 is res2)
def testRef4(self):
# sending the same Referenceable in multiple calls will result in
# equivalent RRs
r = Target()
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set", obj=r)
d.addCallback(self._testRef4_1, rr, r, target)
return d
def _testRef4_1(self, res, rr, r, target):
res1 = target.obj
d = rr.callRemote("set", obj=r)
d.addCallback(self._testRef4_2, target, res1)
return d
def _testRef4_2(self, res, target, res1):
res2 = target.obj
self.failUnless(res1 == res2)
self.failUnless(res1 is res2)
def testRef5(self):
# those RemoteReferences can be used to invoke methods on the sender.
# 'r' lives on side A. The anonymous target lives on side B. From
# side A we invoke B.set(r), and we get the matching RemoteReference
# 'rr' which lives on side B. Then we use 'rr' to invoke r.getName
# from side A.
r = Target()
r.name = "ernie"
d = self.send(r)
d.addCallback(lambda rr: rr.callRemote("getName"))
d.addCallback(self.failUnlessEqual, "ernie")
return d
def testRef6(self):
# Referenceables survive round-trips
r = Target()
d = self.echo(r)
d.addCallback(self.failUnlessIdentical, r)
return d
def NOTtestRemoteRef1(self):
# known URLRemoteReferences turn into Referenceables
root = Target()
rr, target = self.setupTarget(HelperTarget())
self.targetBroker.factory = pb.PBServerFactory(root)
urlRRef = self.callingBroker.remoteReferenceForName("", [])
# urlRRef points at root
d = rr.callRemote("set", obj=urlRRef)
self.failUnless(dr(d))
self.failUnlessIdentical(target.obj, root)
def NOTtestRemoteRef2(self):
# unknown URLRemoteReferences are errors
root = Target()
rr, target = self.setupTarget(HelperTarget())
self.targetBroker.factory = pb.PBServerFactory(root)
urlRRef = self.callingBroker.remoteReferenceForName("bogus", [])
# urlRRef points at nothing
d = rr.callRemote("set", obj=urlRRef)
f = de(d)
#print f
#self.failUnlessEqual(f.type, tokens.Violation)
self.failUnlessEqual(type(f.value), str)
self.failUnless(f.value.find("unknown clid 'bogus'") != -1)
def testArgs1(self):
# sending the same non-Referenceable object in multiple calls results
# in distinct objects, because the serialization scope is bounded by
# each method call
r = [1,2]
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set", obj=r)
d.addCallback(self._testArgs1_1, rr, r, target)
# TODO: also make sure the original list goes out of scope once the
# method call has finished, to guard against a leaky
# reference-tracking implementation.
return d
def _testArgs1_1(self, res, rr, r, target):
res1 = target.obj
d = rr.callRemote("set", obj=r)
d.addCallback(self._testArgs1_2, target, res1)
return d
def _testArgs1_2(self, res, target, res1):
res2 = target.obj
self.failUnless(res1 == res2)
self.failIf(res1 is res2)
def testArgs2(self):
# but sending them as multiple arguments of the *same* method call
# results in identical objects
r = [1,2]
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set2", obj1=r, obj2=r)
d.addCallback(self._testArgs2_1, rr, target)
return d
def _testArgs2_1(self, res, rr, target):
self.failUnlessIdentical(target.obj1, target.obj2)
def testAnswer1(self):
# also, shared objects in a return value should be shared
r = [1,2]
rr, target = self.setupTarget(HelperTarget())
target.obj = (r,r)
d = rr.callRemote("get")
d.addCallback(lambda res: self.failUnlessIdentical(res[0], res[1]))
return d
def testAnswer2(self):
# but objects returned by separate method calls should be distinct
rr, target = self.setupTarget(HelperTarget())
r = [1,2]
target.obj = r
d = rr.callRemote("get")
d.addCallback(self._testAnswer2_1, rr, target)
return d
def _testAnswer2_1(self, res1, rr, target):
d = rr.callRemote("get")
d.addCallback(self._testAnswer2_2, res1)
return d
def _testAnswer2_2(self, res2, res1):
self.failUnless(res1 == res2)
self.failIf(res1 is res2)
class TestFactory(unittest.TestCase):
def setUp(self):
self.client = None
self.server = None
def gotReference(self, ref):
self.client = ref
def tearDown(self):
if self.client:
self.client.broker.transport.loseConnection()
if self.server:
return self.server.stopListening()
class TestCallable(unittest.TestCase):
def setUp(self):
self.services = [pb.PBService(), pb.PBService()]
self.tubA, self.tubB = self.services
for s in self.services:
s.startService()
l = s.listenOn("tcp:0:interface=127.0.0.1")
s.setLocation("localhost:%d" % l.getPortnum())
def tearDown(self):
return defer.DeferredList([s.stopService() for s in self.services])
def testBoundMethod(self):
target = Target()
meth_url = self.tubB.registerReference(target.remote_add)
d = self.tubA.getReference(meth_url)
d.addCallback(self._testBoundMethod_1)
return d
testBoundMethod.timeout = 5
def _testBoundMethod_1(self, ref):
self.failUnless(isinstance(ref, referenceable.RemoteMethodReference))
#self.failUnlessEqual(ref.getSchemaName(),
# RIMyTarget.__remote_name__ + "/remote_add")
d = ref.callRemote(a=1, b=2)
d.addCallback(lambda res: self.failUnlessEqual(res, 3))
return d
def testFunction(self):
l = []
# we need a keyword arg here
def append(what):
l.append(what)
func_url = self.tubB.registerReference(append)
d = self.tubA.getReference(func_url)
d.addCallback(self._testFunction_1, l)
return d
testFunction.timeout = 5
def _testFunction_1(self, ref, l):
self.failUnless(isinstance(ref, referenceable.RemoteMethodReference))
d = ref.callRemote(what=12)
d.addCallback(lambda res: self.failUnlessEqual(l, [12]))
return d
class TestService(unittest.TestCase):
def setUp(self):
self.services = [pb.PBService()]
self.services[0].startService()
def tearDown(self):
return defer.DeferredList([s.stopService() for s in self.services])
def testRegister(self):
s = self.services[0]
l = s.listenOn("tcp:0:interface=127.0.0.1")
s.setLocation("localhost:%d" % l.getPortnum())
t1 = Target()
public_url = s.registerReference(t1, "target")
if crypto:
self.failUnless(public_url.startswith("pb://"))
self.failUnless(public_url.endswith("@localhost:%d/target"
% l.getPortnum()))
else:
self.failUnlessEqual(public_url,
"pbu://localhost:%d/target"
% l.getPortnum())
self.failUnlessEqual(s.registerReference(t1, "target"), public_url)
self.failUnlessIdentical(s.getReferenceForURL(public_url), t1)
t2 = Target()
private_url = s.registerReference(t2)
self.failUnlessEqual(s.registerReference(t2), private_url)
self.failUnlessIdentical(s.getReferenceForURL(private_url), t2)
s.unregisterURL(public_url)
self.failUnlessRaises(KeyError, s.getReferenceForURL, public_url)
s.unregisterReference(t2)
self.failUnlessRaises(KeyError, s.getReferenceForURL, private_url)
# TODO: check what happens when you register the same referenceable
# under multiple URLs
def getRef(self, target):
self.services.append(pb.PBService())
s1 = self.services[0]
s2 = self.services[1]
s2.startService()
l = s1.listenOn("tcp:0:interface=127.0.0.1")
s1.setLocation("localhost:%d" % l.getPortnum())
public_url = s1.registerReference(target, "target")
d = s2.getReference(public_url)
return d
def testConnect1(self):
t1 = TargetWithoutInterfaces()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('add', a=2, b=3))
d.addCallback(self._testConnect1, t1)
return d
testConnect1.timeout = 5
def _testConnect1(self, res, t1):
self.failUnlessEqual(t1.calls, [(2,3)])
self.failUnlessEqual(res, 5)
def testConnect2(self):
t1 = Target()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('add', a=2, b=3))
d.addCallback(self._testConnect2, t1)
return d
testConnect2.timeout = 5
def _testConnect2(self, res, t1):
self.failUnlessEqual(t1.calls, [(2,3)])
self.failUnlessEqual(res, 5)
def testConnect3(self):
t1 = Target()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('add', a=2, b=3))
d.addCallback(self._testConnect3, t1)
return d
testConnect3.timeout = 5
def _testConnect3(self, res, t1):
self.failUnlessEqual(t1.calls, [(2,3)])
self.failUnlessEqual(res, 5)
def testStatic(self):
# make sure we can register static data too, at least hashable ones
t1 = (1,2,3)
d = self.getRef(t1)
d.addCallback(lambda ref: self.failUnlessEqual(ref, (1,2,3)))
return d
testStatic.timeout = 2
def testBadMethod(self):
t1 = Target()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('missing', a=2, b=3))
d.addCallbacks(self._testBadMethod_cb, self._testBadMethod_eb)
return d
testBadMethod.timeout = 5
def _testBadMethod_cb(self, res):
self.fail("method wasn't supposed to work")
def _testBadMethod_eb(self, f):
#self.failUnlessEqual(f.type, 'twisted.pb.tokens.Violation')
self.failUnlessEqual(f.type, Violation)
self.failUnless(re.search(r'RIMyTarget\(.*\) does not offer missing',
str(f)))
def testBadMethod2(self):
t1 = TargetWithoutInterfaces()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('missing', a=2, b=3))
d.addCallbacks(self._testBadMethod_cb, self._testBadMethod2_eb)
return d
testBadMethod2.timeout = 5
def _testBadMethod2_eb(self, f):
self.failUnlessEqual(f.type, 'exceptions.AttributeError')
self.failUnlessSubstring("TargetWithoutInterfaces", f.value)
self.failUnlessSubstring(" has no attribute 'remote_missing'", f.value)
class ThreeWayHelper:
passed = False
def start(self):
d = pb.getRemoteURL_TCP("localhost", self.portnum1, "", RIHelper)
d.addCallback(self.step2)
d.addErrback(self.err)
return d
def step2(self, remote1):
# .remote1 is our RRef to server1's "t1" HelperTarget
self.clients.append(remote1)
self.remote1 = remote1
d = pb.getRemoteURL_TCP("localhost", self.portnum2, "", RIHelper)
d.addCallback(self.step3)
return d
def step3(self, remote2):
# and .remote2 is our RRef to server2's "t2" helper target
self.clients.append(remote2)
self.remote2 = remote2
# sending a RemoteReference back to its source should be ok
d = self.remote1.callRemote("set", obj=self.remote1)
d.addCallback(self.step4)
return d
def step4(self, res):
assert self.target1.obj is self.target1
# but sending one to someone else is not
d = self.remote2.callRemote("set", obj=self.remote1)
d.addCallback(self.step5_callback)
d.addErrback(self.step5_errback)
return d
def step5_callback(self, res):
why = unittest.FailTest("sending a 3rd-party reference did not fail")
self.err(failure.Failure(why))
return None
def step5_errback(self, why):
bad = None
if why.type != tokens.Violation:
bad = "%s failure should be a Violation" % why.type
elif why.value.args[0].find("RemoteReferences can only be sent back to their home Broker") == -1:
bad = "wrong error message: '%s'" % why.value.args[0]
if bad:
why = unittest.FailTest(bad)
self.passed = failure.Failure(why)
else:
self.passed = True
def err(self, why):
self.passed = why
class Test3Way(unittest.TestCase):
# Here we test the three-party introduction process as depicted in the
# classic Granovetter diagram. Alice has a reference to Bob and another
# one to Carol. Alice wants to give her Carol-reference to Bob, by
# including it as the argument to a method she invokes on her
# Bob-reference.
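    # Rough sketch of the sequence exercised below (names are just the
    # helpers used in this test, not part of any public API):
    #
    #   tubA (alice) --getReference--> bob   (registered on tubB)
    #   tubA (alice) --getReference--> carol (registered on tubC)
    #   alice: abob.callRemote("set", obj=acarol)   # the "gift"
    #   => bob ends up holding his own RemoteReference to carol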
def setUp(self):
self.services = [pb.PBService(), pb.PBService(), pb.PBService()]
self.tubA, self.tubB, self.tubC = self.services
for s in self.services:
s.startService()
l = s.listenOn("tcp:0:interface=127.0.0.1")
s.setLocation("localhost:%d" % l.getPortnum())
def tearDown(self):
return defer.DeferredList([s.stopService() for s in self.services])
def testGift(self):
# we must start by giving Alice a reference to both Bob and Carol.
self.bob = HelperTarget("bob")
self.bob_url = self.tubB.registerReference(self.bob)
self.carol = HelperTarget("carol")
self.carol_url = self.tubC.registerReference(self.carol)
# now, from Alice's point of view:
d = self.tubA.getReference(self.bob_url)
d.addCallback(self._aliceGotBob)
return d
testGift.timeout = 2
def _aliceGotBob(self, abob):
self.abob = abob # Alice's reference to Bob
d = self.tubA.getReference(self.carol_url)
d.addCallback(self._aliceGotCarol)
return d
def _aliceGotCarol(self, acarol):
self.acarol = acarol # Alice's reference to Carol
d2 = self.bob.waitfor()
d = self.abob.callRemote("set", obj=self.acarol) # send the gift
# TODO: at this point, 'del self.acarol' should not lose alice's
# reference to carol, because it will still be in the gift table. The
# trick is how to test that, we would need a way to stall the gift
# delivery while we verify everything
d.addCallback(lambda res: d2)
d.addCallback(self._bobGotCarol)
return d
def _bobGotCarol(self, bcarol):
# Bob has received the gift
self.bcarol = bcarol
# alice's gift table should be empty
brokerAB = self.abob.tracker.broker
self.failIf(brokerAB.myGifts)
self.failIf(brokerAB.myGiftsByGiftID)
d2 = self.carol.waitfor()
d = self.bcarol.callRemote("set", obj=12)
d.addCallback(lambda res: d2)
d.addCallback(self._carolCalled)
return d
def _carolCalled(self, res):
self.failUnlessEqual(res, 12)
# TODO:
# when the Violation is remote, it is reported in a CopiedFailure, which
# means f.type is a string. When it is local, it is reported in a Failure,
# and f.type is the tokens.Violation class. I'm not sure how I feel about
# these being different.
# TODO: tests to port from oldpb suite
# testTooManyRefs: sending pb.MAX_BROKER_REFS across the wire should die
# testFactoryCopy?
# tests which aren't relevant right now but which might be once we port the
# corresponding functionality:
#
# testObserve, testCache (pb.Cacheable)
# testViewPoint
# testPublishable (spread.publish??)
# SpreadUtilTestCase (spread.util)
# NewCredTestCase
# tests which aren't relevant and aren't likely to ever be
#
# PagingTestCase
# ConnectionTestCase (oldcred)
# NSPTestCase
| gpl-2.0 | -4,765,436,701,169,684,000 | 36.959147 | 105 | 0.636681 | false |
vimeworks/ImpaQto | coworkersimpaqto/models.py | 1 | 4211 | from django.db import models
from django.template.defaultfilters import default
# Create your models here.
class Membresia(models.Model):
MODALIDAD_CHOICES=(
#('D','Diario'),
('M','Mensual'),
#('S','Semestral'),
#('A','Anual'),
)
STATE_CHOICES=(
('A','Activo'),
('I','Inactivo'),
)
nombre = models.TextField("Nombre de la membresía")
uso_espacio = models.IntegerField("Uso de Espacio")
modalidad = models.CharField("Modalidad de la membresía",max_length=1,choices=MODALIDAD_CHOICES)
estado = models.CharField("Estado de la membresía",max_length=1,choices=STATE_CHOICES)
def __str__(self):
return self.nombre
def __unicode__(self):
return self.nombre
class Coworker(models.Model):
nombre = models.CharField("Nombre del Coworker",max_length=250)
apellido = models.CharField("Apellido del Coworker",max_length=250)
mail= models.EmailField("Correo Electrónico del Coworker",unique=True,null=False,blank=True)
username = models.CharField("Usuario",max_length=16,null=False,blank=True)
def __str__(self):
return '%s %s'%(self.nombre,self.apellido)
def mail_default(self):
return {"mail":"[email protected]"}
class Meta:
ordering = ["apellido"]
verbose_name_plural="Coworker's"
class Contrato(models.Model):
ACTIVO='A'
INACTIVO='I'
ESTADO_CHOICES=(
(ACTIVO,'Activo'),
(INACTIVO,'Inactivo'),
)
coworker = models.ForeignKey(Coworker,verbose_name="Nombre del Coworkers")
membresia = models.ForeignKey(Membresia,verbose_name="Nombre de la membresía")
fecha_inicio = models.DateField()
fecha_fin = models.DateField(null=True,blank=True)
estado = models.CharField("Estado del contrato",max_length=1,choices=ESTADO_CHOICES,default=ACTIVO)
minutos_mes = models.DecimalField(decimal_places=2,max_digits=10,null=True,blank=True)
def __str__(self):
return '%s %s'%(self.coworker,self.membresia)
class Meta:
order_with_respect_to = 'coworker'
verbose_name_plural="Planes - Coworker's"
class ControlConsumo(models.Model):
mes = models.IntegerField()
anio = models.IntegerField("Año")
control_minutos = models.DecimalField(decimal_places=2,max_digits=10,null=True,blank=True)
contrato = models.ForeignKey(Contrato,verbose_name="Contrato a elegir")
def __str__(self):
        return 'En %s del %s' % (self.mes, self.anio)
class Meta:
ordering = ["anio"]
verbose_name_plural = "Resumen del Consumo"
class ManejadorConsumo(models.Manager):
def resumen_dias(self,mes,anio):
from django.db import connection
cursor = connection.cursor()
cursor.execute("""
SELECT date_part('day',c.fecha_entrada) as day, SUM(c.minutos) as minutos
FROM coworkersimpaqto_consumo c
WHERE date_part('month', c.fecha_entrada) = %s
AND date_part('year', c.fecha_entrada) = %s
GROUP BY day
ORDER BY day""",[mes,anio])
lista_resultados =[]
for row in cursor.fetchall():
p =self.model(minutos=row[1])
p.dia = row[0]
p.resumen_minutos = row[1]
lista_resultados.append(p)
return lista_resultados
class Consumo(models.Model):
ENTRADA ='E'
SALIDA = 'S'
REGISTRO_CHOICES=(
(ENTRADA,'Entrada'),
(SALIDA,'Salida'),
)
estado_registro = models.CharField("Registro de ",max_length=1,choices = REGISTRO_CHOICES,default=ENTRADA)
fecha_entrada = models.DateTimeField(auto_now_add=True,null=True,blank=True)
fecha_salida = models.DateTimeField(null=True,blank=True)
minutos = models.DecimalField(decimal_places=2,max_digits=10,null=True,blank=True)
control_consumo = models.ForeignKey(ControlConsumo,verbose_name="Control Consumo",null=False,blank=False)
objects = models.Manager()
reporte = ManejadorConsumo()
def __str__(self):
return '%s '%(self.control_consumo)
class Meta:
ordering = ["fecha_entrada"]
verbose_name_plural = "Asistencia"
| mit | 6,665,792,510,473,334,000 | 33.752066 | 110 | 0.638288 | false |
cloudbase/nova | nova/tests/unit/api_samples_test_base/test_compare_result.py | 10 | 16266 | # Copyright 2015 HPE, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import testtools
from nova import test
from nova.tests.functional import api_samples_test_base
class TestCompareResult(test.NoDBTestCase):
"""Provide test coverage for result comparison logic in functional tests.
    _compare_result performs two types of comparisons: template data and
    sample data.
Template data means the response is checked against a regex that is
referenced by the template name. The template name is specified in
the format %(name)
Sample data is a normal value comparison.
"""
def getApiSampleTestBaseHelper(self):
"""Build an instance without running any unwanted test methods"""
# NOTE(auggy): TestCase takes a "test" method name to run in __init__
# calling this way prevents additional test methods from running
ast_instance = api_samples_test_base.ApiSampleTestBase('setUp')
# required by ApiSampleTestBase
ast_instance.api_major_version = 'v2'
ast_instance._project_id = 'True'
# automagically create magic methods usually handled by test classes
ast_instance.compute = mock.MagicMock()
ast_instance.subs = ast_instance._get_regexes()
return ast_instance
def setUp(self):
super(TestCompareResult, self).setUp()
self.ast = self.getApiSampleTestBaseHelper()
def test_bare_strings_match(self):
"""compare 2 bare strings that match"""
sample_data = u'foo'
response_data = u'foo'
result = self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
# NOTE(auggy): _compare_result will not return a matched value in the
# case of bare strings. If they don't match it will throw an exception,
# otherwise it returns "None".
self.assertEqual(
expected=None,
observed=result,
message='Check _compare_result of 2 bare strings')
def test_bare_strings_no_match(self):
"""check 2 bare strings that don't match"""
sample_data = u'foo'
response_data = u'bar'
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
def test_template_strings_match(self):
"""compare 2 template strings (contain %) that match"""
template_data = u'%(id)s'
response_data = u'858f295a-8543-45fa-804a-08f8356d616d'
result = self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
self.assertEqual(
expected=response_data,
observed=result,
message='Check _compare_result of 2 template strings')
def test_template_strings_no_match(self):
"""check 2 template strings (contain %) that don't match"""
template_data = u'%(id)s'
response_data = u'$58f295a-8543-45fa-804a-08f8356d616d'
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
# TODO(auggy): _compare_result needs a consistent return value
# In some cases it returns the value if it matched, in others it returns
# None. In all cases, it throws an exception if there's no match.
def test_bare_int_match(self):
"""check 2 bare ints that match"""
sample_data = 42
response_data = 42
result = self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
self.assertEqual(
expected=None,
observed=result,
message='Check _compare_result of 2 bare ints')
def test_bare_int_no_match(self):
"""check 2 bare ints that don't match"""
sample_data = 42
response_data = 43
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
# TODO(auggy): _compare_result needs a consistent return value
def test_template_int_match(self):
"""check template int against string containing digits"""
template_data = u'%(int)s'
response_data = u'42'
result = self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
self.assertEqual(
expected=None,
observed=result,
message='Check _compare_result of template ints')
def test_template_int_no_match(self):
"""check template int against a string containing no digits"""
template_data = u'%(int)s'
response_data = u'foo'
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
def test_template_int_value(self):
"""check an int value of a template int throws exception"""
# template_data = u'%(int_test)'
# response_data = 42
# use an int instead of a string as the subs value
local_subs = copy.deepcopy(self.ast.subs)
local_subs.update({'int_test': 42})
with testtools.ExpectedException(TypeError):
self.ast.subs = local_subs
# TODO(auggy): _compare_result needs a consistent return value
def test_dict_match(self):
"""check 2 matching dictionaries"""
template_data = {
u'server': {
u'id': u'%(id)s',
u'adminPass': u'%(password)s'
}
}
response_data = {
u'server': {
u'id': u'858f295a-8543-45fa-804a-08f8356d616d',
u'adminPass': u'4ZQ3bb6WYbC2'}
}
result = self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
self.assertEqual(
expected=u'858f295a-8543-45fa-804a-08f8356d616d',
observed=result,
message='Check _compare_result of 2 dictionaries')
def test_dict_no_match_value(self):
"""check 2 dictionaries where one has a different value"""
sample_data = {
u'server': {
u'id': u'858f295a-8543-45fa-804a-08f8356d616d',
u'adminPass': u'foo'
}
}
response_data = {
u'server': {
u'id': u'858f295a-8543-45fa-804a-08f8356d616d',
u'adminPass': u'4ZQ3bb6WYbC2'}
}
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
def test_dict_no_match_extra_key(self):
"""check 2 dictionaries where one has an extra key"""
template_data = {
u'server': {
u'id': u'%(id)s',
u'adminPass': u'%(password)s',
u'foo': u'foo'
}
}
response_data = {
u'server': {
u'id': u'858f295a-8543-45fa-804a-08f8356d616d',
u'adminPass': u'4ZQ3bb6WYbC2'}
}
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
def test_dict_result_type_mismatch(self):
"""check expected is a dictionary and result is not a dictionary"""
template_data = {
u'server': {
u'id': u'%(id)s',
u'adminPass': u'%(password)s',
}
}
response_data = u'foo'
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
# TODO(auggy): _compare_result needs a consistent return value
def test_list_match(self):
"""check 2 matching lists"""
template_data = {
u'links':
[
{
u'href': u'%(versioned_compute_endpoint)s/server/%(uuid)s',
u'rel': u'self'
},
{
u'href': u'%(compute_endpoint)s/servers/%(uuid)s',
u'rel': u'bookmark'
}
]
}
response_data = {
u'links':
[
{
u'href':
(u'http://openstack.example.com/v2/%s/server/'
'858f295a-8543-45fa-804a-08f8356d616d' %
api_samples_test_base.PROJECT_ID
),
u'rel': u'self'
},
{
u'href':
(u'http://openstack.example.com/%s/servers/'
'858f295a-8543-45fa-804a-08f8356d616d' %
api_samples_test_base.PROJECT_ID
),
u'rel': u'bookmark'
}
]
}
result = self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
self.assertEqual(
expected=None,
observed=result,
message='Check _compare_result of 2 lists')
def test_list_match_extra_item_result(self):
"""check extra list items in result """
template_data = {
u'links':
[
{
u'href': u'%(versioned_compute_endpoint)s/server/%(uuid)s',
u'rel': u'self'
},
{
u'href': u'%(compute_endpoint)s/servers/%(uuid)s',
u'rel': u'bookmark'
}
]
}
response_data = {
u'links':
[
{
u'href':
(u'http://openstack.example.com/v2/openstack/server/'
'858f295a-8543-45fa-804a-08f8356d616d'),
u'rel': u'self'
},
{
u'href':
(u'http://openstack.example.com/openstack/servers/'
'858f295a-8543-45fa-804a-08f8356d616d'),
u'rel': u'bookmark'
},
u'foo'
]
}
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
def test_list_match_extra_item_template(self):
"""check extra list items in template """
template_data = {
u'links':
[
{
u'href': u'%(versioned_compute_endpoint)s/server/%(uuid)s',
u'rel': u'self'
},
{
u'href': u'%(compute_endpoint)s/servers/%(uuid)s',
u'rel': u'bookmark'
},
u'foo' # extra field
]
}
response_data = {
u'links':
[
{
u'href':
(u'http://openstack.example.com/v2/openstack/server/'
'858f295a-8543-45fa-804a-08f8356d616d'),
u'rel': u'self'
},
{
u'href':
(u'http://openstack.example.com/openstack/servers/'
'858f295a-8543-45fa-804a-08f8356d616d'),
u'rel': u'bookmark'
}
]
}
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
def test_list_no_match(self):
"""check 2 matching lists"""
template_data = {
u'things':
[
{
u'foo': u'bar',
u'baz': 0
},
{
u'foo': u'zod',
u'baz': 1
}
]
}
response_data = {
u'things':
[
{
u'foo': u'bar',
u'baz': u'0'
},
{
u'foo': u'zod',
u'baz': 1
}
]
}
# TODO(auggy): This error returns "extra list items"
# it should show the item/s in the list that didn't match
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
def test_none_match(self):
"""check that None matches"""
sample_data = None
response_data = None
result = self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
# NOTE(auggy): _compare_result will not return a matched value in the
# case of bare strings. If they don't match it will throw an exception,
# otherwise it returns "None".
self.assertEqual(
expected=None,
observed=result,
message='Check _compare_result of None')
def test_none_no_match(self):
"""check expected none and non-None response don't match"""
sample_data = None
response_data = u'bar'
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
def test_none_result_no_match(self):
"""check result none and expected non-None response don't match"""
sample_data = u'foo'
response_data = None
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
def test_template_no_subs_key(self):
"""check an int value of a template int throws exception"""
template_data = u'%(foo)'
response_data = 'bar'
with testtools.ExpectedException(KeyError):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
| apache-2.0 | 3,341,611,652,029,612,000 | 33.244211 | 79 | 0.505103 | false |
akiss77/servo | etc/ci/performance/test_differ.py | 77 | 1744 | #!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import json
parser = argparse.ArgumentParser(description="Diff between two runs of performance tests.")
parser.add_argument("file1", help="the first output json from runner")
parser.add_argument("file2", help="the second output json from runner")
args = parser.parse_args()
def load_data(filename):
with open(filename, 'r') as f:
results = {}
totals = {}
counts = {}
records = json.load(f)
for record in records:
key = record.get('testcase')
value = record.get('domComplete') - record.get('domLoading')
            totals[key] = totals.get(key, 0) + value
            counts[key] = counts.get(key, 0) + 1
results[key] = round(totals[key] / counts[key])
return results
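# Each runner record is assumed to look roughly like
# {"testcase": "about:blank", "domLoading": 100, "domComplete": 160},
# so load_data() reports the mean of domComplete - domLoading per testcase;
# e.g. two runs of 60ms and 40ms for the same page average to 50.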
data1 = load_data(args.file1)
data2 = load_data(args.file2)
keys = set(data1.keys()).union(data2.keys())
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
END = '\033[0m'
for key in keys:
value1 = data1.get(key)
value2 = data2.get(key)
if value1 and not(value2):
print ("{}Test {}: missing from {}.{}".format(WARNING, key, args.file2, END))
elif value2 and not(value1):
print ("{}Test {}: missing from {}.{}".format(WARNING, key, args.file1, END))
elif value1 and value2:
diff = value2 - value1
change = diff / value1
color = BLUE if value1 <= value2 else GREEN
print("{}{:6} {:6} {:+6} {:+8.2%} {}.{}".format(color, value1, value2, diff, change, key, END))
| mpl-2.0 | 5,776,088,651,868,218,000 | 32.538462 | 105 | 0.614679 | false |
legendtang/mitmproxy | libmproxy/main.py | 11 | 3548 | from __future__ import print_function, absolute_import
import os
import signal
import sys
import netlib.version
import netlib.version_check
from . import version, cmdline
from .proxy import process_proxy_options, ProxyServerError
from .proxy.server import DummyServer, ProxyServer
def assert_utf8_env():
spec = ""
for i in ["LANG", "LC_CTYPE", "LC_ALL"]:
spec += os.environ.get(i, "").lower()
if "utf" not in spec:
print(
"Error: mitmproxy requires a UTF console environment.",
file=sys.stderr
)
print(
"Set your LANG enviroment variable to something like en_US.UTF-8",
file=sys.stderr
)
sys.exit(1)
def get_server(dummy_server, options):
if dummy_server:
return DummyServer(options)
else:
try:
return ProxyServer(options)
except ProxyServerError as v:
print(str(v), file=sys.stderr)
sys.exit(1)
def mitmproxy(args=None): # pragma: nocover
from . import console
netlib.version_check.version_check(version.IVERSION)
assert_utf8_env()
parser = cmdline.mitmproxy()
options = parser.parse_args(args)
if options.quiet:
options.verbose = 0
proxy_config = process_proxy_options(parser, options)
console_options = console.Options(**cmdline.get_common_options(options))
console_options.palette = options.palette
console_options.palette_transparent = options.palette_transparent
console_options.eventlog = options.eventlog
console_options.intercept = options.intercept
console_options.limit = options.limit
server = get_server(console_options.no_server, proxy_config)
m = console.ConsoleMaster(server, console_options)
try:
m.run()
except KeyboardInterrupt:
pass
def mitmdump(args=None): # pragma: nocover
from . import dump
netlib.version_check.version_check(version.IVERSION)
parser = cmdline.mitmdump()
options = parser.parse_args(args)
if options.quiet:
options.verbose = 0
options.flow_detail = 0
proxy_config = process_proxy_options(parser, options)
dump_options = dump.Options(**cmdline.get_common_options(options))
dump_options.flow_detail = options.flow_detail
dump_options.keepserving = options.keepserving
dump_options.filtstr = " ".join(options.args) if options.args else None
server = get_server(dump_options.no_server, proxy_config)
try:
master = dump.DumpMaster(server, dump_options)
def cleankill(*args, **kwargs):
master.shutdown()
signal.signal(signal.SIGTERM, cleankill)
master.run()
except dump.DumpError as e:
print("mitmdump: %s" % e, file=sys.stderr)
sys.exit(1)
except KeyboardInterrupt:
pass
def mitmweb(args=None): # pragma: nocover
from . import web
netlib.version_check.version_check(version.IVERSION)
parser = cmdline.mitmweb()
options = parser.parse_args(args)
if options.quiet:
options.verbose = 0
proxy_config = process_proxy_options(parser, options)
web_options = web.Options(**cmdline.get_common_options(options))
web_options.intercept = options.intercept
web_options.wdebug = options.wdebug
web_options.wiface = options.wiface
web_options.wport = options.wport
server = get_server(web_options.no_server, proxy_config)
m = web.WebMaster(server, web_options)
try:
m.run()
except KeyboardInterrupt:
pass
| mit | -6,065,686,260,266,282,000 | 27.612903 | 78 | 0.664036 | false |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/rest_framework/serializers.py | 3 | 60354 | """
Serializers and ModelSerializers are similar to Forms and ModelForms.
Unlike forms, they are not constrained to dealing with HTML output, and
form encoded input.
Serialization in REST framework is a two-phase process:
1. Serializers marshal between complex types like model instances, and
python primitives.
2. The process of marshalling between python primitives and request and
response content is handled by parsers and renderers.
"""
from __future__ import unicode_literals
import copy
import inspect
import traceback
from collections import OrderedDict
from django.core.exceptions import ValidationError as DjangoValidationError
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models import DurationField as ModelDurationField
from django.db.models.fields import Field as DjangoModelField
from django.db.models.fields import FieldDoesNotExist
from django.utils import six, timezone
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from rest_framework.compat import JSONField as ModelJSONField
from rest_framework.compat import postgres_fields, set_many, unicode_to_repr
from rest_framework.exceptions import ErrorDetail, ValidationError
from rest_framework.fields import get_error_detail, set_value
from rest_framework.settings import api_settings
from rest_framework.utils import html, model_meta, representation
from rest_framework.utils.field_mapping import (
ClassLookupDict, get_field_kwargs, get_nested_relation_kwargs,
get_relation_kwargs, get_url_kwargs
)
from rest_framework.utils.serializer_helpers import (
BindingDict, BoundField, NestedBoundField, ReturnDict, ReturnList
)
from rest_framework.validators import (
UniqueForDateValidator, UniqueForMonthValidator, UniqueForYearValidator,
UniqueTogetherValidator
)
# Note: We do the following so that users of the framework can use this style:
#
# example_field = serializers.CharField(...)
#
# This helps keep the separation between model fields, form fields, and
# serializer fields more explicit.
from rest_framework.fields import ( # NOQA # isort:skip
BooleanField, CharField, ChoiceField, DateField, DateTimeField, DecimalField,
DictField, DurationField, EmailField, Field, FileField, FilePathField, FloatField,
HiddenField, IPAddressField, ImageField, IntegerField, JSONField, ListField,
ModelField, MultipleChoiceField, NullBooleanField, ReadOnlyField, RegexField,
SerializerMethodField, SlugField, TimeField, URLField, UUIDField,
)
from rest_framework.relations import ( # NOQA # isort:skip
HyperlinkedIdentityField, HyperlinkedRelatedField, ManyRelatedField,
PrimaryKeyRelatedField, RelatedField, SlugRelatedField, StringRelatedField,
)
# Non-field imports, but public API
from rest_framework.fields import ( # NOQA # isort:skip
CreateOnlyDefault, CurrentUserDefault, SkipField, empty
)
from rest_framework.relations import Hyperlink, PKOnlyObject # NOQA # isort:skip
# We assume that 'validators' are intended for the child serializer,
# rather than the parent serializer.
LIST_SERIALIZER_KWARGS = (
'read_only', 'write_only', 'required', 'default', 'initial', 'source',
'label', 'help_text', 'style', 'error_messages', 'allow_empty',
'instance', 'data', 'partial', 'context', 'allow_null'
)
ALL_FIELDS = '__all__'
# BaseSerializer
# --------------
class BaseSerializer(Field):
"""
The BaseSerializer class provides a minimal class which may be used
for writing custom serializer implementations.
Note that we strongly restrict the ordering of operations/properties
that may be used on the serializer in order to enforce correct usage.
In particular, if a `data=` argument is passed then:
.is_valid() - Available.
.initial_data - Available.
.validated_data - Only available after calling `is_valid()`
.errors - Only available after calling `is_valid()`
.data - Only available after calling `is_valid()`
If a `data=` argument is not passed then:
.is_valid() - Not available.
.initial_data - Not available.
.validated_data - Not available.
.errors - Not available.
.data - Available.
"""
def __init__(self, instance=None, data=empty, **kwargs):
self.instance = instance
if data is not empty:
self.initial_data = data
self.partial = kwargs.pop('partial', False)
self._context = kwargs.pop('context', {})
kwargs.pop('many', None)
super(BaseSerializer, self).__init__(**kwargs)
def __new__(cls, *args, **kwargs):
# We override this method in order to automagically create
# `ListSerializer` classes instead when `many=True` is set.
if kwargs.pop('many', False):
return cls.many_init(*args, **kwargs)
return super(BaseSerializer, cls).__new__(cls, *args, **kwargs)
@classmethod
def many_init(cls, *args, **kwargs):
"""
This method implements the creation of a `ListSerializer` parent
class when `many=True` is used. You can customize it if you need to
control which keyword arguments are passed to the parent, and
which are passed to the child.
Note that we're over-cautious in passing most arguments to both parent
and child classes in order to try to cover the general case. If you're
overriding this method you'll probably want something much simpler, eg:
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls()
return CustomListSerializer(*args, **kwargs)
"""
allow_empty = kwargs.pop('allow_empty', None)
child_serializer = cls(*args, **kwargs)
list_kwargs = {
'child': child_serializer,
}
if allow_empty is not None:
list_kwargs['allow_empty'] = allow_empty
list_kwargs.update({
key: value for key, value in kwargs.items()
if key in LIST_SERIALIZER_KWARGS
})
meta = getattr(cls, 'Meta', None)
list_serializer_class = getattr(meta, 'list_serializer_class', ListSerializer)
return list_serializer_class(*args, **list_kwargs)
def to_internal_value(self, data):
raise NotImplementedError('`to_internal_value()` must be implemented.')
def to_representation(self, instance):
raise NotImplementedError('`to_representation()` must be implemented.')
def update(self, instance, validated_data):
raise NotImplementedError('`update()` must be implemented.')
def create(self, validated_data):
raise NotImplementedError('`create()` must be implemented.')
def save(self, **kwargs):
assert not hasattr(self, 'save_object'), (
'Serializer `%s.%s` has old-style version 2 `.save_object()` '
'that is no longer compatible with REST framework 3. '
'Use the new-style `.create()` and `.update()` methods instead.' %
(self.__class__.__module__, self.__class__.__name__)
)
assert hasattr(self, '_errors'), (
'You must call `.is_valid()` before calling `.save()`.'
)
assert not self.errors, (
'You cannot call `.save()` on a serializer with invalid data.'
)
# Guard against incorrect use of `serializer.save(commit=False)`
assert 'commit' not in kwargs, (
"'commit' is not a valid keyword argument to the 'save()' method. "
"If you need to access data before committing to the database then "
"inspect 'serializer.validated_data' instead. "
"You can also pass additional keyword arguments to 'save()' if you "
"need to set extra attributes on the saved model instance. "
"For example: 'serializer.save(owner=request.user)'.'"
)
assert not hasattr(self, '_data'), (
"You cannot call `.save()` after accessing `serializer.data`."
"If you need to access data before committing to the database then "
"inspect 'serializer.validated_data' instead. "
)
validated_data = dict(
list(self.validated_data.items()) +
list(kwargs.items())
)
if self.instance is not None:
self.instance = self.update(self.instance, validated_data)
assert self.instance is not None, (
'`update()` did not return an object instance.'
)
else:
self.instance = self.create(validated_data)
assert self.instance is not None, (
'`create()` did not return an object instance.'
)
return self.instance
def is_valid(self, raise_exception=False):
assert not hasattr(self, 'restore_object'), (
'Serializer `%s.%s` has old-style version 2 `.restore_object()` '
'that is no longer compatible with REST framework 3. '
'Use the new-style `.create()` and `.update()` methods instead.' %
(self.__class__.__module__, self.__class__.__name__)
)
assert hasattr(self, 'initial_data'), (
'Cannot call `.is_valid()` as no `data=` keyword argument was '
'passed when instantiating the serializer instance.'
)
if not hasattr(self, '_validated_data'):
try:
self._validated_data = self.run_validation(self.initial_data)
except ValidationError as exc:
self._validated_data = {}
self._errors = exc.detail
else:
self._errors = {}
if self._errors and raise_exception:
raise ValidationError(self.errors)
return not bool(self._errors)
@property
def data(self):
if hasattr(self, 'initial_data') and not hasattr(self, '_validated_data'):
msg = (
'When a serializer is passed a `data` keyword argument you '
'must call `.is_valid()` before attempting to access the '
'serialized `.data` representation.\n'
'You should either call `.is_valid()` first, '
'or access `.initial_data` instead.'
)
raise AssertionError(msg)
if not hasattr(self, '_data'):
if self.instance is not None and not getattr(self, '_errors', None):
self._data = self.to_representation(self.instance)
elif hasattr(self, '_validated_data') and not getattr(self, '_errors', None):
self._data = self.to_representation(self.validated_data)
else:
self._data = self.get_initial()
return self._data
@property
def errors(self):
if not hasattr(self, '_errors'):
msg = 'You must call `.is_valid()` before accessing `.errors`.'
raise AssertionError(msg)
return self._errors
@property
def validated_data(self):
if not hasattr(self, '_validated_data'):
msg = 'You must call `.is_valid()` before accessing `.validated_data`.'
raise AssertionError(msg)
return self._validated_data
# Serializer & ListSerializer classes
# -----------------------------------
class SerializerMetaclass(type):
"""
This metaclass sets a dictionary named `_declared_fields` on the class.
Any instances of `Field` included as attributes on either the class
or on any of its superclasses will be include in the
`_declared_fields` dictionary.
"""
@classmethod
def _get_declared_fields(cls, bases, attrs):
fields = [(field_name, attrs.pop(field_name))
for field_name, obj in list(attrs.items())
if isinstance(obj, Field)]
fields.sort(key=lambda x: x[1]._creation_counter)
# If this class is subclassing another Serializer, add that Serializer's
# fields. Note that we loop over the bases in *reverse*. This is necessary
# in order to maintain the correct order of fields.
for base in reversed(bases):
if hasattr(base, '_declared_fields'):
fields = list(base._declared_fields.items()) + fields
return OrderedDict(fields)
def __new__(cls, name, bases, attrs):
attrs['_declared_fields'] = cls._get_declared_fields(bases, attrs)
return super(SerializerMetaclass, cls).__new__(cls, name, bases, attrs)
def as_serializer_error(exc):
assert isinstance(exc, (ValidationError, DjangoValidationError))
if isinstance(exc, DjangoValidationError):
detail = get_error_detail(exc)
else:
detail = exc.detail
if isinstance(detail, dict):
# If errors may be a dict we use the standard {key: list of values}.
# Here we ensure that all the values are *lists* of errors.
return {
key: value if isinstance(value, (list, dict)) else [value]
for key, value in detail.items()
}
elif isinstance(detail, list):
# Errors raised as a list are non-field errors.
return {
api_settings.NON_FIELD_ERRORS_KEY: detail
}
# Errors raised as a string are non-field errors.
return {
api_settings.NON_FIELD_ERRORS_KEY: [detail]
}
@six.add_metaclass(SerializerMetaclass)
class Serializer(BaseSerializer):
default_error_messages = {
'invalid': _('Invalid data. Expected a dictionary, but got {datatype}.')
}
@property
def fields(self):
"""
A dictionary of {field_name: field_instance}.
"""
# `fields` is evaluated lazily. We do this to ensure that we don't
# have issues importing modules that use ModelSerializers as fields,
# even if Django's app-loading stage has not yet run.
if not hasattr(self, '_fields'):
self._fields = BindingDict(self)
for key, value in self.get_fields().items():
self._fields[key] = value
return self._fields
@cached_property
def _writable_fields(self):
return [
field for field in self.fields.values()
if (not field.read_only) or (field.default is not empty)
]
@cached_property
def _readable_fields(self):
return [
field for field in self.fields.values()
if not field.write_only
]
def get_fields(self):
"""
Returns a dictionary of {field_name: field_instance}.
"""
# Every new serializer is created with a clone of the field instances.
# This allows users to dynamically modify the fields on a serializer
# instance without affecting every other serializer class.
return copy.deepcopy(self._declared_fields)
def get_validators(self):
"""
Returns a list of validator callables.
"""
# Used by the lazily-evaluated `validators` property.
meta = getattr(self, 'Meta', None)
validators = getattr(meta, 'validators', None)
return validators[:] if validators else []
def get_initial(self):
if hasattr(self, 'initial_data'):
return OrderedDict([
(field_name, field.get_value(self.initial_data))
for field_name, field in self.fields.items()
if (field.get_value(self.initial_data) is not empty) and
not field.read_only
])
return OrderedDict([
(field.field_name, field.get_initial())
for field in self.fields.values()
if not field.read_only
])
def get_value(self, dictionary):
# We override the default field access in order to support
# nested HTML forms.
if html.is_html_input(dictionary):
return html.parse_html_dict(dictionary, prefix=self.field_name) or empty
return dictionary.get(self.field_name, empty)
def run_validation(self, data=empty):
"""
We override the default `run_validation`, because the validation
performed by validators and the `.validate()` method should
        be coerced into an error dictionary with a 'non_field_errors' key.
"""
(is_empty_value, data) = self.validate_empty_values(data)
if is_empty_value:
return data
value = self.to_internal_value(data)
try:
self.run_validators(value)
value = self.validate(value)
assert value is not None, '.validate() should return the validated data'
except (ValidationError, DjangoValidationError) as exc:
raise ValidationError(detail=as_serializer_error(exc))
return value
def to_internal_value(self, data):
"""
Dict of native values <- Dict of primitive datatypes.
"""
if not isinstance(data, dict):
message = self.error_messages['invalid'].format(
datatype=type(data).__name__
)
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='invalid')
ret = OrderedDict()
errors = OrderedDict()
fields = self._writable_fields
for field in fields:
validate_method = getattr(self, 'validate_' + field.field_name, None)
primitive_value = field.get_value(data)
try:
validated_value = field.run_validation(primitive_value)
if validate_method is not None:
validated_value = validate_method(validated_value)
except ValidationError as exc:
errors[field.field_name] = exc.detail
except DjangoValidationError as exc:
errors[field.field_name] = get_error_detail(exc)
except SkipField:
pass
else:
set_value(ret, field.source_attrs, validated_value)
if errors:
raise ValidationError(errors)
return ret
def to_representation(self, instance):
"""
Object instance -> Dict of primitive datatypes.
"""
ret = OrderedDict()
fields = self._readable_fields
for field in fields:
try:
attribute = field.get_attribute(instance)
except SkipField:
continue
# We skip `to_representation` for `None` values so that fields do
# not have to explicitly deal with that case.
#
# For related fields with `use_pk_only_optimization` we need to
# resolve the pk value.
check_for_none = attribute.pk if isinstance(attribute, PKOnlyObject) else attribute
if check_for_none is None:
ret[field.field_name] = None
else:
ret[field.field_name] = field.to_representation(attribute)
return ret
def validate(self, attrs):
return attrs
def __repr__(self):
return unicode_to_repr(representation.serializer_repr(self, indent=1))
# The following are used for accessing `BoundField` instances on the
# serializer, for the purposes of presenting a form-like API onto the
# field values and field errors.
def __iter__(self):
for field in self.fields.values():
yield self[field.field_name]
def __getitem__(self, key):
field = self.fields[key]
value = self.data.get(key)
error = self.errors.get(key) if hasattr(self, '_errors') else None
if isinstance(field, Serializer):
return NestedBoundField(field, value, error)
return BoundField(field, value, error)
# Include a backlink to the serializer class on return objects.
# Allows renderers such as HTMLFormRenderer to get the full field info.
@property
def data(self):
ret = super(Serializer, self).data
return ReturnDict(ret, serializer=self)
@property
def errors(self):
ret = super(Serializer, self).errors
if isinstance(ret, list) and len(ret) == 1 and getattr(ret[0], 'code', None) == 'null':
# Edge case. Provide a more descriptive error than
# "this field may not be null", when no data is passed.
detail = ErrorDetail('No data provided', code='null')
ret = {api_settings.NON_FIELD_ERRORS_KEY: [detail]}
return ReturnDict(ret, serializer=self)
# There's some replication of `ListField` here,
# but that's probably better than obfuscating the call hierarchy.
class ListSerializer(BaseSerializer):
child = None
many = True
default_error_messages = {
'not_a_list': _('Expected a list of items but got type "{input_type}".'),
'empty': _('This list may not be empty.')
}
def __init__(self, *args, **kwargs):
self.child = kwargs.pop('child', copy.deepcopy(self.child))
self.allow_empty = kwargs.pop('allow_empty', True)
assert self.child is not None, '`child` is a required argument.'
assert not inspect.isclass(self.child), '`child` has not been instantiated.'
super(ListSerializer, self).__init__(*args, **kwargs)
self.child.bind(field_name='', parent=self)
def get_initial(self):
if hasattr(self, 'initial_data'):
return self.to_representation(self.initial_data)
return []
def get_value(self, dictionary):
"""
Given the input dictionary, return the field value.
"""
# We override the default field access in order to support
# lists in HTML forms.
if html.is_html_input(dictionary):
return html.parse_html_list(dictionary, prefix=self.field_name)
return dictionary.get(self.field_name, empty)
def run_validation(self, data=empty):
"""
We override the default `run_validation`, because the validation
performed by validators and the `.validate()` method should
        be coerced into an error dictionary with a 'non_field_errors' key.
"""
(is_empty_value, data) = self.validate_empty_values(data)
if is_empty_value:
return data
value = self.to_internal_value(data)
try:
self.run_validators(value)
value = self.validate(value)
assert value is not None, '.validate() should return the validated data'
except (ValidationError, DjangoValidationError) as exc:
raise ValidationError(detail=as_serializer_error(exc))
return value
def to_internal_value(self, data):
"""
List of dicts of native values <- List of dicts of primitive datatypes.
"""
if html.is_html_input(data):
data = html.parse_html_list(data)
if not isinstance(data, list):
message = self.error_messages['not_a_list'].format(
input_type=type(data).__name__
)
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='not_a_list')
if not self.allow_empty and len(data) == 0:
message = self.error_messages['empty']
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='empty')
ret = []
errors = []
for item in data:
try:
validated = self.child.run_validation(item)
except ValidationError as exc:
errors.append(exc.detail)
else:
ret.append(validated)
errors.append({})
if any(errors):
raise ValidationError(errors)
return ret
def to_representation(self, data):
"""
List of object instances -> List of dicts of primitive datatypes.
"""
# Dealing with nested relationships, data can be a Manager,
# so, first get a queryset from the Manager if needed
iterable = data.all() if isinstance(data, models.Manager) else data
return [
self.child.to_representation(item) for item in iterable
]
def validate(self, attrs):
return attrs
def update(self, instance, validated_data):
raise NotImplementedError(
"Serializers with many=True do not support multiple update by "
"default, only multiple create. For updates it is unclear how to "
"deal with insertions and deletions. If you need to support "
"multiple update, use a `ListSerializer` class and override "
"`.update()` so you can specify the behavior exactly."
)
def create(self, validated_data):
return [
self.child.create(attrs) for attrs in validated_data
]
def save(self, **kwargs):
"""
Save and return a list of object instances.
"""
# Guard against incorrect use of `serializer.save(commit=False)`
assert 'commit' not in kwargs, (
"'commit' is not a valid keyword argument to the 'save()' method. "
"If you need to access data before committing to the database then "
"inspect 'serializer.validated_data' instead. "
"You can also pass additional keyword arguments to 'save()' if you "
"need to set extra attributes on the saved model instance. "
"For example: 'serializer.save(owner=request.user)'.'"
)
validated_data = [
dict(list(attrs.items()) + list(kwargs.items()))
for attrs in self.validated_data
]
if self.instance is not None:
self.instance = self.update(self.instance, validated_data)
assert self.instance is not None, (
'`update()` did not return an object instance.'
)
else:
self.instance = self.create(validated_data)
assert self.instance is not None, (
'`create()` did not return an object instance.'
)
return self.instance
def is_valid(self, raise_exception=False):
# This implementation is the same as the default,
# except that we use lists, rather than dicts, as the empty case.
assert hasattr(self, 'initial_data'), (
'Cannot call `.is_valid()` as no `data=` keyword argument was '
'passed when instantiating the serializer instance.'
)
if not hasattr(self, '_validated_data'):
try:
self._validated_data = self.run_validation(self.initial_data)
except ValidationError as exc:
self._validated_data = []
self._errors = exc.detail
else:
self._errors = []
if self._errors and raise_exception:
raise ValidationError(self.errors)
return not bool(self._errors)
def __repr__(self):
return unicode_to_repr(representation.list_repr(self, indent=1))
# Include a backlink to the serializer class on return objects.
# Allows renderers such as HTMLFormRenderer to get the full field info.
@property
def data(self):
ret = super(ListSerializer, self).data
return ReturnList(ret, serializer=self)
@property
def errors(self):
ret = super(ListSerializer, self).errors
if isinstance(ret, list) and len(ret) == 1 and getattr(ret[0], 'code', None) == 'null':
# Edge case. Provide a more descriptive error than
# "this field may not be null", when no data is passed.
detail = ErrorDetail('No data provided', code='null')
ret = {api_settings.NON_FIELD_ERRORS_KEY: [detail]}
if isinstance(ret, dict):
return ReturnDict(ret, serializer=self)
return ReturnList(ret, serializer=self)
# ModelSerializer & HyperlinkedModelSerializer
# --------------------------------------------
def raise_errors_on_nested_writes(method_name, serializer, validated_data):
"""
Give explicit errors when users attempt to pass writable nested data.
If we don't do this explicitly they'd get a less helpful error when
calling `.save()` on the serializer.
We don't *automatically* support these sorts of nested writes because
there are too many ambiguities to define a default behavior.
Eg. Suppose we have a `UserSerializer` with a nested profile. How should
we handle the case of an update, where the `profile` relationship does
not exist? Any of the following might be valid:
* Raise an application error.
* Silently ignore the nested part of the update.
* Automatically create a profile instance.
"""
# Ensure we don't have a writable nested field. For example:
#
# class UserSerializer(ModelSerializer):
# ...
# profile = ProfileSerializer()
assert not any(
isinstance(field, BaseSerializer) and
(field.source in validated_data) and
isinstance(validated_data[field.source], (list, dict))
for key, field in serializer.fields.items()
), (
'The `.{method_name}()` method does not support writable nested '
'fields by default.\nWrite an explicit `.{method_name}()` method for '
'serializer `{module}.{class_name}`, or set `read_only=True` on '
'nested serializer fields.'.format(
method_name=method_name,
module=serializer.__class__.__module__,
class_name=serializer.__class__.__name__
)
)
# Ensure we don't have a writable dotted-source field. For example:
#
# class UserSerializer(ModelSerializer):
# ...
# address = serializer.CharField('profile.address')
assert not any(
'.' in field.source and
(key in validated_data) and
isinstance(validated_data[key], (list, dict))
for key, field in serializer.fields.items()
), (
'The `.{method_name}()` method does not support writable dotted-source '
'fields by default.\nWrite an explicit `.{method_name}()` method for '
'serializer `{module}.{class_name}`, or set `read_only=True` on '
'dotted-source serializer fields.'.format(
method_name=method_name,
module=serializer.__class__.__module__,
class_name=serializer.__class__.__name__
)
)
class ModelSerializer(Serializer):
"""
A `ModelSerializer` is just a regular `Serializer`, except that:
* A set of default fields are automatically populated.
* A set of default validators are automatically populated.
* Default `.create()` and `.update()` implementations are provided.
The process of automatically determining a set of serializer fields
based on the model fields is reasonably complex, but you almost certainly
don't need to dig into the implementation.
If the `ModelSerializer` class *doesn't* generate the set of fields that
you need you should either declare the extra/differing fields explicitly on
the serializer class, or simply use a `Serializer` class.
"""
serializer_field_mapping = {
models.AutoField: IntegerField,
models.BigIntegerField: IntegerField,
models.BooleanField: BooleanField,
models.CharField: CharField,
models.CommaSeparatedIntegerField: CharField,
models.DateField: DateField,
models.DateTimeField: DateTimeField,
models.DecimalField: DecimalField,
models.EmailField: EmailField,
models.Field: ModelField,
models.FileField: FileField,
models.FloatField: FloatField,
models.ImageField: ImageField,
models.IntegerField: IntegerField,
models.NullBooleanField: NullBooleanField,
models.PositiveIntegerField: IntegerField,
models.PositiveSmallIntegerField: IntegerField,
models.SlugField: SlugField,
models.SmallIntegerField: IntegerField,
models.TextField: CharField,
models.TimeField: TimeField,
models.URLField: URLField,
models.GenericIPAddressField: IPAddressField,
models.FilePathField: FilePathField,
}
if ModelDurationField is not None:
serializer_field_mapping[ModelDurationField] = DurationField
if ModelJSONField is not None:
serializer_field_mapping[ModelJSONField] = JSONField
serializer_related_field = PrimaryKeyRelatedField
serializer_related_to_field = SlugRelatedField
serializer_url_field = HyperlinkedIdentityField
serializer_choice_field = ChoiceField
# The field name for hyperlinked identity fields. Defaults to 'url'.
# You can modify this using the API setting.
#
# Note that if you instead need modify this on a per-serializer basis,
# you'll also need to ensure you update the `create` method on any generic
# views, to correctly handle the 'Location' response header for
# "HTTP 201 Created" responses.
url_field_name = None
# Default `create` and `update` behavior...
def create(self, validated_data):
"""
We have a bit of extra checking around this in order to provide
descriptive messages when something goes wrong, but this method is
essentially just:
return ExampleModel.objects.create(**validated_data)
If there are many to many fields present on the instance then they
cannot be set until the model is instantiated, in which case the
implementation is like so:
example_relationship = validated_data.pop('example_relationship')
instance = ExampleModel.objects.create(**validated_data)
instance.example_relationship = example_relationship
return instance
The default implementation also does not handle nested relationships.
If you want to support writable nested relationships you'll need
to write an explicit `.create()` method.
"""
raise_errors_on_nested_writes('create', self, validated_data)
ModelClass = self.Meta.model
# Remove many-to-many relationships from validated_data.
# They are not valid arguments to the default `.create()` method,
# as they require that the instance has already been saved.
info = model_meta.get_field_info(ModelClass)
many_to_many = {}
for field_name, relation_info in info.relations.items():
if relation_info.to_many and (field_name in validated_data):
many_to_many[field_name] = validated_data.pop(field_name)
try:
instance = ModelClass.objects.create(**validated_data)
except TypeError:
tb = traceback.format_exc()
msg = (
'Got a `TypeError` when calling `%s.objects.create()`. '
'This may be because you have a writable field on the '
'serializer class that is not a valid argument to '
'`%s.objects.create()`. You may need to make the field '
'read-only, or override the %s.create() method to handle '
'this correctly.\nOriginal exception was:\n %s' %
(
ModelClass.__name__,
ModelClass.__name__,
self.__class__.__name__,
tb
)
)
raise TypeError(msg)
# Save many-to-many relationships after the instance is created.
if many_to_many:
for field_name, value in many_to_many.items():
set_many(instance, field_name, value)
return instance
def update(self, instance, validated_data):
raise_errors_on_nested_writes('update', self, validated_data)
info = model_meta.get_field_info(instance)
# Simply set each attribute on the instance, and then save it.
# Note that unlike `.create()` we don't need to treat many-to-many
# relationships as being a special case. During updates we already
# have an instance pk for the relationships to be associated with.
for attr, value in validated_data.items():
if attr in info.relations and info.relations[attr].to_many:
set_many(instance, attr, value)
else:
setattr(instance, attr, value)
instance.save()
return instance
# Determine the fields to apply...
def get_fields(self):
"""
Return the dict of field names -> field instances that should be
used for `self.fields` when instantiating the serializer.
"""
if self.url_field_name is None:
self.url_field_name = api_settings.URL_FIELD_NAME
assert hasattr(self, 'Meta'), (
'Class {serializer_class} missing "Meta" attribute'.format(
serializer_class=self.__class__.__name__
)
)
assert hasattr(self.Meta, 'model'), (
'Class {serializer_class} missing "Meta.model" attribute'.format(
serializer_class=self.__class__.__name__
)
)
if model_meta.is_abstract_model(self.Meta.model):
raise ValueError(
'Cannot use ModelSerializer with Abstract Models.'
)
declared_fields = copy.deepcopy(self._declared_fields)
model = getattr(self.Meta, 'model')
depth = getattr(self.Meta, 'depth', 0)
if depth is not None:
assert depth >= 0, "'depth' may not be negative."
assert depth <= 10, "'depth' may not be greater than 10."
# Retrieve metadata about fields & relationships on the model class.
info = model_meta.get_field_info(model)
field_names = self.get_field_names(declared_fields, info)
# Determine any extra field arguments and hidden fields that
# should be included
extra_kwargs = self.get_extra_kwargs()
extra_kwargs, hidden_fields = self.get_uniqueness_extra_kwargs(
field_names, declared_fields, extra_kwargs
)
# Determine the fields that should be included on the serializer.
fields = OrderedDict()
for field_name in field_names:
# If the field is explicitly declared on the class then use that.
if field_name in declared_fields:
fields[field_name] = declared_fields[field_name]
continue
# Determine the serializer field class and keyword arguments.
field_class, field_kwargs = self.build_field(
field_name, info, model, depth
)
# Include any kwargs defined in `Meta.extra_kwargs`
extra_field_kwargs = extra_kwargs.get(field_name, {})
field_kwargs = self.include_extra_kwargs(
field_kwargs, extra_field_kwargs
)
# Create the serializer field.
fields[field_name] = field_class(**field_kwargs)
# Add in any hidden fields.
fields.update(hidden_fields)
return fields
# Methods for determining the set of field names to include...
def get_field_names(self, declared_fields, info):
"""
Returns the list of all field names that should be created when
instantiating this serializer class. This is based on the default
set of fields, but also takes into account the `Meta.fields` or
`Meta.exclude` options if they have been specified.
"""
fields = getattr(self.Meta, 'fields', None)
exclude = getattr(self.Meta, 'exclude', None)
if fields and fields != ALL_FIELDS and not isinstance(fields, (list, tuple)):
raise TypeError(
'The `fields` option must be a list or tuple or "__all__". '
'Got %s.' % type(fields).__name__
)
if exclude and not isinstance(exclude, (list, tuple)):
raise TypeError(
'The `exclude` option must be a list or tuple. Got %s.' %
type(exclude).__name__
)
assert not (fields and exclude), (
"Cannot set both 'fields' and 'exclude' options on "
"serializer {serializer_class}.".format(
serializer_class=self.__class__.__name__
)
)
assert not (fields is None and exclude is None), (
"Creating a ModelSerializer without either the 'fields' attribute "
"or the 'exclude' attribute has been deprecated since 3.3.0, "
"and is now disallowed. Add an explicit fields = '__all__' to the "
"{serializer_class} serializer.".format(
serializer_class=self.__class__.__name__
),
)
if fields == ALL_FIELDS:
fields = None
if fields is not None:
# Ensure that all declared fields have also been included in the
# `Meta.fields` option.
            # Do not require any fields that are declared on a parent class,
# in order to allow serializer subclasses to only include
# a subset of fields.
required_field_names = set(declared_fields)
for cls in self.__class__.__bases__:
required_field_names -= set(getattr(cls, '_declared_fields', []))
for field_name in required_field_names:
assert field_name in fields, (
"The field '{field_name}' was declared on serializer "
"{serializer_class}, but has not been included in the "
"'fields' option.".format(
field_name=field_name,
serializer_class=self.__class__.__name__
)
)
return fields
# Use the default set of field names if `Meta.fields` is not specified.
fields = self.get_default_field_names(declared_fields, info)
if exclude is not None:
# If `Meta.exclude` is included, then remove those fields.
for field_name in exclude:
assert field_name in fields, (
"The field '{field_name}' was included on serializer "
"{serializer_class} in the 'exclude' option, but does "
"not match any model field.".format(
field_name=field_name,
serializer_class=self.__class__.__name__
)
)
fields.remove(field_name)
return fields
def get_default_field_names(self, declared_fields, model_info):
"""
Return the default list of field names that will be used if the
`Meta.fields` option is not specified.
"""
return (
[model_info.pk.name] +
list(declared_fields.keys()) +
list(model_info.fields.keys()) +
list(model_info.forward_relations.keys())
)
# Methods for constructing serializer fields...
def build_field(self, field_name, info, model_class, nested_depth):
"""
Return a two tuple of (cls, kwargs) to build a serializer field with.
"""
if field_name in info.fields_and_pk:
model_field = info.fields_and_pk[field_name]
return self.build_standard_field(field_name, model_field)
elif field_name in info.relations:
relation_info = info.relations[field_name]
if not nested_depth:
return self.build_relational_field(field_name, relation_info)
else:
return self.build_nested_field(field_name, relation_info, nested_depth)
elif hasattr(model_class, field_name):
return self.build_property_field(field_name, model_class)
elif field_name == self.url_field_name:
return self.build_url_field(field_name, model_class)
return self.build_unknown_field(field_name, model_class)
def build_standard_field(self, field_name, model_field):
"""
Create regular model fields.
"""
field_mapping = ClassLookupDict(self.serializer_field_mapping)
field_class = field_mapping[model_field]
field_kwargs = get_field_kwargs(field_name, model_field)
if 'choices' in field_kwargs:
# Fields with choices get coerced into `ChoiceField`
# instead of using their regular typed field.
field_class = self.serializer_choice_field
# Some model fields may introduce kwargs that would not be valid
# for the choice field. We need to strip these out.
# Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES)
valid_kwargs = set((
'read_only', 'write_only',
'required', 'default', 'initial', 'source',
'label', 'help_text', 'style',
'error_messages', 'validators', 'allow_null', 'allow_blank',
'choices'
))
for key in list(field_kwargs.keys()):
if key not in valid_kwargs:
field_kwargs.pop(key)
if not issubclass(field_class, ModelField):
# `model_field` is only valid for the fallback case of
# `ModelField`, which is used when no other typed field
# matched to the model field.
field_kwargs.pop('model_field', None)
if not issubclass(field_class, CharField) and not issubclass(field_class, ChoiceField):
# `allow_blank` is only valid for textual fields.
field_kwargs.pop('allow_blank', None)
if postgres_fields and isinstance(model_field, postgres_fields.ArrayField):
# Populate the `child` argument on `ListField` instances generated
            # for the PostgreSQL specific `ArrayField`.
child_model_field = model_field.base_field
child_field_class, child_field_kwargs = self.build_standard_field(
'child', child_model_field
)
field_kwargs['child'] = child_field_class(**child_field_kwargs)
return field_class, field_kwargs
def build_relational_field(self, field_name, relation_info):
"""
Create fields for forward and reverse relationships.
"""
field_class = self.serializer_related_field
field_kwargs = get_relation_kwargs(field_name, relation_info)
to_field = field_kwargs.pop('to_field', None)
if to_field and not relation_info.reverse and not relation_info.related_model._meta.get_field(to_field).primary_key:
field_kwargs['slug_field'] = to_field
field_class = self.serializer_related_to_field
# `view_name` is only valid for hyperlinked relationships.
if not issubclass(field_class, HyperlinkedRelatedField):
field_kwargs.pop('view_name', None)
return field_class, field_kwargs
def build_nested_field(self, field_name, relation_info, nested_depth):
"""
Create nested fields for forward and reverse relationships.
"""
class NestedSerializer(ModelSerializer):
class Meta:
model = relation_info.related_model
depth = nested_depth - 1
fields = '__all__'
field_class = NestedSerializer
field_kwargs = get_nested_relation_kwargs(relation_info)
return field_class, field_kwargs
def build_property_field(self, field_name, model_class):
"""
Create a read only field for model methods and properties.
"""
field_class = ReadOnlyField
field_kwargs = {}
return field_class, field_kwargs
def build_url_field(self, field_name, model_class):
"""
Create a field representing the object's own URL.
"""
field_class = self.serializer_url_field
field_kwargs = get_url_kwargs(model_class)
return field_class, field_kwargs
def build_unknown_field(self, field_name, model_class):
"""
Raise an error on any unknown fields.
"""
raise ImproperlyConfigured(
'Field name `%s` is not valid for model `%s`.' %
(field_name, model_class.__name__)
)
def include_extra_kwargs(self, kwargs, extra_kwargs):
"""
Include any 'extra_kwargs' that have been included for this field,
possibly removing any incompatible existing keyword arguments.
"""
if extra_kwargs.get('read_only', False):
for attr in [
'required', 'default', 'allow_blank', 'allow_null',
'min_length', 'max_length', 'min_value', 'max_value',
'validators', 'queryset'
]:
kwargs.pop(attr, None)
if extra_kwargs.get('default') and kwargs.get('required') is False:
kwargs.pop('required')
if extra_kwargs.get('read_only', kwargs.get('read_only', False)):
extra_kwargs.pop('required', None) # Read only fields should always omit the 'required' argument.
kwargs.update(extra_kwargs)
return kwargs
# Methods for determining additional keyword arguments to apply...
def get_extra_kwargs(self):
"""
Return a dictionary mapping field names to a dictionary of
additional keyword arguments.
"""
extra_kwargs = copy.deepcopy(getattr(self.Meta, 'extra_kwargs', {}))
read_only_fields = getattr(self.Meta, 'read_only_fields', None)
if read_only_fields is not None:
if not isinstance(read_only_fields, (list, tuple)):
raise TypeError(
'The `read_only_fields` option must be a list or tuple. '
'Got %s.' % type(read_only_fields).__name__
)
for field_name in read_only_fields:
kwargs = extra_kwargs.get(field_name, {})
kwargs['read_only'] = True
extra_kwargs[field_name] = kwargs
return extra_kwargs
def get_uniqueness_extra_kwargs(self, field_names, declared_fields, extra_kwargs):
"""
Return any additional field options that need to be included as a
result of uniqueness constraints on the model. This is returned as
a two-tuple of:
('dict of updated extra kwargs', 'mapping of hidden fields')
"""
if getattr(self.Meta, 'validators', None) is not None:
return (extra_kwargs, {})
model = getattr(self.Meta, 'model')
model_fields = self._get_model_fields(
field_names, declared_fields, extra_kwargs
)
# Determine if we need any additional `HiddenField` or extra keyword
# arguments to deal with `unique_for` dates that are required to
# be in the input data in order to validate it.
unique_constraint_names = set()
for model_field in model_fields.values():
# Include each of the `unique_for_*` field names.
unique_constraint_names |= {model_field.unique_for_date, model_field.unique_for_month,
model_field.unique_for_year}
unique_constraint_names -= {None}
# Include each of the `unique_together` field names,
# so long as all the field names are included on the serializer.
for parent_class in [model] + list(model._meta.parents.keys()):
for unique_together_list in parent_class._meta.unique_together:
if set(field_names).issuperset(set(unique_together_list)):
unique_constraint_names |= set(unique_together_list)
        # Now that we have all the field names that have uniqueness constraints
# applied, we can add the extra 'required=...' or 'default=...'
# arguments that are appropriate to these fields, or add a `HiddenField` for it.
hidden_fields = {}
uniqueness_extra_kwargs = {}
for unique_constraint_name in unique_constraint_names:
            # Get the model field that is referred to.
unique_constraint_field = model._meta.get_field(unique_constraint_name)
if getattr(unique_constraint_field, 'auto_now_add', None):
default = CreateOnlyDefault(timezone.now)
elif getattr(unique_constraint_field, 'auto_now', None):
default = timezone.now
elif unique_constraint_field.has_default():
default = unique_constraint_field.default
else:
default = empty
if unique_constraint_name in model_fields:
# The corresponding field is present in the serializer
if default is empty:
uniqueness_extra_kwargs[unique_constraint_name] = {'required': True}
else:
uniqueness_extra_kwargs[unique_constraint_name] = {'default': default}
elif default is not empty:
# The corresponding field is not present in the
# serializer. We have a default to use for it, so
# add in a hidden field that populates it.
hidden_fields[unique_constraint_name] = HiddenField(default=default)
# Update `extra_kwargs` with any new options.
for key, value in uniqueness_extra_kwargs.items():
if key in extra_kwargs:
value.update(extra_kwargs[key])
extra_kwargs[key] = value
return extra_kwargs, hidden_fields
def _get_model_fields(self, field_names, declared_fields, extra_kwargs):
"""
Returns all the model fields that are being mapped to by fields
on the serializer class.
Returned as a dict of 'model field name' -> 'model field'.
Used internally by `get_uniqueness_field_options`.
"""
model = getattr(self.Meta, 'model')
model_fields = {}
for field_name in field_names:
if field_name in declared_fields:
# If the field is declared on the serializer
field = declared_fields[field_name]
source = field.source or field_name
else:
try:
source = extra_kwargs[field_name]['source']
except KeyError:
source = field_name
if '.' in source or source == '*':
# Model fields will always have a simple source mapping,
# they can't be nested attribute lookups.
continue
try:
field = model._meta.get_field(source)
if isinstance(field, DjangoModelField):
model_fields[source] = field
except FieldDoesNotExist:
pass
return model_fields
# Determine the validators to apply...
def get_validators(self):
"""
Determine the set of validators to use when instantiating serializer.
"""
# If the validators have been declared explicitly then use that.
validators = getattr(getattr(self, 'Meta', None), 'validators', None)
if validators is not None:
return validators[:]
# Otherwise use the default set of validators.
return (
self.get_unique_together_validators() +
self.get_unique_for_date_validators()
)
def get_unique_together_validators(self):
"""
Determine a default set of validators for any unique_together constraints.
"""
model_class_inheritance_tree = (
[self.Meta.model] +
list(self.Meta.model._meta.parents.keys())
)
        # The field names we're passing through here only include fields
# which may map onto a model field. Any dotted field name lookups
# cannot map to a field, and must be a traversal, so we're not
# including those.
field_names = {
field.source for field in self._writable_fields
if (field.source != '*') and ('.' not in field.source)
}
        # Note that we make sure to check `unique_together` not only on the
# base model class, but also on any parent classes.
validators = []
for parent_class in model_class_inheritance_tree:
for unique_together in parent_class._meta.unique_together:
if field_names.issuperset(set(unique_together)):
validator = UniqueTogetherValidator(
queryset=parent_class._default_manager,
fields=unique_together
)
validators.append(validator)
return validators
def get_unique_for_date_validators(self):
"""
Determine a default set of validators for the following constraints:
* unique_for_date
* unique_for_month
* unique_for_year
"""
info = model_meta.get_field_info(self.Meta.model)
default_manager = self.Meta.model._default_manager
field_names = [field.source for field in self.fields.values()]
validators = []
for field_name, field in info.fields_and_pk.items():
if field.unique_for_date and field_name in field_names:
validator = UniqueForDateValidator(
queryset=default_manager,
field=field_name,
date_field=field.unique_for_date
)
validators.append(validator)
if field.unique_for_month and field_name in field_names:
validator = UniqueForMonthValidator(
queryset=default_manager,
field=field_name,
date_field=field.unique_for_month
)
validators.append(validator)
if field.unique_for_year and field_name in field_names:
validator = UniqueForYearValidator(
queryset=default_manager,
field=field_name,
date_field=field.unique_for_year
)
validators.append(validator)
return validators
if hasattr(models, 'UUIDField'):
ModelSerializer.serializer_field_mapping[models.UUIDField] = UUIDField
# IPAddressField is deprecated in Django
if hasattr(models, 'IPAddressField'):
ModelSerializer.serializer_field_mapping[models.IPAddressField] = IPAddressField
if postgres_fields:
class CharMappingField(DictField):
child = CharField(allow_blank=True)
ModelSerializer.serializer_field_mapping[postgres_fields.HStoreField] = CharMappingField
ModelSerializer.serializer_field_mapping[postgres_fields.ArrayField] = ListField
class HyperlinkedModelSerializer(ModelSerializer):
"""
A type of `ModelSerializer` that uses hyperlinked relationships instead
of primary key relationships. Specifically:
* A 'url' field is included instead of the 'id' field.
* Relationships to other instances are hyperlinks, instead of primary keys.
"""
serializer_related_field = HyperlinkedRelatedField
def get_default_field_names(self, declared_fields, model_info):
"""
Return the default list of field names that will be used if the
`Meta.fields` option is not specified.
"""
return (
[self.url_field_name] +
list(declared_fields.keys()) +
list(model_info.fields.keys()) +
list(model_info.forward_relations.keys())
)
def build_nested_field(self, field_name, relation_info, nested_depth):
"""
Create nested fields for forward and reverse relationships.
"""
class NestedSerializer(HyperlinkedModelSerializer):
class Meta:
model = relation_info.related_model
depth = nested_depth - 1
fields = '__all__'
field_class = NestedSerializer
field_kwargs = get_nested_relation_kwargs(relation_info)
return field_class, field_kwargs
| gpl-3.0 | 5,674,282,845,337,721,000 | 38.190909 | 124 | 0.608328 | false |
LTD-Beget/sprutio-rpc | lib/FileManager/workers/webdav/moveFromWebDav.py | 1 | 7786 | import os
import shutil
import threading
import time
import traceback
from lib.FileManager.FM import REQUEST_DELAY
from lib.FileManager.WebDavConnection import WebDavConnection
from lib.FileManager.workers.baseWorkerCustomer import BaseWorkerCustomer
class MoveFromWebDav(BaseWorkerCustomer):
def __init__(self, source, target, paths, overwrite, *args, **kwargs):
super(MoveFromWebDav, self).__init__(*args, **kwargs)
self.source = source
self.target = target
self.paths = paths
self.overwrite = overwrite
self.webdav = WebDavConnection.create(self.login, self.source.get('server_id'), self.logger)
self.operation_progress = {
"total_done": False,
"total": 0,
"operation_done": False,
"processed": 0,
"previous_percent": 0
}
def run(self):
try:
self.preload()
success_paths = []
error_paths = []
source_path = self.source.get('path')
target_path = self.target.get('path')
if source_path is None:
raise Exception("Source path empty")
if target_path is None:
raise Exception("Target path empty")
target_path = self.get_abs_path(target_path)
self.logger.info("MoveFromWebDav process run source = %s , target = %s" % (source_path, target_path))
t_total = threading.Thread(target=self.get_total, args=(self.operation_progress, self.paths))
t_total.start()
for path in self.paths:
try:
download_path = target_path
if self.webdav.isdir(path):
path += '/'
download_path += path.replace(self.webdav.parent(path), "/", 1)
download_result = self.download_file_from_webdav(path, download_path, self.operation_progress)
if download_result['success']:
success_paths.append(path)
self.webdav.remove(path)
except Exception as e:
self.logger.error(
"Error copy %s , error %s , %s" % (str(path), str(e), traceback.format_exc()))
error_paths.append(path)
self.operation_progress["operation_done"] = True
result = {
"success": success_paths,
"errors": error_paths
}
            # otherwise users would think the copy did not complete fully
progress = {
'percent': round(float(len(success_paths)) / float(len(self.paths)), 2),
'text': str(int(round(float(len(success_paths)) / float(len(self.paths)), 2) * 100)) + '%'
}
time.sleep(REQUEST_DELAY)
self.on_success(self.status_id, data=result, progress=progress, pid=self.pid, pname=self.name)
except Exception as e:
result = {
"error": True,
"message": str(e),
"traceback": traceback.format_exc()
}
self.on_error(self.status_id, result, pid=self.pid, pname=self.name)
def download_file_from_webdav(self, path, target_path, operation_progress):
try:
download_result = {}
target_file = target_path + path
if not os.path.exists(target_file):
download_result = self.webdav.download(path, target_path, self.downloading_progress)
if not download_result['success'] or len(download_result['file_list']['failed']) > 0:
                    if download_result['error'] is not None:
                        raise download_result['error']
                    raise Exception("Download error")
elif self.overwrite and os.path.exists(target_file) and not os.path.isdir(target_file):
download_result = self.webdav.download(path, target_path, self.downloading_progress)
if not download_result['success'] or len(download_result['file_list']['failed']) > 0:
                    if download_result['error'] is not None:
                        raise download_result['error']
                    raise Exception("Download error")
elif self.overwrite and os.path.isdir(target_file):
"""
See https://docs.python.org/3.4/library/shutil.html?highlight=shutil#shutil.copy
In case copy file when destination is dir
"""
shutil.rmtree(target_file)
download_result = self.webdav.download(path, target_path, self.downloading_progress)
if not download_result['success'] or len(download_result['file_list']['failed']) > 0:
                    if download_result['error'] is not None:
                        raise download_result['error']
                    raise Exception("Download error")
else:
pass
except Exception as e:
self.logger.info("Cannot move file %s , %s" % (path, str(e)))
raise e
finally:
operation_progress["processed"] += 1
return download_result
def get_total(self, progress_object, paths, count_files=True):
self.logger.debug("start get_total() files = %s" % count_files)
webdav = WebDavConnection.create(self.login, self.source.get('server_id'), self.logger)
for path in paths:
try:
self.recursive_total(webdav, path, progress_object)
except Exception as e:
self.logger.error("Error get_total file %s , error %s" % (str(path), str(e)))
continue
progress_object["total_done"] = True
self.logger.debug("done get_total(), found %s files" % progress_object.get("total"))
return
def recursive_total(self, webdav, path, progress_object):
if webdav.isfile(path):
progress_object["total"] += 1
else:
for file in webdav.listdir(path):
self.recursive_total(webdav, file, progress_object)
def downloading_progress(self, download_t, download_d, upload_t, upload_d):
try:
percent_download = 0
if download_t != 0:
percent_download = round(float(download_d) / float(download_t), 2)
if percent_download != self.operation_progress.get("previous_percent"):
if percent_download == 0 and self.operation_progress.get("previous_percent") != 0:
self.operation_progress["processed"] += 1
self.operation_progress["previous_percent"] = percent_download
total_percent = percent_download + self.operation_progress.get("processed")
denominator = 50
if self.operation_progress.get("total_done"):
denominator = self.operation_progress.get("total")
percent = round(float(total_percent) /
float(denominator), 2)
self.logger.debug("percentage changed to %s" % percent)
progress = {
'percent': percent,
'text': str(int(percent * 100)) + '%'
}
self.on_running(self.status_id, progress=progress, pid=self.pid, pname=self.name)
except Exception as ex:
self.logger.error("Error in MoveFromWebDav downloading_progress(): %s, traceback = %s" %
(str(ex), traceback.format_exc()))
| gpl-3.0 | 5,954,226,736,938,706,000 | 42.206704 | 114 | 0.543962 | false |
tszym/ansible | lib/ansible/module_utils/cloud.py | 3 | 8468 | #
# (c) 2016 Allen Sanabria, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
This module adds shared support for generic cloud modules
In order to use this module, include it as part of a custom
module as shown below.
from ansible.module_utils.cloud import *
The 'cloud' module provides the following common classes:
* CloudRetry
- The base class to be used by other cloud providers, in order to
provide a backoff/retry decorator based on status codes.
- Example using the AWSRetry class which inherits from CloudRetry.
@AWSRetry.exponential_backoff(retries=10, delay=3)
get_ec2_security_group_ids_from_names()
@AWSRetry.jittered_backoff()
get_ec2_security_group_ids_from_names()
"""
import random
from functools import wraps
import syslog
import time
from ansible.module_utils.pycompat24 import get_exception
def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
""" Customizable exponential backoff strategy.
Args:
retries (int): Maximum number of times to retry a request.
delay (float): Initial (base) delay.
backoff (float): base of the exponent to use for exponential
backoff.
max_delay (int): Optional. If provided each delay generated is capped
at this amount. Defaults to 60 seconds.
Returns:
Callable that returns a generator. This generator yields durations in
seconds to be used as delays for an exponential backoff strategy.
Usage:
>>> backoff = _exponential_backoff()
>>> backoff
<function backoff_backoff at 0x7f0d939facf8>
>>> list(backoff())
[2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
"""
def backoff_gen():
for retry in range(0, retries):
sleep = delay * backoff ** retry
yield sleep if max_delay is None else min(sleep, max_delay)
return backoff_gen
def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
""" Implements the "Full Jitter" backoff strategy described here
https://www.awsarchitectureblog.com/2015/03/backoff.html
Args:
retries (int): Maximum number of times to retry a request.
delay (float): Approximate number of seconds to sleep for the first
retry.
max_delay (int): The maximum number of seconds to sleep for any retry.
_random (random.Random or None): Makes this generator testable by
            allowing developers to explicitly pass in a seeded Random.
Returns:
Callable that returns a generator. This generator yields durations in
seconds to be used as delays for a full jitter backoff strategy.
Usage:
>>> backoff = _full_jitter_backoff(retries=5)
>>> backoff
<function backoff_backoff at 0x7f0d939facf8>
>>> list(backoff())
[3, 6, 5, 23, 38]
>>> list(backoff())
[2, 1, 6, 6, 31]
"""
def backoff_gen():
for retry in range(0, retries):
yield _random.randint(0, min(max_delay, delay * 2 ** retry))
return backoff_gen
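# Illustrative note (editorial addition, not part of the original module): the
# `_random` hook above exists so tests can make the jitter deterministic; the
# seed value below is an arbitrary assumption.
#
#     import random
#     deterministic = _full_jitter_backoff(retries=3, delay=3,
#                                          _random=random.Random(0))
#     list(deterministic())  # three reproducible delays (bounded by 3, 6 and 12)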
class CloudRetry(object):
""" CloudRetry can be used by any cloud provider, in order to implement a
backoff algorithm/retry effect based on Status Code from Exceptions.
"""
# This is the base class of the exception.
# AWS Example botocore.exceptions.ClientError
base_class = None
@staticmethod
def status_code_from_exception(error):
""" Return the status code from the exception object
Args:
error (object): The exception itself.
"""
pass
@staticmethod
def found(response_code):
""" Return True if the Response Code to retry on was found.
Args:
response_code (str): This is the Response Code that is being matched against.
"""
pass
@classmethod
def _backoff(cls, backoff_strategy):
""" Retry calling the Cloud decorated function using the provided
backoff strategy.
Args:
backoff_strategy (callable): Callable that returns a generator. The
generator should yield sleep times for each retry of the decorated
function.
"""
def deco(f):
@wraps(f)
def retry_func(*args, **kwargs):
for delay in backoff_strategy():
try:
return f(*args, **kwargs)
except Exception:
e = get_exception()
if isinstance(e, cls.base_class):
response_code = cls.status_code_from_exception(e)
if cls.found(response_code):
msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
syslog.syslog(syslog.LOG_INFO, msg)
time.sleep(delay)
else:
# Return original exception if exception is not a ClientError
raise e
else:
# Return original exception if exception is not a ClientError
raise e
return f(*args, **kwargs)
return retry_func # true decorator
return deco
@classmethod
def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60):
"""
Retry calling the Cloud decorated function using an exponential backoff.
Kwargs:
retries (int): Number of times to retry a failed request before giving up
default=10
delay (int or float): Initial delay between retries in seconds
default=3
backoff (int or float): backoff multiplier e.g. value of 2 will
double the delay each retry
                default=2
max_delay (int or None): maximum amount of time to wait between retries.
default=60
"""
return cls._backoff(_exponential_backoff(
retries=retries, delay=delay, backoff=backoff, max_delay=max_delay))
@classmethod
def jittered_backoff(cls, retries=10, delay=3, max_delay=60):
"""
Retry calling the Cloud decorated function using a jittered backoff
strategy. More on this strategy here:
https://www.awsarchitectureblog.com/2015/03/backoff.html
Kwargs:
retries (int): Number of times to retry a failed request before giving up
default=10
delay (int): Initial delay between retries in seconds
default=3
max_delay (int): maximum amount of time to wait between retries.
default=60
"""
return cls._backoff(_full_jitter_backoff(
retries=retries, delay=delay, max_delay=max_delay))
@classmethod
def backoff(cls, tries=10, delay=3, backoff=1.1):
"""
Retry calling the Cloud decorated function using an exponential backoff.
Compatibility for the original implementation of CloudRetry.backoff that
did not provide configurable backoff strategies. Developers should use
CloudRetry.exponential_backoff instead.
Kwargs:
tries (int): Number of times to try (not retry) before giving up
default=10
delay (int or float): Initial delay between retries in seconds
default=3
backoff (int or float): backoff multiplier e.g. value of 2 will
double the delay each retry
default=1.1
"""
return cls.exponential_backoff(
retries=tries - 1, delay=delay, backoff=backoff, max_delay=None)
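# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original module).
# The module docstring above references AWSRetry; a provider-specific retry
# helper only needs to subclass CloudRetry and supply `base_class`,
# `status_code_from_exception` and `found`. The exception type and the
# retryable codes below are assumptions chosen purely for illustration.
class _ExampleCloudError(Exception):
    def __init__(self, status):
        super(_ExampleCloudError, self).__init__(status)
        self.status = status
class _ExampleRetry(CloudRetry):
    base_class = _ExampleCloudError
    @staticmethod
    def status_code_from_exception(error):
        # Pull the provider-specific status code off the exception object.
        return error.status
    @staticmethod
    def found(response_code):
        # Retry only on throttling-style responses in this sketch.
        return response_code in ('Throttling', 'RequestLimitExceeded')
# Hypothetical usage, decorating a function that talks to the cloud API:
# @_ExampleRetry.jittered_backoff(retries=5, delay=2)
# def describe_instances(client):
#     return client.describe_instances()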
| gpl-3.0 | -2,135,671,735,536,050,400 | 37.490909 | 93 | 0.611833 | false |
demarle/VTK | ThirdParty/Twisted/twisted/trial/__init__.py | 60 | 2053 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Maintainer: Jonathan Lange
"""
Asynchronous unit testing framework.
Trial extends Python's builtin C{unittest} to provide support for asynchronous
tests.
Maintainer: Jonathan Lange
Trial strives to be compatible with other Python xUnit testing frameworks.
"Compatibility" is a difficult things to define. In practice, it means that:
- L{twisted.trial.unittest.TestCase} objects should be able to be used by
other test runners without those runners requiring special support for
Trial tests.
- Tests that subclass the standard library C{TestCase} and don't do anything
"too weird" should be able to be discoverable and runnable by the Trial
test runner without the authors of those tests having to jump through
hoops.
- Tests that implement the interface provided by the standard library
C{TestCase} should be runnable by the Trial runner.
- The Trial test runner and Trial L{unittest.TestCase} objects ought to be
able to use standard library C{TestResult} objects, and third party
C{TestResult} objects based on the standard library.
This list is not necessarily exhaustive -- compatibility is hard to define.
Contributors who discover more helpful ways of defining compatibility are
encouraged to update this document.
Examples:
B{Timeouts} for tests should be implemented in the runner. If this is done,
then timeouts could work for third-party TestCase objects as well as for
L{twisted.trial.unittest.TestCase} objects. Further, Twisted C{TestCase}
objects will run in other runners without timing out.
See U{http://twistedmatrix.com/trac/ticket/2675}.
Running tests in a temporary directory should be a feature of the test case,
because often tests themselves rely on this behaviour. If the feature is
implemented in the runner, then tests will change behaviour (possibly
breaking) when run in a different test runner. Further, many tests don't even
care about the filesystem.
See U{http://twistedmatrix.com/trac/ticket/2916}.
"""
| bsd-3-clause | 1,086,748,128,466,007,700 | 38.480769 | 78 | 0.787628 | false |
rlindner81/pyload | module/plugins/crypter/RelinkUs.py | 1 | 11552 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import binascii
import re
import Crypto.Cipher.AES
from module.plugins.captcha.SolveMedia import SolveMedia
from module.plugins.internal.Captcha import Captcha
from module.plugins.internal.Crypter import Crypter
from module.plugins.internal.misc import fsjoin, replace_patterns
class RelinkUs(Crypter):
__name__ = "RelinkUs"
__type__ = "crypter"
__version__ = "3.20"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?relink\.(?:us|to)/(f/|((view|go)\.php\?id=))(?P<ID>.+)'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("folder_per_package", "Default;Yes;No", "Create folder for each package", "Default")]
__description__ = """Relink.us decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("fragonib", "fragonib[AT]yahoo[DOT]es"),
("AndroKev", "[email protected]")]
URL_REPLACEMENTS = [(__pattern__ + '.*', r'http://relink.to/f/\g<ID>')]
PREFERRED_LINK_SOURCES = ["cnl2", "dlc", "web"]
OFFLINE_TOKEN = r'<title>Tattooside'
PASSWORD_TOKEN = r'container_password.php'
    PASSWORD_ERROR_TOKEN = r'You have entered an incorrect password'
PASSWORD_SUBMIT_URL = r'http://relink.to/container_password.php'
CAPTCHA_TOKEN = r'container_captcha.php'
CIRCLE_CAPTCHA_PATTERN = r'id="captcha_id" value="(\w+?)"'
    CAPTCHA_ERROR_TOKEN = r'You have solved the captcha wrong'
CIRCLE_CAPTCHA_IMG_URL = r'http://relink.to/core/captcha/circlecaptcha.php'
CAPTCHA_SUBMIT_URL = r'http://relink.to/container_captcha.php'
FILE_TITLE_PATTERN = r'<th>Title</th><td>(.*)</td></tr>'
FILE_NOTITLE = r'No title'
CNL2_FORM_PATTERN = r'<form id="cnl_form-(.*?)</form>'
CNL2_FORMINPUT_PATTERN = r'<input.*?name="%s".*?value="(.*?)"'
CNL2_JK_KEY = "jk"
CNL2_CRYPTED_KEY = "crypted"
DLC_LINK_PATTERN = r'<a href=".*?" class="dlc_button" target="_blank">'
DLC_DOWNLOAD_URL = r'http://relink.to/download.php'
WEB_FORWARD_PATTERN = r'getFile\(\'(.+)\'\)'
WEB_FORWARD_URL = r'http://relink.to/frame.php'
WEB_LINK_PATTERN = r'<iframe name="Container" height="100%" frameborder="no" width="100%" src="(.+)"></iframe>'
def setup(self):
self.file_id = None
self.package = None
self.captcha = Captcha(self.pyfile)
def decrypt(self, pyfile):
#: Init
self.init_package(pyfile)
#: Request package
self.request_package()
#: Check for online
if not self.is_online():
self.offline()
#: Check for protection
if self.is_password_protected():
self.unlock_password_protection()
self.handle_errors()
if self.is_captcha_protected():
self.unlock_captcha_protection()
self.handle_errors()
#: Get package name and folder
pack_name, folder_name = self.get_package_info()
#: Extract package links
pack_links = []
for sources in self.PREFERRED_LINK_SOURCES:
pack_links.extend(self.handle_link_source(sources))
if pack_links: #: Use only first source which provides links
break
pack_links = set(pack_links)
#: Pack
if pack_links:
self.packages = [(pack_name, pack_links, folder_name)]
def init_package(self, pyfile):
pyfile.url = replace_patterns(pyfile.url, self.URL_REPLACEMENTS)
self.file_id = re.match(self.__pattern__, pyfile.url).group('ID')
self.package = pyfile.package()
def request_package(self):
self.data = self.load(self.pyfile.url)
def is_online(self):
if self.OFFLINE_TOKEN in self.data:
self.log_debug("File not found")
return False
return True
def is_password_protected(self):
if self.PASSWORD_TOKEN in self.data:
self.log_debug("Links are password protected")
            return True
        return False
def is_captcha_protected(self):
if self.CAPTCHA_TOKEN in self.data:
self.log_debug("Links are captcha protected")
return True
return False
def unlock_password_protection(self):
password = self.get_password()
self.log_debug(
"Submitting password [%s] for protected links" %
password)
if password:
passwd_url = self.PASSWORD_SUBMIT_URL + "?id=%s" % self.file_id
passwd_data = {
'id': self.file_id,
'password': password,
'pw': 'submit'}
self.data = self.load(passwd_url, post=passwd_data)
def unlock_captcha_protection(self):
m = re.search(self.CIRCLE_CAPTCHA_PATTERN, self.data)
if m:
self.log_debug("Request circle captcha resolving")
captcha_id = m.group(1)
coords = self.captcha.decrypt(
self.CIRCLE_CAPTCHA_IMG_URL,
get={
'id': captcha_id},
input_type="png",
output_type='positional') # , ocr="CircleCaptcha")
self.log_debug(
"Captcha resolved, coords (%s,%s)" %
(coords[0], coords[1]))
post_data = {'button.x': coords[0],
'button.y': coords[1],
'captcha_id': captcha_id,
'captcha_type': "RelinkCircle",
'captcha': "submit"}
else:
solvemedia = SolveMedia(self.pyfile)
captcha_key = solvemedia.detect_key()
if captcha_key:
self.log_debug(_("Request SolveMedia captcha resolving"))
response, challenge = solvemedia.challenge()
post_data = {'adcopy_response': response,
'adcopy_challenge': challenge,
'captcha_type': "Solvemedia",
'submit': "Continue",
'captcha': "submit"}
else:
self.log_error(_("Unknown captcha type detected"))
self.fail(_("Unknown captcha type"))
self.data = self.load(self.CAPTCHA_SUBMIT_URL,
ref=False, # ref=self.CAPTCHA_SUBMIT_URL + "&id=" + self.file_id,
get={'id': self.file_id},
post=post_data)
def get_package_info(self):
name = folder = None
#: Try to get info from web
# self.data = self.load(self.pyfile.url)
m = re.search(self.FILE_TITLE_PATTERN, self.data)
if m is not None:
title = m.group(1).strip()
if not self.FILE_NOTITLE in title:
name = folder = title
self.log_debug(
_("Found name [%s] and folder [%s] in package info") %
(name, folder))
#: Fallback to defaults
if not name or not folder:
name = self.package.name
folder = self.package.folder
self.log_debug(
_("Package info not found, defaulting to pyfile name [%s] and folder [%s]") %
(name, folder))
#: Return package info
return name, folder
def handle_errors(self):
        if self.PASSWORD_ERROR_TOKEN in self.data:
self.fail(_("Wrong password"))
if self.captcha.task:
            if self.CAPTCHA_ERROR_TOKEN in self.data:
self.retry_captcha()
else:
self.captcha.correct()
def handle_link_source(self, source):
if source == "cnl2":
return self.handle_CNL2Links()
elif source == "dlc":
return self.handle_DLC_links()
elif source == "web":
return self.handle_WEB_links()
else:
self.error(_('Unknown source type "%s"') % source)
def handle_CNL2Links(self):
self.log_debug(_("Search for CNL2 links"))
pack_links = []
m = re.search(self.CNL2_FORM_PATTERN, self.data, re.S)
if m is not None:
cnl2_form = m.group(1)
try:
(vcrypted, vjk) = self._get_cipher_params(cnl2_form)
for (crypted, jk) in zip(vcrypted, vjk):
pack_links.extend(self._get_links(crypted, jk))
except Exception:
self.log_debug(_("Unable to decrypt CNL2 links", trace=True))
return pack_links
def handle_DLC_links(self):
self.log_debug(_("Search for DLC links"))
pack_links = []
m = re.search(self.DLC_LINK_PATTERN, self.data)
if m is not None:
container_url = self.DLC_DOWNLOAD_URL + "?id=%s&dlc=1" % self.file_id
self.log_debug(
_("Downloading DLC container link [%s]") %
container_url)
try:
dlc = self.load(container_url)
dlc_filename = self.file_id + ".dlc"
dlc_filepath = fsjoin(
self.pyload.config.get(
'general',
'download_folder'),
dlc_filename)
with open(dlc_filepath, "wb") as f:
f.write(dlc)
pack_links.append(dlc_filepath)
except Exception:
self.fail(_("Unable to download DLC container"))
return pack_links
def handle_WEB_links(self):
self.log_debug(_("Search for WEB links"))
pack_links = []
params = re.findall(self.WEB_FORWARD_PATTERN, self.data)
self.log_debug(_("Decrypting %d Web links") % len(params))
for index, param in enumerate(params):
try:
url = self.WEB_FORWARD_URL + "?%s" % param
self.log_debug(
_("Decrypting Web link %d, %s") %
(index + 1, url))
res = self.load(url)
link = re.search(self.WEB_LINK_PATTERN, res).group(1)
pack_links.append(link)
except Exception as detail:
self.log_debug(
_("Error decrypting Web link %s, %s") %
(index, detail))
self.wait(4)
return pack_links
def _get_cipher_params(self, cnl2_form):
#: Get jk
jk_re = self.CNL2_FORMINPUT_PATTERN % self.CNL2_JK_KEY
vjk = re.findall(jk_re, cnl2_form, re.I)
#: Get crypted
crypted_re = self.CNL2_FORMINPUT_PATTERN % RelinkUs.CNL2_CRYPTED_KEY
vcrypted = re.findall(crypted_re, cnl2_form, re.I)
#: Log and return
self.log_debug(_("Detected %d crypted blocks") % len(vcrypted))
return vcrypted, vjk
def _get_links(self, crypted, jk):
#: Get key
jreturn = self.js.eval("%s f()" % jk)
self.log_debug(_("JsEngine returns value [%s]") % jreturn)
key = binascii.unhexlify(jreturn)
#: Decrypt
Key = key
IV = key
obj = Crypto.Cipher.AES.new(Key, Crypto.Cipher.AES.MODE_CBC, IV)
text = obj.decrypt(crypted.decode('base64'))
#: Extract links
text = text.replace("\x00", "").replace("\r", "")
links = filter(bool, text.split('\n'))
#: Log and return
self.log_debug(_("Package has %d links") % len(links))
return links
| gpl-3.0 | -4,407,337,896,784,982,500 | 33.586826 | 115 | 0.53653 | false |
uw-it-aca/canvas-analytics | data_aggregator/tests/test_cache.py | 1 | 1187 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import unittest
from django.test import TestCase
from data_aggregator.cache import DataAggregatorGCSCache
class TestDataAggregatorGCSCache(TestCase):
def test_get_cache_expiration_time(self):
cache = DataAggregatorGCSCache()
# valid urls
self.assertEqual(
cache.get_cache_expiration_time(
"canvas",
"/api/v1/courses/1392640/analytics/student_summaries.json"),
0)
self.assertEqual(
cache.get_cache_expiration_time(
"canvas",
"/api/v1/courses/1399587/analytics/users/3562797/"
"assignments.json"),
0)
# unknown service
self.assertEqual(
cache.get_cache_expiration_time(
"foobar",
"/api/v1/courses/1392640/analytics/"),
None)
# bad url
self.assertEqual(
cache.get_cache_expiration_time(
"canvas",
"/api/v2/courses/1392640/analytics/"),
None)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -4,825,218,005,346,232,000 | 28.675 | 76 | 0.566133 | false |
benob/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/stringcomp.py | 9 | 4452 | # Natural Language Toolkit
# String Comparison Module
# Author: Tiago Tresoldi <[email protected]>
"""
String Comparison Module.
Author: Tiago Tresoldi <[email protected]>
Based on previous work by Qi Xiao Yang, Sung Sam Yuan, Li Zhao, Lu Chun,
and Sung Peng.
"""
def stringcomp (fx, fy):
"""
Return a number within C{0.0} and C{1.0} indicating the similarity between
    two strings. A perfect match is C{1.0}, no match at all is C{0.0}.
This is an implementation of the string comparison algorithm (also known
as "string similarity") published by Qi Xiao Yang, Sung Sam Yuan, Li Zhao,
Lu Chun and Sun Peng in a paper called "Faster Algorithm of String
Comparison" ( http://front.math.ucdavis.edu/0112.6022 ). Please note that,
however, this implementation presents some relevant differences that
will lead to different numerical results (read the comments for more
details).
@param fx: A C{string}.
@param fy: A C{string}.
    @return: A float with the value of the comparison between C{fx} and C{fy}.
C{1.0} indicates a perfect match, C{0.0} no match at all.
@rtype: C{float}
"""
# get the smaller of 'n' and 'm', and of 'fx' and 'fy'
n, m = len(fx), len(fy)
if m < n:
(n, m) = (m, n)
(fx, fy) = (fy, fx)
# Sum of the Square of the Number of the same Characters
ssnc = 0.
    # My implementation presents some relevant differences to the pseudo-code
    # presented in the paper by Yang et al., which in a number of cases will
    # lead to different numerical results (and, while no empirical tests have
    # been performed, I expect this to be slower than the original).
    # The differences are due to two specific characteristics of the original
    # algorithm that I consider undesirable for my purposes:
#
    # 1. It does not take into account the probable repetition of the same
    #    substring inside the strings to be compared (such as "you" in "where
    #    do you think that you are going?") because, as far as I was able to
    #    understand, it counts only the first occurrence of each substring
# found.
# 2. It does not seem to consider the high probability of having more
# than one pattern of the same length (for example, comparing between
# "abc1def" and "abc2def" seems to consider only one three-character
# pattern, "abc").
#
# Demonstrating the differences between the two algorithms (or, at least,
# between my implementation of the original and the revised one):
#
# "abc1def" and "abc2def"
# Original: 0.534
# Current: 0.606
for length in range(n, 0, -1):
while True:
length_prev_ssnc = ssnc
for i in range(len(fx)-length+1):
pattern = fx[i:i+length]
pattern_prev_ssnc = ssnc
fx_removed = False
while True:
index = fy.find(pattern)
if index != -1:
ssnc += (2.*length)**2
if fx_removed == False:
fx = fx[:i] + fx[i+length:]
fx_removed = True
fy = fy[:index] + fy[index+length:]
else:
break
if ssnc != pattern_prev_ssnc:
break
if ssnc == length_prev_ssnc:
break
return (ssnc/((n+m)**2.))**0.5
def demo ():
print "Comparison between 'python' and 'python': %.2f" % stringcomp("python", "python")
print "Comparison between 'python' and 'Python': %.2f" % stringcomp("python", "Python")
print "Comparison between 'NLTK' and 'NTLK': %.2f" % stringcomp("NLTK", "NTLK")
print "Comparison between 'abc' and 'def': %.2f" % stringcomp("abc", "def")
print "Word most similar to 'australia' in list ['canada', 'brazil', 'egypt', 'thailand', 'austria']:"
max_score = 0.0 ; best_match = None
for country in ["canada", "brazil", "egypt", "thailand", "austria"]:
score = stringcomp("australia", country)
if score > max_score:
best_match = country
max_score = score
print "(comparison between 'australia' and '%s': %.2f)" % (country, score)
print "Word most similar to 'australia' is '%s' (score: %.2f)" % (best_match, max_score)
if __name__ == "__main__":
demo()
| gpl-3.0 | 373,177,470,934,229,760 | 40.222222 | 106 | 0.597484 | false |
hkawasaki/kawasaki-aio8-1 | common/djangoapps/util/views.py | 3 | 8888 | import json
import logging
import sys
from django.conf import settings
from django.core.validators import ValidationError, validate_email
from django.views.decorators.csrf import requires_csrf_token
from django.views.defaults import server_error
from django.http import (Http404, HttpResponse, HttpResponseNotAllowed,
HttpResponseServerError)
from dogapi import dog_stats_api
from edxmako.shortcuts import render_to_response
import zendesk
import calc
import track.views
log = logging.getLogger(__name__)
@requires_csrf_token
def jsonable_server_error(request, template_name='500.html'):
"""
500 error handler that serves JSON on an AJAX request, and proxies
to the Django default `server_error` view otherwise.
"""
if request.is_ajax():
msg = {"error": "The edX servers encountered an error"}
return HttpResponseServerError(json.dumps(msg))
else:
return server_error(request, template_name=template_name)
def calculate(request):
''' Calculator in footer of every page. '''
equation = request.GET['equation']
try:
result = calc.evaluator({}, {}, equation)
except:
event = {'error': map(str, sys.exc_info()),
'equation': equation}
track.views.server_track(request, 'error:calc', event, page='calc')
return HttpResponse(json.dumps({'result': 'Invalid syntax'}))
return HttpResponse(json.dumps({'result': str(result)}))
class _ZendeskApi(object):
def __init__(self):
"""
Instantiate the Zendesk API.
All of `ZENDESK_URL`, `ZENDESK_USER`, and `ZENDESK_API_KEY` must be set
in `django.conf.settings`.
"""
self._zendesk_instance = zendesk.Zendesk(
settings.ZENDESK_URL,
settings.ZENDESK_USER,
settings.ZENDESK_API_KEY,
use_api_token=False,
api_version=2,
# As of 2012-05-08, Zendesk is using a CA that is not
# installed on our servers
client_args={"disable_ssl_certificate_validation": True}
)
def create_ticket(self, ticket):
"""
Create the given `ticket` in Zendesk.
The ticket should have the format specified by the zendesk package.
"""
ticket_url = self._zendesk_instance.create_ticket(data=ticket)
return zendesk.get_id_from_url(ticket_url)
def update_ticket(self, ticket_id, update):
"""
Update the Zendesk ticket with id `ticket_id` using the given `update`.
The update should have the format specified by the zendesk package.
"""
self._zendesk_instance.update_ticket(ticket_id=ticket_id, data=update)
def _record_feedback_in_zendesk(realname, email, subject, details, tags, additional_info):
"""
Create a new user-requested Zendesk ticket.
Once created, the ticket will be updated with a private comment containing
additional information from the browser and server, such as HTTP headers
and user state. Returns a boolean value indicating whether ticket creation
was successful, regardless of whether the private comment update succeeded.
"""
zendesk_api = _ZendeskApi()
additional_info_string = (
"Additional information:\n\n" +
"\n".join("%s: %s" % (key, value) for (key, value) in additional_info.items() if value is not None)
)
# Tag all issues with LMS to distinguish channel in Zendesk; requested by student support team
zendesk_tags = list(tags.values()) + ["LMS"]
new_ticket = {
"ticket": {
"requester": {"name": realname, "email": email},
"subject": subject,
"comment": {"body": details},
"tags": zendesk_tags
}
}
try:
ticket_id = zendesk_api.create_ticket(new_ticket)
except zendesk.ZendeskError as err:
log.error("Error creating Zendesk ticket: %s", str(err))
return False
# Additional information is provided as a private update so the information
# is not visible to the user.
ticket_update = {"ticket": {"comment": {"public": False, "body": additional_info_string}}}
try:
zendesk_api.update_ticket(ticket_id, ticket_update)
except zendesk.ZendeskError as err:
log.error("Error updating Zendesk ticket: %s", str(err))
# The update is not strictly necessary, so do not indicate failure to the user
pass
return True
DATADOG_FEEDBACK_METRIC = "lms_feedback_submissions"
def _record_feedback_in_datadog(tags):
datadog_tags = [u"{k}:{v}".format(k=k.encode('utf-8'), v=v.encode('utf-8')) for k, v in tags.items()]
dog_stats_api.increment(DATADOG_FEEDBACK_METRIC, tags=datadog_tags)
def submit_feedback(request):
"""
Create a new user-requested ticket, currently implemented with Zendesk.
If feedback submission is not enabled, any request will raise `Http404`.
If any configuration parameter (`ZENDESK_URL`, `ZENDESK_USER`, or
`ZENDESK_API_KEY`) is missing, any request will raise an `Exception`.
The request must be a POST request specifying `subject` and `details`.
If the user is not authenticated, the request must also specify `name` and
`email`. If the user is authenticated, the `name` and `email` will be
populated from the user's information. If any required parameter is
missing, a 400 error will be returned indicating which field is missing and
providing an error message. If Zendesk ticket creation fails, 500 error
will be returned with no body; if ticket creation succeeds, an empty
successful response (200) will be returned.
"""
if not settings.FEATURES.get('ENABLE_FEEDBACK_SUBMISSION', False):
raise Http404()
if request.method != "POST":
return HttpResponseNotAllowed(["POST"])
if (
not settings.ZENDESK_URL or
not settings.ZENDESK_USER or
not settings.ZENDESK_API_KEY
):
raise Exception("Zendesk enabled but not configured")
def build_error_response(status_code, field, err_msg):
return HttpResponse(json.dumps({"field": field, "error": err_msg}), status=status_code)
additional_info = {}
required_fields = ["subject", "details"]
if not request.user.is_authenticated():
required_fields += ["name", "email"]
required_field_errs = {
"subject": "Please provide a subject.",
"details": "Please provide details.",
"name": "Please provide your name.",
"email": "Please provide a valid e-mail.",
}
for field in required_fields:
if field not in request.POST or not request.POST[field]:
return build_error_response(400, field, required_field_errs[field])
subject = request.POST["subject"]
details = request.POST["details"]
tags = dict(
[(tag, request.POST[tag]) for tag in ["issue_type", "course_id"] if tag in request.POST]
)
if request.user.is_authenticated():
realname = request.user.profile.name
email = request.user.email
additional_info["username"] = request.user.username
else:
realname = request.POST["name"]
email = request.POST["email"]
try:
validate_email(email)
except ValidationError:
return build_error_response(400, "email", required_field_errs["email"])
for header, pretty in [
("HTTP_REFERER", "Page"),
("HTTP_USER_AGENT", "Browser"),
("REMOTE_ADDR", "Client IP"),
("SERVER_NAME", "Host")
]:
additional_info[pretty] = request.META.get(header)
success = _record_feedback_in_zendesk(realname, email, subject, details, tags, additional_info)
_record_feedback_in_datadog(tags)
return HttpResponse(status=(200 if success else 500))
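# Illustrative sketch (editorial addition): the view above expects a POST with
# `subject` and `details`, plus `name` and `email` when the user is anonymous,
# and replies 400 with a JSON {"field": ..., "error": ...} body, 500 on Zendesk
# failure, or an empty 200. The URL below and the use of Django's test client
# are assumptions for illustration only; this helper is never called here.
def _example_submit_feedback(client):
    # `client` is assumed to be a django.test.Client instance.
    return client.post("/submit_feedback", {
        "subject": "Video will not play",
        "details": "The unit 3 video never loads.",
        "name": "Jane Doe",
        "email": "[email protected]",
        "issue_type": "help",
        "course_id": "org/course/run",
    })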
def info(request):
''' Info page (link from main header) '''
return render_to_response("info.html", {})
# From http://djangosnippets.org/snippets/1042/
def parse_accept_header(accept):
"""Parse the Accept header *accept*, returning a list with pairs of
(media_type, q_value), ordered by q values.
"""
result = []
for media_range in accept.split(","):
parts = media_range.split(";")
media_type = parts.pop(0)
media_params = []
q = 1.0
for part in parts:
(key, value) = part.lstrip().split("=", 1)
if key == "q":
q = float(value)
else:
media_params.append((key, value))
result.append((media_type, tuple(media_params), q))
result.sort(lambda x, y: -cmp(x[2], y[2]))
return result
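# Worked example (illustrative): parse_accept_header("text/html;q=0.8,application/json")
# returns [('application/json', (), 1.0), ('text/html', (), 0.8)] -- tuples of
# (media_type, media_params, q_value) ordered by descending q value.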
def accepts(request, media_type):
"""Return whether this request has an Accept header that matches type"""
accept = parse_accept_header(request.META.get("HTTP_ACCEPT", ""))
return media_type in [t for (t, p, q) in accept]
| agpl-3.0 | 8,033,776,134,122,318,000 | 35.130081 | 107 | 0.644127 | false |
bleib1dj/boto | boto/provider.py | 102 | 20925 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright 2010 Google Inc.
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
This class encapsulates the provider-specific header differences.
"""
import os
from boto.compat import six
from datetime import datetime
import boto
from boto import config
from boto.compat import expanduser
from boto.pyami.config import Config
from boto.gs.acl import ACL
from boto.gs.acl import CannedACLStrings as CannedGSACLStrings
from boto.s3.acl import CannedACLStrings as CannedS3ACLStrings
from boto.s3.acl import Policy
HEADER_PREFIX_KEY = 'header_prefix'
METADATA_PREFIX_KEY = 'metadata_prefix'
AWS_HEADER_PREFIX = 'x-amz-'
GOOG_HEADER_PREFIX = 'x-goog-'
ACL_HEADER_KEY = 'acl-header'
AUTH_HEADER_KEY = 'auth-header'
COPY_SOURCE_HEADER_KEY = 'copy-source-header'
COPY_SOURCE_VERSION_ID_HEADER_KEY = 'copy-source-version-id-header'
COPY_SOURCE_RANGE_HEADER_KEY = 'copy-source-range-header'
DELETE_MARKER_HEADER_KEY = 'delete-marker-header'
DATE_HEADER_KEY = 'date-header'
METADATA_DIRECTIVE_HEADER_KEY = 'metadata-directive-header'
RESUMABLE_UPLOAD_HEADER_KEY = 'resumable-upload-header'
SECURITY_TOKEN_HEADER_KEY = 'security-token-header'
STORAGE_CLASS_HEADER_KEY = 'storage-class'
MFA_HEADER_KEY = 'mfa-header'
SERVER_SIDE_ENCRYPTION_KEY = 'server-side-encryption-header'
VERSION_ID_HEADER_KEY = 'version-id-header'
RESTORE_HEADER_KEY = 'restore-header'
STORAGE_COPY_ERROR = 'StorageCopyError'
STORAGE_CREATE_ERROR = 'StorageCreateError'
STORAGE_DATA_ERROR = 'StorageDataError'
STORAGE_PERMISSIONS_ERROR = 'StoragePermissionsError'
STORAGE_RESPONSE_ERROR = 'StorageResponseError'
NO_CREDENTIALS_PROVIDED = object()
class ProfileNotFoundError(ValueError):
pass
class Provider(object):
CredentialMap = {
'aws': ('aws_access_key_id', 'aws_secret_access_key',
'aws_security_token', 'aws_profile'),
'google': ('gs_access_key_id', 'gs_secret_access_key',
None, None),
}
AclClassMap = {
'aws': Policy,
'google': ACL
}
CannedAclsMap = {
'aws': CannedS3ACLStrings,
'google': CannedGSACLStrings
}
HostKeyMap = {
'aws': 's3',
'google': 'gs'
}
ChunkedTransferSupport = {
'aws': False,
'google': True
}
MetadataServiceSupport = {
'aws': True,
'google': False
}
# If you update this map please make sure to put "None" for the
# right-hand-side for any headers that don't apply to a provider, rather
# than simply leaving that header out (which would cause KeyErrors).
HeaderInfoMap = {
'aws': {
HEADER_PREFIX_KEY: AWS_HEADER_PREFIX,
METADATA_PREFIX_KEY: AWS_HEADER_PREFIX + 'meta-',
ACL_HEADER_KEY: AWS_HEADER_PREFIX + 'acl',
AUTH_HEADER_KEY: 'AWS',
COPY_SOURCE_HEADER_KEY: AWS_HEADER_PREFIX + 'copy-source',
COPY_SOURCE_VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX +
'copy-source-version-id',
COPY_SOURCE_RANGE_HEADER_KEY: AWS_HEADER_PREFIX +
'copy-source-range',
DATE_HEADER_KEY: AWS_HEADER_PREFIX + 'date',
DELETE_MARKER_HEADER_KEY: AWS_HEADER_PREFIX + 'delete-marker',
METADATA_DIRECTIVE_HEADER_KEY: AWS_HEADER_PREFIX +
'metadata-directive',
RESUMABLE_UPLOAD_HEADER_KEY: None,
SECURITY_TOKEN_HEADER_KEY: AWS_HEADER_PREFIX + 'security-token',
SERVER_SIDE_ENCRYPTION_KEY: AWS_HEADER_PREFIX +
'server-side-encryption',
VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + 'version-id',
STORAGE_CLASS_HEADER_KEY: AWS_HEADER_PREFIX + 'storage-class',
MFA_HEADER_KEY: AWS_HEADER_PREFIX + 'mfa',
RESTORE_HEADER_KEY: AWS_HEADER_PREFIX + 'restore',
},
'google': {
HEADER_PREFIX_KEY: GOOG_HEADER_PREFIX,
METADATA_PREFIX_KEY: GOOG_HEADER_PREFIX + 'meta-',
ACL_HEADER_KEY: GOOG_HEADER_PREFIX + 'acl',
AUTH_HEADER_KEY: 'GOOG1',
COPY_SOURCE_HEADER_KEY: GOOG_HEADER_PREFIX + 'copy-source',
COPY_SOURCE_VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX +
'copy-source-version-id',
COPY_SOURCE_RANGE_HEADER_KEY: None,
DATE_HEADER_KEY: GOOG_HEADER_PREFIX + 'date',
DELETE_MARKER_HEADER_KEY: GOOG_HEADER_PREFIX + 'delete-marker',
METADATA_DIRECTIVE_HEADER_KEY: GOOG_HEADER_PREFIX +
'metadata-directive',
RESUMABLE_UPLOAD_HEADER_KEY: GOOG_HEADER_PREFIX + 'resumable',
SECURITY_TOKEN_HEADER_KEY: GOOG_HEADER_PREFIX + 'security-token',
SERVER_SIDE_ENCRYPTION_KEY: None,
# Note that this version header is not to be confused with
# the Google Cloud Storage 'x-goog-api-version' header.
VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + 'version-id',
STORAGE_CLASS_HEADER_KEY: None,
MFA_HEADER_KEY: None,
RESTORE_HEADER_KEY: None,
}
}
ErrorMap = {
'aws': {
STORAGE_COPY_ERROR: boto.exception.S3CopyError,
STORAGE_CREATE_ERROR: boto.exception.S3CreateError,
STORAGE_DATA_ERROR: boto.exception.S3DataError,
STORAGE_PERMISSIONS_ERROR: boto.exception.S3PermissionsError,
STORAGE_RESPONSE_ERROR: boto.exception.S3ResponseError,
},
'google': {
STORAGE_COPY_ERROR: boto.exception.GSCopyError,
STORAGE_CREATE_ERROR: boto.exception.GSCreateError,
STORAGE_DATA_ERROR: boto.exception.GSDataError,
STORAGE_PERMISSIONS_ERROR: boto.exception.GSPermissionsError,
STORAGE_RESPONSE_ERROR: boto.exception.GSResponseError,
}
}
def __init__(self, name, access_key=None, secret_key=None,
security_token=None, profile_name=None):
self.host = None
self.port = None
self.host_header = None
self.access_key = access_key
self.secret_key = secret_key
self.security_token = security_token
self.profile_name = profile_name
self.name = name
self.acl_class = self.AclClassMap[self.name]
self.canned_acls = self.CannedAclsMap[self.name]
self._credential_expiry_time = None
# Load shared credentials file if it exists
shared_path = os.path.join(expanduser('~'), '.' + name, 'credentials')
self.shared_credentials = Config(do_load=False)
if os.path.isfile(shared_path):
self.shared_credentials.load_from_path(shared_path)
self.get_credentials(access_key, secret_key, security_token, profile_name)
self.configure_headers()
self.configure_errors()
# Allow config file to override default host and port.
host_opt_name = '%s_host' % self.HostKeyMap[self.name]
if config.has_option('Credentials', host_opt_name):
self.host = config.get('Credentials', host_opt_name)
port_opt_name = '%s_port' % self.HostKeyMap[self.name]
if config.has_option('Credentials', port_opt_name):
self.port = config.getint('Credentials', port_opt_name)
host_header_opt_name = '%s_host_header' % self.HostKeyMap[self.name]
if config.has_option('Credentials', host_header_opt_name):
self.host_header = config.get('Credentials', host_header_opt_name)
def get_access_key(self):
if self._credentials_need_refresh():
self._populate_keys_from_metadata_server()
return self._access_key
def set_access_key(self, value):
self._access_key = value
access_key = property(get_access_key, set_access_key)
def get_secret_key(self):
if self._credentials_need_refresh():
self._populate_keys_from_metadata_server()
return self._secret_key
def set_secret_key(self, value):
self._secret_key = value
secret_key = property(get_secret_key, set_secret_key)
def get_security_token(self):
if self._credentials_need_refresh():
self._populate_keys_from_metadata_server()
return self._security_token
def set_security_token(self, value):
self._security_token = value
security_token = property(get_security_token, set_security_token)
def _credentials_need_refresh(self):
if self._credential_expiry_time is None:
return False
else:
# The credentials should be refreshed if they're going to expire
# in less than 5 minutes.
delta = self._credential_expiry_time - datetime.utcnow()
# python2.6 does not have timedelta.total_seconds() so we have
# to calculate this ourselves. This is straight from the
# datetime docs.
seconds_left = (
(delta.microseconds + (delta.seconds + delta.days * 24 * 3600)
* 10 ** 6) / 10 ** 6)
if seconds_left < (5 * 60):
boto.log.debug("Credentials need to be refreshed.")
return True
else:
return False
def get_credentials(self, access_key=None, secret_key=None,
security_token=None, profile_name=None):
access_key_name, secret_key_name, security_token_name, \
profile_name_name = self.CredentialMap[self.name]
# Load profile from shared environment variable if it was not
# already passed in and the environment variable exists
if profile_name is None and profile_name_name is not None and \
profile_name_name.upper() in os.environ:
profile_name = os.environ[profile_name_name.upper()]
shared = self.shared_credentials
if access_key is not None:
self.access_key = access_key
boto.log.debug("Using access key provided by client.")
elif access_key_name.upper() in os.environ:
self.access_key = os.environ[access_key_name.upper()]
boto.log.debug("Using access key found in environment variable.")
elif profile_name is not None:
if shared.has_option(profile_name, access_key_name):
self.access_key = shared.get(profile_name, access_key_name)
boto.log.debug("Using access key found in shared credential "
"file for profile %s." % profile_name)
elif config.has_option("profile %s" % profile_name,
access_key_name):
self.access_key = config.get("profile %s" % profile_name,
access_key_name)
boto.log.debug("Using access key found in config file: "
"profile %s." % profile_name)
else:
raise ProfileNotFoundError('Profile "%s" not found!' %
profile_name)
elif shared.has_option('default', access_key_name):
self.access_key = shared.get('default', access_key_name)
boto.log.debug("Using access key found in shared credential file.")
elif config.has_option('Credentials', access_key_name):
self.access_key = config.get('Credentials', access_key_name)
boto.log.debug("Using access key found in config file.")
if secret_key is not None:
self.secret_key = secret_key
boto.log.debug("Using secret key provided by client.")
elif secret_key_name.upper() in os.environ:
self.secret_key = os.environ[secret_key_name.upper()]
boto.log.debug("Using secret key found in environment variable.")
elif profile_name is not None:
if shared.has_option(profile_name, secret_key_name):
self.secret_key = shared.get(profile_name, secret_key_name)
boto.log.debug("Using secret key found in shared credential "
"file for profile %s." % profile_name)
elif config.has_option("profile %s" % profile_name, secret_key_name):
self.secret_key = config.get("profile %s" % profile_name,
secret_key_name)
boto.log.debug("Using secret key found in config file: "
"profile %s." % profile_name)
else:
raise ProfileNotFoundError('Profile "%s" not found!' %
profile_name)
elif shared.has_option('default', secret_key_name):
self.secret_key = shared.get('default', secret_key_name)
boto.log.debug("Using secret key found in shared credential file.")
elif config.has_option('Credentials', secret_key_name):
self.secret_key = config.get('Credentials', secret_key_name)
boto.log.debug("Using secret key found in config file.")
elif config.has_option('Credentials', 'keyring'):
keyring_name = config.get('Credentials', 'keyring')
try:
import keyring
except ImportError:
boto.log.error("The keyring module could not be imported. "
"For keyring support, install the keyring "
"module.")
raise
self.secret_key = keyring.get_password(
keyring_name, self.access_key)
boto.log.debug("Using secret key found in keyring.")
if security_token is not None:
self.security_token = security_token
boto.log.debug("Using security token provided by client.")
elif ((security_token_name is not None) and
(access_key is None) and (secret_key is None)):
# Only provide a token from the environment/config if the
# caller did not specify a key and secret. Otherwise an
# environment/config token could be paired with a
# different set of credentials provided by the caller
if security_token_name.upper() in os.environ:
self.security_token = os.environ[security_token_name.upper()]
boto.log.debug("Using security token found in environment"
" variable.")
elif shared.has_option(profile_name or 'default',
security_token_name):
self.security_token = shared.get(profile_name or 'default',
security_token_name)
boto.log.debug("Using security token found in shared "
"credential file.")
elif profile_name is not None:
if config.has_option("profile %s" % profile_name,
security_token_name):
boto.log.debug("config has option")
self.security_token = config.get("profile %s" % profile_name,
security_token_name)
boto.log.debug("Using security token found in config file: "
"profile %s." % profile_name)
elif config.has_option('Credentials', security_token_name):
self.security_token = config.get('Credentials',
security_token_name)
boto.log.debug("Using security token found in config file.")
if ((self._access_key is None or self._secret_key is None) and
self.MetadataServiceSupport[self.name]):
self._populate_keys_from_metadata_server()
self._secret_key = self._convert_key_to_str(self._secret_key)
def _populate_keys_from_metadata_server(self):
# get_instance_metadata is imported here because of a circular
# dependency.
boto.log.debug("Retrieving credentials from metadata server.")
from boto.utils import get_instance_metadata
timeout = config.getfloat('Boto', 'metadata_service_timeout', 1.0)
attempts = config.getint('Boto', 'metadata_service_num_attempts', 1)
# The num_retries arg is actually the total number of attempts made,
# so the config options is named *_num_attempts to make this more
# clear to users.
metadata = get_instance_metadata(
timeout=timeout, num_retries=attempts,
data='meta-data/iam/security-credentials/')
if metadata:
# I'm assuming there's only one role on the instance profile.
security = list(metadata.values())[0]
self._access_key = security['AccessKeyId']
self._secret_key = self._convert_key_to_str(security['SecretAccessKey'])
self._security_token = security['Token']
expires_at = security['Expiration']
self._credential_expiry_time = datetime.strptime(
expires_at, "%Y-%m-%dT%H:%M:%SZ")
boto.log.debug("Retrieved credentials will expire in %s at: %s",
self._credential_expiry_time - datetime.now(), expires_at)
def _convert_key_to_str(self, key):
if isinstance(key, six.text_type):
# the secret key must be bytes and not unicode to work
# properly with hmac.new (see http://bugs.python.org/issue5285)
return str(key)
return key
def configure_headers(self):
header_info_map = self.HeaderInfoMap[self.name]
self.metadata_prefix = header_info_map[METADATA_PREFIX_KEY]
self.header_prefix = header_info_map[HEADER_PREFIX_KEY]
self.acl_header = header_info_map[ACL_HEADER_KEY]
self.auth_header = header_info_map[AUTH_HEADER_KEY]
self.copy_source_header = header_info_map[COPY_SOURCE_HEADER_KEY]
self.copy_source_version_id = header_info_map[
COPY_SOURCE_VERSION_ID_HEADER_KEY]
self.copy_source_range_header = header_info_map[
COPY_SOURCE_RANGE_HEADER_KEY]
self.date_header = header_info_map[DATE_HEADER_KEY]
self.delete_marker = header_info_map[DELETE_MARKER_HEADER_KEY]
self.metadata_directive_header = (
header_info_map[METADATA_DIRECTIVE_HEADER_KEY])
self.security_token_header = header_info_map[SECURITY_TOKEN_HEADER_KEY]
self.resumable_upload_header = (
header_info_map[RESUMABLE_UPLOAD_HEADER_KEY])
self.server_side_encryption_header = header_info_map[SERVER_SIDE_ENCRYPTION_KEY]
self.storage_class_header = header_info_map[STORAGE_CLASS_HEADER_KEY]
self.version_id = header_info_map[VERSION_ID_HEADER_KEY]
self.mfa_header = header_info_map[MFA_HEADER_KEY]
self.restore_header = header_info_map[RESTORE_HEADER_KEY]
def configure_errors(self):
error_map = self.ErrorMap[self.name]
self.storage_copy_error = error_map[STORAGE_COPY_ERROR]
self.storage_create_error = error_map[STORAGE_CREATE_ERROR]
self.storage_data_error = error_map[STORAGE_DATA_ERROR]
self.storage_permissions_error = error_map[STORAGE_PERMISSIONS_ERROR]
self.storage_response_error = error_map[STORAGE_RESPONSE_ERROR]
def get_provider_name(self):
return self.HostKeyMap[self.name]
def supports_chunked_transfer(self):
return self.ChunkedTransferSupport[self.name]
# Static utility method for getting default Provider.
def get_default():
return Provider('aws')
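# Illustrative usage sketch (not part of boto itself): header names and error
# classes can be read off a Provider instance, assuming AWS credentials are
# available from the environment or a boto config file:
#   provider = Provider('aws')
#   provider.acl_header              # 'x-amz-acl'
#   provider.metadata_prefix         # 'x-amz-meta-'
#   provider.storage_response_error  # boto.exception.S3ResponseError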
| mit | 2,384,329,937,759,016,000 | 45.294248 | 88 | 0.60994 | false |
oliciv/youtube-dl | youtube_dl/extractor/soundcloud.py | 17 | 17239 | # encoding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
int_or_none,
unified_strdate,
)
class SoundcloudIE(InfoExtractor):
"""Information extractor for soundcloud.com
To access the media, the uid of the song and a stream token
must be extracted from the page source and the script must make
a request to media.soundcloud.com/crossdomain.xml. Then
       the media can be grabbed by requesting from a URL composed
       of the stream token and uid.
"""
_VALID_URL = r'''(?x)^(?:https?://)?
(?:(?:(?:www\.|m\.)?soundcloud\.com/
(?P<uploader>[\w\d-]+)/
(?!(?:tracks|sets(?:/[^/?#]+)?|reposts|likes|spotlight)/?(?:$|[?#]))
(?P<title>[\w\d-]+)/?
(?P<token>[^?]+?)?(?:[?].*)?$)
|(?:api\.soundcloud\.com/tracks/(?P<track_id>\d+)
(?:/?\?secret_token=(?P<secret_token>[^&]+))?)
|(?P<player>(?:w|player|p.)\.soundcloud\.com/player/?.*?url=.*)
)
'''
IE_NAME = 'soundcloud'
_TESTS = [
{
'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy',
'md5': 'ebef0a451b909710ed1d7787dddbf0d7',
'info_dict': {
'id': '62986583',
'ext': 'mp3',
'upload_date': '20121011',
'description': 'No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o\'d',
'uploader': 'E.T. ExTerrestrial Music',
'title': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1',
'duration': 143,
}
},
# not streamable song
{
'url': 'https://soundcloud.com/the-concept-band/goldrushed-mastered?in=the-concept-band/sets/the-royal-concept-ep',
'info_dict': {
'id': '47127627',
'ext': 'mp3',
'title': 'Goldrushed',
'description': 'From Stockholm Sweden\r\nPovel / Magnus / Filip / David\r\nwww.theroyalconcept.com',
'uploader': 'The Royal Concept',
'upload_date': '20120521',
'duration': 227,
},
'params': {
# rtmp
'skip_download': True,
},
},
# private link
{
'url': 'https://soundcloud.com/jaimemf/youtube-dl-test-video-a-y-baw/s-8Pjrp',
'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
'info_dict': {
'id': '123998367',
'ext': 'mp3',
'title': 'Youtube - Dl Test Video \'\' Ä↭',
'uploader': 'jaimeMF',
'description': 'test chars: \"\'/\\ä↭',
'upload_date': '20131209',
'duration': 9,
},
},
# private link (alt format)
{
'url': 'https://api.soundcloud.com/tracks/123998367?secret_token=s-8Pjrp',
'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
'info_dict': {
'id': '123998367',
'ext': 'mp3',
'title': 'Youtube - Dl Test Video \'\' Ä↭',
'uploader': 'jaimeMF',
'description': 'test chars: \"\'/\\ä↭',
'upload_date': '20131209',
'duration': 9,
},
},
# downloadable song
{
'url': 'https://soundcloud.com/oddsamples/bus-brakes',
'md5': '7624f2351f8a3b2e7cd51522496e7631',
'info_dict': {
'id': '128590877',
'ext': 'mp3',
'title': 'Bus Brakes',
'description': 'md5:0053ca6396e8d2fd7b7e1595ef12ab66',
'uploader': 'oddsamples',
'upload_date': '20140109',
'duration': 17,
},
},
]
_CLIENT_ID = '02gUJC0hH2ct1EGOcYXQIzRFU91c72Ea'
_IPHONE_CLIENT_ID = '376f225bf427445fc4bfb6b99b72e0bf'
def report_resolve(self, video_id):
"""Report information extraction."""
self.to_screen('%s: Resolving id' % video_id)
@classmethod
def _resolv_url(cls, url):
return 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=' + cls._CLIENT_ID
def _extract_info_dict(self, info, full_title=None, quiet=False, secret_token=None):
track_id = compat_str(info['id'])
name = full_title or track_id
if quiet:
self.report_extraction(name)
thumbnail = info['artwork_url']
if thumbnail is not None:
thumbnail = thumbnail.replace('-large', '-t500x500')
ext = 'mp3'
result = {
'id': track_id,
'uploader': info['user']['username'],
'upload_date': unified_strdate(info['created_at']),
'title': info['title'],
'description': info['description'],
'thumbnail': thumbnail,
'duration': int_or_none(info.get('duration'), 1000),
'webpage_url': info.get('permalink_url'),
}
formats = []
if info.get('downloadable', False):
# We can build a direct link to the song
format_url = (
'https://api.soundcloud.com/tracks/{0}/download?client_id={1}'.format(
track_id, self._CLIENT_ID))
formats.append({
'format_id': 'download',
'ext': info.get('original_format', 'mp3'),
'url': format_url,
'vcodec': 'none',
'preference': 10,
})
# We have to retrieve the url
streams_url = ('http://api.soundcloud.com/i1/tracks/{0}/streams?'
'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token))
format_dict = self._download_json(
streams_url,
track_id, 'Downloading track url')
for key, stream_url in format_dict.items():
if key.startswith('http'):
formats.append({
'format_id': key,
'ext': ext,
'url': stream_url,
'vcodec': 'none',
})
elif key.startswith('rtmp'):
                # The url doesn't have an rtmp app, so we have to extract the playpath
url, path = stream_url.split('mp3:', 1)
formats.append({
'format_id': key,
'url': url,
'play_path': 'mp3:' + path,
'ext': 'flv',
'vcodec': 'none',
})
if not formats:
            # We fall back to the stream_url in the original info; this
            # cannot always be used, as it sometimes gives an HTTP 404 error
formats.append({
'format_id': 'fallback',
'url': info['stream_url'] + '?client_id=' + self._CLIENT_ID,
'ext': ext,
'vcodec': 'none',
})
for f in formats:
if f['format_id'].startswith('http'):
f['protocol'] = 'http'
if f['format_id'].startswith('rtmp'):
f['protocol'] = 'rtmp'
self._check_formats(formats, track_id)
self._sort_formats(formats)
result['formats'] = formats
return result
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
track_id = mobj.group('track_id')
token = None
if track_id is not None:
info_json_url = 'http://api.soundcloud.com/tracks/' + track_id + '.json?client_id=' + self._CLIENT_ID
full_title = track_id
token = mobj.group('secret_token')
if token:
info_json_url += "&secret_token=" + token
elif mobj.group('player'):
query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
real_url = query['url'][0]
# If the token is in the query of the original url we have to
# manually add it
if 'secret_token' in query:
real_url += '?secret_token=' + query['secret_token'][0]
return self.url_result(real_url)
else:
# extract uploader (which is in the url)
uploader = mobj.group('uploader')
# extract simple title (uploader + slug of song title)
slug_title = mobj.group('title')
token = mobj.group('token')
full_title = resolve_title = '%s/%s' % (uploader, slug_title)
if token:
resolve_title += '/%s' % token
self.report_resolve(full_title)
url = 'http://soundcloud.com/%s' % resolve_title
info_json_url = self._resolv_url(url)
info = self._download_json(info_json_url, full_title, 'Downloading info JSON')
return self._extract_info_dict(info, full_title, secret_token=token)
class SoundcloudSetIE(SoundcloudIE):
_VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/(?P<uploader>[\w\d-]+)/sets/(?P<slug_title>[\w\d-]+)(?:/(?P<token>[^?/]+))?'
IE_NAME = 'soundcloud:set'
_TESTS = [{
'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep',
'info_dict': {
'id': '2284613',
'title': 'The Royal Concept EP',
},
'playlist_mincount': 6,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
# extract uploader (which is in the url)
uploader = mobj.group('uploader')
# extract simple title (uploader + slug of song title)
slug_title = mobj.group('slug_title')
full_title = '%s/sets/%s' % (uploader, slug_title)
url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
token = mobj.group('token')
if token:
full_title += '/' + token
url += '/' + token
self.report_resolve(full_title)
resolv_url = self._resolv_url(url)
info = self._download_json(resolv_url, full_title)
if 'errors' in info:
msgs = (compat_str(err['error_message']) for err in info['errors'])
raise ExtractorError('unable to download video webpage: %s' % ','.join(msgs))
entries = [self.url_result(track['permalink_url'], 'Soundcloud') for track in info['tracks']]
return {
'_type': 'playlist',
'entries': entries,
'id': '%s' % info['id'],
'title': info['title'],
}
class SoundcloudUserIE(SoundcloudIE):
_VALID_URL = r'''(?x)
https?://
(?:(?:www|m)\.)?soundcloud\.com/
(?P<user>[^/]+)
(?:/
(?P<rsrc>tracks|sets|reposts|likes|spotlight)
)?
/?(?:[?#].*)?$
'''
IE_NAME = 'soundcloud:user'
_TESTS = [{
'url': 'https://soundcloud.com/the-akashic-chronicler',
'info_dict': {
'id': '114582580',
'title': 'The Akashic Chronicler (All)',
},
'playlist_mincount': 111,
}, {
'url': 'https://soundcloud.com/the-akashic-chronicler/tracks',
'info_dict': {
'id': '114582580',
'title': 'The Akashic Chronicler (Tracks)',
},
'playlist_mincount': 50,
}, {
'url': 'https://soundcloud.com/the-akashic-chronicler/sets',
'info_dict': {
'id': '114582580',
'title': 'The Akashic Chronicler (Playlists)',
},
'playlist_mincount': 3,
}, {
'url': 'https://soundcloud.com/the-akashic-chronicler/reposts',
'info_dict': {
'id': '114582580',
'title': 'The Akashic Chronicler (Reposts)',
},
'playlist_mincount': 7,
}, {
'url': 'https://soundcloud.com/the-akashic-chronicler/likes',
'info_dict': {
'id': '114582580',
'title': 'The Akashic Chronicler (Likes)',
},
'playlist_mincount': 321,
}, {
'url': 'https://soundcloud.com/grynpyret/spotlight',
'info_dict': {
'id': '7098329',
'title': 'Grynpyret (Spotlight)',
},
'playlist_mincount': 1,
}]
_API_BASE = 'https://api.soundcloud.com'
_API_V2_BASE = 'https://api-v2.soundcloud.com'
_BASE_URL_MAP = {
'all': '%s/profile/soundcloud:users:%%s' % _API_V2_BASE,
'tracks': '%s/users/%%s/tracks' % _API_BASE,
'sets': '%s/users/%%s/playlists' % _API_V2_BASE,
'reposts': '%s/profile/soundcloud:users:%%s/reposts' % _API_V2_BASE,
'likes': '%s/users/%%s/likes' % _API_V2_BASE,
'spotlight': '%s/users/%%s/spotlight' % _API_V2_BASE,
}
_TITLE_MAP = {
'all': 'All',
'tracks': 'Tracks',
'sets': 'Playlists',
'reposts': 'Reposts',
'likes': 'Likes',
'spotlight': 'Spotlight',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
uploader = mobj.group('user')
url = 'http://soundcloud.com/%s/' % uploader
resolv_url = self._resolv_url(url)
user = self._download_json(
resolv_url, uploader, 'Downloading user info')
resource = mobj.group('rsrc') or 'all'
base_url = self._BASE_URL_MAP[resource] % user['id']
next_href = None
entries = []
for i in itertools.count():
if not next_href:
data = compat_urllib_parse.urlencode({
'offset': i * 50,
'limit': 50,
'client_id': self._CLIENT_ID,
'linked_partitioning': '1',
'representation': 'speedy',
})
next_href = base_url + '?' + data
response = self._download_json(
next_href, uploader, 'Downloading track page %s' % (i + 1))
collection = response['collection']
if not collection:
self.to_screen('%s: End page received' % uploader)
break
def resolve_permalink_url(candidates):
for cand in candidates:
if isinstance(cand, dict):
permalink_url = cand.get('permalink_url')
if permalink_url and permalink_url.startswith('http'):
return permalink_url
for e in collection:
permalink_url = resolve_permalink_url((e, e.get('track'), e.get('playlist')))
if permalink_url:
entries.append(self.url_result(permalink_url))
if 'next_href' in response:
next_href = response['next_href']
if not next_href:
break
else:
next_href = None
return {
'_type': 'playlist',
'id': compat_str(user['id']),
'title': '%s (%s)' % (user['username'], self._TITLE_MAP[resource]),
'entries': entries,
}
class SoundcloudPlaylistIE(SoundcloudIE):
_VALID_URL = r'https?://api\.soundcloud\.com/playlists/(?P<id>[0-9]+)(?:/?\?secret_token=(?P<token>[^&]+?))?$'
IE_NAME = 'soundcloud:playlist'
_TESTS = [{
'url': 'http://api.soundcloud.com/playlists/4110309',
'info_dict': {
'id': '4110309',
'title': 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]',
'description': 're:.*?TILT Brass - Bowery Poetry Club',
},
'playlist_count': 6,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
base_url = '%s//api.soundcloud.com/playlists/%s.json?' % (self.http_scheme(), playlist_id)
data_dict = {
'client_id': self._CLIENT_ID,
}
token = mobj.group('token')
if token:
data_dict['secret_token'] = token
data = compat_urllib_parse.urlencode(data_dict)
data = self._download_json(
base_url + data, playlist_id, 'Downloading playlist')
entries = [self.url_result(track['permalink_url'], 'Soundcloud') for track in data['tracks']]
return {
'_type': 'playlist',
'id': playlist_id,
'title': data.get('title'),
'description': data.get('description'),
'entries': entries,
}
| unlicense | 6,436,817,583,035,473,000 | 35.575372 | 172 | 0.488361 | false |
junmin-zhu/chromium-rivertrail | chrome/test/functional/stress.py | 3 | 29618 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Stress Tests for Google Chrome.
This script runs 4 different stress tests:
1. Plugin stress.
2. Back and forward stress.
3. Download stress.
4. Preference stress.
After every cycle (running all 4 stress tests) it checks for crashes.
If there are any crashes, the script generates a report, uploads it to
a server and mails about the crash and the link to the report on the server.
Apart from this, whenever the test stops on Mac, it looks for and reports
zombie processes.
Prerequisites:
Test needs the following files/folders in the Data dir.
1. A crash_report tool in "pyauto_private/stress/mac" folder for use on Mac.
2. A "downloads" folder containing stress_downloads and all the files
referenced in it.
3. A pref_dict file in "pyauto_private/stress/mac" folder.
4. A "plugin" folder containing doubleAnimation.xaml, flash.swf, FlashSpin.swf,
generic.html, get_flash_player.gif, js-invoker.swf, mediaplayer.wmv,
NavigatorTicker11.class, Plugins_page.html, sample5.mov, silverlight.xaml,
silverlight.js, embed.pdf, plugins_page.html and test6.swf.
5. A stress_pref file in "pyauto_private/stress".
"""
import commands
import glob
import logging
import os
import random
import re
import shutil
import sys
import time
import urllib
import test_utils
import subprocess
import pyauto_functional
import pyauto
import pyauto_utils
CRASHES = 'crashes' # Name of the folder to store crashes
class StressTest(pyauto.PyUITest):
"""Run all the stress tests."""
flash_url1 = pyauto.PyUITest.GetFileURLForPath(
os.path.join(pyauto.PyUITest.DataDir(), 'plugin', 'flash.swf'))
flash_url2 = pyauto.PyUITest.GetFileURLForPath(
os.path.join(pyauto.PyUITest.DataDir(),'plugin', 'js-invoker.swf'))
flash_url3 = pyauto.PyUITest.GetFileURLForPath(
os.path.join(pyauto.PyUITest.DataDir(), 'plugin', 'generic.html'))
plugin_url = pyauto.PyUITest.GetFileURLForPath(
os.path.join(pyauto.PyUITest.DataDir(), 'plugin', 'plugins_page.html'))
empty_url = pyauto.PyUITest.GetFileURLForPath(
os.path.join(pyauto.PyUITest.DataDir(), 'empty.html'))
download_url1 = pyauto.PyUITest.GetFileURLForPath(
os.path.join(pyauto.PyUITest.DataDir(), 'downloads', 'a_zip_file.zip'))
download_url2 = pyauto.PyUITest.GetFileURLForPath(
os.path.join(pyauto.PyUITest.DataDir(),'zip', 'test.zip'))
file_list = pyauto.PyUITest.EvalDataFrom(
os.path.join(pyauto.PyUITest.DataDir(), 'downloads', 'stress_downloads'))
symbols_dir = os.path.join(os.getcwd(), 'Build_Symbols')
stress_pref = pyauto.PyUITest.EvalDataFrom(
os.path.join(pyauto.PyUITest.DataDir(), 'pyauto_private', 'stress',
'stress_pref'))
breakpad_dir = None
chrome_version = None
bookmarks_list = []
def _FuncDir(self):
"""Returns the path to the functional dir chrome/test/functional."""
return os.path.dirname(__file__)
def _DownloadSymbols(self):
"""Downloads the symbols for the build being tested."""
download_location = os.path.join(os.getcwd(), 'Build_Symbols')
if os.path.exists(download_location):
shutil.rmtree(download_location)
os.makedirs(download_location)
url = self.stress_pref['symbols_dir'] + self.chrome_version
# TODO: Add linux symbol_files
if self.IsWin():
url = url + '/win/'
symbol_files = ['chrome_dll.pdb', 'chrome_exe.pdb']
elif self.IsMac():
url = url + '/mac/'
symbol_files = map(urllib.quote,
['Google Chrome Framework.framework',
'Google Chrome Helper.app',
'Google Chrome.app',
'crash_inspector',
'crash_report_sender',
'ffmpegsumo.so',
'libplugin_carbon_interpose.dylib'])
index = 0
symbol_files = ['%s-%s-i386.breakpad' % (sym_file, self.chrome_version) \
for sym_file in symbol_files]
logging.info(symbol_files)
for sym_file in symbol_files:
sym_url = url + sym_file
logging.info(sym_url)
download_sym_file = os.path.join(download_location, sym_file)
logging.info(download_sym_file)
urllib.urlretrieve(sym_url, download_sym_file)
def setUp(self):
pyauto.PyUITest.setUp(self)
self.breakpad_dir = self._CrashDumpFolder()
self.chrome_version = self.GetBrowserInfo()['properties']['ChromeVersion']
# Plugin stress functions
def _CheckForPluginProcess(self, plugin_name):
"""Checks if a particular plugin process exists.
Args:
plugin_name : plugin process which should be running.
"""
process = self.GetBrowserInfo()['child_processes']
self.assertTrue([x for x in process
if x['type'] == 'Plug-in' and
x['name'] == plugin_name])
def _GetPluginProcessId(self, plugin_name):
"""Get Plugin process id.
Args:
plugin_name: Plugin whose pid is expected.
Eg: "Shockwave Flash"
Returns:
Process id if the plugin process is running.
None otherwise.
"""
for process in self.GetBrowserInfo()['child_processes']:
if process['type'] == 'Plug-in' and \
re.search(plugin_name, process['name']):
return process['pid']
return None
def _CloseAllTabs(self):
"""Close all but one tab in first window."""
tab_count = self.GetTabCount(0)
for tab_index in xrange(tab_count - 1, 0, -1):
self.CloseTab(tab_index)
def _CloseAllWindows(self):
"""Close all windows except one."""
win_count = self.GetBrowserWindowCount()
for windex in xrange(win_count - 1, 0, -1):
self.RunCommand(pyauto.IDC_CLOSE_WINDOW, windex)
def _ReloadAllTabs(self):
"""Reload all the tabs in first window."""
for tab_index in range(self.GetTabCount()):
self.ReloadTab(tab_index)
def _LoadFlashInMultipleTabs(self):
"""Load Flash in multiple tabs in first window."""
self.NavigateToURL(self.empty_url)
# Open 18 tabs with flash
for _ in range(9):
self.AppendTab(pyauto.GURL(self.flash_url1))
self.AppendTab(pyauto.GURL(self.flash_url2))
def _OpenAndCloseMultipleTabsWithFlash(self):
"""Stress test for flash in multiple tabs."""
    logging.info("In _OpenAndCloseMultipleTabsWithFlash.")
self._LoadFlashInMultipleTabs()
self._CheckForPluginProcess('Shockwave Flash')
self._CloseAllTabs()
def _OpenAndCloseMultipleWindowsWithFlash(self):
"""Stress test for flash in multiple windows."""
logging.info('In _OpenAndCloseMultipleWindowsWithFlash.')
# Open 5 Normal and 4 Incognito windows
for tab_index in range(1, 10):
if tab_index < 6:
self.OpenNewBrowserWindow(True)
else:
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(self.flash_url2, tab_index, 0)
self.AppendTab(pyauto.GURL(self.flash_url2), tab_index)
self._CloseAllWindows()
def _OpenAndCloseMultipleTabsWithMultiplePlugins(self):
"""Stress test using multiple plugins in multiple tabs."""
logging.info('In _OpenAndCloseMultipleTabsWithMultiplePlugins.')
    # Append 5 tabs with URL
for _ in range(5):
self.AppendTab(pyauto.GURL(self.plugin_url))
self._CloseAllTabs()
def _OpenAndCloseMultipleWindowsWithMultiplePlugins(self):
"""Stress test using multiple plugins in multiple windows."""
logging.info('In _OpenAndCloseMultipleWindowsWithMultiplePlugins.')
# Open 4 windows with URL
for tab_index in range(1, 5):
if tab_index < 6:
self.OpenNewBrowserWindow(True)
else:
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(self.plugin_url, tab_index, 0)
self._CloseAllWindows()
def _KillAndReloadFlash(self):
"""Stress test by killing flash process and reloading tabs."""
self._LoadFlashInMultipleTabs()
flash_process_id1 = self._GetPluginProcessId('Shockwave Flash')
self.Kill(flash_process_id1)
self._ReloadAllTabs()
self._CloseAllTabs()
def _KillAndReloadRenderersWithFlash(self):
"""Stress test by killing renderer processes and reloading tabs."""
logging.info('In _KillAndReloadRenderersWithFlash')
self._LoadFlashInMultipleTabs()
info = self.GetBrowserInfo()
# Kill all renderer processes
for tab_index in range(self.GetTabCount(0)):
self.KillRendererProcess(
info['windows'][0]['tabs'][tab_index]['renderer_pid'])
self._ReloadAllTabs()
self._CloseAllTabs()
def _TogglePlugin(self, plugin_name):
"""Toggle plugin status.
Args:
plugin_name: Name of the plugin to toggle.
"""
plugins = self.GetPluginsInfo().Plugins()
for item in range(len(plugins)):
if re.search(plugin_name, plugins[item]['name']):
if plugins[item]['enabled']:
self.DisablePlugin(plugins[item]['path'])
else:
self.EnablePlugin(plugins[item]['path'])
def _ToggleAndReloadFlashPlugin(self):
"""Toggle flash and reload all tabs."""
logging.info('In _ToggleAndReloadFlashPlugin')
for _ in range(10):
self.AppendTab(pyauto.GURL(self.flash_url3))
# Disable Flash Plugin
self._TogglePlugin('Shockwave Flash')
self._ReloadAllTabs()
# Enable Flash Plugin
self._TogglePlugin('Shockwave Flash')
self._ReloadAllTabs()
self._CloseAllTabs()
# Downloads stress functions
def _LoadDownloadsInMultipleTabs(self):
"""Load Downloads in multiple tabs in the same window."""
# Open 15 tabs with downloads
logging.info('In _LoadDownloadsInMultipleTabs')
for tab_index in range(15):
      # We open an empty tab and then download a file from it.
self.AppendTab(pyauto.GURL(self.empty_url))
self.NavigateToURL(self.download_url1, 0, tab_index + 1)
self.AppendTab(pyauto.GURL(self.empty_url))
self.NavigateToURL(self.download_url2, 0, tab_index + 2)
def _OpenAndCloseMultipleTabsWithDownloads(self):
"""Download items in multiple tabs."""
logging.info('In _OpenAndCloseMultipleTabsWithDownloads')
self._LoadDownloadsInMultipleTabs()
self._CloseAllTabs()
def _OpenAndCloseMultipleWindowsWithDownloads(self):
"""Randomly have downloads in multiple windows."""
logging.info('In _OpenAndCloseMultipleWindowsWithDownloads')
# Open 15 Windows randomly on both regular and incognito with downloads
for window_index in range(15):
tick = round(random.random() * 100)
if tick % 2 != 0:
self.NavigateToURL(self.download_url2, 0, 0)
else:
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.AppendTab(pyauto.GURL(self.empty_url), 1)
self.NavigateToURL(self.download_url2, 1, 1)
self._CloseAllWindows()
def _OpenAndCloseMultipleTabsWithMultipleDownloads(self):
"""Download multiple items in multiple tabs."""
logging.info('In _OpenAndCloseMultipleTabsWithMultipleDownloads')
self.NavigateToURL(self.empty_url)
for _ in range(15):
for file in self.file_list:
count = 1
url = self.GetFileURLForPath(
os.path.join(self.DataDir(), 'downloads', file))
self.AppendTab(pyauto.GURL(self.empty_url))
self.NavigateToURL(url, 0, count)
count = count + 1
self._CloseAllTabs()
def _OpenAndCloseMultipleWindowsWithMultipleDownloads(self):
"""Randomly multiple downloads in multiple windows."""
logging.info('In _OpenAndCloseMultipleWindowsWithMultipleDownloads')
for _ in range(15):
for file in self.file_list:
tick = round(random.random() * 100)
url = self.GetFileURLForPath(
os.path.join(self.DataDir(), 'downloads', file))
        if tick % 2 != 0:
self.NavigateToURL(url, 0, 0)
else:
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.AppendTab(pyauto.GURL(self.empty_url), 1)
self.NavigateToURL(url, 1, 1)
self._CloseAllWindows()
# Back and Forward stress functions
def _BrowserGoBack(self, window_index):
"""Go back in the browser history.
    Chrome has a limitation on history and can only go back 49 pages.
Args:
window_index: the index of the browser window to work on.
"""
for nback in range(48): # Go back 48 times.
if nback % 4 == 0: # Bookmark every 5th url when going back.
self._BookMarkEvery5thURL(window_index)
self.TabGoBack(tab_index=0, windex=window_index)
def _BrowserGoForward(self, window_index):
"""Go Forward in the browser history.
    Chrome has a limitation on history and can only go forward 49 pages.
Args:
window_index: the index of the browser window to work on.
"""
    for nforward in range(48): # Go forward 48 times.
if nforward % 4 == 0: # Bookmark every 5th url when going Forward
self._BookMarkEvery5thURL(window_index)
self.TabGoForward(tab_index=0, windex=window_index)
def _AddToListAndBookmark(self, newname, url):
"""Bookmark the url to bookmarkbar and to he list of bookmarks.
Args:
newname: the name of the bookmark.
url: the url to bookmark.
"""
bookmarks = self.GetBookmarkModel()
bar_id = bookmarks.BookmarkBar()['id']
self.AddBookmarkURL(bar_id, 0, newname, url)
self.bookmarks_list.append(newname)
def _RemoveFromListAndBookmarkBar(self, name):
"""Remove the bookmark bor and bookmarks list.
Args:
name: the name of bookmark to remove.
"""
bookmarks = self.GetBookmarkModel()
node = bookmarks.FindByTitle(name)
self.RemoveBookmark(node[0]['id'])
self.bookmarks_list.remove(name)
def _DuplicateBookmarks(self, name):
"""Find duplicate bookmark in the bookmarks list.
Args:
name: name of the bookmark.
Returns:
True if it's a duplicate.
"""
for index in (self.bookmarks_list):
if index == name:
return True
return False
def _BookMarkEvery5thURL(self, window_index):
"""Check for duplicate in list and bookmark current url.
    If it's the first time and the list is empty, add the bookmark.
    If it's a duplicate, remove the bookmark.
    If it's the new tab page, move over.
Args:
window_index: the index of the browser window to work on.
"""
tab_title = self.GetActiveTabTitle(window_index) # get the page title
url = self.GetActiveTabURL(window_index).spec() # get the page url
if not self.bookmarks_list:
self._AddToListAndBookmark(tab_title, url) # first run bookmark the url
return
elif self._DuplicateBookmarks(tab_title):
self._RemoveFromListAndBookmarkBar(tab_title)
return
elif tab_title == 'New Tab': # new tab page pass over
return
else:
# new bookmark add it to bookmarkbar
self._AddToListAndBookmark(tab_title, url)
return
def _ReadFileAndLoadInNormalAndIncognito(self):
"""Read urls and load them in normal and incognito window.
We load 96 urls only as we can go back and forth 48 times.
    Uses time to get different urls in the normal and incognito windows.
    The source file is taken from the stress folder in the /data folder.
"""
# URL source from stress folder in data folder
data_file = os.path.join(self.DataDir(), 'pyauto_private', 'stress',
'urls_and_titles')
url_data = self.EvalDataFrom(data_file)
urls = url_data.keys()
i = 0
ticks = int(time.time()) # get the latest time.
for url in urls:
      if i <= 96: # load only 96 urls.
if ticks % 2 == 0: # loading in Incognito and Normal window.
self.NavigateToURL(url)
else:
self.NavigateToURL(url, 1, 0)
else:
break
ticks = ticks - 1
i += 1
return
def _StressTestNavigation(self):
""" This is the method from where various navigations are called.
First we load the urls then call navigete back and forth in
incognito window then in normal window.
"""
self._ReadFileAndLoadInNormalAndIncognito() # Load the urls.
self._BrowserGoBack(1) # Navigate back in incognito window.
self._BrowserGoForward(1) # Navigate forward in incognito window
self._BrowserGoBack(0) # Navigate back in normal window
self._BrowserGoForward(0) # Navigate forward in normal window
# Preference stress functions
def _RandomBool(self):
"""For any preferences bool value, it takes True or False value.
We are generating random True or False value.
"""
return random.randint(0, 1) == 1
def _RandomURL(self):
"""Some of preferences take string url, so generating random url here."""
# Site list
site_list = ['test1.html', 'test2.html','test3.html','test4.html',
'test5.html', 'test7.html', 'test6.html']
random_site = random.choice(site_list)
# Returning a url of random site
return self.GetFileURLForPath(os.path.join(self.DataDir(), random_site))
def _RandomURLArray(self):
"""Returns a list of 10 random URLs."""
return [self._RandomURL() for _ in range(10)]
def _RandomInt(self, max_number):
"""Some of the preferences takes integer value.
Eg: If there are three options, we generate random
value for any option.
Arg:
max_number: The number of options that a preference has.
"""
return random.randrange(1, max_number)
def _RandomDownloadDir(self):
"""Returns a random download directory."""
return random.choice(['dl_dir1', 'dl_dir2', 'dl_dir3',
'dl_dir4', 'dl_dir5'])
def _SetPref(self):
"""Reads the preferences from file and
sets the preferences to Chrome.
"""
raw_dictionary = self.EvalDataFrom(os.path.join(self.DataDir(),
'pyauto_private', 'stress', 'pref_dict'))
value_dictionary = {}
for key, value in raw_dictionary.iteritems():
if value == 'BOOL':
value_dictionary[key] = self._RandomBool()
elif value == 'STRING_URL':
value_dictionary[key] = self._RandomURL()
elif value == 'ARRAY_URL':
value_dictionary[key] = self._RandomURLArray()
elif value == 'STRING_PATH':
value_dictionary[key] = self._RandomDownloadDir()
elif value[0:3] == 'INT':
        # Normally we define the INT datatype with the number of options,
        # so parse the number of options and select one of them
        # randomly.
value_dictionary[key] = 1
max_number = raw_dictionary[key][3:4]
if not max_number == 1:
value_dictionary[key]= self._RandomInt(int(max_number))
self.SetPrefs(getattr(pyauto,key), value_dictionary[key])
return value_dictionary
# Crash reporting functions
def _CrashDumpFolder(self):
"""Get the breakpad folder.
Returns:
The full path of the Crash Reports folder.
"""
breakpad_folder = self.GetBrowserInfo()['properties']['DIR_CRASH_DUMPS']
self.assertTrue(breakpad_folder, 'Cannot figure crash dir')
return breakpad_folder
def _DeleteDumps(self):
"""Delete all the dump files in teh Crash Reports folder."""
# should be called at the start of stress run
if os.path.exists(self.breakpad_dir):
logging.info('xxxxxxxxxxxxxxxINSIDE DELETE DUMPSxxxxxxxxxxxxxxxxx')
if self.IsMac():
shutil.rmtree(self.breakpad_dir)
elif self.IsWin():
files = os.listdir(self.breakpad_dir)
for file in files:
os.remove(file)
first_crash = os.path.join(os.getcwd(), '1stcrash')
crashes_dir = os.path.join(os.getcwd(), 'crashes')
if (os.path.exists(crashes_dir)):
shutil.rmtree(crashes_dir)
shutil.rmtree(first_crash)
def _SymbolicateCrashDmp(self, dmp_file, symbols_dir, output_file):
"""Generate symbolicated crash report.
Args:
dmp_file: the dmp file to symbolicate.
symbols_dir: the directory containing the symbols.
output_file: the output file.
Returns:
Crash report text.
"""
report = ''
if self.IsWin():
windbg_cmd = [
os.path.join('C:', 'Program Files', 'Debugging Tools for Windows',
'windbg.exe'),
'-Q',
'-y',
'\"',
symbols_dir,
'\"',
'-c',
'\".ecxr;k50;.logclose;q\"',
'-logo',
output_file,
'-z',
'\"',
dmp_file,
'\"']
subprocess.call(windbg_cmd)
# Since we are directly writing the info into output_file,
# we just need to copy that in to report
report = open(output_file, 'r').read()
elif self.IsMac():
crash_report = os.path.join(self.DataDir(), 'pyauto_private', 'stress',
'mac', 'crash_report')
for i in range(5): # crash_report doesn't work sometimes. So we retry
report = test_utils.Shell2(
'%s -S "%s" "%s"' % (crash_report, symbols_dir, dmp_file))[0]
if len(report) < 200:
          try_again = 'Try %d. crash_report didn\'t work out. Trying again' % i
logging.info(try_again)
else:
break
open(output_file, 'w').write(report)
return report
def _SaveSymbols(self, symbols_dir, dump_dir=' ', multiple_dumps=True):
"""Save the symbolicated files for all crash dumps.
Args:
symbols_dir: the directory containing the symbols.
dump_dir: Path to the directory holding the crash dump files.
multiple_dumps: True if we are processing multiple dump files,
False if we are processing only the first crash.
"""
if multiple_dumps:
dump_dir = self.breakpad_dir
if not os.path.isdir(CRASHES):
os.makedirs(CRASHES)
# This will be sent to the method by the caller.
dmp_files = glob.glob(os.path.join(dump_dir, '*.dmp'))
for dmp_file in dmp_files:
dmp_id = os.path.splitext(os.path.basename(dmp_file))[0]
if multiple_dumps:
report_folder = CRASHES
else:
report_folder = dump_dir
report_fname = os.path.join(report_folder,
'%s.txt' % (dmp_id))
report = self._SymbolicateCrashDmp(dmp_file, symbols_dir,
report_fname)
if report == '':
logging.info('Crash report is empty.')
# This is for copying the original dumps.
if multiple_dumps:
shutil.copy2(dmp_file, CRASHES)
def _GetFirstCrashDir(self):
"""Get first crash file in the crash folder.
Here we create the 1stcrash directory which holds the
first crash report, which will be attached to the mail.
"""
breakpad_folder = self.breakpad_dir
dump_list = glob.glob1(breakpad_folder,'*.dmp')
dump_list.sort(key=lambda s: os.path.getmtime(os.path.join(
breakpad_folder, s)))
first_crash_file = os.path.join(breakpad_folder, dump_list[0])
if not os.path.isdir('1stcrash'):
os.makedirs('1stcrash')
shutil.copy2(first_crash_file, '1stcrash')
first_crash_dir = os.path.join(os.getcwd(), '1stcrash')
return first_crash_dir
def _GetFirstCrashFile(self):
"""Get first crash file in the crash folder."""
first_crash_dir = os.path.join(os.getcwd(), '1stcrash')
for each in os.listdir(first_crash_dir):
if each.endswith('.txt'):
first_crash_file = each
return os.path.join(first_crash_dir, first_crash_file)
def _ProcessOnlyFirstCrash(self):
""" Process only the first crash report for email."""
first_dir = self._GetFirstCrashDir()
self._SaveSymbols(self.symbols_dir, first_dir, False)
def _GetOSName(self):
"""Returns the OS type we are running this script on."""
os_name = ''
if self.IsMac():
os_number = commands.getoutput('sw_vers -productVersion | cut -c 1-4')
if os_number == '10.6':
os_name = 'Snow_Leopard'
elif os_number == '10.5':
os_name = 'Leopard'
elif self.IsWin():
# TODO: Windows team need to find the way to get OS name
os_name = 'Windows'
if platform.version()[0] == '5':
os_name = os_name + '_XP'
else:
os_name = os_name + '_Vista/Win7'
return os_name
def _ProcessUploadAndEmailCrashes(self):
"""Upload the crashes found and email the team about this."""
logging.info('#########INSIDE _ProcessUploadAndEmailCrashes#########')
try:
build_version = self.chrome_version
self._SaveSymbols(self.symbols_dir)
self._ProcessOnlyFirstCrash()
file_to_attach = self._GetFirstCrashFile()
# removing the crash_txt for now,
# since we are getting UnicodeDecodeError
# crash_txt = open(file_to_attach).read()
except ValueError:
test_utils.SendMail(self.stress_pref['mailing_address'],
self.stress_pref['mailing_address'],
"We don't have build version",
"BROWSER CRASHED, PLEASE CHECK",
self.stress_pref['smtp'])
# Move crash reports and dumps to server
os_name = self._GetOSName()
dest_dir = build_version + '_' + os_name
if (test_utils.Shell2(self.stress_pref['script'] % (CRASHES, dest_dir))):
logging.info('Copy Complete')
upload_dir= self.stress_pref['upload_dir'] + dest_dir
num_crashes = '\n \n Number of Crashes :' + \
str(len(glob.glob1(self.breakpad_dir, '*.dmp')))
mail_content = '\n\n Crash Report URL :' + upload_dir + '\n' + \
num_crashes + '\n\n' # + crash_txt
mail_subject = 'Stress Results :' + os_name + '_' + build_version
# Sending mail with first crash report, # of crashes, location of upload
test_utils.SendMail(self.stress_pref['mailing_address'],
self.stress_pref['mailing_address'],
mail_subject, mail_content,
self.stress_pref['smtp'], file_to_attach)
def _ReportCrashIfAny(self):
"""Check for browser crashes and report."""
if os.path.isdir(self.breakpad_dir):
listOfDumps = glob.glob(os.path.join(self.breakpad_dir, '*.dmp'))
if len(listOfDumps) > 0:
logging.info('========== INSIDE REPORT CRASH++++++++++++++')
# inform a method to process the dumps
self._ProcessUploadAndEmailCrashes()
# Test functions
def _PrefStress(self):
"""Stress preferences."""
default_prefs = self.GetPrefsInfo()
pref_dictionary = self._SetPref()
for key, value in pref_dictionary.iteritems():
self.assertEqual(value, self.GetPrefsInfo().Prefs(
getattr(pyauto, key)))
for key, value in pref_dictionary.iteritems():
self.SetPrefs(getattr(pyauto, key),
default_prefs.Prefs(getattr(pyauto, key)))
def _NavigationStress(self):
"""Run back and forward stress in normal and incognito window."""
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self._StressTestNavigation()
def _DownloadStress(self):
"""Run all the Download stress test."""
org_download_dir = self.GetDownloadDirectory().value()
new_dl_dir = os.path.join(org_download_dir, 'My+Downloads Folder')
os.path.exists(new_dl_dir) and shutil.rmtree(new_dl_dir)
os.makedirs(new_dl_dir)
self.SetPrefs(pyauto.kDownloadDefaultDirectory, new_dl_dir)
self._OpenAndCloseMultipleTabsWithDownloads()
self._OpenAndCloseMultipleWindowsWithDownloads()
self._OpenAndCloseMultipleTabsWithMultipleDownloads()
self._OpenAndCloseMultipleWindowsWithMultipleDownloads()
pyauto_utils.RemovePath(new_dl_dir) # cleanup
self.SetPrefs(pyauto.kDownloadDefaultDirectory, org_download_dir)
def _PluginStress(self):
"""Run all the plugin stress tests."""
self._OpenAndCloseMultipleTabsWithFlash()
self._OpenAndCloseMultipleWindowsWithFlash()
self._OpenAndCloseMultipleTabsWithMultiplePlugins()
self._OpenAndCloseMultipleWindowsWithMultiplePlugins()
self._KillAndReloadRenderersWithFlash()
self._ToggleAndReloadFlashPlugin()
def testStress(self):
"""Run all the stress tests for 24 hrs."""
if self.GetBrowserInfo()['properties']['branding'] != 'Google Chrome':
logging.info('This is not a branded build, so stopping the stress')
return 1
self._DownloadSymbols()
run_number = 1
start_time = time.time()
while True:
logging.info('run %d...' % run_number)
run_number = run_number + 1
if (time.time() - start_time) >= 24*60*60:
        logging.info("It's been 24 hrs, so we break now.")
break
try:
methods = [self._NavigationStress, self._DownloadStress,
self._PluginStress, self._PrefStress]
random.shuffle(methods)
for method in methods:
method()
logging.info('Method %s done' % method)
except KeyboardInterrupt:
logging.info('----------We got a KeyboardInterrupt-----------')
except Exception, error:
logging.info('-------------There was an ERROR---------------')
logging.info(error)
# Crash Reporting
self._ReportCrashIfAny()
self._DeleteDumps()
if self.IsMac():
zombie = 'ps -el | grep Chrom | grep -v grep | grep Z | wc -l'
zombie_count = int(commands.getoutput(zombie))
if zombie_count > 0:
logging.info('WE HAVE ZOMBIES = %d' % zombie_count)
if __name__ == '__main__':
pyauto_functional.Main()
| bsd-3-clause | 2,348,265,972,694,893,000 | 35.746898 | 79 | 0.647005 | false |
ghedsouza/django | django/utils/http.py | 9 | 14635 | import base64
import calendar
import datetime
import re
import unicodedata
import warnings
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
quote_plus, scheme_chars, unquote, unquote_plus,
urlencode as original_urlencode, uses_params,
)
from django.core.exceptions import TooManyFieldsSent
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_bytes
from django.utils.functional import keep_lazy_text
# based on RFC 7232, Appendix C
ETAG_MATCH = re.compile(r'''
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
''', re.X)
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
FIELDS_MATCH = re.compile('[&;]')
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote() function.
(was used for unicode handling on Python 2)
"""
return quote(url, safe)
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote_plus()
function. (was used for unicode handling on Python 2)
"""
return quote_plus(url, safe)
@keep_lazy_text
def urlunquote(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote() function.
(was used for unicode handling on Python 2)
"""
return unquote(quoted_url)
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
function. (was used for unicode handling on Python 2)
"""
return unquote_plus(quoted_url)
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return original_urlencode(
[(k, [str(i) for i in v] if isinstance(v, (list, tuple)) else str(v))
for k, v in query],
doseq
)
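# Illustrative example (not part of the module): with the helper above,
# urlencode({'a': 1, 'b': [2, 3]}, doseq=True) gives 'a=1&b=2&b=3' (assuming
# dict insertion order), and a MultiValueDict whose key 'x' holds ['1', '2']
# encodes to 'x=1&x=2'.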
def cookie_date(epoch_seconds=None):
"""
Format the time to ensure compatibility with Netscape's cookie standard.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
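# Illustrative example (not part of the module): http_date(0) gives
# 'Thu, 01 Jan 1970 00:00:00 GMT', while cookie_date(0) gives the
# dash-separated 'Thu, 01-Jan-1970 00:00:00 GMT'.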
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
    # email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
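# Illustrative round-trips for parse_http_date() above (a hypothetical sketch,
# not part of the original module); all three accepted formats map to the
# same epoch value:
#
#     >>> parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')    # RFC 1123
#     784111777
#     >>> parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT')   # RFC 850
#     784111777
#     >>> parse_http_date('Sun Nov  6 08:49:37 1994')         # asctime
#     784111777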
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
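# Illustrative values for the base 36 helpers above (a hypothetical sketch,
# not part of the original module):
#
#     >>> int_to_base36(35), int_to_base36(36)
#     ('z', '10')
#     >>> base36_to_int('10')
#     36
#     >>> base36_to_int('z' * 14)      # longer than 13 digits
#     Traceback (most recent call last):
#         ...
#     ValueError: Base36 input too large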
def urlsafe_base64_encode(s):
"""
Encode a bytestring in base64 for use in URLs. Strip any trailing equal
signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = force_bytes(s)
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
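# Illustrative round-trip for the base64 helpers above (a hypothetical sketch,
# not part of the original module); the padding stripped by the encoder is
# restored by the decoder:
#
#     >>> urlsafe_base64_encode(b'abcd')
#     b'YWJjZA'
#     >>> urlsafe_base64_decode('YWJjZA')
#     b'abcd'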
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == '*':
return ['*']
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
return [match.group(1) for match in etag_matches if match]
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
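# Illustrative behaviour of parse_etags() and quote_etag() above (a
# hypothetical sketch, not part of the original module); malformed entries
# are silently dropped and weak ETags keep their W/ prefix:
#
#     >>> parse_etags('W/"abc", "def", invalid')
#     ['W/"abc"', '"def"']
#     >>> parse_etags('*')
#     ['*']
#     >>> quote_etag('abc'), quote_etag('"abc"'), quote_etag('W/"abc"')
#     ('"abc"', '"abc"', 'W/"abc"')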
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
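# Illustrative behaviour of is_same_domain() above (a hypothetical sketch,
# not part of the original module); the host is expected to be lower-cased
# already:
#
#     >>> is_same_domain('foo.example.com', '.example.com')
#     True
#     >>> is_same_domain('example.com', '.example.com')
#     True
#     >>> is_same_domain('badexample.com', '.example.com')
#     False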
def is_safe_url(url, host=None, allowed_hosts=None, require_https=False):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
if host:
warnings.warn(
"The host argument is deprecated, use allowed_hosts instead.",
RemovedInDjango21Warning,
stacklevel=2,
)
# Avoid mutating the passed in allowed_hosts.
allowed_hosts = allowed_hosts | {host}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return (_is_safe_url(url, allowed_hosts, require_https=require_https) and
_is_safe_url(url.replace('\\', '/'), allowed_hosts, require_https=require_https))
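# Illustrative behaviour of is_safe_url() above (a hypothetical sketch, not
# part of the original module; example.com and evil.com are placeholder
# hosts):
#
#     >>> is_safe_url('/accounts/profile/', allowed_hosts=set())
#     True
#     >>> is_safe_url('https://example.com/p', allowed_hosts={'example.com'})
#     True
#     >>> is_safe_url('http://evil.com/p', allowed_hosts={'example.com'})
#     False
#     >>> is_safe_url('http://example.com/p', allowed_hosts={'example.com'},
#     ...             require_https=True)
#     False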
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
allow_fragments = bool(allow_fragments)
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _is_safe_url(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = 'http'
valid_schemes = ['https'] if require_https else ['http', 'https']
return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
(not scheme or scheme in valid_schemes))
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
errors='replace', fields_limit=None):
"""
Return a list of key/value tuples parsed from query string.
Copied from urlparse with an additional "fields_limit" argument.
Copyright (C) 2013 Python Software Foundation (see LICENSE.python).
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
fields_limit: maximum number of fields parsed or an exception
is raised. None means no limit and is the default.
"""
if fields_limit:
pairs = FIELDS_MATCH.split(qs, fields_limit)
if len(pairs) > fields_limit:
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
else:
pairs = FIELDS_MATCH.split(qs)
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
r.append((name, value))
return r
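# Illustrative behaviour of limited_parse_qsl() above (a hypothetical sketch,
# not part of the original module); both '&' and ';' separate fields, and the
# field limit is checked before any decoding happens:
#
#     >>> limited_parse_qsl('a=1&b=2;c=%20x')
#     [('a', '1'), ('b', '2'), ('c', ' x')]
#     >>> limited_parse_qsl('a=&b=2')
#     [('b', '2')]
#     >>> limited_parse_qsl('a=&b=2', keep_blank_values=True)
#     [('a', ''), ('b', '2')]
#     >>> limited_parse_qsl('a=1&b=2&c=3', fields_limit=2)
#     Traceback (most recent call last):
#         ...
#     TooManyFieldsSent: The number of GET/POST parameters exceeded ...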
| bsd-3-clause | -4,001,109,512,241,159,000 | 33.35446 | 93 | 0.629177 | false |
indevgr/django | django/contrib/gis/db/backends/postgis/pgraster.py | 491 | 5071 | import binascii
import struct
from django.forms import ValidationError
from .const import (
GDAL_TO_POSTGIS, GDAL_TO_STRUCT, POSTGIS_HEADER_STRUCTURE, POSTGIS_TO_GDAL,
STRUCT_SIZE,
)
def pack(structure, data):
"""
Pack data into hex string with little endian format.
"""
return binascii.hexlify(struct.pack('<' + structure, *data)).upper()
def unpack(structure, data):
"""
Unpack little endian hexlified binary string into a list.
"""
return struct.unpack('<' + structure, binascii.unhexlify(data))
def chunk(data, index):
"""
Split a string into two parts at the input index.
"""
return data[:index], data[index:]
def get_pgraster_srid(data):
"""
Extract the SRID from a PostGIS raster string.
"""
if data is None:
return
# The positional arguments here extract the hex-encoded srid from the
# header of the PostGIS raster string. This can be understood through
# the POSTGIS_HEADER_STRUCTURE constant definition in the const module.
return unpack('i', data[106:114])[0]
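# Illustrative example for get_pgraster_srid() above (a hypothetical sketch,
# not part of the original module). The eight hex characters at offset
# 106:114 encode the SRID as a little-endian int32, so a header carrying
# 'E6100000' at that position reports SRID 4326:
#
#     >>> unpack('i', 'E6100000')[0]
#     4326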
def from_pgraster(data):
"""
Convert a PostGIS HEX String into a dictionary.
"""
if data is None:
return
# Split raster header from data
header, data = chunk(data, 122)
header = unpack(POSTGIS_HEADER_STRUCTURE, header)
# Parse band data
bands = []
pixeltypes = []
while data:
# Get pixel type for this band
pixeltype, data = chunk(data, 2)
pixeltype = unpack('B', pixeltype)[0]
        # The nodata flag is encoded as +64 in the pixeltype byte; strip it if set
has_nodata = pixeltype >= 64
if has_nodata:
pixeltype -= 64
# Convert datatype from PostGIS to GDAL & get pack type and size
pixeltype = POSTGIS_TO_GDAL[pixeltype]
pack_type = GDAL_TO_STRUCT[pixeltype]
pack_size = 2 * STRUCT_SIZE[pack_type]
        # Parse band nodata value. The nodata value is part of the
        # PGRaster string whether or not the nodata flag is set, so it
        # always has to be chunked off the data string.
nodata, data = chunk(data, pack_size)
nodata = unpack(pack_type, nodata)[0]
# Chunk and unpack band data (pack size times nr of pixels)
band, data = chunk(data, pack_size * header[10] * header[11])
band_result = {'data': binascii.unhexlify(band)}
# If the nodata flag is True, set the nodata value.
if has_nodata:
band_result['nodata_value'] = nodata
# Append band data to band list
bands.append(band_result)
# Store pixeltype of this band in pixeltypes array
pixeltypes.append(pixeltype)
# Check that all bands have the same pixeltype.
# This is required by GDAL. PostGIS rasters could have different pixeltypes
# for bands of the same raster.
if len(set(pixeltypes)) != 1:
raise ValidationError("Band pixeltypes are not all equal.")
return {
'srid': int(header[9]),
'width': header[10], 'height': header[11],
'datatype': pixeltypes[0],
'origin': (header[5], header[6]),
'scale': (header[3], header[4]),
'skew': (header[7], header[8]),
'bands': bands,
}
def to_pgraster(rast):
"""
Convert a GDALRaster into PostGIS Raster format.
"""
# Return if the raster is null
if rast is None or rast == '':
return
# Prepare the raster header data as a tuple. The first two numbers are
# the endianness and the PostGIS Raster Version, both are fixed by
# PostGIS at the moment.
rasterheader = (
1, 0, len(rast.bands), rast.scale.x, rast.scale.y,
rast.origin.x, rast.origin.y, rast.skew.x, rast.skew.y,
rast.srs.srid, rast.width, rast.height,
)
# Hexlify raster header
result = pack(POSTGIS_HEADER_STRUCTURE, rasterheader)
for band in rast.bands:
# The PostGIS raster band header has exactly two elements, a 8BUI byte
# and the nodata value.
#
# The 8BUI stores both the PostGIS pixel data type and a nodata flag.
# It is composed as the datatype integer plus 64 as a flag for existing
# nodata values:
# 8BUI_VALUE = PG_PIXEL_TYPE (0-11) + FLAG (0 or 64)
#
# For example, if the byte value is 71, then the datatype is
# 71-64 = 7 (32BSI) and the nodata value is True.
structure = 'B' + GDAL_TO_STRUCT[band.datatype()]
# Get band pixel type in PostGIS notation
pixeltype = GDAL_TO_POSTGIS[band.datatype()]
# Set the nodata flag
if band.nodata_value is not None:
pixeltype += 64
# Pack band header
bandheader = pack(structure, (pixeltype, band.nodata_value or 0))
# Hexlify band data
band_data_hex = binascii.hexlify(band.data(as_memoryview=True)).upper()
# Add packed header and band data to result
result += bandheader + band_data_hex
# Cast raster to string before passing it to the DB
return result.decode()
| bsd-3-clause | 3,628,501,479,974,839,000 | 30.496894 | 79 | 0.626504 | false |
alisw/alibuild | tests/test_init.py | 1 | 5571 | from __future__ import print_function
# Assuming you are using the mock library to ... mock things
try:
from unittest import mock
from unittest.mock import call, MagicMock # In Python 3, mock is built-in
from io import StringIO
except ImportError:
import mock
from mock import call, MagicMock # Python 2
from StringIO import StringIO
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import sys
git_mock = MagicMock(partialCloneFilter="--filter=blob:none")
sys.modules["alibuild_helpers.git"] = git_mock
from alibuild_helpers.init import doInit,parsePackagesDefinition
import unittest
from argparse import Namespace
import os.path as path
def can_do_git_clone(x):
return 0
def valid_recipe(x):
if "zlib" in x.url:
return (0, {"package": "zlib",
"source": "https://github.com/alisw/zlib",
"version": "v1.0"}, "")
elif "aliroot" in x.url:
return (0, {"package": "AliRoot",
"source": "https://github.com/alisw/AliRoot",
"version": "master"}, "")
def dummy_exists(x):
calls = { '/sw/MIRROR/aliroot': True }
if x in calls:
return calls[x]
return False
CLONE_EVERYTHING = [
call(u'git clone --origin upstream --filter=blob:none https://github.com/alisw/alidist -b master /alidist'),
call(u'git clone --origin upstream --filter=blob:none https://github.com/alisw/AliRoot -b v5-08-00 --reference /sw/MIRROR/aliroot ./AliRoot && cd ./AliRoot && git remote set-url --push upstream https://github.com/alisw/AliRoot')
]
class InitTestCase(unittest.TestCase):
def test_packageDefinition(self):
self.assertEqual(parsePackagesDefinition("AliRoot@v5-08-16,AliPhysics@v5-08-16-01"),
[{'ver': 'v5-08-16', 'name': 'AliRoot'},
{'ver': 'v5-08-16-01', 'name': 'AliPhysics'}])
self.assertEqual(parsePackagesDefinition("AliRoot,AliPhysics@v5-08-16-01"),
[{'ver': '', 'name': 'AliRoot'},
{'ver': 'v5-08-16-01', 'name': 'AliPhysics'}])
@mock.patch("alibuild_helpers.init.info")
@mock.patch("alibuild_helpers.init.path")
@mock.patch("alibuild_helpers.init.os")
def test_doDryRunInit(self, mock_os, mock_path, mock_info):
fake_dist = {"repo": "alisw/alidist", "ver": "master"}
args = Namespace(
develPrefix = ".",
configDir = "/alidist",
pkgname = "zlib,AliRoot@v5-08-00",
referenceSources = "/sw/MIRROR",
dist = fake_dist,
defaults = "release",
dryRun = True,
fetchRepos = False,
architecture = "slc7_x86-64"
)
self.assertRaises(SystemExit, doInit, args)
self.assertEqual(mock_info.mock_calls, [call('This will initialise local checkouts for %s\n--dry-run / -n specified. Doing nothing.', 'zlib,AliRoot')])
@mock.patch("alibuild_helpers.init.banner")
@mock.patch("alibuild_helpers.init.info")
@mock.patch("alibuild_helpers.init.path")
@mock.patch("alibuild_helpers.init.os")
@mock.patch("alibuild_helpers.init.execute")
@mock.patch("alibuild_helpers.init.parseRecipe")
@mock.patch("alibuild_helpers.init.updateReferenceRepoSpec")
@mock.patch("alibuild_helpers.utilities.open")
@mock.patch("alibuild_helpers.init.readDefaults")
def test_doRealInit(self, mock_read_defaults, mock_open, mock_update_reference, mock_parse_recipe, mock_execute, mock_os, mock_path, mock_info, mock_banner):
fake_dist = {"repo": "alisw/alidist", "ver": "master"}
mock_open.side_effect = lambda x: {
"/alidist/defaults-release.sh": StringIO("package: defaults-release\nversion: v1\n---"),
"/alidist/aliroot.sh": StringIO("package: AliRoot\nversion: master\nsource: https://github.com/alisw/AliRoot\n---")
}[x]
mock_execute.side_effect = can_do_git_clone
mock_parse_recipe.side_effect = valid_recipe
mock_path.exists.side_effect = dummy_exists
mock_os.mkdir.return_value = None
mock_path.join.side_effect = path.join
mock_read_defaults.return_value = (OrderedDict({"package": "defaults-release", "disable": []}), "")
args = Namespace(
develPrefix = ".",
configDir = "/alidist",
pkgname = "AliRoot@v5-08-00",
referenceSources = "/sw/MIRROR",
dist = fake_dist,
defaults = "release",
dryRun = False,
fetchRepos = False,
architecture = "slc7_x86-64"
)
doInit(args)
mock_execute.assert_called_with("git clone --origin upstream --filter=blob:none https://github.com/alisw/AliRoot -b v5-08-00 --reference /sw/MIRROR/aliroot ./AliRoot && cd ./AliRoot && git remote set-url --push upstream https://github.com/alisw/AliRoot")
self.assertEqual(mock_execute.mock_calls, CLONE_EVERYTHING)
mock_path.exists.assert_has_calls([call('.'), call('/sw/MIRROR'), call('/alidist'), call('./AliRoot')])
# Force fetch repos
mock_execute.reset_mock()
mock_path.reset_mock()
args.fetchRepos = True
doInit(args)
mock_execute.assert_called_with("git clone --origin upstream --filter=blob:none https://github.com/alisw/AliRoot -b v5-08-00 --reference /sw/MIRROR/aliroot ./AliRoot && cd ./AliRoot && git remote set-url --push upstream https://github.com/alisw/AliRoot")
self.assertEqual(mock_execute.mock_calls, CLONE_EVERYTHING)
mock_path.exists.assert_has_calls([call('.'), call('/sw/MIRROR'), call('/alidist'), call('./AliRoot')])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 3,519,998,651,371,887,600 | 44.292683 | 260 | 0.647999 | false |
rossburton/yocto-autobuilder | lib/python2.7/site-packages/SQLAlchemy-0.8.0b2-py2.7-linux-x86_64.egg/sqlalchemy/testing/schema.py | 8 | 2933 |
from . import exclusions
from .. import schema, event
from . import config
__all__ = 'Table', 'Column',
table_options = {}
def Table(*args, **kw):
"""A schema.Table wrapper/hook for dialect-specific tweaks."""
test_opts = dict([(k, kw.pop(k)) for k in kw.keys()
if k.startswith('test_')])
kw.update(table_options)
if exclusions.against('mysql'):
if 'mysql_engine' not in kw and 'mysql_type' not in kw:
if 'test_needs_fk' in test_opts or 'test_needs_acid' in test_opts:
kw['mysql_engine'] = 'InnoDB'
else:
kw['mysql_engine'] = 'MyISAM'
# Apply some default cascading rules for self-referential foreign keys.
    # MySQL InnoDB has some issues around selecting self-refs too.
if exclusions.against('firebird'):
table_name = args[0]
unpack = (config.db.dialect.
identifier_preparer.unformat_identifiers)
# Only going after ForeignKeys in Columns. May need to
# expand to ForeignKeyConstraint too.
fks = [fk
for col in args if isinstance(col, schema.Column)
for fk in col.foreign_keys]
for fk in fks:
# root around in raw spec
ref = fk._colspec
if isinstance(ref, schema.Column):
name = ref.table.name
else:
# take just the table name: on FB there cannot be
# a schema, so the first element is always the
# table name, possibly followed by the field name
name = unpack(ref)[0]
if name == table_name:
if fk.ondelete is None:
fk.ondelete = 'CASCADE'
if fk.onupdate is None:
fk.onupdate = 'CASCADE'
return schema.Table(*args, **kw)
def Column(*args, **kw):
"""A schema.Column wrapper/hook for dialect-specific tweaks."""
test_opts = dict([(k, kw.pop(k)) for k in kw.keys()
if k.startswith('test_')])
if not config.requirements.foreign_key_ddl.enabled:
args = [arg for arg in args if not isinstance(arg, schema.ForeignKey)]
col = schema.Column(*args, **kw)
if 'test_needs_autoincrement' in test_opts and \
kw.get('primary_key', False) and \
exclusions.against('firebird', 'oracle'):
def add_seq(c, tbl):
c._init_items(
schema.Sequence(_truncate_name(
config.db.dialect, tbl.name + '_' + c.name + '_seq'),
optional=True)
)
event.listen(col, 'after_parent_attach', add_seq, propagate=True)
return col
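# Illustrative use of the Table()/Column() wrappers above (a hypothetical
# sketch, not part of the original module; ``metadata`` and ``Integer`` are
# assumed to come from the enclosing test suite). The test_* keywords are
# consumed here and never reach the underlying schema objects; on MySQL,
# test_needs_acid=True selects the InnoDB engine:
#
#     t = Table('t', metadata,
#               Column('id', Integer, primary_key=True,
#                      test_needs_autoincrement=True),
#               test_needs_acid=True)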
def _truncate_name(dialect, name):
if len(name) > dialect.max_identifier_length:
return name[0:max(dialect.max_identifier_length - 6, 0)] + \
"_" + hex(hash(name) % 64)[2:]
else:
return name
| gpl-2.0 | -9,029,513,383,362,320,000 | 33.104651 | 78 | 0.556427 | false |
onoga/toolib | toolib/wx/util/ControlHost.py | 2 | 1928 | # -*- coding: Cp1251 -*-
###############################################################################
#
'''
'''
__author__ = "Oleg Noga"
__date__ = "$Date: 2005/12/07 19:53:53 $"
__version__ = "$Revision: 1.2 $"
# $Source: D:/HOME/cvs/toolib/wx/util/ControlHost.py,v $
###############################################################################
import wx
from toolib.util.OrderDict import OrderDict
class ControlHost(object):
"""
registers labels, controls, validators
validates, produces error messages,
gathers data
"""
_POS_CONTROL = 0
_POS_VALIDATOR = 1
_POS_LABEL = 2
def __init__(self):
self.__controls = OrderDict()
def getControlIds(self):
return self.__controls.keys()
def registerControl(self, id, control, validator=None, label=None):
self.__controls[id] = (control, validator, label)
def validate(self):
errors = []
for id, (control, validator, label) in self.__controls.iteritems():
if validator is not None and control.IsEnabled():
value = self._getControlValue(id)
try:
validator.validate(value, label)
except ValueError, e:
errors.append(e[0])
return errors
def getControl(self, id):
return self.__controls[id][self._POS_CONTROL]
def _getControlValue(self, id):
c = self.getControl(id)
if hasattr(c, 'getDate'):
return c.getDate()
else:
return c.GetValue()
def getValidator(self, id):
return self.__controls[id][self._POS_VALIDATOR]
def setValidator(self, id, validator):
control, oldValidator, label = self.__controls[id]
self.registerControl(id, control, validator, label)
def getLabel(self, id):
return self.__controls[id][self._POS_LABEL]
def setLabel(self, id, label):
control, validator, oldLabel = self.__controls[id]
self.registerControl(id, control, validator, label)
def getData(self):
d = {}
for id in self.__controls.iterkeys():
d[id] = self._getControlValue(id)
return d
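# Illustrative use of ControlHost (a hypothetical sketch, not part of the
# original module; the wx widget and NotEmptyValidator are made-up stand-ins
# for a validator object exposing validate(value, label)):
#
#     class MyDialog(wx.Dialog, ControlHost):
#         def __init__(self, parent):
#             wx.Dialog.__init__(self, parent)
#             ControlHost.__init__(self)
#             self.registerControl('name', wx.TextCtrl(self),
#                                  validator=NotEmptyValidator(),
#                                  label='Name')
#
#     dlg = MyDialog(None)
#     errors = dlg.validate()   # list of error messages, empty if all valid
#     data = dlg.getData()      # {'name': <current value>, ...}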
| gpl-2.0 | -3,162,611,594,227,878,000 | 25.054054 | 79 | 0.614627 | false |
sqlobject/sqlobject | sqlobject/boundattributes.py | 2 | 4191 | """
Bound attributes are attributes that are bound to a specific class and
a specific name. In SQLObject a typical example is a column object,
which knows its name and class.
A bound attribute should define a method ``__addtoclass__(added_class,
name)`` (attributes without this method will simply be treated as
normal). The return value is ignored; if the attribute wishes to
change the value in the class, it must call ``setattr(added_class,
name, new_value)``.
BoundAttribute is a class that facilitates lazy attribute creation.
"""
from __future__ import absolute_import
from . import declarative
from . import events
__all__ = ['BoundAttribute', 'BoundFactory']
class BoundAttribute(declarative.Declarative):
"""
This is a declarative class that passes all the values given to it
to another object. So you can pass it arguments (via
__init__/__call__) or give it the equivalent of keyword arguments
through subclassing. Then a bound object will be added in its
place.
To hook this other object in, override ``make_object(added_class,
name, **attrs)`` and maybe ``set_object(added_class, name,
**attrs)`` (the default implementation of ``set_object``
just resets the attribute to whatever ``make_object`` returned).
Also see ``BoundFactory``.
"""
_private_variables = (
'_private_variables',
'_all_attributes',
'__classinit__',
'__addtoclass__',
'_add_attrs',
'set_object',
'make_object',
'clone_in_subclass',
)
_all_attrs = ()
clone_for_subclass = True
def __classinit__(cls, new_attrs):
declarative.Declarative.__classinit__(cls, new_attrs)
cls._all_attrs = cls._add_attrs(cls, new_attrs)
def __instanceinit__(self, new_attrs):
declarative.Declarative.__instanceinit__(self, new_attrs)
self.__dict__['_all_attrs'] = self._add_attrs(self, new_attrs)
@staticmethod
def _add_attrs(this_object, new_attrs):
private = this_object._private_variables
all_attrs = list(this_object._all_attrs)
for key in new_attrs.keys():
if key.startswith('_') or key in private:
continue
if key not in all_attrs:
all_attrs.append(key)
return tuple(all_attrs)
@declarative.classinstancemethod
def __addtoclass__(self, cls, added_class, attr_name):
me = self or cls
attrs = {}
for name in me._all_attrs:
attrs[name] = getattr(me, name)
attrs['added_class'] = added_class
attrs['attr_name'] = attr_name
obj = me.make_object(**attrs)
if self.clone_for_subclass:
def on_rebind(new_class_name, bases, new_attrs,
post_funcs, early_funcs):
def rebind(new_class):
me.set_object(
new_class, attr_name,
me.make_object(**attrs))
post_funcs.append(rebind)
events.listen(receiver=on_rebind, soClass=added_class,
signal=events.ClassCreateSignal, weak=False)
me.set_object(added_class, attr_name, obj)
@classmethod
def set_object(cls, added_class, attr_name, obj):
setattr(added_class, attr_name, obj)
@classmethod
def make_object(cls, added_class, attr_name, *args, **attrs):
raise NotImplementedError
def __setattr__(self, name, value):
self.__dict__['_all_attrs'] = self._add_attrs(self, {name: value})
self.__dict__[name] = value
class BoundFactory(BoundAttribute):
"""
This will bind the attribute to whatever is given by
``factory_class``. This factory should be a callable with the
signature ``factory_class(added_class, attr_name, *args, **kw)``.
The factory will be reinvoked (and the attribute rebound) for
every subclassing.
"""
factory_class = None
_private_variables = (
BoundAttribute._private_variables + ('factory_class',))
def make_object(cls, added_class, attr_name, *args, **kw):
return cls.factory_class(added_class, attr_name, *args, **kw)
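# A minimal sketch of the protocol described in the module docstring (a
# hypothetical example, not part of the original module; ``PlainValue`` is a
# made-up name, and whether the attribute is actually rebound depends on the
# declarative machinery of the class it is attached to):
#
#     class PlainValue(BoundAttribute):
#         value = None
#
#         @classmethod
#         def make_object(cls, added_class, attr_name, **attrs):
#             # Replace the attribute with the plain value it was given.
#             return attrs['value']
#
# Attaching ``PlainValue(value=42)`` to a class whose metaclass honours
# ``__addtoclass__`` leaves the integer 42 in its place.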
| lgpl-2.1 | 5,200,749,630,434,080,000 | 32.798387 | 74 | 0.620377 | false |
Tiger66639/ansible-modules-core | windows/win_get_url.py | 13 | 1921 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Paul Durivage <[email protected]>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_get_url
version_added: "1.7"
short_description: Fetches a file from a given URL
description:
    - Fetches a file from a URL and saves it locally
options:
url:
description:
- The full URL of a file to download
required: true
default: null
aliases: []
dest:
description:
- The absolute path of the location to save the file at the URL. Be sure to include a filename and extension as appropriate.
required: false
default: yes
aliases: []
author: "Paul Durivage (@angstwad)"
'''
EXAMPLES = '''
# Downloading a JPEG and saving it to a file with the ansible command.
# Note that "dest" is quoted rather than escaping the backslashes
$ ansible -i hosts -c winrm -m win_get_url -a "url=http://www.example.com/earthrise.jpg dest='C:\Users\Administrator\earthrise.jpg'" all
# Playbook example
- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg'
win_get_url:
url: 'http://www.example.com/earthrise.jpg'
dest: 'C:\Users\RandomUser\earthrise.jpg'
'''
| gpl-3.0 | -1,010,075,977,783,287,900 | 32.701754 | 136 | 0.717855 | false |
bema-ligo/pycbc | pycbc/types/array_cpu.py | 1 | 5898 | # Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Numpy based CPU backend for PyCBC Array
"""
from __future__ import absolute_import
import numpy as _np
from pycbc.types.array import common_kind, complex128, float64
from . import aligned as _algn
from scipy.linalg import blas
from weave import inline
from pycbc.opt import omp_libs, omp_flags
from pycbc import WEAVE_FLAGS
from pycbc.types import real_same_precision_as
def zeros(length, dtype=_np.float64):
return _algn.zeros(length, dtype=dtype)
def empty(length, dtype=_np.float64):
return _algn.empty(length, dtype=dtype)
def ptr(self):
return self.data.ctypes.data
def dot(self, other):
return _np.dot(self._data,other)
def min(self):
return self.data.min()
code_abs_arg_max = """
float val = 0;
int l = 0;
for (int i=0; i<N; i++){
float mag = data[i*2] * data[i*2] + data[i*2+1] * data[i*2+1];
if ( mag > val){
l = i;
val = mag;
}
}
loc[0] = l;
"""
code_flags = [WEAVE_FLAGS] + omp_flags
def abs_arg_max(self):
if self.kind == 'real':
return _np.argmax(self.data)
else:
data = _np.array(self._data,
copy=False).view(real_same_precision_as(self))
loc = _np.array([0])
N = len(self)
inline(code_abs_arg_max, ['data', 'loc', 'N'], libraries=omp_libs,
extra_compile_args=code_flags)
return loc[0]
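# Note (illustrative, not from the original module): for complex input the
# weave kernel above scans interleaved real/imaginary pairs and returns the
# index of the largest squared magnitude, which is, up to floating-point
# precision, the same index as numpy.argmax(abs(self.data)).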
def abs_max_loc(self):
if self.kind == 'real':
tmp = abs(self.data)
ind = _np.argmax(tmp)
return tmp[ind], ind
else:
tmp = self.data.real ** 2.0
tmp += self.data.imag ** 2.0
ind = _np.argmax(tmp)
return tmp[ind] ** 0.5, ind
def cumsum(self):
return self.data.cumsum()
def max(self):
return self.data.max()
def max_loc(self):
ind = _np.argmax(self.data)
return self.data[ind], ind
def take(self, indices):
return self.data.take(indices)
def weighted_inner(self, other, weight):
    """ Return the inner product of the array with complex conjugation,
    with each term weighted by 1 / weight.
    """
if weight is None:
return self.inner(other)
cdtype = common_kind(self.dtype, other.dtype)
if cdtype.kind == 'c':
acum_dtype = complex128
else:
acum_dtype = float64
return _np.sum(self.data.conj() * other / weight, dtype=acum_dtype)
inner_code = """
double value = 0;
#pragma omp parallel for reduction(+:value)
for (int i=0; i<N; i++){
float val = x[i] * y[i];
value += val;
}
total[0] = value;
"""
def inner_inline_real(self, other):
x = _np.array(self._data, copy=False)
y = _np.array(other, copy=False)
total = _np.array([0.], dtype=float64)
N = len(self)
inline(inner_code, ['x', 'y', 'total', 'N'], libraries=omp_libs,
extra_compile_args=code_flags)
return total[0]
def inner(self, other):
""" Return the inner product of the array with complex conjugation.
"""
cdtype = common_kind(self.dtype, other.dtype)
if cdtype.kind == 'c':
return _np.sum(self.data.conj() * other, dtype=complex128)
else:
return inner_inline_real(self, other)
def vdot(self, other):
""" Return the inner product of the array with complex conjugation.
"""
return _np.vdot(self.data, other)
def squared_norm(self):
""" Return the elementwise squared norm of the array """
return (self.data.real**2 + self.data.imag**2)
_blas_mandadd_funcs = {}
_blas_mandadd_funcs[_np.float32] = blas.saxpy
_blas_mandadd_funcs[_np.float64] = blas.daxpy
_blas_mandadd_funcs[_np.complex64] = blas.caxpy
_blas_mandadd_funcs[_np.complex128] = blas.zaxpy
def multiply_and_add(self, other, mult_fac):
"""
Return other multiplied by mult_fac and with self added.
Self will be modified in place. This requires all inputs to be of the same
precision.
"""
    # Sanity checking should already have been done. But we don't know if
    # mult_fac is an array or a scalar.
inpt = _np.array(self.data, copy=False)
N = len(inpt)
# For some reason, _checkother decorator returns other.data so we don't
# take .data here
other = _np.array(other, copy=False)
assert(inpt.dtype == other.dtype)
blas_fnc = _blas_mandadd_funcs[inpt.dtype.type]
return blas_fnc(other, inpt, a=mult_fac)
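# Worked example for the axpy update above (illustrative, not from the
# original module): the call performs self <- self + mult_fac * other in
# place, so with self = [1., 2.], other = [10., 20.] and mult_fac = 0.5 the
# stored (and returned) data becomes [6., 12.].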
def numpy(self):
return self._data
def _copy(self, self_ref, other_ref):
self_ref[:] = other_ref[:]
def _getvalue(self, index):
return self._data[index]
def sum(self):
if self.kind == 'real':
return _np.sum(self._data,dtype=float64)
else:
return _np.sum(self._data,dtype=complex128)
def clear(self):
self[:] = 0
def _scheme_matches_base_array(array):
# Since ArrayWithAligned is a subclass of ndarray,
# and since converting to ArrayWithAligned will
# *not* copy 'array', the following is the way to go:
if isinstance(array, _np.ndarray):
return True
else:
return False
| gpl-3.0 | 3,446,148,500,638,562,300 | 27.631068 | 79 | 0.618854 | false |
magenta/magenta | magenta/models/nsynth/baseline/models/ae.py | 1 | 7401 | # Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Autoencoder model for training on spectrograms."""
from magenta.contrib import training as contrib_training
from magenta.models.nsynth import utils
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
def get_hparams(config_name):
"""Set hyperparameters.
Args:
config_name: Name of config module to use.
Returns:
A HParams object (magenta) with defaults.
"""
hparams = contrib_training.HParams(
# Optimization
batch_size=16,
learning_rate=1e-4,
adam_beta=0.5,
max_steps=6000 * 50000,
samples_per_second=16000,
num_samples=64000,
# Preprocessing
n_fft=1024,
hop_length=256,
mask=True,
log_mag=True,
use_cqt=False,
re_im=False,
dphase=True,
mag_only=False,
pad=True,
mu_law_num=0,
raw_audio=False,
# Graph
num_latent=64, # dimension of z.
cost_phase_mask=False,
phase_loss_coeff=1.0,
fw_loss_coeff=1.0, # Frequency weighted cost
fw_loss_cutoff=1000,
)
# Set values from a dictionary in the config
config = utils.get_module("baseline.models.ae_configs.%s" % config_name)
if hasattr(config, "config_hparams"):
config_hparams = config.config_hparams
hparams.update(config_hparams)
return hparams
def compute_mse_loss(x, xhat, hparams):
"""MSE loss function.
Args:
x: Input data tensor.
xhat: Reconstruction tensor.
hparams: Hyperparameters.
Returns:
total_loss: MSE loss scalar.
"""
with tf.name_scope("Losses"):
if hparams.raw_audio:
total_loss = tf.reduce_mean((x - xhat)**2)
else:
# Magnitude
m = x[:, :, :, 0] if hparams.cost_phase_mask else 1.0
fm = utils.frequency_weighted_cost_mask(
hparams.fw_loss_coeff,
hz_flat=hparams.fw_loss_cutoff,
n_fft=hparams.n_fft)
mag_loss = tf.reduce_mean(fm * (x[:, :, :, 0] - xhat[:, :, :, 0])**2)
if hparams.mag_only:
total_loss = mag_loss
else:
# Phase
if hparams.dphase:
phase_loss = tf.reduce_mean(fm * m *
(x[:, :, :, 1] - xhat[:, :, :, 1])**2)
else:
          # Von Mises distribution ("circular normal").
          # A constant is added to keep the loss positive, giving a range of [0, 2].
phase_loss = 1 - tf.reduce_mean(fm * m * tf.cos(
(x[:, :, :, 1] - xhat[:, :, :, 1]) * np.pi))
total_loss = mag_loss + hparams.phase_loss_coeff * phase_loss
tf.summary.scalar("Loss/Mag", mag_loss)
tf.summary.scalar("Loss/Phase", phase_loss)
tf.summary.scalar("Loss/Total", total_loss)
return total_loss
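# Worked example for the circular-normal phase term above (illustrative, not
# from the original module): 1 - cos(error * pi) is 0 for a phase error of 0
# and reaches its maximum 1 - cos(pi) = 2 for an error of +/-1 (i.e. +/-pi
# after scaling), which is the [0, 2] range quoted in the comment.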
def train_op(batch, hparams, config_name):
"""Define a training op, including summaries and optimization.
Args:
batch: Dictionary produced by NSynthDataset.
hparams: Hyperparameters dictionary.
config_name: Name of config module.
Returns:
train_op: A complete iteration of training with summaries.
"""
config = utils.get_module("baseline.models.ae_configs.%s" % config_name)
if hparams.raw_audio:
x = batch["audio"]
# Add height and channel dims
x = tf.expand_dims(tf.expand_dims(x, 1), -1)
else:
x = batch["spectrogram"]
# Define the model
with tf.name_scope("Model"):
z = config.encode(x, hparams)
xhat = config.decode(z, batch, hparams)
# For interpolation
tf.add_to_collection("x", x)
tf.add_to_collection("pitch", batch["pitch"])
tf.add_to_collection("z", z)
tf.add_to_collection("xhat", xhat)
# Compute losses
total_loss = compute_mse_loss(x, xhat, hparams)
# Apply optimizer
with tf.name_scope("Optimizer"):
global_step = tf.get_variable(
"global_step", [],
tf.int64,
initializer=tf.constant_initializer(0),
trainable=False)
optimizer = tf.train.AdamOptimizer(hparams.learning_rate, hparams.adam_beta)
train_step = slim.learning.create_train_op(total_loss,
optimizer,
global_step=global_step)
return train_step
def eval_op(batch, hparams, config_name):
"""Define a evaluation op.
Args:
batch: Batch produced by NSynthReader.
hparams: Hyperparameters.
config_name: Name of config module.
Returns:
eval_op: A complete evaluation op with summaries.
"""
phase = not (hparams.mag_only or hparams.raw_audio)
config = utils.get_module("baseline.models.ae_configs.%s" % config_name)
if hparams.raw_audio:
x = batch["audio"]
# Add height and channel dims
x = tf.expand_dims(tf.expand_dims(x, 1), -1)
else:
x = batch["spectrogram"]
# Define the model
with tf.name_scope("Model"):
z = config.encode(x, hparams, is_training=False)
xhat = config.decode(z, batch, hparams, is_training=False)
# For interpolation
tf.add_to_collection("x", x)
tf.add_to_collection("pitch", batch["pitch"])
tf.add_to_collection("z", z)
tf.add_to_collection("xhat", xhat)
total_loss = compute_mse_loss(x, xhat, hparams)
# Define the metrics:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
"Loss": slim.metrics.mean(total_loss),
})
# Define the summaries
for name, value in names_to_values.items():
slim.summaries.add_scalar_summary(value, name, print_summary=True)
# Interpolate
with tf.name_scope("Interpolation"):
xhat = config.decode(z, batch, hparams, reuse=True, is_training=False)
# Linear interpolation
z_shift_one_example = tf.concat([z[1:], z[:1]], 0)
z_linear_half = (z + z_shift_one_example) / 2.0
xhat_linear_half = config.decode(z_linear_half, batch, hparams, reuse=True,
is_training=False)
# Pitch shift
pitch_plus_2 = tf.clip_by_value(batch["pitch"] + 2, 0, 127)
pitch_minus_2 = tf.clip_by_value(batch["pitch"] - 2, 0, 127)
batch["pitch"] = pitch_minus_2
xhat_pitch_minus_2 = config.decode(z, batch, hparams,
reuse=True, is_training=False)
batch["pitch"] = pitch_plus_2
xhat_pitch_plus_2 = config.decode(z, batch, hparams,
reuse=True, is_training=False)
utils.specgram_summaries(x, "Training Examples", hparams, phase=phase)
utils.specgram_summaries(xhat, "Reconstructions", hparams, phase=phase)
utils.specgram_summaries(
x - xhat, "Difference", hparams, audio=False, phase=phase)
utils.specgram_summaries(
xhat_linear_half, "Linear Interp. 0.5", hparams, phase=phase)
utils.specgram_summaries(xhat_pitch_plus_2, "Pitch +2", hparams, phase=phase)
utils.specgram_summaries(xhat_pitch_minus_2, "Pitch -2", hparams, phase=phase)
return list(names_to_updates.values())
| apache-2.0 | -3,374,700,922,489,438,700 | 30.900862 | 80 | 0.635455 | false |
jamesls/boto | boto/elastictranscoder/layer1.py | 4 | 34890 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import json
from boto.exception import JSONResponseError
from boto.connection import AWSAuthConnection
from boto.regioninfo import RegionInfo
from boto.elastictranscoder import exceptions
class ElasticTranscoderConnection(AWSAuthConnection):
"""
AWS Elastic Transcoder Service
The AWS Elastic Transcoder Service.
"""
APIVersion = "2012-09-25"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "elastictranscoder.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"IncompatibleVersionException": exceptions.IncompatibleVersionException,
"LimitExceededException": exceptions.LimitExceededException,
"ResourceInUseException": exceptions.ResourceInUseException,
"AccessDeniedException": exceptions.AccessDeniedException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InternalServiceException": exceptions.InternalServiceException,
"ValidationException": exceptions.ValidationException,
}
def __init__(self, **kwargs):
region = kwargs.get('region')
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
else:
del kwargs['region']
kwargs['host'] = region.endpoint
AWSAuthConnection.__init__(self, **kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def cancel_job(self, id=None):
"""
To cancel a job, send a DELETE request to the
`/2012-09-25/jobs/ [jobId] ` resource.
You can only cancel a job that has a status of `Submitted`. To
prevent a pipeline from starting to process a job while you're
getting the job identifier, use UpdatePipelineStatus to
temporarily pause the pipeline.
:type id: string
:param id: The identifier of the job that you want to delete.
To get a list of the jobs (including their `jobId`) that have a status
of `Submitted`, use the ListJobsByStatus API action.
"""
uri = '/2012-09-25/jobs/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def create_job(self, pipeline_id=None, input_name=None, output=None,
outputs=None, output_key_prefix=None, playlists=None):
"""
To create a job, send a POST request to the `/2012-09-25/jobs`
resource.
When you create a job, Elastic Transcoder returns JSON data
that includes the values that you specified plus information
about the job that is created.
If you have specified more than one output for your jobs (for
example, one output for the Kindle Fire and another output for
the Apple iPhone 4s), you currently must use the Elastic
Transcoder API to list the jobs (as opposed to the AWS
Console).
:type pipeline_id: string
:param pipeline_id: The `Id` of the pipeline that you want Elastic
Transcoder to use for transcoding. The pipeline determines several
settings, including the Amazon S3 bucket from which Elastic
Transcoder gets the files to transcode and the bucket into which
Elastic Transcoder puts the transcoded files.
:type input_name: dict
:param input_name: A section of the request body that provides
information about the file that is being transcoded.
:type output: dict
:param output:
:type outputs: list
:param outputs: A section of the request body that provides information
about the transcoded (target) files. We recommend that you use the
`Outputs` syntax instead of the `Output` syntax.
:type output_key_prefix: string
:param output_key_prefix: The value, if any, that you want Elastic
Transcoder to prepend to the names of all files that this job
creates, including output files, thumbnails, and playlists.
:type playlists: list
:param playlists: If you specify a preset in `PresetId` for which the
value of `Container` is ts (MPEG-TS), Playlists contains
information about the master playlists that you want Elastic
Transcoder to create.
We recommend that you create only one master playlist. The maximum
number of master playlists in a job is 30.
"""
uri = '/2012-09-25/jobs'
params = {}
if pipeline_id is not None:
params['PipelineId'] = pipeline_id
if input_name is not None:
params['Input'] = input_name
if output is not None:
params['Output'] = output
if outputs is not None:
params['Outputs'] = outputs
if output_key_prefix is not None:
params['OutputKeyPrefix'] = output_key_prefix
if playlists is not None:
params['Playlists'] = playlists
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
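    # Illustrative call (a hypothetical sketch, not part of the original
    # module; the pipeline id, preset id, object keys and connection setup
    # are placeholders):
    #
    #     conn = ElasticTranscoderConnection()
    #     conn.create_job(
    #         pipeline_id='1111111111111-abcd11',
    #         input_name={'Key': 'inputs/video.mov'},
    #         outputs=[{'Key': 'outputs/video.mp4',
    #                   'PresetId': '1351620000001-000010'}])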
def create_pipeline(self, name=None, input_bucket=None,
output_bucket=None, role=None, notifications=None,
content_config=None, thumbnail_config=None):
"""
To create a pipeline, send a POST request to the
`2012-09-25/pipelines` resource.
:type name: string
:param name: The name of the pipeline. We recommend that the name be
unique within the AWS account, but uniqueness is not enforced.
Constraints: Maximum 40 characters.
:type input_bucket: string
:param input_bucket: The Amazon S3 bucket in which you saved the media
files that you want to transcode.
:type output_bucket: string
:param output_bucket: The Amazon S3 bucket in which you want Elastic
Transcoder to save the transcoded files. (Use this, or use
ContentConfig:Bucket plus ThumbnailConfig:Bucket.)
Specify this value when all of the following are true:
+ You want to save transcoded files, thumbnails (if any), and playlists
(if any) together in one bucket.
+ You do not want to specify the users or groups who have access to the
transcoded files, thumbnails, and playlists.
+ You do not want to specify the permissions that Elastic Transcoder
grants to the files. When Elastic Transcoder saves files in
`OutputBucket`, it grants full control over the files only to the
AWS account that owns the role that is specified by `Role`.
+ You want to associate the transcoded files and thumbnails with the
Amazon S3 Standard storage class.
If you want to save transcoded files and playlists in one bucket and
thumbnails in another bucket, specify which users can access the
transcoded files or the permissions the users have, or change the
Amazon S3 storage class, omit `OutputBucket` and specify values for
`ContentConfig` and `ThumbnailConfig` instead.
:type role: string
:param role: The IAM Amazon Resource Name (ARN) for the role that you
want Elastic Transcoder to use to create the pipeline.
:type notifications: dict
:param notifications:
The Amazon Simple Notification Service (Amazon SNS) topic that you want
to notify to report job status.
To receive notifications, you must also subscribe to the new topic in
the Amazon SNS console.
+ **Progressing**: The topic ARN for the Amazon Simple Notification
Service (Amazon SNS) topic that you want to notify when Elastic
Transcoder has started to process a job in this pipeline. This is
the ARN that Amazon SNS returned when you created the topic. For
more information, see Create a Topic in the Amazon Simple
Notification Service Developer Guide.
+ **Completed**: The topic ARN for the Amazon SNS topic that you want
to notify when Elastic Transcoder has finished processing a job in
this pipeline. This is the ARN that Amazon SNS returned when you
created the topic.
+ **Warning**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters a warning condition while
processing a job in this pipeline. This is the ARN that Amazon SNS
returned when you created the topic.
+ **Error**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters an error condition while
processing a job in this pipeline. This is the ARN that Amazon SNS
returned when you created the topic.
:type content_config: dict
:param content_config:
The optional `ContentConfig` object specifies information about the
Amazon S3 bucket in which you want Elastic Transcoder to save
transcoded files and playlists: which bucket to use, which users
you want to have access to the files, the type of access you want
users to have, and the storage class that you want to assign to the
files.
If you specify values for `ContentConfig`, you must also specify values
for `ThumbnailConfig`.
If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
the `OutputBucket` object.
+ **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
to save transcoded files and playlists.
+ **Permissions** (Optional): The Permissions object specifies which
users you want to have access to transcoded files and the type of
access you want them to have. You can grant permissions to a
maximum of 30 users and/or predefined Amazon S3 groups.
+ **Grantee Type**: Specify the type of value that appears in the
`Grantee` object:
+ **Canonical**: The value in the `Grantee` object is either the
canonical user ID for an AWS account or an origin access identity
for an Amazon CloudFront distribution. For more information about
canonical user IDs, see Access Control List (ACL) Overview in the
Amazon Simple Storage Service Developer Guide. For more information
about using CloudFront origin access identities to require that
users use CloudFront URLs instead of Amazon S3 URLs, see Using an
Origin Access Identity to Restrict Access to Your Amazon S3
Content. A canonical user ID is not the same as an AWS account
number.
+ **Email**: The value in the `Grantee` object is the registered email
address of an AWS account.
+ **Group**: The value in the `Grantee` object is one of the following
predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
`LogDelivery`.
+ **Grantee**: The AWS user or group that you want to have access to
transcoded files and playlists. To identify the user or group, you
can specify the canonical user ID for an AWS account, an origin
access identity for a CloudFront distribution, the registered email
address of an AWS account, or a predefined Amazon S3 group
+ **Access**: The permission that you want to give to the AWS user that
you specified in `Grantee`. Permissions are granted on the files
that Elastic Transcoder adds to the bucket, including playlists and
video files. Valid values include:
+ `READ`: The grantee can read the objects and metadata for objects
that Elastic Transcoder adds to the Amazon S3 bucket.
+ `READ_ACP`: The grantee can read the object ACL for objects that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `WRITE_ACP`: The grantee can write the ACL for the objects that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
permissions for the objects that Elastic Transcoder adds to the
Amazon S3 bucket.
+ **StorageClass**: The Amazon S3 storage class, `Standard` or
`ReducedRedundancy`, that you want Elastic Transcoder to assign to
the video files and playlists that it stores in your Amazon S3
bucket.
:type thumbnail_config: dict
:param thumbnail_config:
The `ThumbnailConfig` object specifies several values, including the
Amazon S3 bucket in which you want Elastic Transcoder to save
thumbnail files, which users you want to have access to the files,
the type of access you want users to have, and the storage class
that you want to assign to the files.
If you specify values for `ContentConfig`, you must also specify values
for `ThumbnailConfig` even if you don't want to create thumbnails.
If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
the `OutputBucket` object.
+ **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
to save thumbnail files.
+ **Permissions** (Optional): The `Permissions` object specifies which
users and/or predefined Amazon S3 groups you want to have access to
thumbnail files, and the type of access you want them to have. You
can grant permissions to a maximum of 30 users and/or predefined
Amazon S3 groups.
+ **GranteeType**: Specify the type of value that appears in the
Grantee object:
+ **Canonical**: The value in the `Grantee` object is either the
canonical user ID for an AWS account or an origin access identity
for an Amazon CloudFront distribution. A canonical user ID is not
the same as an AWS account number.
+ **Email**: The value in the `Grantee` object is the registered email
address of an AWS account.
+ **Group**: The value in the `Grantee` object is one of the following
predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
`LogDelivery`.
+ **Grantee**: The AWS user or group that you want to have access to
thumbnail files. To identify the user or group, you can specify the
canonical user ID for an AWS account, an origin access identity for
a CloudFront distribution, the registered email address of an AWS
account, or a predefined Amazon S3 group.
+ **Access**: The permission that you want to give to the AWS user that
you specified in `Grantee`. Permissions are granted on the
thumbnail files that Elastic Transcoder adds to the bucket. Valid
values include:
+ `READ`: The grantee can read the thumbnails and metadata for objects
that Elastic Transcoder adds to the Amazon S3 bucket.
+ `READ_ACP`: The grantee can read the object ACL for thumbnails that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `WRITE_ACP`: The grantee can write the ACL for the thumbnails that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
permissions for the thumbnails that Elastic Transcoder adds to the
Amazon S3 bucket.
+ **StorageClass**: The Amazon S3 storage class, `Standard` or
`ReducedRedundancy`, that you want Elastic Transcoder to assign to
the thumbnails that it stores in your Amazon S3 bucket.
"""
uri = '/2012-09-25/pipelines'
params = {}
if name is not None:
params['Name'] = name
if input_bucket is not None:
params['InputBucket'] = input_bucket
if output_bucket is not None:
params['OutputBucket'] = output_bucket
if role is not None:
params['Role'] = role
if notifications is not None:
params['Notifications'] = notifications
if content_config is not None:
params['ContentConfig'] = content_config
if thumbnail_config is not None:
params['ThumbnailConfig'] = thumbnail_config
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
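    # Illustrative call (a hypothetical sketch, not part of the original
    # module; the bucket names, role ARN and topic ARNs are placeholders,
    # and the notification topics are assumed to be left as empty strings
    # when unused):
    #
    #     conn.create_pipeline(
    #         name='my-pipeline',
    #         input_bucket='my-input-bucket',
    #         output_bucket='my-output-bucket',
    #         role='arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role',
    #         notifications={'Progressing': '', 'Completed': '',
    #                        'Warning': '', 'Error': ''})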
def create_preset(self, name=None, description=None, container=None,
video=None, audio=None, thumbnails=None):
"""
To create a preset, send a POST request to the
`/2012-09-25/presets` resource.
Elastic Transcoder checks the settings that you specify to
ensure that they meet Elastic Transcoder requirements and to
determine whether they comply with H.264 standards. If your
settings are not valid for Elastic Transcoder, Elastic
Transcoder returns an HTTP 400 response (
`ValidationException`) and does not create the preset. If the
settings are valid for Elastic Transcoder but aren't strictly
compliant with the H.264 standard, Elastic Transcoder creates
the preset and returns a warning message in the response. This
helps you determine whether your settings comply with the
H.264 standard while giving you greater flexibility with
respect to the video that Elastic Transcoder produces.
Elastic Transcoder uses the H.264 video-compression format.
For more information, see the International Telecommunication
Union publication Recommendation ITU-T H.264: Advanced video
coding for generic audiovisual services .
:type name: string
:param name: The name of the preset. We recommend that the name be
unique within the AWS account, but uniqueness is not enforced.
:type description: string
:param description: A description of the preset.
:type container: string
:param container: The container type for the output file. This value
must be `mp4`.
:type video: dict
:param video: A section of the request body that specifies the video
parameters.
:type audio: dict
:param audio: A section of the request body that specifies the audio
parameters.
:type thumbnails: dict
:param thumbnails: A section of the request body that specifies the
thumbnail parameters, if any.
"""
uri = '/2012-09-25/presets'
params = {}
if name is not None:
params['Name'] = name
if description is not None:
params['Description'] = description
if container is not None:
params['Container'] = container
if video is not None:
params['Video'] = video
if audio is not None:
params['Audio'] = audio
if thumbnails is not None:
params['Thumbnails'] = thumbnails
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
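    # Illustrative sketch for create_preset above: a minimal mp4 preset.  The
    # Video/Audio/Thumbnails dicts are abbreviated, hypothetical values; the
    # full set of accepted keys is defined by the Elastic Transcoder preset
    # documentation, not by this module.  `conn` is an instance of this
    # connection class, as in the earlier sketch.
    #
    #   conn.create_preset(
    #       name='example-web-720p',
    #       description='720p web preset (example)',
    #       container='mp4',
    #       video={'Codec': 'H.264', 'BitRate': '2200', 'FrameRate': '30',
    #              'Resolution': '1280x720', 'AspectRatio': '16:9',
    #              'KeyframesMaxDist': '90', 'FixedGOP': 'false'},
    #       audio={'Codec': 'AAC', 'SampleRate': '44100', 'BitRate': '160',
    #              'Channels': '2'},
    #       thumbnails={'Format': 'png', 'Interval': '60',
    #                   'Resolution': '192x108', 'AspectRatio': '16:9'})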
def delete_pipeline(self, id=None):
"""
To delete a pipeline, send a DELETE request to the
`/2012-09-25/pipelines/ [pipelineId] ` resource.
You can only delete a pipeline that has never been used or
that is not currently in use (doesn't contain any active
jobs). If the pipeline is currently in use, `DeletePipeline`
returns an error.
:type id: string
:param id: The identifier of the pipeline that you want to delete.
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def delete_preset(self, id=None):
"""
To delete a preset, send a DELETE request to the
`/2012-09-25/presets/ [presetId] ` resource.
If the preset has been used, you cannot delete it.
:type id: string
        :param id: The identifier of the preset that you want to delete.
"""
uri = '/2012-09-25/presets/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def list_jobs_by_pipeline(self, pipeline_id=None, ascending=None,
page_token=None):
"""
To get a list of the jobs currently in a pipeline, send a GET
request to the `/2012-09-25/jobsByPipeline/ [pipelineId] `
resource.
Elastic Transcoder returns all of the jobs currently in the
specified pipeline. The response body contains one element for
each job that satisfies the search criteria.
:type pipeline_id: string
:param pipeline_id: The ID of the pipeline for which you want to get
job information.
:type ascending: string
:param ascending: To list jobs in chronological order by the date and
time that they were submitted, enter `True`. To list jobs in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/jobsByPipeline/{0}'.format(pipeline_id)
params = {}
if pipeline_id is not None:
params['PipelineId'] = pipeline_id
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
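    # Illustrative sketch for list_jobs_by_pipeline above: paging through a
    # pipeline's jobs with the page_token mechanism described in the
    # docstring.  The 'Jobs' and 'NextPageToken' response keys are assumed
    # from the service documentation; the pipeline ID is a placeholder.
    #
    #   token = None
    #   while True:
    #       page = conn.list_jobs_by_pipeline('1111111111111-abcde1',
    #                                         page_token=token)
    #       for job in page.get('Jobs', []):
    #           print(job['Id'], job['Status'])
    #       token = page.get('NextPageToken')
    #       if not token:
    #           break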
def list_jobs_by_status(self, status=None, ascending=None,
page_token=None):
"""
To get a list of the jobs that have a specified status, send a
GET request to the `/2012-09-25/jobsByStatus/ [status] `
resource.
Elastic Transcoder returns all of the jobs that have the
specified status. The response body contains one element for
each job that satisfies the search criteria.
:type status: string
:param status: To get information about all of the jobs associated with
the current AWS account that have a given status, specify the
following status: `Submitted`, `Progressing`, `Complete`,
`Canceled`, or `Error`.
:type ascending: string
:param ascending: To list jobs in chronological order by the date and
time that they were submitted, enter `True`. To list jobs in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/jobsByStatus/{0}'.format(status)
params = {}
if status is not None:
params['Status'] = status
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
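    # Illustrative sketch for list_jobs_by_status above: a single-page query
    # for completed jobs.  Valid status strings are the ones named in the
    # docstring; the same NextPageToken loop shown for list_jobs_by_pipeline
    # applies when more than one page is returned.
    #
    #   completed = conn.list_jobs_by_status('Complete')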
def list_pipelines(self):
"""
To get a list of the pipelines associated with the current AWS
account, send a GET request to the `/2012-09-25/pipelines`
resource.
"""
uri = '/2012-09-25/pipelines'
return self.make_request('GET', uri, expected_status=200)
def list_presets(self):
"""
To get a list of all presets associated with the current AWS
account, send a GET request to the `/2012-09-25/presets`
resource.
"""
uri = '/2012-09-25/presets'
return self.make_request('GET', uri, expected_status=200)
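    # Illustrative sketch for list_pipelines/list_presets above: both calls
    # take no arguments and return the decoded JSON body, so results can be
    # inspected directly.  The 'Presets' key and the 'Id'/'Name' fields are
    # assumed from the service response format, not defined in this module.
    #
    #   for preset in conn.list_presets().get('Presets', []):
    #       print(preset['Id'], preset['Name'])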
def read_job(self, id=None):
"""
To get detailed information about a job, send a GET request to
the `/2012-09-25/jobs/ [jobId] ` resource.
:type id: string
:param id: The identifier of the job for which you want to get detailed
information.
"""
uri = '/2012-09-25/jobs/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def read_pipeline(self, id=None):
"""
To get detailed information about a pipeline, send a GET
request to the `/2012-09-25/pipelines/ [pipelineId] `
resource.
:type id: string
:param id: The identifier of the pipeline to read.
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def read_preset(self, id=None):
"""
To get detailed information about a preset, send a GET request
to the `/2012-09-25/presets/ [presetId] ` resource.
:type id: string
:param id: The identifier of the preset for which you want to get
detailed information.
"""
uri = '/2012-09-25/presets/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def test_role(self, role=None, input_bucket=None, output_bucket=None,
topics=None):
"""
To test the IAM role that's used by Elastic Transcoder to
create the pipeline, send a POST request to the
`/2012-09-25/roleTests` resource.
The `TestRole` action lets you determine whether the IAM role
you are using has sufficient permissions to let Elastic
Transcoder perform tasks associated with the transcoding
process. The action attempts to assume the specified IAM role,
checks read access to the input and output buckets, and tries
to send a test notification to Amazon SNS topics that you
specify.
:type role: string
:param role: The IAM Amazon Resource Name (ARN) for the role that you
want Elastic Transcoder to test.
:type input_bucket: string
:param input_bucket: The Amazon S3 bucket that contains media files to
be transcoded. The action attempts to read from this bucket.
:type output_bucket: string
:param output_bucket: The Amazon S3 bucket that Elastic Transcoder will
write transcoded media files to. The action attempts to read from
this bucket.
:type topics: list
:param topics: The ARNs of one or more Amazon Simple Notification
Service (Amazon SNS) topics that you want the action to send a test
notification to.
"""
uri = '/2012-09-25/roleTests'
params = {}
if role is not None:
params['Role'] = role
if input_bucket is not None:
params['InputBucket'] = input_bucket
if output_bucket is not None:
params['OutputBucket'] = output_bucket
if topics is not None:
params['Topics'] = topics
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
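    # Illustrative sketch for test_role above: checking a role before creating
    # a pipeline that uses it.  The ARNs and bucket names are placeholders,
    # and the 'Success'/'Messages' response keys are assumed from the
    # roleTests documentation.
    #
    #   result = conn.test_role(
    #       role='arn:aws:iam::111122223333:role/Elastic_Transcoder_Default_Role',
    #       input_bucket='example-input-bucket',
    #       output_bucket='example-output-bucket',
    #       topics=['arn:aws:sns:us-east-1:111122223333:transcode-status'])
    #   if result.get('Success') != 'true':
    #       print('; '.join(result.get('Messages', [])))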
def update_pipeline(self, id, name=None, input_bucket=None, role=None,
notifications=None, content_config=None,
thumbnail_config=None):
"""
        To update settings for a pipeline, send a PUT request to the
        `/2012-09-25/pipelines/ [pipelineId] ` resource.
        :type id: string
        :param id: The ID of the pipeline that you want to update.
        :type name: string
        :param name: The name of the pipeline. We recommend that the name be
            unique within the AWS account, but uniqueness is not enforced.
        :type input_bucket: string
        :param input_bucket: The Amazon S3 bucket in which you saved the media
            files that you want to transcode.
        :type role: string
        :param role: The IAM Amazon Resource Name (ARN) for the role that you
            want Elastic Transcoder to use to transcode jobs for this
            pipeline.
        :type notifications: dict
        :param notifications: The Amazon Simple Notification Service (Amazon
            SNS) topic settings, if any, that you want Elastic Transcoder to
            use to report job status.
        :type content_config: dict
        :param content_config: The optional `ContentConfig` object that
            specifies information about the Amazon S3 bucket in which you
            want Elastic Transcoder to save transcoded files and playlists.
        :type thumbnail_config: dict
        :param thumbnail_config: The `ThumbnailConfig` object that specifies
            information about the Amazon S3 bucket in which you want Elastic
            Transcoder to save thumbnail files.
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
params = {}
if name is not None:
params['Name'] = name
if input_bucket is not None:
params['InputBucket'] = input_bucket
if role is not None:
params['Role'] = role
if notifications is not None:
params['Notifications'] = notifications
if content_config is not None:
params['ContentConfig'] = content_config
if thumbnail_config is not None:
params['ThumbnailConfig'] = thumbnail_config
return self.make_request('PUT', uri, expected_status=200,
data=json.dumps(params))
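    # Illustrative sketch for update_pipeline above: renaming a pipeline and
    # replacing its notification topics.  Arguments left as None are simply
    # omitted from the request body; how the service treats omitted fields is
    # up to the service, not this client.  The pipeline ID and topic ARN are
    # placeholders.
    #
    #   conn.update_pipeline(
    #       '1111111111111-abcde1',
    #       name='renamed-pipeline',
    #       notifications={'Progressing': '', 'Warning': '', 'Error': '',
    #                      'Completed': 'arn:aws:sns:us-east-1:111122223333:done'})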
def update_pipeline_notifications(self, id=None, notifications=None):
"""
To update Amazon Simple Notification Service (Amazon SNS)
notifications for a pipeline, send a POST request to the
`/2012-09-25/pipelines/ [pipelineId] /notifications` resource.
When you update notifications for a pipeline, Elastic
Transcoder returns the values that you specified in the
request.
:type id: string
:param id: The identifier of the pipeline for which you want to change
notification settings.
:type notifications: dict
:param notifications:
The topic ARN for the Amazon Simple Notification Service (Amazon SNS)
topic that you want to notify to report job status.
To receive notifications, you must also subscribe to the new topic in
the Amazon SNS console.
+ **Progressing**: The topic ARN for the Amazon Simple Notification
Service (Amazon SNS) topic that you want to notify when Elastic
Transcoder has started to process jobs that are added to this
pipeline. This is the ARN that Amazon SNS returned when you created
the topic.
+ **Completed**: The topic ARN for the Amazon SNS topic that you want
to notify when Elastic Transcoder has finished processing a job.
This is the ARN that Amazon SNS returned when you created the
topic.
+ **Warning**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters a warning condition. This
is the ARN that Amazon SNS returned when you created the topic.
+ **Error**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters an error condition. This
is the ARN that Amazon SNS returned when you created the topic.
"""
uri = '/2012-09-25/pipelines/{0}/notifications'.format(id)
params = {}
if id is not None:
params['Id'] = id
if notifications is not None:
params['Notifications'] = notifications
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
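    # Illustrative sketch for update_pipeline_notifications above: the
    # Notifications dict uses the four topic keys described in the docstring;
    # an empty string is assumed to clear a notification.  The pipeline ID and
    # topic ARNs are placeholders.
    #
    #   conn.update_pipeline_notifications(
    #       '1111111111111-abcde1',
    #       notifications={
    #           'Progressing': '',
    #           'Completed': 'arn:aws:sns:us-east-1:111122223333:transcode-complete',
    #           'Warning': '',
    #           'Error': 'arn:aws:sns:us-east-1:111122223333:transcode-error',
    #       })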
def update_pipeline_status(self, id=None, status=None):
"""
To pause or reactivate a pipeline, so the pipeline stops or
restarts processing jobs, update the status for the pipeline.
Send a POST request to the `/2012-09-25/pipelines/
[pipelineId] /status` resource.
Changing the pipeline status is useful if you want to cancel
one or more jobs. You can't cancel jobs after Elastic
Transcoder has started processing them; if you pause the
pipeline to which you submitted the jobs, you have more time
to get the job IDs for the jobs that you want to cancel, and
to send a CancelJob request.
:type id: string
:param id: The identifier of the pipeline to update.
:type status: string
:param status:
The desired status of the pipeline:
+ `Active`: The pipeline is processing jobs.
+ `Paused`: The pipeline is not currently processing jobs.
"""
uri = '/2012-09-25/pipelines/{0}/status'.format(id)
params = {}
if id is not None:
params['Id'] = id
if status is not None:
params['Status'] = status
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
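    # Illustrative sketch for update_pipeline_status above: pausing a pipeline
    # so queued jobs can be cancelled, then reactivating it.  The cancel step
    # is only indicated in a comment because no cancel_job method appears in
    # this excerpt; the pipeline ID is a placeholder.
    #
    #   conn.update_pipeline_status('1111111111111-abcde1', status='Paused')
    #   # ... find and cancel the jobs you no longer want ...
    #   conn.update_pipeline_status('1111111111111-abcde1', status='Active')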
def make_request(self, verb, resource, headers=None, data='',
expected_status=None, params=None):
if headers is None:
headers = {}
        # Forward query-string parameters as well; without this the paging and
        # filtering arguments assembled by the list_* methods are dropped.
        response = AWSAuthConnection.make_request(
            self, verb, resource, headers=headers, data=data, params=params)
body = json.load(response)
if response.status == expected_status:
return body
else:
error_type = response.getheader('x-amzn-ErrorType').split(':')[0]
error_class = self._faults.get(error_type, self.ResponseError)
raise error_class(response.status, response.reason, body)
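    # Illustrative sketch of the error path in make_request above: when the
    # HTTP status does not match expected_status, the fault class mapped from
    # the x-amzn-ErrorType header is raised.  Catching the connection's
    # ResponseError assumes, as is typical in boto, that the mapped fault
    # classes derive from it.
    #
    #   try:
    #       conn.read_pipeline('0000000000000-00000x')   # hypothetical, nonexistent ID
    #   except conn.ResponseError as err:
    #       print(err.status, err.reason)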
| mit | 2,844,535,781,348,419,600 | 43.616368 | 85 | 0.62806 | false |