repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
IKholopov/HackUPC2017 | hackupc/env/lib/python3.5/site-packages/social_core/backends/odnoklassniki.py | 4 | 7024 |
"""
Odnoklassniki OAuth2 and Iframe Application backends, docs at:
https://python-social-auth.readthedocs.io/en/latest/backends/odnoklassnikiru.html
"""
from hashlib import md5
from six.moves.urllib_parse import unquote
from .base import BaseAuth
from .oauth import BaseOAuth2
from ..exceptions import AuthFailed
class OdnoklassnikiOAuth2(BaseOAuth2):
"""Odnoklassniki authentication backend"""
name = 'odnoklassniki-oauth2'
ID_KEY = 'uid'
ACCESS_TOKEN_METHOD = 'POST'
SCOPE_SEPARATOR = ';'
AUTHORIZATION_URL = 'https://connect.ok.ru/oauth/authorize'
ACCESS_TOKEN_URL = 'https://api.ok.ru/oauth/token.do'
EXTRA_DATA = [('refresh_token', 'refresh_token'),
('expires_in', 'expires')]
def get_user_details(self, response):
"""Return user details from Odnoklassniki request"""
fullname, first_name, last_name = self.get_user_names(
fullname=unquote(response['name']),
first_name=unquote(response['first_name']),
last_name=unquote(response['last_name'])
)
return {
'username': response['uid'],
'email': response.get('email', ''),
'fullname': fullname,
'first_name': first_name,
'last_name': last_name
}
def user_data(self, access_token, *args, **kwargs):
"""Return user data from Odnoklassniki REST API"""
data = {'access_token': access_token, 'method': 'users.getCurrentUser'}
key, secret = self.get_key_and_secret()
public_key = self.setting('PUBLIC_NAME')
return odnoklassniki_api(self, data, 'https://api.ok.ru/',
public_key, secret, 'oauth')
class OdnoklassnikiApp(BaseAuth):
"""Odnoklassniki iframe app authentication backend"""
name = 'odnoklassniki-app'
ID_KEY = 'uid'
def extra_data(self, user, uid, response, details=None, *args, **kwargs):
return dict([(key, value) for key, value in response.items()
if key in response['extra_data_list']])
def get_user_details(self, response):
fullname, first_name, last_name = self.get_user_names(
fullname=unquote(response['name']),
first_name=unquote(response['first_name']),
last_name=unquote(response['last_name'])
)
return {
'username': response['uid'],
'email': '',
'fullname': fullname,
'first_name': first_name,
'last_name': last_name
}
def auth_complete(self, *args, **kwargs):
self.verify_auth_sig()
response = self.get_response()
fields = ('uid', 'first_name', 'last_name', 'name') + \
self.setting('EXTRA_USER_DATA_LIST', ())
data = {
'method': 'users.getInfo',
'uids': '{0}'.format(response['logged_user_id']),
'fields': ','.join(fields),
}
client_key, client_secret = self.get_key_and_secret()
public_key = self.setting('PUBLIC_NAME')
details = odnoklassniki_api(self, data, response['api_server'],
public_key, client_secret,
'iframe_nosession')
if len(details) == 1 and 'uid' in details[0]:
details = details[0]
auth_data_fields = self.setting('EXTRA_AUTH_DATA_LIST',
('api_server', 'apiconnection',
'session_key', 'authorized',
'session_secret_key'))
for field in auth_data_fields:
details[field] = response[field]
details['extra_data_list'] = fields + auth_data_fields
kwargs.update({'backend': self, 'response': details})
else:
raise AuthFailed(self, 'Cannot get user details: API error')
return self.strategy.authenticate(*args, **kwargs)
def get_auth_sig(self):
secret_key = self.setting('SECRET')
hash_source = '{0:s}{1:s}{2:s}'.format(self.data['logged_user_id'],
self.data['session_key'],
secret_key)
return md5(hash_source.encode('utf-8')).hexdigest()
def get_response(self):
fields = ('logged_user_id', 'api_server', 'application_key',
'session_key', 'session_secret_key', 'authorized',
'apiconnection')
return dict((name, self.data[name]) for name in fields
if name in self.data)
def verify_auth_sig(self):
correct_key = self.get_auth_sig()
key = self.data['auth_sig'].lower()
if correct_key != key:
raise AuthFailed(self, 'Wrong authorization key')
def odnoklassniki_oauth_sig(data, client_secret):
"""
    Calculates the signature of the request data; the access_token value must
    be included. The algorithm is described at
https://apiok.ru/wiki/pages/viewpage.action?pageId=12878032,
search for "little bit different way"
"""
suffix = md5(
'{0:s}{1:s}'.format(data['access_token'],
client_secret).encode('utf-8')
).hexdigest()
check_list = sorted(['{0:s}={1:s}'.format(key, value)
for key, value in data.items()
if key != 'access_token'])
return md5((''.join(check_list) + suffix).encode('utf-8')).hexdigest()
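# Worked illustration (not part of the original module; the token and secret
# values are made up): for data = {'access_token': 'tok',
# 'application_key': 'PUBKEY', 'format': 'JSON',
# 'method': 'users.getCurrentUser'} and client_secret 'app_secret', the
# function computes
#
#   suffix = md5('tok' + 'app_secret').hexdigest()
#   sig    = md5('application_key=PUBKEY' + 'format=JSON'
#                + 'method=users.getCurrentUser' + suffix).hexdigest()
#
# i.e. every parameter except access_token is sorted, concatenated as
# key=value pairs, and suffixed with md5(access_token + client_secret).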
def odnoklassniki_iframe_sig(data, client_secret_or_session_secret):
"""
Calculates signature as described at:
https://apiok.ru/wiki/display/ok/Authentication+and+Authorization
If API method requires session context, request is signed with session
secret key. Otherwise it is signed with application secret key
"""
param_list = sorted(['{0:s}={1:s}'.format(key, value)
for key, value in data.items()])
return md5(
(''.join(param_list) + client_secret_or_session_secret).encode('utf-8')
).hexdigest()
def odnoklassniki_api(backend, data, api_url, public_key, client_secret,
request_type='oauth'):
"""Calls Odnoklassniki REST API method
https://apiok.ru/wiki/display/ok/Odnoklassniki+Rest+API"""
data.update({
'application_key': public_key,
'format': 'JSON'
})
if request_type == 'oauth':
data['sig'] = odnoklassniki_oauth_sig(data, client_secret)
elif request_type == 'iframe_session':
data['sig'] = odnoklassniki_iframe_sig(data,
data['session_secret_key'])
elif request_type == 'iframe_nosession':
data['sig'] = odnoklassniki_iframe_sig(data, client_secret)
else:
msg = 'Unknown request type {0}. How should it be signed?'
raise AuthFailed(backend, msg.format(request_type))
return backend.get_json(api_url + 'fb.do', params=data)
| apache-2.0 | -4,009,894,273,014,803,500 | 39.837209 | 85 | 0.566486 | false |
jtoppins/beaker | Server/bkr/server/pools.py | 1 | 18936 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import re
from flask import jsonify, request
from bkr.server import identity
from bkr.server.app import app
from bkr.server.model import System, SystemPool, SystemAccessPolicy, \
SystemAccessPolicyRule, User, Group, SystemPermission, Activity
from bkr.server.flask_util import auth_required, \
convert_internal_errors, read_json_request, BadRequest400, \
Forbidden403, MethodNotAllowed405, NotFound404, Conflict409, \
UnsupportedMediaType415, request_wants_json, render_tg_template, \
json_collection
from bkr.server.util import absolute_url
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm.exc import NoResultFound
from turbogears.database import session
from bkr.server.systems import _get_system_by_FQDN, _edit_access_policy_rules
import datetime
from bkr.server.bexceptions import DatabaseLookupError
@app.route('/pools/', methods=['GET'])
def get_pools():
"""
Returns a pageable JSON collection of system pools in Beaker.
Refer to :ref:`pageable-json-collections`.
The following fields are supported for filtering and sorting:
``id``
ID of the pool.
``name``
Name of the pool.
``owner.user_name``
Username of the pool owner (if the pool is owned by a user rather than
by a group).
``owner.group_name``
Name of the pool's owning group (if the pool is owned by a group rather
than by a user).
"""
query = SystemPool.query.order_by(SystemPool.name)
# join User and Group for sorting/filtering and also for eager loading
query = query\
.outerjoin(SystemPool.owning_user)\
.options(contains_eager(SystemPool.owning_user))\
.outerjoin(SystemPool.owning_group)\
.options(contains_eager(SystemPool.owning_group))
json_result = json_collection(query, columns={
'id': SystemPool.id,
'name': SystemPool.name,
'owner.user_name': User.user_name,
'owner.group_name': Group.group_name,
})
if request_wants_json():
return jsonify(json_result)
return render_tg_template('bkr.server.templates.backgrid', {
'title': u'Pools',
'grid_collection_type': 'SystemPools',
'grid_collection_data': json_result,
'grid_collection_url': request.path,
'grid_view_type': 'PoolsView',
'grid_add_label': 'Create',
'grid_add_view_type': 'PoolCreateModal' if not identity.current.anonymous else 'null',
})
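# Illustrative client-side call (not part of this module; the host name is
# made up, and the 'entries' key reflects the usual shape assumed for Beaker's
# pageable JSON collections):
#
#   import requests
#   resp = requests.get('https://beaker.example.com/pools/',
#                       headers={'Accept': 'application/json'})
#   for pool in resp.json().get('entries', []):
#       print pool['name']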
_typeahead_split_pattern = re.compile(r'[-\s]+')
@app.route('/pools/+typeahead')
def pools_typeahead():
if 'q' in request.args:
pools = SystemPool.query.filter(SystemPool.name.like('%%%s%%' % request.args['q']))
else:
pools = SystemPool.query
data = [{'name': pool.name, 'tokens': _typeahead_split_pattern.split(pool.name.strip())}
for pool in pools.values(SystemPool.name)]
return jsonify(data=data)
def _get_pool_by_name(pool_name, lockmode=False):
"""Get system pool by name, reporting HTTP 404 if the system pool is not found"""
try:
return SystemPool.by_name(pool_name, lockmode)
except NoResultFound:
raise NotFound404('System pool %s does not exist' % pool_name)
@app.route('/pools/<pool_name>/', methods=['GET'])
def get_pool(pool_name):
"""
Provides detailed information about a system pool in JSON format.
:param pool_name: System pool's name.
"""
pool = _get_pool_by_name(pool_name)
if request_wants_json():
return jsonify(pool.__json__())
return render_tg_template('bkr.server.templates.system_pool', {
'title': pool.name,
'system_pool': pool,
})
def _get_owner(data):
if data is None:
data = {}
user_name = data.get('user_name')
group_name = data.get('group_name')
if user_name and group_name:
        raise Forbidden403('System pool can have either a user or a group as owner')
if user_name:
owner = User.by_user_name(user_name)
if owner is None:
raise BadRequest400('No such user %s' % user_name)
owner_type = 'user'
if group_name:
try:
owner = Group.by_name(group_name)
except NoResultFound:
raise BadRequest400('No such group %r' % group_name)
owner_type = 'group'
return owner, owner_type
@app.route('/pools/', methods=['POST'])
@auth_required
def create_pool():
"""
Creates a new system pool in Beaker. The request must be
:mimetype:`application/x-www-form-urlencoded` or
:mimetype:`application/json`.
:jsonparam string name: Name for the system pool.
:jsonparam string description: Description of the system pool.
:jsonparam object owner: JSON object containing a ``user_name`` key or
``group_name`` key identifying the owner for the system pool.
:status 201: The system pool was successfully created.
"""
owner = None
description = None
u = identity.current.user
if request.json:
if 'name' not in request.json:
raise BadRequest400('Missing pool name key')
new_name = request.json['name']
if 'owner' in request.json:
owner = request.json['owner']
if 'description' in request.json:
description = request.json['description']
elif request.form:
if 'name' not in request.form:
raise BadRequest400('Missing pool name parameter')
new_name = request.form['name']
if 'owner' in request.form:
owner = request.form['owner']
if 'description' in request.form:
description = request.form['description']
else:
raise UnsupportedMediaType415
with convert_internal_errors():
if SystemPool.query.filter(SystemPool.name == new_name).count() != 0:
raise Conflict409('System pool with name %r already exists' % new_name)
pool = SystemPool(name=new_name, description=description)
session.add(pool)
if owner:
owner, owner_type = _get_owner(owner)
if owner_type == 'user':
pool.owning_user = owner
else:
pool.owning_group = owner
else:
pool.owning_user = u
        # new system pools are visible to everybody by default
pool.access_policy = SystemAccessPolicy()
pool.access_policy.add_rule(SystemPermission.view, everybody=True)
pool.record_activity(user=u, service=u'HTTP',
action=u'Created', field=u'Pool',
new=unicode(pool))
response = jsonify(pool.__json__())
response.status_code = 201
response.headers.add('Location', absolute_url(pool.href))
return response
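# Illustrative request for the endpoint above (pool and group names are made
# up):
#
#   POST /pools/ HTTP/1.1
#   Content-Type: application/json
#
#   {"name": "lab-pool",
#    "description": "Machines reserved for the lab",
#    "owner": {"group_name": "lab-admins"}}
#
# A successful call answers 201 with the pool's JSON representation and a
# Location header pointing at the new pool.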
@app.route('/pools/<pool_name>/', methods=['PATCH'])
@auth_required
def update_pool(pool_name):
"""
Updates attributes of an existing system pool. The request body must be a JSON
object containing one or more of the following keys.
:param pool_name: System pool's name.
:jsonparam string name: New name for the system pool.
:jsonparam string description: Description of the system pool.
:jsonparam object owner: JSON object containing a ``user_name`` key or
``group_name`` key identifying the new owner for the system pool.
:status 200: System pool was updated.
:status 400: Invalid data was given.
"""
pool = _get_pool_by_name(pool_name)
if not pool.can_edit(identity.current.user):
raise Forbidden403('Cannot edit system pool')
data = read_json_request(request)
# helper for recording activity below
def record_activity(field, old, new, action=u'Changed'):
pool.record_activity(user=identity.current.user, service=u'HTTP',
action=action, field=field, old=old, new=new)
with convert_internal_errors():
renamed = False
if 'name' in data:
new_name = data['name']
if new_name != pool.name:
if SystemPool.query.filter(SystemPool.name == new_name).count():
raise Conflict409('System pool %s already exists' % new_name)
record_activity(u'Name', pool.name, new_name)
pool.name = new_name
renamed = True
if 'description' in data:
new_description = data['description']
if new_description != pool.description:
record_activity(u'Description', pool.description, new_description)
pool.description = new_description
if 'owner' in data:
new_owner, owner_type = _get_owner(data['owner'])
if owner_type == 'user':
pool.change_owner(user=new_owner)
else:
pool.change_owner(group=new_owner)
response = jsonify(pool.__json__())
if renamed:
response.headers.add('Location', absolute_url(pool.href))
return response
# For compat only. Separate function so that it doesn't appear in the docs.
@app.route('/pools/<pool_name>/', methods=['POST'])
def update_system_pool_post(pool_name):
return update_pool(pool_name)
@app.route('/pools/<pool_name>/systems/', methods=['POST'])
@auth_required
def add_system_to_pool(pool_name):
"""
Add a system to a system pool
:param pool_name: System pool's name.
:jsonparam fqdn: System's fully-qualified domain name.
"""
u = identity.current.user
data = read_json_request(request)
pool = _get_pool_by_name(pool_name, lockmode='update')
if 'fqdn' not in data:
raise BadRequest400('System FQDN not specified')
try:
system = System.by_fqdn(data['fqdn'], u)
except DatabaseLookupError:
raise BadRequest400("System '%s' does not exist" % data['fqdn'])
if not pool in system.pools:
if pool.can_edit(u) and system.can_edit(u):
system.record_activity(user=u, service=u'HTTP',
action=u'Added', field=u'Pool',
old=None,
new=unicode(pool))
system.pools.append(pool)
system.date_modified = datetime.datetime.utcnow()
pool.record_activity(user=u, service=u'HTTP',
action=u'Added', field=u'System', old=None,
new=unicode(system))
else:
if not pool.can_edit(u):
raise Forbidden403('You do not have permission to '
'add systems to pool %s' % pool.name)
if not system.can_edit(u):
raise Forbidden403('You do not have permission to '
'modify system %s' % system.fqdn)
return '', 204
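# Illustrative request (the FQDN and pool name are made up):
#
#   POST /pools/lab-pool/systems/ HTTP/1.1
#   Content-Type: application/json
#
#   {"fqdn": "host01.lab.example.com"}
#
# The call answers 204 whether the system was newly added or already in the
# pool, provided the caller may edit both the pool and the system.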
@app.route('/pools/<pool_name>/systems/', methods=['DELETE'])
@auth_required
def remove_system_from_pool(pool_name):
"""
Remove a system from a system pool
:param pool_name: System pool's name.
:queryparam fqdn: System's fully-qualified domain name
"""
if 'fqdn' not in request.args:
raise MethodNotAllowed405
fqdn = request.args['fqdn']
system = _get_system_by_FQDN(fqdn)
u = identity.current.user
pool = _get_pool_by_name(pool_name, lockmode='update')
if pool in system.pools:
if pool.can_edit(u) or system.can_edit(u):
if system.active_access_policy == pool.access_policy:
system.active_access_policy = system.custom_access_policy
system.record_activity(user=u, service=u'HTTP',
field=u'Active Access Policy',
action=u'Changed',
old = pool.access_policy,
new = system.custom_access_policy)
system.pools.remove(pool)
system.record_activity(user=u, service=u'HTTP',
action=u'Removed', field=u'Pool', old=unicode(pool), new=None)
system.date_modified = datetime.datetime.utcnow()
pool.record_activity(user=u, service=u'HTTP',
action=u'Removed', field=u'System', old=unicode(system), new=None)
else:
            raise Forbidden403('You do not have permission to modify system %s '
                               'or remove systems from pool %s' % (system.fqdn, pool.name))
else:
raise BadRequest400('System %s is not in pool %s' % (system.fqdn, pool.name))
return '', 204
@app.route('/pools/<pool_name>/access-policy/', methods=['GET'])
def get_access_policy(pool_name):
"""
Get access policy for pool
:param pool_name: System pool's name.
"""
pool = _get_pool_by_name(pool_name)
rules = pool.access_policy.rules
return jsonify({
'id': pool.access_policy.id,
'rules': [
{'id': rule.id,
'user': rule.user.user_name if rule.user else None,
'group': rule.group.group_name if rule.group else None,
'everybody': rule.everybody,
'permission': unicode(rule.permission)}
for rule in rules],
'possible_permissions': [
{'value': unicode(permission),
'label': unicode(permission.label)}
for permission in SystemPermission],
})
@app.route('/pools/<pool_name>/access-policy/', methods=['POST', 'PUT'])
@auth_required
def save_access_policy(pool_name):
"""
Updates the access policy for a system pool.
:param pool_name: System pool's name.
:jsonparam array rules: List of rules to include in the new policy. This
replaces all existing rules in the policy. Each rule is a JSON object
with ``user``, ``group``, and ``everybody`` keys.
"""
pool = _get_pool_by_name(pool_name)
if not pool.can_edit_policy(identity.current.user):
raise Forbidden403('Cannot edit system pool policy')
data = read_json_request(request)
_edit_access_policy_rules(pool, pool.access_policy, data['rules'])
return jsonify(pool.access_policy.__json__())
@app.route('/pools/<pool_name>/access-policy/rules/', methods=['POST'])
@auth_required
def add_access_policy_rule(pool_name):
"""
Adds a new rule to the access policy for a system pool. Each rule in the policy
grants a permission to a single user, a group of users, or to everybody.
See :ref:`system-access-policies-api` for a description of the expected JSON parameters.
:param pool_name: System pool's name.
"""
pool = _get_pool_by_name(pool_name)
if not pool.can_edit_policy(identity.current.user):
raise Forbidden403('Cannot edit system pool policy')
policy = pool.access_policy
rule = read_json_request(request)
if rule.get('user', None):
user = User.by_user_name(rule['user'])
if not user:
raise BadRequest400("User '%s' does not exist" % rule['user'])
else:
user = None
if rule.get('group', None):
try:
group = Group.by_name(rule['group'])
except NoResultFound:
raise BadRequest400("Group '%s' does not exist" % rule['group'])
else:
group = None
try:
permission = SystemPermission.from_string(rule['permission'])
except ValueError:
raise BadRequest400('Invalid permission')
new_rule = policy.add_rule(user=user, group=group,
everybody=rule['everybody'],
permission=permission)
pool.record_activity(user=identity.current.user, service=u'HTTP',
field=u'Access Policy Rule', action=u'Added',
new=repr(new_rule))
return '', 204
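# Illustrative rule payload (names are made up; the permission string must be
# one of the SystemPermission values):
#
#   POST /pools/lab-pool/access-policy/rules/ HTTP/1.1
#   Content-Type: application/json
#
#   {"user": null, "group": "lab-admins", "everybody": false,
#    "permission": "edit_system"}
#
# Exactly one of user, group or everybody should identify the grantee; the
# call answers 204 on success.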
@app.route('/pools/<pool_name>/access-policy/rules/', methods=['DELETE'])
@auth_required
def delete_access_policy_rules(pool_name):
"""
Deletes one or more matching rules from a system pool's access policy.
See :ref:`system-access-policies-api` for description of the expected query parameters
:param pool_name: System pool's name.
"""
pool = _get_pool_by_name(pool_name)
if not pool.can_edit_policy(identity.current.user):
raise Forbidden403('Cannot edit system policy')
policy = pool.access_policy
query = SystemAccessPolicyRule.query.filter(SystemAccessPolicyRule.policy == policy)
if 'permission' in request.args:
query = query.filter(SystemAccessPolicyRule.permission.in_(
request.args.getlist('permission', type=SystemPermission.from_string)))
else:
raise MethodNotAllowed405
if 'user' in request.args:
query = query.join(SystemAccessPolicyRule.user)\
.filter(User.user_name.in_(request.args.getlist('user')))
elif 'group' in request.args:
query = query.join(SystemAccessPolicyRule.group)\
.filter(Group.group_name.in_(request.args.getlist('group')))
elif 'everybody' in request.args:
query = query.filter(SystemAccessPolicyRule.everybody)
else:
raise MethodNotAllowed405
for rule in query:
rule.record_deletion(service=u'HTTP')
session.delete(rule)
return '', 204
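# Illustrative query (parameter values are made up): drop every rule granting
# a given permission to one group.
#
#   DELETE /pools/lab-pool/access-policy/rules/?permission=reserve&group=lab-admins HTTP/1.1
#
# The permission parameter is mandatory, and exactly one of user, group or
# everybody must be supplied; otherwise the view answers 405 as coded above.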
@app.route('/pools/<pool_name>/', methods=['DELETE'])
@auth_required
def delete_pool(pool_name):
"""
Deletes a system pool
:param pool_name: System pool's name
"""
pool = _get_pool_by_name(pool_name, lockmode='update')
u = identity.current.user
if not pool.can_edit(u):
raise Forbidden403('Cannot delete pool %s' % pool_name)
systems = System.query.filter(System.pools.contains(pool))
System.record_bulk_activity(systems, user=identity.current.user,
service=u'HTTP', action=u'Removed',
field=u'Pool',
old=unicode(pool),
new=None)
# Since we are deleting the pool, we will have to change the active
# access policy for all systems using the pool's policy to their
# custom policy
systems = System.query.filter(System.active_access_policy == pool.access_policy)
for system in systems:
system.active_access_policy = system.custom_access_policy
System.record_bulk_activity(systems, user=identity.current.user,
service=u'HTTP',
field=u'Active Access Policy', action=u'Changed',
old = 'Pool policy: %s' % pool_name,
new = 'Custom access policy')
session.delete(pool)
activity = Activity(u, u'HTTP', u'Deleted', u'Pool', pool_name)
session.add(activity)
return '', 204
| gpl-2.0 | -1,867,289,688,546,506,000 | 38.865263 | 97 | 0.617448 | false |
rdelval/aurora | src/test/python/apache/thermos/core/test_runner_integration.py | 14 | 6126 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from textwrap import dedent
from apache.thermos.config.schema import Process, Resources, SequentialTask, Task, Tasks
from apache.thermos.testing.runner import RunnerTestBase
from gen.apache.thermos.ttypes import ProcessState, TaskState
class TestRunnerBasic(RunnerTestBase):
portmap = {'named_port': 8123} # noqa
@classmethod
def task(cls):
hello_template = Process(cmdline="echo 1")
t1 = hello_template(name="t1", cmdline="echo 1 port {{thermos.ports[named_port]}}")
t2 = hello_template(name="t2")
t3 = hello_template(name="t3")
t4 = hello_template(name="t4")
t5 = hello_template(name="t5")
t6 = hello_template(name="t6")
tsk = Task(name="complex", processes=[t1, t2, t3, t4, t5, t6])
    # three waves of tasks: (t1, t2), (t3, t4), (t5, t6)
tsk = tsk(constraints=[
{'order': ['t1', 't3']},
{'order': ['t1', 't4']},
{'order': ['t2', 't3']},
{'order': ['t2', 't4']},
{'order': ['t3', 't5']},
{'order': ['t3', 't6']},
{'order': ['t4', 't5']},
{'order': ['t4', 't6']}])
return tsk
def test_runner_state_success(self):
assert self.state.statuses[-1].state == TaskState.SUCCESS
def test_runner_header_populated(self):
header = self.state.header
assert header is not None, 'header should be populated.'
assert header.task_id == self.runner.task_id, 'header task id must be set!'
assert header.sandbox == os.path.join(self.runner.tempdir, 'sandbox', header.task_id), (
'header sandbox must be set!')
assert header.hostname, 'header task replica id must be set!'
assert header.launch_time_ms, 'header launch time must be set'
def test_runner_has_allocated_name_ports(self):
ports = self.state.header.ports
assert 'named_port' in ports, 'ephemeral port was either not allocated, or not checkpointed!'
assert ports['named_port'] == 8123
def test_runner_has_expected_processes(self):
processes = self.state.processes
process_names = set(['t%d' % k for k in range(1, 7)])
actual_process_names = set(processes.keys())
assert process_names == actual_process_names, "runner didn't run expected set of processes!"
for process in processes:
assert processes[process][-1].process == process
def test_runner_processes_have_expected_output(self):
for process in self.state.processes:
history = self.state.processes[process]
assert history[-1].state == ProcessState.SUCCESS
if len(history) > 1:
for run in range(len(history) - 1):
assert history[run].state != ProcessState.SUCCESS, (
"nonterminal processes must not be in SUCCESS state!")
def test_runner_processes_have_monotonically_increasing_timestamps(self):
for process in self.state.processes:
for run in self.state.processes[process]:
assert run.fork_time < run.start_time
assert run.start_time < run.stop_time
class TestConcurrencyBasic(RunnerTestBase):
@classmethod
def task(cls):
hello_template = Process(cmdline="sleep 1")
tsk = Task(
name="complex",
processes=[hello_template(name="process1"),
hello_template(name="process2"),
hello_template(name="process3")],
resources=Resources(cpu=1.0, ram=16 * 1024 * 1024, disk=16 * 1024),
max_concurrency=1)
return tsk
def test_runner_state_success(self):
assert self.state.statuses[-1].state == TaskState.SUCCESS
# TODO(wickman) This needs a better test.
def test_runner_processes_separated_temporally_due_to_concurrency_limit(self):
runs = []
for process in self.state.processes:
assert len(self.state.processes[process]) == 1, 'Expect one run per task'
assert self.state.processes[process][0].state == ProcessState.SUCCESS
runs.append(self.state.processes[process][0].start_time)
runs.sort()
assert runs[1] - runs[0] > 1.0
assert runs[2] - runs[1] > 1.0
class TestRunnerEnvironment(RunnerTestBase):
@classmethod
def task(cls):
setup_bashrc = Process(name="setup_bashrc", cmdline=dedent(
"""
mkdir -p .profile.d
cat <<EOF > .thermos_profile
for i in .profile.d/*.sh ; do
if [ -r "\\$i" ]; then
. \\$i
fi
done
EOF
"""))
setup_foo = Process(name="setup_foo", cmdline=dedent(
"""
cat <<EOF > .profile.d/setup_foo.sh
export FOO=1
EOF
"""))
setup_bar = Process(name="setup_bar", cmdline=dedent(
"""
cat <<EOF > .profile.d/setup_bar.sh
export BAR=2
EOF
"""))
foo_recipe = SequentialTask(processes=[setup_bashrc, setup_foo])
bar_recipe = SequentialTask(processes=[setup_bashrc, setup_bar])
all_recipes = Tasks.combine(foo_recipe, bar_recipe)
run = Process(name="run", cmdline=dedent(
"""
echo $FOO $BAR > expected_output.txt
"""))
my_task = Task(processes=[run],
resources=Resources(cpu=1.0, ram=16 * 1024 * 1024, disk=16 * 1024))
return Tasks.concat(all_recipes, my_task, name="my_task")
def test_runner_state_success(self):
assert self.state.statuses[-1].state == TaskState.SUCCESS
def test_runner_processes_have_expected_output(self):
expected_output_file = os.path.join(self.runner.sandbox, self.runner.task_id,
'expected_output.txt')
assert os.path.exists(expected_output_file)
with open(expected_output_file, 'rb') as fp:
assert fp.read().strip() == b"1 2"
| apache-2.0 | -8,940,817,400,334,118,000 | 35.464286 | 97 | 0.642344 | false |
diogommartins/pox | pox/lib/packet/ethernet.py | 45 | 5486 |
# Copyright 2011,2012,2013 James McCauley
# Copyright 2008 (C) Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is derived from the packet library in NOX, which was
# developed by Nicira, Inc.
#======================================================================
# Ethernet header
#
#======================================================================
import struct
from packet_base import packet_base
from packet_utils import ethtype_to_str
from pox.lib.addresses import *
ETHER_ANY = EthAddr(b"\x00\x00\x00\x00\x00\x00")
ETHER_BROADCAST = EthAddr(b"\xff\xff\xff\xff\xff\xff")
BRIDGE_GROUP_ADDRESS = EthAddr(b"\x01\x80\xC2\x00\x00\x00")
LLDP_MULTICAST = EthAddr(b"\x01\x80\xc2\x00\x00\x0e")
PAE_MULTICAST = EthAddr(b'\x01\x80\xc2\x00\x00\x03') # 802.1x Port
# Access Entity
NDP_MULTICAST = EthAddr(b'\x01\x23\x20\x00\x00\x01') # Nicira discovery
# multicast
class ethernet(packet_base):
"Ethernet packet struct"
resolve_names = False
MIN_LEN = 14
IP_TYPE = 0x0800
ARP_TYPE = 0x0806
RARP_TYPE = 0x8035
VLAN_TYPE = 0x8100
LLDP_TYPE = 0x88cc
PAE_TYPE = 0x888e # 802.1x Port Access Entity
#MPLS_UNICAST_TYPE = 0x8847
#MPLS_MULTICAST_TYPE = 0x8848
MPLS_TYPE = 0x8847
MPLS_MC_TYPE = 0x8848 # Multicast
IPV6_TYPE = 0x86dd
PPP_TYPE = 0x880b
LWAPP_TYPE = 0x88bb
GSMP_TYPE = 0x880c
  IPX_TYPE = 0x8137
WOL_TYPE = 0x0842
TRILL_TYPE = 0x22f3
JUMBO_TYPE = 0x8870
SCSI_TYPE = 0x889a
ATA_TYPE = 0x88a2
QINQ_TYPE = 0x9100
INVALID_TYPE = 0xffff
type_parsers = {}
def __init__(self, raw=None, prev=None, **kw):
packet_base.__init__(self)
if len(ethernet.type_parsers) == 0:
from vlan import vlan
ethernet.type_parsers[ethernet.VLAN_TYPE] = vlan
from arp import arp
ethernet.type_parsers[ethernet.ARP_TYPE] = arp
ethernet.type_parsers[ethernet.RARP_TYPE] = arp
from ipv4 import ipv4
ethernet.type_parsers[ethernet.IP_TYPE] = ipv4
from ipv6 import ipv6
ethernet.type_parsers[ethernet.IPV6_TYPE] = ipv6
from lldp import lldp
ethernet.type_parsers[ethernet.LLDP_TYPE] = lldp
from eapol import eapol
ethernet.type_parsers[ethernet.PAE_TYPE] = eapol
from mpls import mpls
ethernet.type_parsers[ethernet.MPLS_TYPE] = mpls
ethernet.type_parsers[ethernet.MPLS_MC_TYPE] = mpls
from llc import llc
ethernet._llc = llc
self.prev = prev
self.dst = ETHER_ANY
self.src = ETHER_ANY
self.type = 0
self.next = b''
if raw is not None:
self.parse(raw)
self._init(kw)
def parse (self, raw):
assert isinstance(raw, bytes)
self.next = None # In case of unfinished parsing
self.raw = raw
alen = len(raw)
if alen < ethernet.MIN_LEN:
self.msg('warning eth packet data too short to parse header: data len %u'
% (alen,))
return
self.dst = EthAddr(raw[:6])
self.src = EthAddr(raw[6:12])
self.type = struct.unpack('!H', raw[12:ethernet.MIN_LEN])[0]
self.hdr_len = ethernet.MIN_LEN
self.payload_len = alen - self.hdr_len
self.next = ethernet.parse_next(self, self.type, raw, ethernet.MIN_LEN)
self.parsed = True
@staticmethod
def parse_next (prev, typelen, raw, offset=0, allow_llc=True):
parser = ethernet.type_parsers.get(typelen)
if parser is not None:
return parser(raw[offset:], prev)
elif typelen < 1536 and allow_llc:
return ethernet._llc(raw[offset:], prev)
else:
return raw[offset:]
@staticmethod
def getNameForType (ethertype):
""" Returns a string name for a numeric ethertype """
return ethtype_to_str(ethertype)
@property
def effective_ethertype (self):
return self._get_effective_ethertype(self)
@staticmethod
def _get_effective_ethertype (self):
"""
Get the "effective" ethertype of a packet.
This means that if the payload is something like a VLAN or SNAP header,
we want the type from that deeper header. This is kind of ugly here in
the packet library, but it should make user code somewhat simpler.
"""
if not self.parsed:
return ethernet.INVALID_TYPE
if self.type == ethernet.VLAN_TYPE or type(self.payload) == ethernet._llc:
try:
return self.payload.effective_ethertype
except:
return ethernet.INVALID_TYPE
return self.type
def _to_str(self):
s = ''.join(('[',str(EthAddr(self.src)),'>',str(EthAddr(self.dst)),' ',
ethernet.getNameForType(self.type),']'))
return s
def hdr(self, payload):
dst = self.dst
src = self.src
if type(dst) is EthAddr:
dst = dst.toRaw()
if type(src) is EthAddr:
src = src.toRaw()
return struct.pack('!6s6sH', dst, src, self.type)
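# Minimal usage sketch (not part of the original library; addresses and the
# payload bytes are placeholders):
#
#   eth = ethernet()
#   eth.src = EthAddr('00:11:22:33:44:55')
#   eth.dst = ETHER_BROADCAST
#   eth.type = ethernet.IP_TYPE
#   raw = eth.hdr(b'') + payload_bytes     # 14-byte header + payload
#   parsed = ethernet(raw)                 # re-parses header and payload
#   parsed.effective_ethertype             # unwraps VLAN/LLC headers if present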
| apache-2.0 | 6,745,358,192,403,361,000 | 29.648045 | 79 | 0.628327 | false |
Shemahmforash/playcert | playcert/lib/event.py | 1 | 1199 |
import logging
from playcert.lib.artist import Artist
from playcert.cache import cache_data_in_hash
log = logging.getLogger(__name__)
class Event(object):
def __init__(self, title, when, venue, artist_name=None, redis=None):
self.title = title
self.when = when
self.venue = venue
# to use cache in this class (not mandatory)
self.redis = redis
if artist_name:
log.debug(
'No need to find artist name, already have it from eventful %s', artist_name)
self.artist = Artist(artist_name, self.redis)
else:
# finds artist from event title
self.artist = self.find_artist()
def cache_hash_key(self):
return 'text.artist'
def cache_key(self):
return self.title
@cache_data_in_hash
def find_artist(self):
# finds and creates artist from event title
return Artist.create_artist_from_text(
self.title, self.venue, self.redis)
def __repr__(self):
return 'Event(%s, %s, %s, %s)' % \
(self.title.encode('utf-8'), self.when.encode('utf-8'),
self.venue.encode('utf-8'), self.artist)
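# Rough usage sketch (not part of the original module; the redis handle and
# event values are hypothetical): with a redis connection supplied, the
# cache_data_in_hash decorator stores find_artist() results under the hash
# named by cache_hash_key() and the field returned by cache_key(), so a second
# Event built from the same title skips the artist lookup.
#
#   event = Event('Radiohead at MEO Arena', '2016-07-20', 'MEO Arena',
#                 redis=redis_conn)
#   print event.artist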
| gpl-2.0 | -2,110,399,972,067,798,000 | 26.883721 | 93 | 0.589658 | false |
sacherjj/array_devices | baud_test.py | 1 | 2376 |
from __future__ import division
import serial
import time
from array_devices import array3710
__author__ = 'JoeSacher'
"""
This is a crude script to play with PC baud rates while the load
is set to a fixed baud rate.
"""
load_addr = 1
# This should match load
base_baud_rate = 9600
serial_conn = serial.Serial('COM4', base_baud_rate, timeout=1)
load = array3710.Load(load_addr, serial_conn)
load.remote_control = True
serial_conn.close()
# Set this to a sufficient range to cover the possible valid connections
min_rate = 3500
max_rate = 20000
print("Walking from {} to {} for {}".format(min_rate, max_rate, base_baud_rate))
for baud_rate in xrange(min_rate, max_rate, 100):
time.sleep(0.1)
serial_conn = serial.Serial('COM4', baud_rate, timeout=0.5)
try:
load = array3710.Load(load_addr, serial_conn, print_errors=False)
except IOError:
print("Baud_Rate: {} Error: Can't creating load".format(baud_rate))
else:
error_count = 0
for i in range(10):
try:
load.set_load_resistance(baud_rate/1000)
load.update_status(retry_count=1)
# print(load.voltage)
except IOError:
error_count += 1
try:
load.load_on = True
load.load_on = False
time.sleep(0.05)
except IOError:
error_count += 1
print("Baud_Rate: {} - Errors: {}".format(baud_rate, error_count))
serial_conn.close()
serial_conn = serial.Serial('COM4', base_baud_rate, timeout=1)
load = array3710.Load(load_addr, serial_conn)
load.remote_control = False
serial_conn.close()
"""
Results for both of my loads.
I found the multiple baud responses when the load was set at 9600 very interesting.
When I get time, I want to scope the wild mismatches and see what is going on.
4800(L1): 4700-5200 (All 0 errors)
4800(L2): 4700-5200 (All 0 errors)
9600(L1): 4000-4600, 5300-6400, 7600-12400, 15100-17400 (All 0 errors)
9600(L2): 4000-4600, 5300-6400, 7600-12400, 15100-17400 (All 0 errors)
19200(L1): 17500-24000 (~30% with 1 error, 1 with 2 errors)
19200(L2): 17500-24000 (~20% with 1 error, 5% with 2 errors)
38400(L1): 35900-44200 (Errors of 1-4 throughout range, pretty evenly spread, 20% with 0 errors)
38400(L2): 35900-44200 (same distribution of errors as L1 at this baud rate)
""" | mit | -1,785,297,088,008,296,200 | 31.121622 | 96 | 0.651936 | false |
popazerty/blackhole-vuplus | lib/python/Components/MediaPlayer.py | 32 | 3735 |
from MenuList import MenuList
from Tools.Directories import SCOPE_CURRENT_SKIN, resolveFilename
from os import path
from enigma import eListboxPythonMultiContent, RT_VALIGN_CENTER, gFont, eServiceCenter
from Tools.LoadPixmap import LoadPixmap
import skin
STATE_PLAY = 0
STATE_PAUSE = 1
STATE_STOP = 2
STATE_REWIND = 3
STATE_FORWARD = 4
STATE_NONE = 5
class PlayList(MenuList):
def __init__(self, enableWrapAround = False):
MenuList.__init__(self, [], enableWrapAround, eListboxPythonMultiContent)
font = skin.fonts.get("PlayList", ("Regular", 18, 23))
self.l.setFont(0, gFont(font[0], font[1]))
self.l.setItemHeight(font[2])
self.currPlaying = -1
self.oldCurrPlaying = -1
self.serviceHandler = eServiceCenter.getInstance()
self.state = STATE_NONE
self.icons = [
LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_mp_play.png")),
LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_mp_pause.png")),
LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_mp_stop.png")),
LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_mp_rewind.png")),
LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_mp_forward.png")),
]
def PlaylistEntryComponent(self, serviceref, state):
res = [ serviceref ]
text = serviceref.getName()
		if text == "":
text = path.split(serviceref.getPath().split('/')[-1])[1]
x, y, w, h = skin.parameters.get("PlayListName",(25, 1, 470, 22))
res.append((eListboxPythonMultiContent.TYPE_TEXT, x, y, w, h, 0, RT_VALIGN_CENTER, text))
try:
png = self.icons[state]
x, y, w, h = skin.parameters.get("PlayListIcon",(5, 3, 16, 16))
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, x, y, w, h, png))
except:
pass
return res
def clear(self):
del self.list[:]
self.l.setList(self.list)
self.currPlaying = -1
self.oldCurrPlaying = -1
def getSelection(self):
return self.l.getCurrentSelection() and self.l.getCurrentSelection()[0]
def addFile(self, serviceref):
self.list.append(self.PlaylistEntryComponent(serviceref, STATE_NONE))
def updateFile(self, index, newserviceref):
if index < len(self.list):
self.list[index] = self.PlaylistEntryComponent(newserviceref, STATE_NONE)
def deleteFile(self, index):
if self.currPlaying >= index:
self.currPlaying -= 1
del self.list[index]
def setCurrentPlaying(self, index):
self.oldCurrPlaying = self.currPlaying
self.currPlaying = index
self.moveToIndex(index)
def updateState(self, state):
self.state = state
if len(self.list) > self.oldCurrPlaying and self.oldCurrPlaying != -1:
self.list[self.oldCurrPlaying] = self.PlaylistEntryComponent(self.list[self.oldCurrPlaying][0], STATE_NONE)
if self.currPlaying != -1 and self.currPlaying < len(self.list):
self.list[self.currPlaying] = self.PlaylistEntryComponent(self.list[self.currPlaying][0], state)
self.updateList()
def isStopped(self):
return self.state in (STATE_STOP, STATE_NONE)
def playFile(self):
self.updateState(STATE_PLAY)
def pauseFile(self):
self.updateState(STATE_PAUSE)
def stopFile(self):
self.updateState(STATE_STOP)
def rewindFile(self):
self.updateState(STATE_REWIND)
def forwardFile(self):
self.updateState(STATE_FORWARD)
def updateList(self):
self.l.setList(self.list)
def getCurrentIndex(self):
return self.currPlaying
def getCurrentEvent(self):
l = self.l.getCurrentSelection()
return l and self.serviceHandler.info(l[0]).getEvent(l[0])
def getCurrent(self):
l = self.l.getCurrentSelection()
return l and l[0]
def getServiceRefList(self):
return [ x[0] for x in self.list ]
def __len__(self):
return len(self.list)
| gpl-2.0 | -2,370,759,973,291,924,500 | 29.867769 | 110 | 0.72664 | false |
hogarthj/ansible | test/units/modules/network/edgeos/test_edgeos_command.py | 39 | 4249 |
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.edgeos import edgeos_command
from units.modules.utils import set_module_args
from .edgeos_module import TestEdgeosModule, load_fixture
class TestEdgeosCommandModule(TestEdgeosModule):
module = edgeos_command
def setUp(self):
super(TestEdgeosCommandModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.edgeos.edgeos_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestEdgeosCommandModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item)
command = obj['command']
except (ValueError, TypeError):
command = item['command']
filename = str(command).replace(' ', '_')
output.append(load_fixture(filename))
return output
self.run_commands.side_effect = load_from_file
def test_edgeos_command_simple(self):
set_module_args(dict(commands=['show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Version: v1.9.7'))
def test_edgeos_command_multiple(self):
set_module_args(dict(commands=['show version', 'show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('Version: v1.9.7'))
    def test_edgeos_command_wait_for(self):
wait_for = 'result[0] contains "Ubiquiti Networks"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module()
def test_edgeos_command_wait_for_fails(self):
wait_for = 'result[0] contains "bad string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_edgeos_command_retries(self):
wait_for = 'result[0] contains "bad string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_edgeos_command_match_any(self):
wait_for = ['result[0] contains "Ubiquiti Networks"',
'result[0] contains "bad string"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
self.execute_module()
def test_edgeos_command_match_all(self):
wait_for = ['result[0] contains "Ubiquiti Networks"',
'result[0] contains "EdgeRouter"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
self.execute_module()
def test_vyos_command_match_all_failure(self):
wait_for = ['result[0] contains "Ubiquiti Networks"',
'result[0] contains "bad string"']
commands = ['show version', 'show version']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
| gpl-3.0 | -6,532,834,230,542,704,000 | 39.084906 | 100 | 0.648859 | false |
liberorbis/libernext | apps/frappe/frappe/__init__.py | 3 | 19151 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals
from werkzeug.local import Local, release_local
import os, importlib, inspect, logging, json
# public
from frappe.__version__ import __version__
from .exceptions import *
from .utils.jinja import get_jenv, get_template, render_template
local = Local()
class _dict(dict):
"""dict like object that exposes keys as attributes"""
def __getattr__(self, key):
ret = self.get(key)
if not ret and key.startswith("__"):
raise AttributeError()
return ret
def __setattr__(self, key, value):
self[key] = value
def __getstate__(self):
return self
def __setstate__(self, d):
self.update(d)
def update(self, d):
"""update and return self -- the missing dict feature in python"""
super(_dict, self).update(d)
return self
def copy(self):
return _dict(dict(self).copy())
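# Small usage sketch (not from the original source):
#
#   d = _dict({"doctype": "ToDo"})
#   d.owner = "Administrator"        # attribute-style write
#   assert d["owner"] == d.owner     # same underlying dict entry
#   assert d.missing is None         # absent keys return None instead of raising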
def _(msg):
"""translate object in current lang, if exists"""
if local.lang == "en":
return msg
from frappe.translate import get_full_dict
return get_full_dict(local.lang).get(msg, msg)
def get_lang_dict(fortype, name=None):
if local.lang=="en":
return {}
from frappe.translate import get_dict
return get_dict(fortype, name)
def set_user_lang(user, user_language=None):
from frappe.translate import get_user_lang
local.lang = get_user_lang(user)
# local-globals
db = local("db")
conf = local("conf")
form = form_dict = local("form_dict")
request = local("request")
request_method = local("request_method")
response = local("response")
session = local("session")
user = local("user")
flags = local("flags")
error_log = local("error_log")
debug_log = local("debug_log")
message_log = local("message_log")
lang = local("lang")
def init(site, sites_path=None):
if getattr(local, "initialised", None):
return
if not sites_path:
sites_path = '.'
local.error_log = []
local.message_log = []
local.debug_log = []
local.flags = _dict({})
local.rollback_observers = []
local.test_objects = {}
local.site = site
local.sites_path = sites_path
local.site_path = os.path.join(sites_path, site)
local.request_method = request.method if request else None
local.request_ip = None
local.response = _dict({"docs":[]})
local.conf = _dict(get_site_config())
local.lang = local.conf.lang or "en"
local.module_app = None
local.app_modules = None
local.user = None
local.role_permissions = {}
local.jenv = None
local.jloader =None
local.cache = {}
setup_module_map()
local.initialised = True
def connect(site=None, db_name=None):
from database import Database
if site:
init(site)
local.db = Database(user=db_name or local.conf.db_name)
local.form_dict = _dict()
local.session = _dict()
set_user("Administrator")
def get_site_config(sites_path=None, site_path=None):
config = {}
sites_path = sites_path or getattr(local, "sites_path", None)
site_path = site_path or getattr(local, "site_path", None)
if sites_path:
common_site_config = os.path.join(sites_path, "common_site_config.json")
if os.path.exists(common_site_config):
config.update(get_file_json(common_site_config))
if site_path:
site_config = os.path.join(site_path, "site_config.json")
if os.path.exists(site_config):
config.update(get_file_json(site_config))
return _dict(config)
def destroy():
"""closes connection and releases werkzeug local"""
if db:
db.close()
release_local(local)
_memc = None
# memcache
def cache():
global _memc
if not _memc:
from frappe.memc import MClient
_memc = MClient(['localhost:11211'])
return _memc
def get_traceback():
import utils
return utils.get_traceback()
def errprint(msg):
from utils import cstr
if not request or (not "cmd" in local.form_dict):
print cstr(msg)
error_log.append(cstr(msg))
def log(msg):
if not request:
if conf.get("logging") or False:
print repr(msg)
from utils import cstr
debug_log.append(cstr(msg))
def msgprint(msg, small=0, raise_exception=0, as_table=False):
def _raise_exception():
if raise_exception:
if flags.rollback_on_exception:
db.rollback()
import inspect
if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception):
raise raise_exception, msg
else:
raise ValidationError, msg
if flags.mute_messages:
_raise_exception()
return
from utils import cstr
if as_table and type(msg) in (list, tuple):
msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>'
if flags.print_messages:
print "Message: " + repr(msg)
message_log.append((small and '__small:' or '')+cstr(msg or ''))
_raise_exception()
def throw(msg, exc=ValidationError):
msgprint(msg, raise_exception=exc)
def create_folder(path, with_init=False):
from frappe.utils import touch_file
if not os.path.exists(path):
os.makedirs(path)
if with_init:
touch_file(os.path.join(path, "__init__.py"))
def set_user(username):
from frappe.utils.user import User
local.session.user = username
local.session.sid = username
local.cache = {}
local.form_dict = _dict()
local.jenv = None
local.session.data = {}
local.user = User(username)
local.role_permissions = {}
def get_request_header(key, default=None):
return request.headers.get(key, default)
def sendmail(recipients=(), sender="", subject="No Subject", message="No Message",
as_markdown=False, bulk=False, ref_doctype=None, ref_docname=None,
add_unsubscribe_link=False, attachments=None):
if bulk:
import frappe.utils.email_lib.bulk
frappe.utils.email_lib.bulk.send(recipients=recipients, sender=sender,
subject=subject, message=message, ref_doctype = ref_doctype,
ref_docname = ref_docname, add_unsubscribe_link=add_unsubscribe_link, attachments=attachments)
else:
import frappe.utils.email_lib
if as_markdown:
frappe.utils.email_lib.sendmail_md(recipients, sender=sender,
subject=subject, msg=message, attachments=attachments)
else:
frappe.utils.email_lib.sendmail(recipients, sender=sender,
subject=subject, msg=message, attachments=attachments)
logger = None
whitelisted = []
guest_methods = []
def whitelist(allow_guest=False):
"""
decorator for whitelisting a function
Note: if the function is allowed to be accessed by a guest user,
it must explicitly be marked as allow_guest=True
for specific roles, set allow_roles = ['Administrator'] etc.
"""
def innerfn(fn):
global whitelisted, guest_methods
whitelisted.append(fn)
if allow_guest:
guest_methods.append(fn)
return fn
return innerfn
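# Typical use (sketch only; the handler below is made up): a function must be
# whitelisted before it can be called over Frappe's HTTP API, and guest access
# is opt-in.
#
#   @whitelist(allow_guest=True)
#   def ping():
#       return "pong"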
def only_for(roles):
if not isinstance(roles, (tuple, list)):
roles = (roles,)
roles = set(roles)
myroles = set(get_roles())
if not roles.intersection(myroles):
raise PermissionError
def clear_cache(user=None, doctype=None):
"""clear cache"""
import frappe.sessions
if doctype:
import frappe.model.meta
frappe.model.meta.clear_cache(doctype)
reset_metadata_version()
elif user:
frappe.sessions.clear_cache(user)
else: # everything
import translate
frappe.sessions.clear_cache()
translate.clear_cache()
reset_metadata_version()
for fn in frappe.get_hooks("clear_cache"):
get_attr(fn)()
frappe.local.role_permissions = {}
def get_roles(username=None):
if not local.session:
return ["Guest"]
return get_user(username).get_roles()
def get_user(username):
from frappe.utils.user import User
if not username or username == local.session.user:
return local.user
else:
return User(username)
def has_permission(doctype, ptype="read", doc=None, user=None):
import frappe.permissions
return frappe.permissions.has_permission(doctype, ptype, doc, user=user)
def is_table(doctype):
tables = cache().get_value("is_table")
if tables==None:
tables = db.sql_list("select name from tabDocType where ifnull(istable,0)=1")
cache().set_value("is_table", tables)
return doctype in tables
def clear_perms(doctype):
db.sql("""delete from tabDocPerm where parent=%s""", doctype)
def reset_perms(doctype):
from frappe.core.doctype.notification_count.notification_count import delete_notification_count_for
delete_notification_count_for(doctype)
clear_perms(doctype)
reload_doc(db.get_value("DocType", doctype, "module"),
"DocType", doctype, force=True)
def generate_hash(txt=None):
"""Generates random hash for session id"""
import hashlib, time
from .utils import random_string
return hashlib.sha224((txt or "") + repr(time.time()) + repr(random_string(8))).hexdigest()
def reset_metadata_version():
v = generate_hash()
cache().set_value("metadata_version", v)
return v
def new_doc(doctype, parent_doc=None, parentfield=None):
from frappe.model.create_new import get_new_doc
return get_new_doc(doctype, parent_doc, parentfield)
def set_value(doctype, docname, fieldname, value):
import frappe.client
return frappe.client.set_value(doctype, docname, fieldname, value)
def get_doc(arg1, arg2=None):
import frappe.model.document
return frappe.model.document.get_doc(arg1, arg2)
def get_meta(doctype, cached=True):
import frappe.model.meta
return frappe.model.meta.get_meta(doctype, cached=cached)
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False, ignore_permissions=False):
import frappe.model.delete_doc
frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload, ignore_permissions)
def delete_doc_if_exists(doctype, name):
if db.exists(doctype, name):
delete_doc(doctype, name)
def reload_doc(module, dt=None, dn=None, force=False):
import frappe.modules
return frappe.modules.reload_doc(module, dt, dn, force=force)
def rename_doc(doctype, old, new, debug=0, force=False, merge=False, ignore_permissions=False):
from frappe.model.rename_doc import rename_doc
return rename_doc(doctype, old, new, force=force, merge=merge, ignore_permissions=ignore_permissions)
def insert(doclist):
import frappe.model
return frappe.model.insert(doclist)
def get_module(modulename):
return importlib.import_module(modulename)
def scrub(txt):
return txt.replace(' ','_').replace('-', '_').lower()
def unscrub(txt):
return txt.replace('_',' ').replace('-', ' ').title()
def get_module_path(module, *joins):
module = scrub(module)
return get_pymodule_path(local.module_app[module] + "." + module, *joins)
def get_app_path(app_name, *joins):
return get_pymodule_path(app_name, *joins)
def get_site_path(*joins):
return os.path.join(local.site_path, *joins)
def get_pymodule_path(modulename, *joins):
joins = [scrub(part) for part in joins]
return os.path.join(os.path.dirname(get_module(scrub(modulename)).__file__), *joins)
def get_module_list(app_name):
return get_file_items(os.path.join(os.path.dirname(get_module(app_name).__file__), "modules.txt"))
def get_all_apps(with_frappe=False, with_internal_apps=True, sites_path=None):
if not sites_path:
sites_path = local.sites_path
apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True)
if with_internal_apps:
apps.extend(get_file_items(os.path.join(local.site_path, "apps.txt")))
if with_frappe:
apps.insert(0, 'frappe')
return apps
def get_installed_apps():
if getattr(flags, "in_install_db", True):
return []
installed = json.loads(db.get_global("installed_apps") or "[]")
return installed
@whitelist()
def get_versions():
versions = {}
for app in get_installed_apps():
versions[app] = {
"title": get_hooks("app_title", app_name=app),
"description": get_hooks("app_description", app_name=app)
}
try:
versions[app]["version"] = get_attr(app + ".__version__")
except AttributeError:
versions[app]["version"] = '0.0.1'
return versions
def get_hooks(hook=None, default=None, app_name=None):
def load_app_hooks(app_name=None):
hooks = {}
for app in [app_name] if app_name else get_installed_apps():
app = "frappe" if app=="webnotes" else app
app_hooks = get_module(app + ".hooks")
for key in dir(app_hooks):
if not key.startswith("_"):
append_hook(hooks, key, getattr(app_hooks, key))
return hooks
def append_hook(target, key, value):
if isinstance(value, dict):
target.setdefault(key, {})
for inkey in value:
append_hook(target[key], inkey, value[inkey])
else:
append_to_list(target, key, value)
def append_to_list(target, key, value):
target.setdefault(key, [])
if not isinstance(value, list):
value = [value]
target[key].extend(value)
if app_name:
hooks = _dict(load_app_hooks(app_name))
else:
hooks = _dict(cache().get_value("app_hooks", load_app_hooks))
if hook:
return hooks.get(hook) or (default if default is not None else [])
else:
return hooks
def setup_module_map():
_cache = cache()
if conf.db_name:
local.app_modules = _cache.get_value("app_modules")
local.module_app = _cache.get_value("module_app")
if not local.app_modules:
local.module_app, local.app_modules = {}, {}
for app in get_all_apps(True):
if app=="webnotes": app="frappe"
local.app_modules.setdefault(app, [])
for module in get_module_list(app):
module = scrub(module)
local.module_app[module] = app
local.app_modules[app].append(module)
if conf.db_name:
_cache.set_value("app_modules", local.app_modules)
_cache.set_value("module_app", local.module_app)
def get_file_items(path, raise_not_found=False, ignore_empty_lines=True):
import frappe.utils
content = read_file(path, raise_not_found=raise_not_found)
if content:
content = frappe.utils.strip(content)
return [p.strip() for p in content.splitlines() if (not ignore_empty_lines) or (p.strip() and not p.startswith("#"))]
else:
return []
def get_file_json(path):
with open(path, 'r') as f:
return json.load(f)
def read_file(path, raise_not_found=False):
from frappe.utils import cstr
if os.path.exists(path):
with open(path, "r") as f:
return cstr(f.read())
elif raise_not_found:
raise IOError("{} Not Found".format(path))
else:
return None
def get_attr(method_string):
modulename = '.'.join(method_string.split('.')[:-1])
methodname = method_string.split('.')[-1]
return getattr(get_module(modulename), methodname)
def call(fn, *args, **kwargs):
if hasattr(fn, 'fnargs'):
fnargs = fn.fnargs
else:
fnargs, varargs, varkw, defaults = inspect.getargspec(fn)
newargs = {}
for a in fnargs:
if a in kwargs:
newargs[a] = kwargs.get(a)
return fn(*args, **newargs)
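# Illustrative sketch (editor addition): call() forwards only those keyword
# arguments that the target function actually accepts. greet() is a made-up
# function used purely for demonstration.
def _example_call_usage():
    def greet(name):
        return "hello " + name
    # "unused" is dropped silently because greet() has no such parameter
    return call(greet, name="world", unused=42)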
def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True):
args = _dict(args)
ps = get_doc({
'doctype': "Property Setter",
'doctype_or_field': args.doctype_or_field or "DocField",
'doc_type': args.doctype,
'field_name': args.fieldname,
'property': args.property,
'value': args.value,
'property_type': args.property_type or "Data",
'__islocal': 1
})
ps.ignore_validate = ignore_validate
ps.validate_fields_for_doctype = validate_fields_for_doctype
ps.insert()
def import_doc(path, ignore_links=False, ignore_insert=False, insert=False):
from frappe.core.page.data_import_tool import data_import_tool
data_import_tool.import_doc(path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert)
def copy_doc(doc, ignore_no_copy=True):
""" No_copy fields also get copied."""
import copy
def remove_no_copy_fields(d):
for df in d.meta.get("fields", {"no_copy": 1}):
if hasattr(d, df.fieldname):
d.set(df.fieldname, None)
if not isinstance(doc, dict):
d = doc.as_dict()
else:
d = doc
newdoc = get_doc(copy.deepcopy(d))
newdoc.name = None
newdoc.set("__islocal", 1)
newdoc.owner = None
newdoc.creation = None
newdoc.amended_from = None
newdoc.amendment_date = None
if not ignore_no_copy:
remove_no_copy_fields(newdoc)
for d in newdoc.get_all_children():
d.name = None
d.parent = None
d.set("__islocal", 1)
d.owner = None
d.creation = None
if not ignore_no_copy:
remove_no_copy_fields(d)
return newdoc
def compare(val1, condition, val2):
import frappe.utils
return frappe.utils.compare(val1, condition, val2)
def respond_as_web_page(title, html, success=None, http_status_code=None):
local.message_title = title
local.message = html
local.message_success = success
local.response['type'] = 'page'
local.response['page_name'] = 'message'
if http_status_code:
local.response['http_status_code'] = http_status_code
def build_match_conditions(doctype, as_condition=True):
import frappe.widgets.reportview
return frappe.widgets.reportview.build_match_conditions(doctype, as_condition)
def get_list(doctype, filters=None, fields=None, or_filters=None, docstatus=None,
group_by=None, order_by=None, limit_start=0, limit_page_length=None,
as_list=False, debug=False, ignore_permissions=False, user=None):
import frappe.model.db_query
return frappe.model.db_query.DatabaseQuery(doctype).execute(filters=filters,
fields=fields, docstatus=docstatus, or_filters=or_filters,
group_by=group_by, order_by=order_by, limit_start=limit_start,
limit_page_length=limit_page_length, as_list=as_list, debug=debug,
ignore_permissions=ignore_permissions, user=user)
def get_all(doctype, **args):
args["ignore_permissions"] = True
return get_list(doctype, **args)
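# Illustrative sketch (editor addition): filter-style queries built on get_list /
# get_all. Doctype, field and filter names are examples and assume a configured
# site with a database connection.
def _example_get_list_usage():
    open_todos = get_list("ToDo", filters={"status": "Open"},
        fields=["name", "description"], order_by="modified desc",
        limit_page_length=20)
    active_users = get_all("User", filters={"enabled": 1})  # permissions ignored
    return open_todos, active_users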
run_query = get_list
def add_version(doc):
get_doc({
"doctype": "Version",
"ref_doctype": doc.doctype,
"docname": doc.name,
"doclist_json": json.dumps(doc.as_dict(), indent=1, sort_keys=True)
}).insert(ignore_permissions=True)
def get_test_records(doctype):
from frappe.modules import get_doctype_module, get_module_path
path = os.path.join(get_module_path(get_doctype_module(doctype)), "doctype", scrub(doctype), "test_records.json")
if os.path.exists(path):
with open(path, "r") as f:
return json.loads(f.read())
else:
return []
def format_value(value, df, doc=None, currency=None):
import frappe.utils.formatters
return frappe.utils.formatters.format_value(value, df, doc, currency=currency)
def get_print_format(doctype, name, print_format=None, style=None, as_pdf=False):
from frappe.website.render import build_page
from frappe.utils.pdf import get_pdf
local.form_dict.doctype = doctype
local.form_dict.name = name
local.form_dict.format = print_format
local.form_dict.style = style
html = build_page("print")
if as_pdf:
return get_pdf(html)
else:
return html
def attach_print(doctype, name, file_name):
from frappe.utils import scrub_urls
print_settings = db.get_singles_dict("Print Settings")
if int(print_settings.send_print_as_pdf or 0):
return {
"fname": file_name + ".pdf",
"fcontent": get_print_format(doctype, name, as_pdf=True)
}
else:
return {
"fname": file_name + ".html",
"fcontent": scrub_urls(get_print_format(doctype, name)).encode("utf-8")
}
logging_setup_complete = False
def get_logger(module=None):
from frappe.setup_logging import setup_logging
global logging_setup_complete
if not logging_setup_complete:
setup_logging()
logging_setup_complete = True
return logging.getLogger(module or "frappe")
| gpl-2.0 | -6,950,182,057,607,951,000 | 27.039531 | 176 | 0.710041 | false |
2013Commons/HUE-SHARK | build/env/lib/python2.7/site-packages/Pygments-1.3.1-py2.7.egg/pygments/lexer.py | 58 | 22989 | # -*- coding: utf-8 -*-
"""
pygments.lexer
~~~~~~~~~~~~~~
Base lexer classes.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
make_analysator
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
'LexerContext', 'include', 'bygroups', 'using', 'this']
_default_analyse = staticmethod(lambda x: 0.0)
class LexerMeta(type):
"""
This metaclass automagically converts ``analyse_text`` methods into
static methods which always return float values.
"""
def __new__(cls, name, bases, d):
if 'analyse_text' in d:
d['analyse_text'] = make_analysator(d['analyse_text'])
return type.__new__(cls, name, bases, d)
class Lexer(object):
"""
Lexer for a specific language.
Basic options recognized:
``stripnl``
Strip leading and trailing newlines from the input (default: True).
``stripall``
Strip all leading and trailing whitespace from the input
(default: False).
``ensurenl``
Make sure that the input ends with a newline (default: True). This
is required for some lexers that consume input linewise.
*New in Pygments 1.3.*
``tabsize``
If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
If given, must be an encoding name. This encoding will be used to
convert the input string to Unicode, if it is not already a Unicode
string (default: ``'latin1'``).
Can also be ``'guess'`` to use a simple UTF-8 / Latin1 detection, or
``'chardet'`` to use the chardet library, if it is installed.
"""
#: Name of the lexer
name = None
#: Shortcuts for the lexer
aliases = []
#: fn match rules
filenames = []
#: fn alias filenames
alias_filenames = []
#: mime types
mimetypes = []
__metaclass__ = LexerMeta
def __init__(self, **options):
self.options = options
self.stripnl = get_bool_opt(options, 'stripnl', True)
self.stripall = get_bool_opt(options, 'stripall', False)
self.ensurenl = get_bool_opt(options, 'ensurenl', True)
self.tabsize = get_int_opt(options, 'tabsize', 0)
self.encoding = options.get('encoding', 'latin1')
# self.encoding = options.get('inencoding', None) or self.encoding
self.filters = []
for filter_ in get_list_opt(options, 'filters', ()):
self.add_filter(filter_)
def __repr__(self):
if self.options:
return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
self.options)
else:
return '<pygments.lexers.%s>' % self.__class__.__name__
def add_filter(self, filter_, **options):
"""
Add a new stream filter to this lexer.
"""
if not isinstance(filter_, Filter):
filter_ = get_filter_by_name(filter_, **options)
self.filters.append(filter_)
def analyse_text(text):
"""
Has to return a float between ``0`` and ``1`` that indicates
if a lexer wants to highlight this text. Used by ``guess_lexer``.
If this method returns ``0`` it won't highlight it in any case, if
it returns ``1`` highlighting with this lexer is guaranteed.
The `LexerMeta` metaclass automatically wraps this function so
that it works like a static method (no ``self`` or ``cls``
parameter) and the return value is automatically converted to
`float`. If the return value is an object that is boolean `False`
it's the same as if the return value was ``0.0``.
"""
def get_tokens(self, text, unfiltered=False):
"""
Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
is bypassed even if filters are defined.
Also preprocess the text, i.e. expand tabs and strip it if
wanted, and apply registered filters.
"""
if not isinstance(text, unicode):
if self.encoding == 'guess':
try:
text = text.decode('utf-8')
if text.startswith(u'\ufeff'):
text = text[len(u'\ufeff'):]
except UnicodeDecodeError:
text = text.decode('latin1')
elif self.encoding == 'chardet':
try:
import chardet
except ImportError:
raise ImportError('To enable chardet encoding guessing, '
'please install the chardet library '
'from http://chardet.feedparser.org/')
enc = chardet.detect(text)
text = text.decode(enc['encoding'])
else:
text = text.decode(self.encoding)
# text now *is* a unicode string
text = text.replace('\r\n', '\n')
text = text.replace('\r', '\n')
if self.stripall:
text = text.strip()
elif self.stripnl:
text = text.strip('\n')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
if self.ensurenl and not text.endswith('\n'):
text += '\n'
def streamer():
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
stream = streamer()
if not unfiltered:
stream = apply_filters(stream, self.filters, self)
return stream
def get_tokens_unprocessed(self, text):
"""
Return an iterable of (tokentype, value) pairs.
In subclasses, implement this method as a generator to
maximize effectiveness.
"""
raise NotImplementedError
class DelegatingLexer(Lexer):
"""
This lexer takes two lexers as arguments: a root lexer and
a language lexer. First everything is scanned using the language
lexer, then all ``Other`` tokens are lexed using the root
lexer.
The lexers from the ``template`` lexer package use this base lexer.
"""
def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
self.root_lexer = _root_lexer(**options)
self.language_lexer = _language_lexer(**options)
self.needle = _needle
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buffered = ''
insertions = []
lng_buffer = []
for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
if t is self.needle:
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
lng_buffer = []
buffered += v
else:
lng_buffer.append((i, t, v))
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
return do_insertions(insertions,
self.root_lexer.get_tokens_unprocessed(buffered))
#-------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#
class include(str):
"""
Indicates that a state should include rules from another state.
"""
pass
class combined(tuple):
"""
Indicates a state combined from multiple states.
"""
def __new__(cls, *args):
return tuple.__new__(cls, args)
def __init__(self, *args):
# tuple.__init__ doesn't do anything
pass
class _PseudoMatch(object):
"""
A pseudo match object constructed from a string.
"""
def __init__(self, start, text):
self._text = text
self._start = start
def start(self, arg=None):
return self._start
def end(self, arg=None):
return self._start + len(self._text)
def group(self, arg=None):
if arg:
raise IndexError('No such group')
return self._text
def groups(self):
return (self._text,)
def groupdict(self):
return {}
def bygroups(*args):
"""
Callback that yields multiple actions for each group in the match.
"""
def callback(lexer, match, ctx=None):
for i, action in enumerate(args):
if action is None:
continue
elif type(action) is _TokenType:
data = match.group(i + 1)
if data:
yield match.start(i + 1), action, data
else:
if ctx:
ctx.pos = match.start(i + 1)
for item in action(lexer, _PseudoMatch(match.start(i + 1),
match.group(i + 1)), ctx):
if item:
yield item
if ctx:
ctx.pos = match.end()
return callback
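# Illustrative sketch (editor addition): a rule tuple using bygroups() so that
# each regex group gets its own token type. The pattern and token types are
# invented for demonstration and are not used by this module.
def _example_bygroups_rule():
    from pygments.token import Name, Operator, Number
    # matches e.g. "answer = 42" inside a lexer's ``tokens`` table
    return (r'(\w+)(\s*)(=)(\s*)(\d+)',
            bygroups(Name, Text, Operator, Text, Number))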
class _This(object):
"""
Special singleton used for indicating the caller class.
Used by ``using``.
"""
this = _This()
def using(_other, **kwargs):
"""
Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this:
def callback(lexer, match, ctx=None):
# if keyword arguments are given the callback
# function has to create a new lexer instance
if kwargs:
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = lexer.__class__(**kwargs)
else:
lx = lexer
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
else:
def callback(lexer, match, ctx=None):
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = _other(**kwargs)
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
return callback
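# Illustrative sketch (editor addition): delegating part of a match to another
# lexer via using(). The surrounding pattern and the choice of PythonLexer are
# examples only.
def _example_using_rule():
    from pygments.lexers.agile import PythonLexer
    return (r'(?s)(<py>)(.*?)(</py>)',
            bygroups(Text, using(PythonLexer), Text))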
class RegexLexerMeta(LexerMeta):
"""
Metaclass for RegexLexer, creates the self._tokens attribute from
self.tokens on the first instantiation.
"""
def _process_state(cls, unprocessed, processed, state):
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed, str(tdef)))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = re.compile(tdef[0], rflags).match
except Exception, err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
assert type(tdef[1]) is _TokenType or callable(tdef[1]), \
'token type must be simple type or callable, not %r' % (tdef[1],)
if len(tdef) == 2:
new_state = None
else:
tdef2 = tdef[2]
if isinstance(tdef2, str):
# an existing state
if tdef2 == '#pop':
new_state = -1
elif tdef2 in unprocessed:
new_state = (tdef2,)
elif tdef2 == '#push':
new_state = tdef2
elif tdef2[:5] == '#pop:':
new_state = -int(tdef2[5:])
else:
assert False, 'unknown new state %r' % tdef2
elif isinstance(tdef2, combined):
# combine a new state from existing ones
new_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in tdef2:
assert istate != state, 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[new_state] = itokens
new_state = (new_state,)
elif isinstance(tdef2, tuple):
# push more than one state
for state in tdef2:
assert (state in unprocessed or
state in ('#pop', '#push')), \
'unknown new state ' + state
new_state = tdef2
else:
assert False, 'unknown new state def %r' % tdef2
tokens.append((rex, tdef[1], new_state))
return tokens
def process_tokendef(cls, name, tokendefs=None):
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
for state in tokendefs.keys():
cls._process_state(tokendefs, processed, state)
return processed
def __call__(cls, *args, **kwds):
if not hasattr(cls, '_tokens'):
cls._all_tokens = {}
cls._tmpname = 0
if hasattr(cls, 'token_variants') and cls.token_variants:
# don't process yet
pass
else:
cls._tokens = cls.process_tokendef('', cls.tokens)
return type.__call__(cls, *args, **kwds)
class RegexLexer(Lexer):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
provide a list of states and regular expressions.
"""
__metaclass__ = RegexLexerMeta
#: Flags for compiling the regular expressions.
#: Defaults to MULTILINE.
flags = re.MULTILINE
#: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
#:
#: The initial state is 'root'.
#: ``new_state`` can be omitted to signify no state transition.
#: If it is a string, the state is pushed on the stack and changed.
#: If it is a tuple of strings, all states are pushed on the stack and
#: the current state will be the topmost.
#: It can also be ``combined('state1', 'state2', ...)``
#: to signify a new, anonymous state combined from the rules of two
#: or more existing ones.
#: Furthermore, it can be '#pop' to signify going back one step in
#: the state stack, or '#push' to push the current state on the stack
#: again.
#:
#: The tuple can also be replaced with ``include('state')``, in which
#: case the rules from the state named by the string are included in the
#: current one. An illustrative sketch of such a table follows this class.
tokens = {}
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
``stack`` is the initial stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
for item in action(self, m):
yield item
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
pos += 1
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Text, u'\n'
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break
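# Illustrative sketch (editor addition, referenced from the ``tokens`` comments
# above): a minimal RegexLexer subclass with a single state transition. The
# rules are invented for demonstration and never used by Pygments itself.
def _example_regex_lexer():
    class ExampleLexer(RegexLexer):
        tokens = {
            'root': [
                (r'"', Text, 'string'),
                (r'[^"\n]+', Text),
                (r'\n', Text),
            ],
            'string': [
                (r'[^"]+', Text),
                (r'"', Text, '#pop'),
            ],
        }
    return list(ExampleLexer().get_tokens('say "hi"\n'))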
class LexerContext(object):
"""
A helper object that holds lexer position data.
"""
def __init__(self, text, pos, stack=None, end=None):
self.text = text
self.pos = pos
self.end = end or len(text) # end=0 not supported ;-)
self.stack = stack or ['root']
def __repr__(self):
return 'LexerContext(%r, %r, %r)' % (
self.text, self.pos, self.stack)
class ExtendedRegexLexer(RegexLexer):
"""
A RegexLexer that uses a context object to store its state.
"""
def get_tokens_unprocessed(self, text=None, context=None):
"""
Split ``text`` into (tokentype, text) pairs.
If ``context`` is given, use this lexer context instead.
"""
tokendefs = self._tokens
if not context:
ctx = LexerContext(text, 0)
statetokens = tokendefs['root']
else:
ctx = context
statetokens = tokendefs[ctx.stack[-1]]
text = ctx.text
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, ctx.pos, ctx.end)
if m:
if type(action) is _TokenType:
yield ctx.pos, action, m.group()
ctx.pos = m.end()
else:
for item in action(self, m, ctx):
yield item
if not new_state:
# altered the state stack?
statetokens = tokendefs[ctx.stack[-1]]
# CAUTION: callback must set ctx.pos!
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
ctx.stack.extend(new_state)
elif isinstance(new_state, int):
# pop
del ctx.stack[new_state:]
elif new_state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[ctx.stack[-1]]
break
else:
try:
if ctx.pos >= ctx.end:
break
if text[ctx.pos] == '\n':
# at EOL, reset state to "root"
ctx.pos += 1
ctx.stack = ['root']
statetokens = tokendefs['root']
yield ctx.pos, Text, u'\n'
continue
yield ctx.pos, Error, text[ctx.pos]
ctx.pos += 1
except IndexError:
break
def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = insertions.next()
except StopIteration:
# no insertions
for item in tokens:
yield item
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
# first iteration. store the position of the first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = insertions.next()
except StopIteration:
insleft = False
break # not strictly necessary
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = insertions.next()
except StopIteration:
insleft = False
break # not strictly necessary
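# Illustrative sketch (editor addition): interleaving a sub-lexer's tokens into
# a main token stream with do_insertions(). Offsets and values are invented.
def _example_do_insertions():
    main = [(0, Text, u'echo hi\n'), (8, Text, u'echo bye\n')]
    prompt = [(0, Other, u'$ ')]
    # insert the prompt tokens at position 0 of the combined stream
    return list(do_insertions([(0, prompt)], iter(main)))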
| apache-2.0 | -986,447,255,852,400,400 | 33.831818 | 84 | 0.510723 | false |
derekjchow/models | official/keras_application_models/benchmark_main.py | 1 | 8439 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark on the keras built-in application models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-import-order
import numpy as np
from absl import app as absl_app
from absl import flags
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.keras_application_models import dataset
from official.keras_application_models import model_callbacks
from official.utils.flags import core as flags_core
from official.utils.logs import logger
from official.utils.misc import distribution_utils
# Define a dictionary that maps model names to their model classes inside Keras
MODELS = {
"vgg16": tf.keras.applications.VGG16,
"vgg19": tf.keras.applications.VGG19,
"inceptionv3": tf.keras.applications.InceptionV3,
"xception": tf.keras.applications.Xception,
"resnet50": tf.keras.applications.ResNet50,
"inceptionresnetv2": tf.keras.applications.InceptionResNetV2,
"mobilenet": tf.keras.applications.MobileNet,
"densenet121": tf.keras.applications.DenseNet121,
"densenet169": tf.keras.applications.DenseNet169,
"densenet201": tf.keras.applications.DenseNet201,
"nasnetlarge": tf.keras.applications.NASNetLarge,
"nasnetmobile": tf.keras.applications.NASNetMobile,
}
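# Illustrative example (editor addition): how a command-line model name maps to
# a Keras application class; mirrors the CIFAR-10 path below. The chosen model
# and shapes are arbitrary, and this helper is not called by the benchmark.
def _example_model_lookup():
    keras_model = MODELS["mobilenet"]
    return keras_model(weights=None, input_shape=(32, 32, 3), classes=10)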
def run_keras_model_benchmark(_):
"""Run the benchmark on keras model."""
# Ensure a valid model name was supplied via command line argument
if FLAGS.model not in MODELS.keys():
raise AssertionError("The --model command line argument should "
"be a key in the `MODELS` dictionary.")
# Check if eager execution is enabled
if FLAGS.eager:
tf.logging.info("Eager execution is enabled...")
tf.enable_eager_execution()
# Load the model
tf.logging.info("Benchmark on {} model...".format(FLAGS.model))
keras_model = MODELS[FLAGS.model]
# Get dataset
dataset_name = "ImageNet"
if FLAGS.use_synthetic_data:
tf.logging.info("Using synthetic dataset...")
dataset_name += "_Synthetic"
train_dataset = dataset.generate_synthetic_input_dataset(
FLAGS.model, FLAGS.batch_size)
val_dataset = dataset.generate_synthetic_input_dataset(
FLAGS.model, FLAGS.batch_size)
model = keras_model(weights=None)
else:
tf.logging.info("Using CIFAR-10 dataset...")
dataset_name = "CIFAR-10"
ds = dataset.Cifar10Dataset(FLAGS.batch_size)
train_dataset = ds.train_dataset
val_dataset = ds.test_dataset
model = keras_model(
weights=None, input_shape=ds.input_shape, classes=ds.num_classes)
num_gpus = flags_core.get_num_gpus(FLAGS)
distribution = None
# Use distribution strategy
if FLAGS.dist_strat:
distribution = distribution_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=num_gpus)
elif num_gpus > 1:
# Run with multi_gpu_model
# If eager execution is enabled, only one GPU is utilized even if multiple
# GPUs are provided.
if FLAGS.eager:
tf.logging.warning(
"{} GPUs are provided, but only one GPU is utilized as "
"eager execution is enabled.".format(num_gpus))
model = tf.keras.utils.multi_gpu_model(model, gpus=num_gpus)
# The Adam optimizer and some other optimizers don't work well with
# distribution strategy (b/113076709)
# Use GradientDescentOptimizer here
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
model.compile(loss="categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"],
distribute=distribution)
# Create benchmark logger for benchmark logging
run_params = {
"batch_size": FLAGS.batch_size,
"synthetic_data": FLAGS.use_synthetic_data,
"train_epochs": FLAGS.train_epochs,
"num_train_images": FLAGS.num_train_images,
"num_eval_images": FLAGS.num_eval_images,
}
benchmark_logger = logger.get_benchmark_logger()
benchmark_logger.log_run_info(
model_name=FLAGS.model,
dataset_name=dataset_name,
run_params=run_params,
test_id=FLAGS.benchmark_test_id)
# Create callbacks that log metric values about the training and evaluation
callbacks = model_callbacks.get_model_callbacks(
FLAGS.callbacks,
batch_size=FLAGS.batch_size,
metric_logger=benchmark_logger)
# Train and evaluate the model
history = model.fit(
train_dataset,
epochs=FLAGS.train_epochs,
callbacks=callbacks,
validation_data=val_dataset,
steps_per_epoch=int(np.ceil(FLAGS.num_train_images / FLAGS.batch_size)),
validation_steps=int(np.ceil(FLAGS.num_eval_images / FLAGS.batch_size))
)
tf.logging.info("Logging the evaluation results...")
for epoch in range(FLAGS.train_epochs):
eval_results = {
"accuracy": history.history["val_acc"][epoch],
"loss": history.history["val_loss"][epoch],
tf.GraphKeys.GLOBAL_STEP: (epoch + 1) * np.ceil(
FLAGS.num_eval_images/FLAGS.batch_size)
}
benchmark_logger.log_evaluation_result(eval_results)
# Clear the session explicitly to avoid session delete error
tf.keras.backend.clear_session()
def define_keras_benchmark_flags():
"""Add flags for keras built-in application models."""
flags_core.define_base(hooks=False)
flags_core.define_performance()
flags_core.define_image()
flags_core.define_benchmark()
flags.adopt_module_key_flags(flags_core)
flags_core.set_defaults(
data_format="channels_last",
use_synthetic_data=True,
batch_size=32,
train_epochs=2)
flags.DEFINE_enum(
name="model", default=None,
enum_values=MODELS.keys(), case_sensitive=False,
help=flags_core.help_wrap(
"Model to be benchmarked."))
flags.DEFINE_integer(
name="num_train_images", default=1000,
help=flags_core.help_wrap(
"The number of synthetic images for training. The default value is "
"1000."))
flags.DEFINE_integer(
name="num_eval_images", default=50,
help=flags_core.help_wrap(
"The number of synthetic images for evaluation. The default value is "
"50."))
flags.DEFINE_boolean(
name="eager", default=False, help=flags_core.help_wrap(
"To enable eager execution. Note that if eager execution is enabled, "
"only one GPU is utilized even if multiple GPUs are provided and "
"multi_gpu_model is used."))
flags.DEFINE_boolean(
name="dist_strat", default=False, help=flags_core.help_wrap(
"To enable distribution strategy for model training and evaluation. "
"Number of GPUs used for distribution strategy can be set by the "
"argument --num_gpus."))
flags.DEFINE_list(
name="callbacks",
default=["ExamplesPerSecondCallback", "LoggingMetricCallback"],
help=flags_core.help_wrap(
"A list of (case insensitive) strings to specify the names of "
"callbacks. For example: `--callbacks ExamplesPerSecondCallback,"
"LoggingMetricCallback`"))
@flags.multi_flags_validator(
["eager", "dist_strat"],
message="Both --eager and --dist_strat were set. Only one can be "
"defined, as DistributionStrategy is not supported in Eager "
"execution currently.")
# pylint: disable=unused-variable
def _check_eager_dist_strat(flag_dict):
return not(flag_dict["eager"] and flag_dict["dist_strat"])
def main(_):
with logger.benchmark_context(FLAGS):
run_keras_model_benchmark(FLAGS)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
define_keras_benchmark_flags()
FLAGS = flags.FLAGS
absl_app.run(main)
| apache-2.0 | 5,247,751,943,607,803,000 | 36.013158 | 80 | 0.688352 | false |
RevolutionTech/hummingbird | hummingbird_django/settings.py | 1 | 2852 | """
Django settings for hummingbird_django project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ujdo90h)l_xw$^6rj@tnmjg+^0e6zfs1cs$!vnlngjqibpg1$q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'hummingbird',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'hummingbird_django.urls'
WSGI_APPLICATION = 'hummingbird_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
MEDIA_URL = '/'
MEDIA_ROOT = os.path.join(BASE_DIR)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR,'templates'),
)
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Main Settings
PLAY_UNKNOWNS = False
# Time Settings
import datetime
TIME_RESET_TIME = datetime.time(hour=4, minute=0) # 4:00am
TIME_WAIT_TO_PLAY = 60 * 5 # 5 minutes
TIME_DELAY_TO_PLAY_SONG = 3
#time_check_queue = 0.25
TIME_CHECK_QUEUE = 1
TIME_INPUT_TIMEOUT = 30
TIME_MAX_SONG_LENGTH = 20
TIME_FADEOUT_SONG = 3000 # in milliseconds
# User Files
DATA_FILE = "songs.csv"
AUDIO_DIR = "audio/"
RANDOM_SUBDIR = "random/"
SOUND_SUBDIR = "sound/"
TCPDUMP_DID_NOT_MATCH_LOG = "tcpdump_dnm.log"
# Data Settings
DO_NOT_PLAY = "DNP"
NEED_TO_ASSIGN = "NTA"
UNKNOWN_USER_PREFIX = "Unknown #"
UNKNOWN_USER_SUFFIX_LENGTH = 5
| isc | 1,061,643,904,173,831,800 | 22.377049 | 71 | 0.719495 | false |
mlassnig/pilot | NordugridATLASSiteInformation.py | 3 | 1933 | # Class definition:
# NordugridATLASSiteInformation
# This class is the Nordugrid-ATLAS site information class inheriting from ATLASSiteInformation
# Instances are generated with SiteInformationFactory via pUtil::getSiteInformation()
# Implemented as a singleton class
# http://stackoverflow.com/questions/42558/python-and-the-singleton-pattern
# import relevant python/pilot modules
import os
import commands
import SiteMover
from SiteInformation import SiteInformation # Main site information class
from ATLASSiteInformation import ATLASSiteInformation # Main site information class
from pUtil import tolog # Logging method that sends text to the pilot log
from pUtil import readpar # Used to read values from the schedconfig DB (queuedata)
from PilotErrors import PilotErrors # Error codes
class NordugridATLASSiteInformation(ATLASSiteInformation):
# private data members
__experiment = "Nordugrid-ATLAS"
__instance = None
# Required methods
def __init__(self):
""" Default initialization """
pass
def __new__(cls, *args, **kwargs):
""" Override the __new__ method to make the class a singleton """
if not cls.__instance:
cls.__instance = super(ATLASSiteInformation, cls).__new__(cls, *args, **kwargs)
return cls.__instance
def getExperiment(self):
""" Return a string with the experiment name """
return self.__experiment
if __name__ == "__main__":
os.environ['PilotHomeDir'] = os.getcwd()
si = NordugridATLASSiteInformation()
tolog("Experiment: %s" % (si.getExperiment()))
cloud = "CERN"
queuename = si.getTier1Queue(cloud)
if queuename != "":
tolog("Cloud %s has Tier-1 queue %s" % (cloud, queuename))
else:
tolog("Failed to find a Tier-1 queue name for cloud %s" % (cloud))
| apache-2.0 | -4,017,108,199,240,480,000 | 32.327586 | 112 | 0.665804 | false |
Chuban/moose | python/peacock/tests/postprocessor_tab/test_AxesSettingsPlugin.py | 6 | 4831 | #!/usr/bin/env python
import sys
import unittest
from PyQt5 import QtCore, QtWidgets
from peacock.PostprocessorViewer.plugins.AxesSettingsPlugin import main
from peacock.utils import Testing
class TestAxesSettingsPlugin(Testing.PeacockImageTestCase):
"""
Test class for the AxesSettingsPlugin, which controls axes titles and legends.
"""
#: QApplication: The main App for QT, this must be static to work correctly.
qapp = QtWidgets.QApplication(sys.argv)
def setUp(self):
"""
Creates the GUI.
"""
self._control, self._widget, self._window = main(['../input/white_elephant_jan_2016.csv'])
def plot(self):
# Create plot
select = self._widget.currentWidget().PostprocessorSelectPlugin
var = 'air_temp_set_1'
select._groups[0]._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
select._groups[0]._toggles[var].CheckBox.clicked.emit(True)
var = 'snow_depth_set_1'
select._groups[0]._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
select._groups[0]._toggles[var].PlotAxis.setCurrentIndex(1)
select._groups[0]._toggles[var].CheckBox.clicked.emit(True)
def testEmpty(self):
"""
Test that an empty plot is possible.
"""
ax0, ax1 = self._window.axes()
self.assertEqual(ax0.get_xlabel(), '')
self.assertEqual(ax0.get_ylabel(), '')
self.assertEqual(ax1.get_ylabel(), '')
self.assertFalse(self._control.LegendLocation.isEnabled())
self.assertFalse(self._control.Legend2Location.isEnabled())
self.assertImage('testEmpty.png')
def testTitle(self):
"""
Test that a title may be added.
"""
self._control.Title.setText("Testing Title")
self._control.Title.editingFinished.emit()
ax0, ax1 = self._window.axes()
self.assertEqual(ax0.get_title(), "Testing Title")
def testLegend(self):
"""
Test that legend toggle operates.
"""
self.plot()
self._control.Legend.setCheckState(QtCore.Qt.Checked)
self._control.Legend.clicked.emit(True)
self._control.Legend2.setCheckState(QtCore.Qt.Checked)
self._control.Legend2.clicked.emit(True)
self.assertTrue(self._control.LegendLocation.isEnabled())
self.assertTrue(self._control.Legend2Location.isEnabled())
self.assertImage('testLegend.png')
def testLegendLocation(self):
"""
Test legend location selection.
"""
self.plot()
self._control.Legend.setCheckState(QtCore.Qt.Checked)
self._control.Legend.clicked.emit(True)
self._control.LegendLocation.setCurrentIndex(3)
self._control.Legend2.setCheckState(QtCore.Qt.Checked)
self._control.Legend2.clicked.emit(True)
self._control.Legend2Location.setCurrentIndex(1)
self.assertImage('testLegendLocation.png')
def testEmptyLegend(self):
"""
Test that a legend created with no data produces a warning.
"""
# Enable legends
self.plot()
self._control.Legend.setCheckState(QtCore.Qt.Checked)
self._control.Legend.clicked.emit(True)
self._control.Legend2.setCheckState(QtCore.Qt.Checked)
self._control.Legend2.clicked.emit(True)
# Remove lines
select = self._widget.currentWidget().PostprocessorSelectPlugin
var = 'air_temp_set_1'
select._groups[0]._toggles[var].CheckBox.setCheckState(QtCore.Qt.Unchecked)
select._groups[0]._toggles[var].CheckBox.clicked.emit(False)
var = 'snow_depth_set_1'
select._groups[0]._toggles[var].CheckBox.setCheckState(QtCore.Qt.Unchecked)
select._groups[0]._toggles[var].PlotAxis.setCurrentIndex(1)
select._groups[0]._toggles[var].CheckBox.clicked.emit(False)
self.assertImage('testEmpty.png')
def testRepr(self):
"""
Test python scripting.
"""
self.plot()
output, imports = self._control.repr()
self.assertNotIn("axes0.legend(loc='best')", output)
self.assertNotIn("axes1.legend(loc='best')", output)
self.assertNotIn("axes0.set_title('Title')", output)
self._control.Title.setText("Title")
self._control.Legend.setCheckState(QtCore.Qt.Checked)
self._control.Legend.clicked.emit(True)
self._control.Legend2.setCheckState(QtCore.Qt.Checked)
self._control.Legend2.clicked.emit(True)
output, imports = self._control.repr()
self.assertIn("axes0.legend(loc='best')", output)
self.assertIn("axes1.legend(loc='best')", output)
self.assertIn("axes0.set_title('Title')", output)
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2, buffer=True)
| lgpl-2.1 | -3,835,865,617,230,231,000 | 35.323308 | 98 | 0.645415 | false |
denys-duchier/django | tests/gis_tests/layermap/models.py | 21 | 2409 | from django.contrib.gis.db import models
class NamedModel(models.Model):
name = models.CharField(max_length=25)
class Meta:
abstract = True
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
class State(NamedModel):
pass
class County(NamedModel):
state = models.ForeignKey(State, models.CASCADE)
mpoly = models.MultiPolygonField(srid=4269) # Multipolygon in NAD83
class CountyFeat(NamedModel):
poly = models.PolygonField(srid=4269)
class City(NamedModel):
name_txt = models.TextField(default='')
name_short = models.CharField(max_length=5)
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
dt = models.DateField()
point = models.PointField()
class Meta:
app_label = 'layermap'
required_db_features = ['gis_enabled']
class Interstate(NamedModel):
length = models.DecimalField(max_digits=6, decimal_places=2)
path = models.LineStringField()
class Meta:
app_label = 'layermap'
required_db_features = ['gis_enabled']
# Same as `City` above, but for testing model inheritance.
class CityBase(NamedModel):
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
point = models.PointField()
class ICity1(CityBase):
dt = models.DateField()
class Meta(CityBase.Meta):
pass
class ICity2(ICity1):
dt_time = models.DateTimeField(auto_now=True)
class Meta(ICity1.Meta):
pass
class Invalid(models.Model):
point = models.PointField()
class Meta:
required_db_features = ['gis_enabled']
# Mapping dictionaries for the models above.
co_mapping = {
'name': 'Name',
# ForeignKey's use another mapping dictionary for the _related_ Model (State in this case).
'state': {'name': 'State'},
'mpoly': 'MULTIPOLYGON', # Will convert POLYGON features into MULTIPOLYGONS.
}
cofeat_mapping = {'name': 'Name',
'poly': 'POLYGON',
}
city_mapping = {'name': 'Name',
'population': 'Population',
'density': 'Density',
'dt': 'Created',
'point': 'POINT',
}
inter_mapping = {'name': 'Name',
'length': 'Length',
'path': 'LINESTRING',
}
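# Illustrative sketch (editor addition): how a mapping dictionary above is
# typically consumed by LayerMapping. The shapefile path is hypothetical and
# this helper is not used by the tests.
def _example_layer_mapping(shp_path='/path/to/counties.shp'):
    from django.contrib.gis.utils import LayerMapping
    lm = LayerMapping(County, shp_path, co_mapping, transform=False)
    lm.save(strict=True, verbose=False)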
| bsd-3-clause | -7,026,902,760,400,785,000 | 23.09 | 95 | 0.618099 | false |
Addepar/buck | python-dsl/buck_parser/glob_internal.py | 5 | 2442 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Glob implementation in python."""
from .util import is_special
def path_component_starts_with_dot(relative_path):
for p in relative_path.parts:
if p.startswith("."):
return True
return False
def glob_internal(
includes,
excludes,
project_root_relative_excludes,
include_dotfiles,
search_base,
project_root,
):
def includes_iterator():
for pattern in includes:
for path in search_base.glob(pattern):
# TODO(beng): Handle hidden files on Windows.
if path.is_file() and (
include_dotfiles
or not path_component_starts_with_dot(path.relative_to(search_base))
):
yield path
non_special_excludes = set()
match_excludes = set()
for pattern in excludes:
if is_special(pattern):
match_excludes.add(pattern)
else:
non_special_excludes.add(pattern)
def exclusion(path):
relative_to_search_base = path.relative_to(search_base)
if relative_to_search_base.as_posix() in non_special_excludes:
return True
for pattern in match_excludes:
result = relative_to_search_base.match(pattern, match_entire=True)
if result:
return True
relative_to_project_root = path.relative_to(project_root)
for pattern in project_root_relative_excludes:
result = relative_to_project_root.match(pattern, match_entire=True)
if result:
return True
return False
return sorted(
set(
[
str(p.relative_to(search_base))
for p in includes_iterator()
if not exclusion(p)
]
)
)
__all__ = [glob_internal]
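# Illustrative sketch (editor addition): a typical glob_internal() call. The
# patterns are invented; search_base and project_root are the pathlib-like
# objects the real callers pass in.
def _example_glob_internal(search_base, project_root):
    return glob_internal(
        includes=["src/**/*.py"],
        excludes=["src/generated/*.py"],
        project_root_relative_excludes=["buck-out/**"],
        include_dotfiles=False,
        search_base=search_base,
        project_root=project_root,
    )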
| apache-2.0 | 6,282,425,279,018,637,000 | 29.911392 | 88 | 0.612203 | false |
flightcoin/flightcoin | contrib/pyminer/pyminer.py | 1257 | 6438 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
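# Illustrative example (editor addition): the byte-order helpers above swap the
# bytes inside each 32-bit word and reverse word order; the miner uses them to
# reorder getwork data and hashes. Input values here are arbitrary.
def _example_byteswap():
    swapped = bytereverse(0x11223344)            # -> 0x44332211
    buf = struct.pack('@IIII', 1, 2, 3, 4)
    # reverse the bytes inside each 32-bit word, then the word order itself
    return swapped, wordreverse(bufreverse(buf))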
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit | -6,198,909,457,326,139,000 | 24.547619 | 84 | 0.648959 | false |
HKUST-SING/tensorflow | tensorflow/python/ops/cloud/bigquery_reader_ops_test.py | 12 | 9585 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BigQueryReader Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import re
import threading
from six.moves import SimpleHTTPServer
from six.moves import socketserver
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops.cloud import cloud
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
_PROJECT = "test-project"
_DATASET = "test-dataset"
_TABLE = "test-table"
# List representation of the test rows in the 'test-table' in BigQuery.
# The schema for each row is: [int64, string, float].
# The values for rows are generated such that some columns have null values. The
# general formula here is:
# - The int64 column is present in every row.
# - The string column is only available in even rows.
# - The float column is only available in every third row.
_ROWS = [[0, "s_0", 0.1], [1, None, None], [2, "s_2", None], [3, None, 3.1],
[4, "s_4", None], [5, None, None], [6, "s_6", 6.1], [7, None, None],
[8, "s_8", None], [9, None, 9.1]]
# Schema for 'test-table'.
# The schema currently has three columns: int64, string, and float
_SCHEMA = {
"kind": "bigquery#table",
"id": "test-project:test-dataset.test-table",
"schema": {
"fields": [{
"name": "int64_col",
"type": "INTEGER",
"mode": "NULLABLE"
}, {
"name": "string_col",
"type": "STRING",
"mode": "NULLABLE"
}, {
"name": "float_col",
"type": "FLOAT",
"mode": "NULLABLE"
}]
}
}
def _ConvertRowToExampleProto(row):
"""Converts the input row to an Example proto.
Args:
row: Input Row instance.
Returns:
An Example proto initialized with row values.
"""
example = example_pb2.Example()
example.features.feature["int64_col"].int64_list.value.append(row[0])
if row[1] is not None:
example.features.feature["string_col"].bytes_list.value.append(
compat.as_bytes(row[1]))
if row[2] is not None:
example.features.feature["float_col"].float_list.value.append(row[2])
return example
class FakeBigQueryServer(threading.Thread):
"""Fake http server to return schema and data for sample table."""
def __init__(self, address, port):
"""Creates a FakeBigQueryServer.
Args:
address: Server address
port: Server port. Pass 0 to automatically pick an empty port.
"""
threading.Thread.__init__(self)
self.handler = BigQueryRequestHandler
self.httpd = socketserver.TCPServer((address, port), self.handler)
def run(self):
self.httpd.serve_forever()
def shutdown(self):
self.httpd.shutdown()
self.httpd.socket.close()
class BigQueryRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Responds to BigQuery HTTP requests.
Attributes:
num_rows: number of rows in the underlying table served by this class.
"""
num_rows = 0
def do_GET(self):
if "data?maxResults=" not in self.path:
# This is a schema request.
_SCHEMA["numRows"] = self.num_rows
response = json.dumps(_SCHEMA)
else:
# This is a data request.
#
# Extract max results and start index.
max_results = int(re.findall(r"maxResults=(\d+)", self.path)[0])
start_index = int(re.findall(r"startIndex=(\d+)", self.path)[0])
# Send the rows as JSON.
rows = []
for row in _ROWS[start_index:start_index + max_results]:
row_json = {
"f": [{
"v": str(row[0])
}, {
"v": str(row[1]) if row[1] is not None else None
}, {
"v": str(row[2]) if row[2] is not None else None
}]
}
rows.append(row_json)
response = json.dumps({
"kind": "bigquery#table",
"id": "test-project:test-dataset.test-table",
"rows": rows
})
self.send_response(200)
self.end_headers()
self.wfile.write(compat.as_bytes(response))
def _SetUpQueue(reader):
"""Sets up a queue for a reader."""
queue = data_flow_ops.FIFOQueue(8, [types_pb2.DT_STRING], shapes=())
key, value = reader.read(queue)
queue.enqueue_many(reader.partitions()).run()
queue.close().run()
return key, value
class BigQueryReaderOpsTest(test.TestCase):
def setUp(self):
super(BigQueryReaderOpsTest, self).setUp()
self.server = FakeBigQueryServer("127.0.0.1", 0)
self.server.start()
logging.info("server address is %s:%s", self.server.httpd.server_address[0],
self.server.httpd.server_address[1])
# An override to bypass the GCP auth token retrieval logic
# in google_auth_provider.cc.
os.environ["GOOGLE_AUTH_TOKEN_FOR_TESTING"] = "not-used"
def tearDown(self):
self.server.shutdown()
super(BigQueryReaderOpsTest, self).tearDown()
def _ReadAndCheckRowsUsingFeatures(self, num_rows):
self.server.handler.num_rows = num_rows
with self.test_session() as sess:
feature_configs = {
"int64_col":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.int64),
"string_col":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.string, default_value="s_default"),
}
reader = cloud.BigQueryReader(
project_id=_PROJECT,
dataset_id=_DATASET,
table_id=_TABLE,
num_partitions=4,
features=feature_configs,
timestamp_millis=1,
test_end_point=("%s:%s" % (self.server.httpd.server_address[0],
self.server.httpd.server_address[1])))
key, value = _SetUpQueue(reader)
seen_rows = []
features = parsing_ops.parse_example(
array_ops.reshape(value, [1]), feature_configs)
for _ in range(num_rows):
int_value, str_value = sess.run(
[features["int64_col"], features["string_col"]])
# Parse values returned from the session.
self.assertEqual(int_value.shape, (1, 1))
self.assertEqual(str_value.shape, (1, 1))
int64_col = int_value[0][0]
string_col = str_value[0][0]
seen_rows.append(int64_col)
# Compare.
expected_row = _ROWS[int64_col]
self.assertEqual(int64_col, expected_row[0])
self.assertEqual(
compat.as_str(string_col), ("s_%d" % int64_col) if expected_row[1]
else "s_default")
self.assertItemsEqual(seen_rows, range(num_rows))
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
def testReadingSingleRowUsingFeatures(self):
self._ReadAndCheckRowsUsingFeatures(1)
def testReadingMultipleRowsUsingFeatures(self):
self._ReadAndCheckRowsUsingFeatures(10)
def testReadingMultipleRowsUsingColumns(self):
num_rows = 10
self.server.handler.num_rows = num_rows
with self.test_session() as sess:
reader = cloud.BigQueryReader(
project_id=_PROJECT,
dataset_id=_DATASET,
table_id=_TABLE,
num_partitions=4,
columns=["int64_col", "float_col", "string_col"],
timestamp_millis=1,
test_end_point=("%s:%s" % (self.server.httpd.server_address[0],
self.server.httpd.server_address[1])))
key, value = _SetUpQueue(reader)
seen_rows = []
for row_index in range(num_rows):
returned_row_id, example_proto = sess.run([key, value])
example = example_pb2.Example()
example.ParseFromString(example_proto)
self.assertIn("int64_col", example.features.feature)
feature = example.features.feature["int64_col"]
self.assertEqual(len(feature.int64_list.value), 1)
int64_col = feature.int64_list.value[0]
seen_rows.append(int64_col)
# Create our expected Example.
expected_example = example_pb2.Example()
expected_example = _ConvertRowToExampleProto(_ROWS[int64_col])
# Compare.
self.assertProtoEquals(example, expected_example)
self.assertEqual(row_index, int(returned_row_id))
self.assertItemsEqual(seen_rows, range(num_rows))
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
if __name__ == "__main__":
test.main()
| apache-2.0 | 6,620,960,106,350,109,000 | 32.513986 | 80 | 0.622744 | false |
kenorb-contrib/BitTorrent | launchmany-console.py | 2 | 6333 | #!/usr/bin/env python
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Written by John Hoffman
# Updated to 4.20 by David Harrison
app_name = "BitTorrent"
if __name__ == '__main__':
from BTL.translation import _
import sys
import os
from BitTorrent import platform
from BitTorrent.launchmanycore import LaunchMany
from BitTorrent.defaultargs import get_defaults
from BitTorrent.parseargs import parseargs, printHelp
from BitTorrent.prefs import Preferences
from BitTorrent import configfile
from BitTorrent import version
from BTL.platform import encode_for_filesystem, decode_from_filesystem
from BitTorrent import BTFailure
from BitTorrent import bt_log_fmt
from BTL.log import injectLogger
import logging
from logging import ERROR, WARNING, INFO
from BitTorrent import console, old_stderr, STDERR
exceptions = []
log = logging.getLogger('launchmany-console')
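# Minimal status "display": LaunchMany calls display() periodically, and the
# formatted status lines go to the logger instead of an interactive UI.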
class HeadlessDisplayer:
def display(self, data):
        # Format the status data and write it to the 'launchmany-console'
        # logger (records propagate up to the root handlers).
if not data:
log.info( _("no torrents"))
elif type(data) == str:
log.info(data)
else:
for x in data:
( name, status, progress, peers, seeds, seedsmsg,
uprate, dnrate, upamt, dnamt, size, t, msg ) = x
logging.getLogger('launchmany-console').info(
'"%s": "%s" (%s) - %sP%s%s u%0.1fK/s-d%0.1fK/s u%dK-d%dK "%s"' % (
name, status, progress, peers, seeds, seedsmsg,
uprate/1000, dnrate/1000, upamt/1024, dnamt/1024, msg))
return False
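# Replace the default value for `key` in a get_defaults()-style list of
# (name, value, doc) tuples and return the updated list.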
def modify_default( defaults_tuplelist, key, newvalue ):
name,value,doc = [(n,v,d) for (n,v,d) in defaults_tuplelist if n == key][0]
defaults_tuplelist = [(n,v,d) for (n,v,d) in defaults_tuplelist
if not n == key]
defaults_tuplelist.append( (key,newvalue,doc) )
return defaults_tuplelist
if __name__ == '__main__':
uiname = 'launchmany-console'
defaults = get_defaults(uiname)
try:
if len(sys.argv) < 2:
printHelp(uiname, defaults)
sys.exit(1)
# Modifying default values from get_defaults is annoying...
# Implementing specific default values for each uiname in
# defaultargs.py is even more annoying. --Dave
ddir = os.path.join( platform.get_dot_dir(), "launchmany-console" )
ddir = decode_from_filesystem(ddir)
modify_default(defaults, 'data_dir', ddir)
config, args = configfile.parse_configuration_and_args(defaults,
uiname, sys.argv[1:], 0, 1)
# returned from here config['save_in'] is /home/dave/Desktop/...
if args:
torrent_dir = args[0]
config['torrent_dir'] = decode_from_filesystem(torrent_dir)
else:
torrent_dir = config['torrent_dir']
torrent_dir,bad = encode_for_filesystem(torrent_dir)
if bad:
raise BTFailure(_("Warning: ")+config['torrent_dir']+
_(" is not a directory"))
if not os.path.isdir(torrent_dir):
raise BTFailure(_("Warning: ")+torrent_dir+
_(" is not a directory"))
        # The default behavior is to save files into the platform's
        # get_save_dir.  For launchmany, if no command-line argument changed
        # the save directory, the torrent directory is used instead.
#if config['save_in'] == platform.get_save_dir():
# config['save_in'] = config['torrent_dir']
if '--save_in' in sys.argv:
print "Don't use --save_in for launchmany-console. Saving files from " \
"many torrents in the same directory can result in filename collisions."
sys.exit(1)
# The default 'save_in' is likely to be something like /home/myname/BitTorrent Downloads
# but we would prefer that the 'save_in' be an empty string so that
# LaunchMany will save the file in the same location as the destination for the file.
# When downloading or seeding a large number of files we want to be sure there are
# no file name collisions which can occur when the same save_in directory is used for
# all torrents. When 'save in' is empty, the destination of the filename is used, which
# for save_as style 4 (the default) will be in the same directory as the .torrent file.
# If each .torrent file is in its own directory then filename collisions cannot occur.
config['save_in'] = u""
except BTFailure, e:
print _("error: ") + unicode(e.args[0]) + \
_("\nrun with no args for parameter explanations")
sys.exit(1)
except KeyboardInterrupt:
sys.exit(1)
d = HeadlessDisplayer()
config = Preferences().initWithDict(config)
injectLogger(use_syslog = False, capture_output = True, verbose = True,
log_level = logging.INFO, log_twisted = False )
logging.getLogger('').removeHandler(console) # remove handler installed by BitTorrent.__init__.
LaunchMany(config, d.display, 'launchmany-console')
logging.getLogger("").critical( "After return from LaunchMany" )
# Uncomment the following if it looks like threads are hanging around.
# monitor_thread can be found in cdv://cdv.bittorrent.com:6602/python-scripts
#import threading
#nondaemons = [d for d in threading.enumerate() if not d.isDaemon()]
#if len(nondaemons) > 1:
# import time
# from monitor_thread import print_stacks
# time.sleep(4)
# nondaemons = [d for d in threading.enumerate() if not d.isDaemon()]
# if len(nondaemons) > 1:
# print_stacks()
| gpl-3.0 | -6,538,406,648,143,412,000 | 41.790541 | 100 | 0.64156 | false |
flyher/pymo | android/pgs4a-0.9.6/python-install/lib/python2.7/test/test_xml_etree.py | 16 | 53139 | # xml.etree test. This file contains enough tests to make sure that
# all included components work as they should.
# Large parts are extracted from the upstream test suite.
# IMPORTANT: the same doctests are run from "test_xml_etree_c" in
# order to ensure consistency between the C implementation and the
# Python implementation.
#
# For this purpose, the module-level "ET" symbol is temporarily
# monkey-patched when running the "test_xml_etree_c" test suite.
# Don't re-import "xml.etree.ElementTree" module in the docstring,
# except if the test is specific to the Python implementation.
import sys
import cgi
from test import test_support
from test.test_support import findfile
from xml.etree import ElementTree as ET
SIMPLE_XMLFILE = findfile("simple.xml", subdir="xmltestdata")
SIMPLE_NS_XMLFILE = findfile("simple-ns.xml", subdir="xmltestdata")
SAMPLE_XML = """\
<body>
<tag class='a'>text</tag>
<tag class='b' />
<section>
<tag class='b' id='inner'>subtext</tag>
</section>
</body>
"""
SAMPLE_SECTION = """\
<section>
<tag class='b' id='inner'>subtext</tag>
<nexttag />
<nextsection>
<tag />
</nextsection>
</section>
"""
SAMPLE_XML_NS = """
<body xmlns="http://effbot.org/ns">
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
def sanity():
"""
Import sanity.
>>> from xml.etree import ElementTree
>>> from xml.etree import ElementInclude
>>> from xml.etree import ElementPath
"""
def check_method(method):
if not hasattr(method, '__call__'):
print method, "not callable"
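# Serialize `elem` through ElementTree.write() into a StringIO buffer and
# return the XML string (or the file object itself when to_string is False).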
def serialize(elem, to_string=True, **options):
import StringIO
file = StringIO.StringIO()
tree = ET.ElementTree(elem)
tree.write(file, **options)
if to_string:
return file.getvalue()
else:
file.seek(0)
return file
def summarize(elem):
if elem.tag == ET.Comment:
return "<Comment>"
return elem.tag
def summarize_list(seq):
return [summarize(elem) for elem in seq]
def normalize_crlf(tree):
for elem in tree.iter():
if elem.text:
elem.text = elem.text.replace("\r\n", "\n")
if elem.tail:
elem.tail = elem.tail.replace("\r\n", "\n")
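# check_string/check_mapping/check_element perform structural sanity checks
# on Element objects; they are exercised by the interface() doctest below.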
def check_string(string):
len(string)
for char in string:
if len(char) != 1:
print "expected one-character string, got %r" % char
new_string = string + ""
new_string = string + " "
string[:0]
def check_mapping(mapping):
len(mapping)
keys = mapping.keys()
items = mapping.items()
for key in keys:
item = mapping[key]
mapping["key"] = "value"
if mapping["key"] != "value":
print "expected value string, got %r" % mapping["key"]
def check_element(element):
if not ET.iselement(element):
print "not an element"
if not hasattr(element, "tag"):
print "no tag member"
if not hasattr(element, "attrib"):
print "no attrib member"
if not hasattr(element, "text"):
print "no text member"
if not hasattr(element, "tail"):
print "no tail member"
check_string(element.tag)
check_mapping(element.attrib)
if element.text is not None:
check_string(element.text)
if element.tail is not None:
check_string(element.tail)
for elem in element:
check_element(elem)
# --------------------------------------------------------------------
# element tree tests
def interface():
r"""
Test element tree interface.
>>> element = ET.Element("tag")
>>> check_element(element)
>>> tree = ET.ElementTree(element)
>>> check_element(tree.getroot())
>>> element = ET.Element("t\xe4g", key="value")
>>> tree = ET.ElementTree(element)
>>> repr(element) # doctest: +ELLIPSIS
"<Element 't\\xe4g' at 0x...>"
>>> element = ET.Element("tag", key="value")
Make sure all standard element methods exist.
>>> check_method(element.append)
>>> check_method(element.extend)
>>> check_method(element.insert)
>>> check_method(element.remove)
>>> check_method(element.getchildren)
>>> check_method(element.find)
>>> check_method(element.iterfind)
>>> check_method(element.findall)
>>> check_method(element.findtext)
>>> check_method(element.clear)
>>> check_method(element.get)
>>> check_method(element.set)
>>> check_method(element.keys)
>>> check_method(element.items)
>>> check_method(element.iter)
>>> check_method(element.itertext)
>>> check_method(element.getiterator)
These methods return an iterable. See bug 6472.
>>> check_method(element.iter("tag").next)
>>> check_method(element.iterfind("tag").next)
>>> check_method(element.iterfind("*").next)
>>> check_method(tree.iter("tag").next)
>>> check_method(tree.iterfind("tag").next)
>>> check_method(tree.iterfind("*").next)
These aliases are provided:
>>> assert ET.XML == ET.fromstring
>>> assert ET.PI == ET.ProcessingInstruction
>>> assert ET.XMLParser == ET.XMLTreeBuilder
"""
def simpleops():
"""
Basic method sanity checks.
>>> elem = ET.XML("<body><tag/></body>")
>>> serialize(elem)
'<body><tag /></body>'
>>> e = ET.Element("tag2")
>>> elem.append(e)
>>> serialize(elem)
'<body><tag /><tag2 /></body>'
>>> elem.remove(e)
>>> serialize(elem)
'<body><tag /></body>'
>>> elem.insert(0, e)
>>> serialize(elem)
'<body><tag2 /><tag /></body>'
>>> elem.remove(e)
>>> elem.extend([e])
>>> serialize(elem)
'<body><tag /><tag2 /></body>'
>>> elem.remove(e)
>>> element = ET.Element("tag", key="value")
>>> serialize(element) # 1
'<tag key="value" />'
>>> subelement = ET.Element("subtag")
>>> element.append(subelement)
>>> serialize(element) # 2
'<tag key="value"><subtag /></tag>'
>>> element.insert(0, subelement)
>>> serialize(element) # 3
'<tag key="value"><subtag /><subtag /></tag>'
>>> element.remove(subelement)
>>> serialize(element) # 4
'<tag key="value"><subtag /></tag>'
>>> element.remove(subelement)
>>> serialize(element) # 5
'<tag key="value" />'
>>> element.remove(subelement)
Traceback (most recent call last):
ValueError: list.remove(x): x not in list
>>> serialize(element) # 6
'<tag key="value" />'
>>> element[0:0] = [subelement, subelement, subelement]
>>> serialize(element[1])
'<subtag />'
>>> element[1:9] == [element[1], element[2]]
True
>>> element[:9:2] == [element[0], element[2]]
True
>>> del element[1:2]
>>> serialize(element)
'<tag key="value"><subtag /><subtag /></tag>'
"""
def cdata():
"""
Test CDATA handling (etc).
>>> serialize(ET.XML("<tag>hello</tag>"))
'<tag>hello</tag>'
>>> serialize(ET.XML("<tag>hello</tag>"))
'<tag>hello</tag>'
>>> serialize(ET.XML("<tag><![CDATA[hello]]></tag>"))
'<tag>hello</tag>'
"""
# Only with Python implementation
def simplefind():
"""
Test find methods using the elementpath fallback.
>>> from xml.etree import ElementTree
>>> CurrentElementPath = ElementTree.ElementPath
>>> ElementTree.ElementPath = ElementTree._SimpleElementPath()
>>> elem = ElementTree.XML(SAMPLE_XML)
>>> elem.find("tag").tag
'tag'
>>> ElementTree.ElementTree(elem).find("tag").tag
'tag'
>>> elem.findtext("tag")
'text'
>>> elem.findtext("tog")
>>> elem.findtext("tog", "default")
'default'
>>> ElementTree.ElementTree(elem).findtext("tag")
'text'
>>> summarize_list(elem.findall("tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
Path syntax doesn't work in this case.
>>> elem.find("section/tag")
>>> elem.findtext("section/tag")
>>> summarize_list(elem.findall("section/tag"))
[]
>>> ElementTree.ElementPath = CurrentElementPath
"""
def find():
"""
Test find methods (including xpath syntax).
>>> elem = ET.XML(SAMPLE_XML)
>>> elem.find("tag").tag
'tag'
>>> ET.ElementTree(elem).find("tag").tag
'tag'
>>> elem.find("section/tag").tag
'tag'
>>> elem.find("./tag").tag
'tag'
>>> ET.ElementTree(elem).find("./tag").tag
'tag'
>>> ET.ElementTree(elem).find("/tag").tag
'tag'
>>> elem[2] = ET.XML(SAMPLE_SECTION)
>>> elem.find("section/nexttag").tag
'nexttag'
>>> ET.ElementTree(elem).find("section/tag").tag
'tag'
>>> ET.ElementTree(elem).find("tog")
>>> ET.ElementTree(elem).find("tog/foo")
>>> elem.findtext("tag")
'text'
>>> elem.findtext("section/nexttag")
''
>>> elem.findtext("section/nexttag", "default")
''
>>> elem.findtext("tog")
>>> elem.findtext("tog", "default")
'default'
>>> ET.ElementTree(elem).findtext("tag")
'text'
>>> ET.ElementTree(elem).findtext("tog/foo")
>>> ET.ElementTree(elem).findtext("tog/foo", "default")
'default'
>>> ET.ElementTree(elem).findtext("./tag")
'text'
>>> ET.ElementTree(elem).findtext("/tag")
'text'
>>> elem.findtext("section/tag")
'subtext'
>>> ET.ElementTree(elem).findtext("section/tag")
'subtext'
>>> summarize_list(elem.findall("."))
['body']
>>> summarize_list(elem.findall("tag"))
['tag', 'tag']
>>> summarize_list(elem.findall("tog"))
[]
>>> summarize_list(elem.findall("tog/foo"))
[]
>>> summarize_list(elem.findall("*"))
['tag', 'tag', 'section']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag', 'tag']
>>> summarize_list(elem.findall("section/tag"))
['tag']
>>> summarize_list(elem.findall("section//tag"))
['tag', 'tag']
>>> summarize_list(elem.findall("section/*"))
['tag', 'nexttag', 'nextsection']
>>> summarize_list(elem.findall("section//*"))
['tag', 'nexttag', 'nextsection', 'tag']
>>> summarize_list(elem.findall("section/.//*"))
['tag', 'nexttag', 'nextsection', 'tag']
>>> summarize_list(elem.findall("*/*"))
['tag', 'nexttag', 'nextsection']
>>> summarize_list(elem.findall("*//*"))
['tag', 'nexttag', 'nextsection', 'tag']
>>> summarize_list(elem.findall("*/tag"))
['tag']
>>> summarize_list(elem.findall("*/./tag"))
['tag']
>>> summarize_list(elem.findall("./tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag', 'tag']
>>> summarize_list(elem.findall("././tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag[@class]"))
['tag', 'tag', 'tag']
>>> summarize_list(elem.findall(".//tag[@class='a']"))
['tag']
>>> summarize_list(elem.findall(".//tag[@class='b']"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag[@id]"))
['tag']
>>> summarize_list(elem.findall(".//section[tag]"))
['section']
>>> summarize_list(elem.findall(".//section[element]"))
[]
>>> summarize_list(elem.findall("../tag"))
[]
>>> summarize_list(elem.findall("section/../tag"))
['tag', 'tag']
>>> summarize_list(ET.ElementTree(elem).findall("./tag"))
['tag', 'tag']
Following example is invalid in 1.2.
A leading '*' is assumed in 1.3.
>>> elem.findall("section//") == elem.findall("section//*")
True
ET's Path module handles this case incorrectly; this gives
a warning in 1.3, and the behaviour will be modified in 1.4.
>>> summarize_list(ET.ElementTree(elem).findall("/tag"))
['tag', 'tag']
>>> elem = ET.XML(SAMPLE_XML_NS)
>>> summarize_list(elem.findall("tag"))
[]
>>> summarize_list(elem.findall("{http://effbot.org/ns}tag"))
['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
>>> summarize_list(elem.findall(".//{http://effbot.org/ns}tag"))
['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
"""
def file_init():
"""
>>> import StringIO
>>> stringfile = StringIO.StringIO(SAMPLE_XML)
>>> tree = ET.ElementTree(file=stringfile)
>>> tree.find("tag").tag
'tag'
>>> tree.find("section/tag").tag
'tag'
>>> tree = ET.ElementTree(file=SIMPLE_XMLFILE)
>>> tree.find("element").tag
'element'
>>> tree.find("element/../empty-element").tag
'empty-element'
"""
def bad_find():
"""
Check bad or unsupported path expressions.
>>> elem = ET.XML(SAMPLE_XML)
>>> elem.findall("/tag")
Traceback (most recent call last):
SyntaxError: cannot use absolute path on element
"""
def path_cache():
"""
Check that the path cache behaves sanely.
>>> elem = ET.XML(SAMPLE_XML)
>>> for i in range(10): ET.ElementTree(elem).find('./'+str(i))
>>> cache_len_10 = len(ET.ElementPath._cache)
>>> for i in range(10): ET.ElementTree(elem).find('./'+str(i))
>>> len(ET.ElementPath._cache) == cache_len_10
True
>>> for i in range(20): ET.ElementTree(elem).find('./'+str(i))
>>> len(ET.ElementPath._cache) > cache_len_10
True
>>> for i in range(600): ET.ElementTree(elem).find('./'+str(i))
>>> len(ET.ElementPath._cache) < 500
True
"""
def copy():
"""
Test copy handling (etc).
>>> import copy
>>> e1 = ET.XML("<tag>hello<foo/></tag>")
>>> e2 = copy.copy(e1)
>>> e3 = copy.deepcopy(e1)
>>> e1.find("foo").tag = "bar"
>>> serialize(e1)
'<tag>hello<bar /></tag>'
>>> serialize(e2)
'<tag>hello<bar /></tag>'
>>> serialize(e3)
'<tag>hello<foo /></tag>'
"""
def attrib():
"""
Test attribute handling.
>>> elem = ET.Element("tag")
>>> elem.get("key") # 1.1
>>> elem.get("key", "default") # 1.2
'default'
>>> elem.set("key", "value")
>>> elem.get("key") # 1.3
'value'
>>> elem = ET.Element("tag", key="value")
>>> elem.get("key") # 2.1
'value'
>>> elem.attrib # 2.2
{'key': 'value'}
>>> attrib = {"key": "value"}
>>> elem = ET.Element("tag", attrib)
>>> attrib.clear() # check for aliasing issues
>>> elem.get("key") # 3.1
'value'
>>> elem.attrib # 3.2
{'key': 'value'}
>>> attrib = {"key": "value"}
>>> elem = ET.Element("tag", **attrib)
>>> attrib.clear() # check for aliasing issues
>>> elem.get("key") # 4.1
'value'
>>> elem.attrib # 4.2
{'key': 'value'}
>>> elem = ET.Element("tag", {"key": "other"}, key="value")
>>> elem.get("key") # 5.1
'value'
>>> elem.attrib # 5.2
{'key': 'value'}
>>> elem = ET.Element('test')
>>> elem.text = "aa"
>>> elem.set('testa', 'testval')
>>> elem.set('testb', 'test2')
>>> ET.tostring(elem)
'<test testa="testval" testb="test2">aa</test>'
>>> sorted(elem.keys())
['testa', 'testb']
>>> sorted(elem.items())
[('testa', 'testval'), ('testb', 'test2')]
>>> elem.attrib['testb']
'test2'
>>> elem.attrib['testb'] = 'test1'
>>> elem.attrib['testc'] = 'test2'
>>> ET.tostring(elem)
'<test testa="testval" testb="test1" testc="test2">aa</test>'
"""
def makeelement():
"""
Test makeelement handling.
>>> elem = ET.Element("tag")
>>> attrib = {"key": "value"}
>>> subelem = elem.makeelement("subtag", attrib)
>>> if subelem.attrib is attrib:
... print "attrib aliasing"
>>> elem.append(subelem)
>>> serialize(elem)
'<tag><subtag key="value" /></tag>'
>>> elem.clear()
>>> serialize(elem)
'<tag />'
>>> elem.append(subelem)
>>> serialize(elem)
'<tag><subtag key="value" /></tag>'
>>> elem.extend([subelem, subelem])
>>> serialize(elem)
'<tag><subtag key="value" /><subtag key="value" /><subtag key="value" /></tag>'
>>> elem[:] = [subelem]
>>> serialize(elem)
'<tag><subtag key="value" /></tag>'
>>> elem[:] = tuple([subelem])
>>> serialize(elem)
'<tag><subtag key="value" /></tag>'
"""
def parsefile():
"""
Test parsing from file.
>>> tree = ET.parse(SIMPLE_XMLFILE)
>>> normalize_crlf(tree)
>>> tree.write(sys.stdout)
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
>>> tree = ET.parse(SIMPLE_NS_XMLFILE)
>>> normalize_crlf(tree)
>>> tree.write(sys.stdout)
<ns0:root xmlns:ns0="namespace">
<ns0:element key="value">text</ns0:element>
<ns0:element>text</ns0:element>tail
<ns0:empty-element />
</ns0:root>
>>> with open(SIMPLE_XMLFILE) as f:
... data = f.read()
>>> parser = ET.XMLParser()
>>> parser.version # doctest: +ELLIPSIS
'Expat ...'
>>> parser.feed(data)
>>> print serialize(parser.close())
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
>>> parser = ET.XMLTreeBuilder() # 1.2 compatibility
>>> parser.feed(data)
>>> print serialize(parser.close())
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
>>> target = ET.TreeBuilder()
>>> parser = ET.XMLParser(target=target)
>>> parser.feed(data)
>>> print serialize(parser.close())
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
"""
def parseliteral():
"""
>>> element = ET.XML("<html><body>text</body></html>")
>>> ET.ElementTree(element).write(sys.stdout)
<html><body>text</body></html>
>>> element = ET.fromstring("<html><body>text</body></html>")
>>> ET.ElementTree(element).write(sys.stdout)
<html><body>text</body></html>
>>> sequence = ["<html><body>", "text</bo", "dy></html>"]
>>> element = ET.fromstringlist(sequence)
>>> print ET.tostring(element)
<html><body>text</body></html>
>>> print "".join(ET.tostringlist(element))
<html><body>text</body></html>
>>> ET.tostring(element, "ascii")
"<?xml version='1.0' encoding='ascii'?>\\n<html><body>text</body></html>"
>>> _, ids = ET.XMLID("<html><body>text</body></html>")
>>> len(ids)
0
>>> _, ids = ET.XMLID("<html><body id='body'>text</body></html>")
>>> len(ids)
1
>>> ids["body"].tag
'body'
"""
def iterparse():
"""
Test iterparse interface.
>>> iterparse = ET.iterparse
>>> context = iterparse(SIMPLE_XMLFILE)
>>> action, elem = next(context)
>>> print action, elem.tag
end element
>>> for action, elem in context:
... print action, elem.tag
end element
end empty-element
end root
>>> context.root.tag
'root'
>>> context = iterparse(SIMPLE_NS_XMLFILE)
>>> for action, elem in context:
... print action, elem.tag
end {namespace}element
end {namespace}element
end {namespace}empty-element
end {namespace}root
>>> events = ()
>>> context = iterparse(SIMPLE_XMLFILE, events)
>>> for action, elem in context:
... print action, elem.tag
>>> events = ()
>>> context = iterparse(SIMPLE_XMLFILE, events=events)
>>> for action, elem in context:
... print action, elem.tag
>>> events = ("start", "end")
>>> context = iterparse(SIMPLE_XMLFILE, events)
>>> for action, elem in context:
... print action, elem.tag
start root
start element
end element
start element
end element
start empty-element
end empty-element
end root
>>> events = ("start", "end", "start-ns", "end-ns")
>>> context = iterparse(SIMPLE_NS_XMLFILE, events)
>>> for action, elem in context:
... if action in ("start", "end"):
... print action, elem.tag
... else:
... print action, elem
start-ns ('', 'namespace')
start {namespace}root
start {namespace}element
end {namespace}element
start {namespace}element
end {namespace}element
start {namespace}empty-element
end {namespace}empty-element
end {namespace}root
end-ns None
>>> events = ("start", "end", "bogus")
>>> with open(SIMPLE_XMLFILE, "rb") as f:
... iterparse(f, events)
Traceback (most recent call last):
ValueError: unknown event 'bogus'
>>> import StringIO
>>> source = StringIO.StringIO(
... "<?xml version='1.0' encoding='iso-8859-1'?>\\n"
... "<body xmlns='http://éffbot.org/ns'\\n"
... " xmlns:cl\\xe9='http://effbot.org/ns'>text</body>\\n")
>>> events = ("start-ns",)
>>> context = iterparse(source, events)
>>> for action, elem in context:
... print action, elem
start-ns ('', u'http://\\xe9ffbot.org/ns')
start-ns (u'cl\\xe9', 'http://effbot.org/ns')
>>> source = StringIO.StringIO("<document />junk")
>>> try:
... for action, elem in iterparse(source):
... print action, elem.tag
... except ET.ParseError, v:
... print v
junk after document element: line 1, column 12
"""
def writefile():
"""
>>> elem = ET.Element("tag")
>>> elem.text = "text"
>>> serialize(elem)
'<tag>text</tag>'
>>> ET.SubElement(elem, "subtag").text = "subtext"
>>> serialize(elem)
'<tag>text<subtag>subtext</subtag></tag>'
Test tag suppression
>>> elem.tag = None
>>> serialize(elem)
'text<subtag>subtext</subtag>'
>>> elem.insert(0, ET.Comment("comment"))
>>> serialize(elem) # assumes 1.3
'text<!--comment--><subtag>subtext</subtag>'
>>> elem[0] = ET.PI("key", "value")
>>> serialize(elem)
'text<?key value?><subtag>subtext</subtag>'
"""
def custom_builder():
"""
Test parser w. custom builder.
>>> with open(SIMPLE_XMLFILE) as f:
... data = f.read()
>>> class Builder:
... def start(self, tag, attrib):
... print "start", tag
... def end(self, tag):
... print "end", tag
... def data(self, text):
... pass
>>> builder = Builder()
>>> parser = ET.XMLParser(target=builder)
>>> parser.feed(data)
start root
start element
end element
start element
end element
start empty-element
end empty-element
end root
>>> with open(SIMPLE_NS_XMLFILE) as f:
... data = f.read()
>>> class Builder:
... def start(self, tag, attrib):
... print "start", tag
... def end(self, tag):
... print "end", tag
... def data(self, text):
... pass
... def pi(self, target, data):
... print "pi", target, repr(data)
... def comment(self, data):
... print "comment", repr(data)
>>> builder = Builder()
>>> parser = ET.XMLParser(target=builder)
>>> parser.feed(data)
pi pi 'data'
comment ' comment '
start {namespace}root
start {namespace}element
end {namespace}element
start {namespace}element
end {namespace}element
start {namespace}empty-element
end {namespace}empty-element
end {namespace}root
"""
def getchildren():
"""
Test Element.getchildren()
>>> with open(SIMPLE_XMLFILE, "r") as f:
... tree = ET.parse(f)
>>> for elem in tree.getroot().iter():
... summarize_list(elem.getchildren())
['element', 'element', 'empty-element']
[]
[]
[]
>>> for elem in tree.getiterator():
... summarize_list(elem.getchildren())
['element', 'element', 'empty-element']
[]
[]
[]
>>> elem = ET.XML(SAMPLE_XML)
>>> len(elem.getchildren())
3
>>> len(elem[2].getchildren())
1
>>> elem[:] == elem.getchildren()
True
>>> child1 = elem[0]
>>> child2 = elem[2]
>>> del elem[1:2]
>>> len(elem.getchildren())
2
>>> child1 == elem[0]
True
>>> child2 == elem[1]
True
>>> elem[0:2] = [child2, child1]
>>> child2 == elem[0]
True
>>> child1 == elem[1]
True
>>> child1 == elem[0]
False
>>> elem.clear()
>>> elem.getchildren()
[]
"""
def writestring():
"""
>>> elem = ET.XML("<html><body>text</body></html>")
>>> ET.tostring(elem)
'<html><body>text</body></html>'
>>> elem = ET.fromstring("<html><body>text</body></html>")
>>> ET.tostring(elem)
'<html><body>text</body></html>'
"""
def check_encoding(encoding):
"""
>>> check_encoding("ascii")
>>> check_encoding("us-ascii")
>>> check_encoding("iso-8859-1")
>>> check_encoding("iso-8859-15")
>>> check_encoding("cp437")
>>> check_encoding("mac-roman")
"""
ET.XML("<?xml version='1.0' encoding='%s'?><xml />" % encoding)
def encoding():
r"""
Test encoding issues.
>>> elem = ET.Element("tag")
>>> elem.text = u"abc"
>>> serialize(elem)
'<tag>abc</tag>'
>>> serialize(elem, encoding="utf-8")
'<tag>abc</tag>'
>>> serialize(elem, encoding="us-ascii")
'<tag>abc</tag>'
>>> serialize(elem, encoding="iso-8859-1")
"<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>abc</tag>"
>>> elem.text = "<&\"\'>"
>>> serialize(elem)
'<tag><&"\'></tag>'
>>> serialize(elem, encoding="utf-8")
'<tag><&"\'></tag>'
>>> serialize(elem, encoding="us-ascii") # cdata characters
'<tag><&"\'></tag>'
>>> serialize(elem, encoding="iso-8859-1")
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag><&"\'></tag>'
>>> elem.attrib["key"] = "<&\"\'>"
>>> elem.text = None
>>> serialize(elem)
'<tag key="<&"\'>" />'
>>> serialize(elem, encoding="utf-8")
'<tag key="<&"\'>" />'
>>> serialize(elem, encoding="us-ascii")
'<tag key="<&"\'>" />'
>>> serialize(elem, encoding="iso-8859-1")
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="<&"\'>" />'
>>> elem.text = u'\xe5\xf6\xf6<>'
>>> elem.attrib.clear()
>>> serialize(elem)
    '<tag>&#229;&#246;&#246;&lt;&gt;</tag>'
>>> serialize(elem, encoding="utf-8")
'<tag>\xc3\xa5\xc3\xb6\xc3\xb6<></tag>'
>>> serialize(elem, encoding="us-ascii")
    '<tag>&#229;&#246;&#246;&lt;&gt;</tag>'
>>> serialize(elem, encoding="iso-8859-1")
"<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>\xe5\xf6\xf6<></tag>"
>>> elem.attrib["key"] = u'\xe5\xf6\xf6<>'
>>> elem.text = None
>>> serialize(elem)
'<tag key="åöö<>" />'
>>> serialize(elem, encoding="utf-8")
'<tag key="\xc3\xa5\xc3\xb6\xc3\xb6<>" />'
>>> serialize(elem, encoding="us-ascii")
'<tag key="åöö<>" />'
>>> serialize(elem, encoding="iso-8859-1")
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="\xe5\xf6\xf6<>" />'
"""
def methods():
r"""
Test serialization methods.
>>> e = ET.XML("<html><link/><script>1 < 2</script></html>")
>>> e.tail = "\n"
>>> serialize(e)
'<html><link /><script>1 < 2</script></html>\n'
>>> serialize(e, method=None)
'<html><link /><script>1 < 2</script></html>\n'
>>> serialize(e, method="xml")
'<html><link /><script>1 < 2</script></html>\n'
>>> serialize(e, method="html")
'<html><link><script>1 < 2</script></html>\n'
>>> serialize(e, method="text")
'1 < 2\n'
"""
def iterators():
"""
Test iterators.
>>> e = ET.XML("<html><body>this is a <i>paragraph</i>.</body>..</html>")
>>> summarize_list(e.iter())
['html', 'body', 'i']
>>> summarize_list(e.find("body").iter())
['body', 'i']
>>> summarize(next(e.iter()))
'html'
>>> "".join(e.itertext())
'this is a paragraph...'
>>> "".join(e.find("body").itertext())
'this is a paragraph.'
>>> next(e.itertext())
'this is a '
Method iterparse should return an iterator. See bug 6472.
>>> sourcefile = serialize(e, to_string=False)
>>> next(ET.iterparse(sourcefile)) # doctest: +ELLIPSIS
('end', <Element 'i' at 0x...>)
>>> tree = ET.ElementTree(None)
>>> tree.iter()
Traceback (most recent call last):
AttributeError: 'NoneType' object has no attribute 'iter'
"""
ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
%user-entities;
]>
<document>&entity;</document>
"""
def entity():
"""
Test entity handling.
1) good entities
>>> e = ET.XML("<document title='舰'>test</document>")
>>> serialize(e)
'<document title="舰">test</document>'
2) bad entities
>>> ET.XML("<document>&entity;</document>")
Traceback (most recent call last):
ParseError: undefined entity: line 1, column 10
>>> ET.XML(ENTITY_XML)
Traceback (most recent call last):
ParseError: undefined entity &entity;: line 5, column 10
3) custom entity
>>> parser = ET.XMLParser()
>>> parser.entity["entity"] = "text"
>>> parser.feed(ENTITY_XML)
>>> root = parser.close()
>>> serialize(root)
'<document>text</document>'
"""
def error(xml):
"""
Test error handling.
>>> issubclass(ET.ParseError, SyntaxError)
True
>>> error("foo").position
(1, 0)
>>> error("<tag>&foo;</tag>").position
(1, 5)
>>> error("foobar<").position
(1, 6)
"""
try:
ET.XML(xml)
except ET.ParseError:
return sys.exc_value
def namespace():
"""
Test namespace issues.
1) xml namespace
>>> elem = ET.XML("<tag xml:lang='en' />")
>>> serialize(elem) # 1.1
'<tag xml:lang="en" />'
2) other "well-known" namespaces
>>> elem = ET.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />")
>>> serialize(elem) # 2.1
'<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />'
>>> elem = ET.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />")
>>> serialize(elem) # 2.2
'<html:html xmlns:html="http://www.w3.org/1999/xhtml" />'
>>> elem = ET.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />")
>>> serialize(elem) # 2.3
'<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />'
3) unknown namespaces
>>> elem = ET.XML(SAMPLE_XML_NS)
>>> print serialize(elem)
<ns0:body xmlns:ns0="http://effbot.org/ns">
<ns0:tag>text</ns0:tag>
<ns0:tag />
<ns0:section>
<ns0:tag>subtext</ns0:tag>
</ns0:section>
</ns0:body>
"""
def qname():
"""
Test QName handling.
1) decorated tags
>>> elem = ET.Element("{uri}tag")
>>> serialize(elem) # 1.1
'<ns0:tag xmlns:ns0="uri" />'
>>> elem = ET.Element(ET.QName("{uri}tag"))
>>> serialize(elem) # 1.2
'<ns0:tag xmlns:ns0="uri" />'
>>> elem = ET.Element(ET.QName("uri", "tag"))
>>> serialize(elem) # 1.3
'<ns0:tag xmlns:ns0="uri" />'
>>> elem = ET.Element(ET.QName("uri", "tag"))
>>> subelem = ET.SubElement(elem, ET.QName("uri", "tag1"))
>>> subelem = ET.SubElement(elem, ET.QName("uri", "tag2"))
>>> serialize(elem) # 1.4
'<ns0:tag xmlns:ns0="uri"><ns0:tag1 /><ns0:tag2 /></ns0:tag>'
2) decorated attributes
>>> elem.clear()
>>> elem.attrib["{uri}key"] = "value"
>>> serialize(elem) # 2.1
'<ns0:tag xmlns:ns0="uri" ns0:key="value" />'
>>> elem.clear()
>>> elem.attrib[ET.QName("{uri}key")] = "value"
>>> serialize(elem) # 2.2
'<ns0:tag xmlns:ns0="uri" ns0:key="value" />'
3) decorated values are not converted by default, but the
QName wrapper can be used for values
>>> elem.clear()
>>> elem.attrib["{uri}key"] = "{uri}value"
>>> serialize(elem) # 3.1
'<ns0:tag xmlns:ns0="uri" ns0:key="{uri}value" />'
>>> elem.clear()
>>> elem.attrib["{uri}key"] = ET.QName("{uri}value")
>>> serialize(elem) # 3.2
'<ns0:tag xmlns:ns0="uri" ns0:key="ns0:value" />'
>>> elem.clear()
>>> subelem = ET.Element("tag")
>>> subelem.attrib["{uri1}key"] = ET.QName("{uri2}value")
>>> elem.append(subelem)
>>> elem.append(subelem)
>>> serialize(elem) # 3.3
'<ns0:tag xmlns:ns0="uri" xmlns:ns1="uri1" xmlns:ns2="uri2"><tag ns1:key="ns2:value" /><tag ns1:key="ns2:value" /></ns0:tag>'
4) Direct QName tests
>>> str(ET.QName('ns', 'tag'))
'{ns}tag'
>>> str(ET.QName('{ns}tag'))
'{ns}tag'
>>> q1 = ET.QName('ns', 'tag')
>>> q2 = ET.QName('ns', 'tag')
>>> q1 == q2
True
>>> q2 = ET.QName('ns', 'other-tag')
>>> q1 == q2
False
>>> q1 == 'ns:tag'
False
>>> q1 == '{ns}tag'
True
"""
def doctype_public():
"""
Test PUBLIC doctype.
>>> elem = ET.XML('<!DOCTYPE html PUBLIC'
... ' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
... ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
... '<html>text</html>')
"""
def xpath_tokenizer(p):
"""
Test the XPath tokenizer.
>>> # tests from the xml specification
>>> xpath_tokenizer("*")
['*']
>>> xpath_tokenizer("text()")
['text', '()']
>>> xpath_tokenizer("@name")
['@', 'name']
>>> xpath_tokenizer("@*")
['@', '*']
>>> xpath_tokenizer("para[1]")
['para', '[', '1', ']']
>>> xpath_tokenizer("para[last()]")
['para', '[', 'last', '()', ']']
>>> xpath_tokenizer("*/para")
['*', '/', 'para']
>>> xpath_tokenizer("/doc/chapter[5]/section[2]")
['/', 'doc', '/', 'chapter', '[', '5', ']', '/', 'section', '[', '2', ']']
>>> xpath_tokenizer("chapter//para")
['chapter', '//', 'para']
>>> xpath_tokenizer("//para")
['//', 'para']
>>> xpath_tokenizer("//olist/item")
['//', 'olist', '/', 'item']
>>> xpath_tokenizer(".")
['.']
>>> xpath_tokenizer(".//para")
['.', '//', 'para']
>>> xpath_tokenizer("..")
['..']
>>> xpath_tokenizer("../@lang")
['..', '/', '@', 'lang']
>>> xpath_tokenizer("chapter[title]")
['chapter', '[', 'title', ']']
>>> xpath_tokenizer("employee[@secretary and @assistant]")
['employee', '[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']']
>>> # additional tests
>>> xpath_tokenizer("{http://spam}egg")
['{http://spam}egg']
>>> xpath_tokenizer("./spam.egg")
['.', '/', 'spam.egg']
>>> xpath_tokenizer(".//{http://spam}egg")
['.', '//', '{http://spam}egg']
"""
from xml.etree import ElementPath
out = []
for op, tag in ElementPath.xpath_tokenizer(p):
out.append(op or tag)
return out
def processinginstruction():
"""
Test ProcessingInstruction directly
>>> ET.tostring(ET.ProcessingInstruction('test', 'instruction'))
'<?test instruction?>'
>>> ET.tostring(ET.PI('test', 'instruction'))
'<?test instruction?>'
Issue #2746
>>> ET.tostring(ET.PI('test', '<testing&>'))
'<?test <testing&>?>'
>>> ET.tostring(ET.PI('test', u'<testing&>\xe3'), 'latin1')
"<?xml version='1.0' encoding='latin1'?>\\n<?test <testing&>\\xe3?>"
"""
#
# xinclude tests (samples from appendix C of the xinclude specification)
XINCLUDE = {}
XINCLUDE["C1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml"/>
</document>
"""
XINCLUDE["disclaimer.xml"] = """\
<?xml version='1.0'?>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
"""
XINCLUDE["C2.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been accessed
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["count.txt"] = "324387"
XINCLUDE["C2b.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been <em>accessed</em>
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["C3.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source of the "data.xml" resource:</p>
<example><xi:include href="data.xml" parse="text"/></example>
</document>
"""
XINCLUDE["data.xml"] = """\
<?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
"""
XINCLUDE["C5.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="example.txt" parse="text">
<xi:fallback>
<xi:include href="fallback-example.txt" parse="text">
<xi:fallback><a href="mailto:[email protected]">Report error</a></xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</div>
"""
XINCLUDE["default.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>Example.</p>
<xi:include href="{}"/>
</document>
""".format(cgi.escape(SIMPLE_XMLFILE, True))
def xinclude_loader(href, parse="xml", encoding=None):
try:
data = XINCLUDE[href]
except KeyError:
raise IOError("resource not found")
if parse == "xml":
from xml.etree.ElementTree import XML
return XML(data)
return data
def xinclude():
r"""
Basic inclusion example (XInclude C.1)
>>> from xml.etree import ElementTree as ET
>>> from xml.etree import ElementInclude
>>> document = xinclude_loader("C1.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print serialize(document) # C1
<document>
<p>120 Mz is adequate for an average home user.</p>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
</document>
Textual inclusion example (XInclude C.2)
>>> document = xinclude_loader("C2.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print serialize(document) # C2
<document>
<p>This document has been accessed
324387 times.</p>
</document>
Textual inclusion after sibling element (based on modified XInclude C.2)
>>> document = xinclude_loader("C2b.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print(serialize(document)) # C2b
<document>
<p>This document has been <em>accessed</em>
324387 times.</p>
</document>
Textual inclusion of XML example (XInclude C.3)
>>> document = xinclude_loader("C3.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print serialize(document) # C3
<document>
<p>The following is the source of the "data.xml" resource:</p>
<example><?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
</example>
</document>
Fallback example (XInclude C.5)
Note! Fallback support is not yet implemented
>>> document = xinclude_loader("C5.xml")
>>> ElementInclude.include(document, xinclude_loader)
Traceback (most recent call last):
IOError: resource not found
>>> # print serialize(document) # C5
"""
def xinclude_default():
"""
>>> from xml.etree import ElementInclude
>>> document = xinclude_loader("default.xml")
>>> ElementInclude.include(document)
>>> print serialize(document) # default
<document>
<p>Example.</p>
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
</document>
"""
#
# badly formatted xi:include tags
XINCLUDE_BAD = {}
XINCLUDE_BAD["B1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml" parse="BAD_TYPE"/>
</document>
"""
XINCLUDE_BAD["B2.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:fallback></xi:fallback>
</div>
"""
def xinclude_failures():
r"""
Test failure to locate included XML file.
>>> from xml.etree import ElementInclude
>>> def none_loader(href, parser, encoding=None):
... return None
>>> document = ET.XML(XINCLUDE["C1.xml"])
>>> ElementInclude.include(document, loader=none_loader)
Traceback (most recent call last):
FatalIncludeError: cannot load 'disclaimer.xml' as 'xml'
Test failure to locate included text file.
>>> document = ET.XML(XINCLUDE["C2.xml"])
>>> ElementInclude.include(document, loader=none_loader)
Traceback (most recent call last):
FatalIncludeError: cannot load 'count.txt' as 'text'
Test bad parse type.
>>> document = ET.XML(XINCLUDE_BAD["B1.xml"])
>>> ElementInclude.include(document, loader=none_loader)
Traceback (most recent call last):
FatalIncludeError: unknown parse type in xi:include tag ('BAD_TYPE')
Test xi:fallback outside xi:include.
>>> document = ET.XML(XINCLUDE_BAD["B2.xml"])
>>> ElementInclude.include(document, loader=none_loader)
Traceback (most recent call last):
FatalIncludeError: xi:fallback tag must be child of xi:include ('{http://www.w3.org/2001/XInclude}fallback')
"""
# --------------------------------------------------------------------
# reported bugs
def bug_xmltoolkit21():
"""
marshaller gives obscure errors for non-string values
>>> elem = ET.Element(123)
>>> serialize(elem) # tag
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ET.Element("elem")
>>> elem.text = 123
>>> serialize(elem) # text
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ET.Element("elem")
>>> elem.tail = 123
>>> serialize(elem) # tail
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ET.Element("elem")
>>> elem.set(123, "123")
>>> serialize(elem) # attribute key
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ET.Element("elem")
>>> elem.set("123", 123)
>>> serialize(elem) # attribute value
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
"""
def bug_xmltoolkit25():
"""
typo in ElementTree.findtext
>>> elem = ET.XML(SAMPLE_XML)
>>> tree = ET.ElementTree(elem)
>>> tree.findtext("tag")
'text'
>>> tree.findtext("section/tag")
'subtext'
"""
def bug_xmltoolkit28():
"""
.//tag causes exceptions
>>> tree = ET.XML("<doc><table><tbody/></table></doc>")
>>> summarize_list(tree.findall(".//thead"))
[]
>>> summarize_list(tree.findall(".//tbody"))
['tbody']
"""
def bug_xmltoolkitX1():
"""
dump() doesn't flush the output buffer
>>> tree = ET.XML("<doc><table><tbody/></table></doc>")
>>> ET.dump(tree); sys.stdout.write("tail")
<doc><table><tbody /></table></doc>
tail
"""
def bug_xmltoolkit39():
"""
    non-ascii element and attribute names don't work
>>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g />")
>>> ET.tostring(tree, "utf-8")
'<t\\xc3\\xa4g />'
>>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><tag \xe4ttr='välue' />")
>>> tree.attrib
{u'\\xe4ttr': u'v\\xe4lue'}
>>> ET.tostring(tree, "utf-8")
'<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />'
>>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g>text</t\xe4g>")
>>> ET.tostring(tree, "utf-8")
'<t\\xc3\\xa4g>text</t\\xc3\\xa4g>'
>>> tree = ET.Element(u"t\u00e4g")
>>> ET.tostring(tree, "utf-8")
'<t\\xc3\\xa4g />'
>>> tree = ET.Element("tag")
>>> tree.set(u"\u00e4ttr", u"v\u00e4lue")
>>> ET.tostring(tree, "utf-8")
'<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />'
"""
def bug_xmltoolkit54():
"""
problems handling internally defined entities
>>> e = ET.XML("<!DOCTYPE doc [<!ENTITY ldots '舰'>]><doc>&ldots;</doc>")
>>> serialize(e)
'<doc>舰</doc>'
"""
def bug_xmltoolkit55():
"""
make sure we're reporting the first error, not the last
>>> e = ET.XML("<!DOCTYPE doc SYSTEM 'doc.dtd'><doc>&ldots;&ndots;&rdots;</doc>")
Traceback (most recent call last):
ParseError: undefined entity &ldots;: line 1, column 36
"""
class ExceptionFile:
def read(self, x):
raise IOError
def xmltoolkit60():
"""
Handle crash in stream source.
>>> tree = ET.parse(ExceptionFile())
Traceback (most recent call last):
IOError
"""
XMLTOOLKIT62_DOC = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE patent-application-publication SYSTEM "pap-v15-2001-01-31.dtd" []>
<patent-application-publication>
<subdoc-abstract>
<paragraph id="A-0001" lvl="0">A new cultivar of Begonia plant named ‘BCT9801BEG’.</paragraph>
</subdoc-abstract>
</patent-application-publication>"""
def xmltoolkit62():
"""
Don't crash when using custom entities.
>>> xmltoolkit62()
u'A new cultivar of Begonia plant named \u2018BCT9801BEG\u2019.'
"""
ENTITIES = {u'rsquo': u'\u2019', u'lsquo': u'\u2018'}
parser = ET.XMLTreeBuilder()
parser.entity.update(ENTITIES)
parser.feed(XMLTOOLKIT62_DOC)
t = parser.close()
return t.find('.//paragraph').text
def xmltoolkit63():
"""
Check reference leak.
>>> xmltoolkit63()
>>> count = sys.getrefcount(None)
>>> for i in range(1000):
... xmltoolkit63()
>>> sys.getrefcount(None) - count
0
"""
tree = ET.TreeBuilder()
tree.start("tag", {})
tree.data("text")
tree.end("tag")
# --------------------------------------------------------------------
def bug_200708_newline():
r"""
Preserve newlines in attributes.
>>> e = ET.Element('SomeTag', text="def _f():\n return 3\n")
>>> ET.tostring(e)
'<SomeTag text="def _f(): return 3 " />'
>>> ET.XML(ET.tostring(e)).get("text")
'def _f():\n return 3\n'
>>> ET.tostring(ET.XML(ET.tostring(e)))
'<SomeTag text="def _f(): return 3 " />'
"""
def bug_200708_close():
"""
Test default builder.
>>> parser = ET.XMLParser() # default
>>> parser.feed("<element>some text</element>")
>>> summarize(parser.close())
'element'
Test custom builder.
>>> class EchoTarget:
... def close(self):
... return ET.Element("element") # simulate root
>>> parser = ET.XMLParser(EchoTarget())
>>> parser.feed("<element>some text</element>")
>>> summarize(parser.close())
'element'
"""
def bug_200709_default_namespace():
"""
>>> e = ET.Element("{default}elem")
>>> s = ET.SubElement(e, "{default}elem")
>>> serialize(e, default_namespace="default") # 1
'<elem xmlns="default"><elem /></elem>'
>>> e = ET.Element("{default}elem")
>>> s = ET.SubElement(e, "{default}elem")
>>> s = ET.SubElement(e, "{not-default}elem")
>>> serialize(e, default_namespace="default") # 2
'<elem xmlns="default" xmlns:ns1="not-default"><elem /><ns1:elem /></elem>'
>>> e = ET.Element("{default}elem")
>>> s = ET.SubElement(e, "{default}elem")
>>> s = ET.SubElement(e, "elem") # unprefixed name
>>> serialize(e, default_namespace="default") # 3
Traceback (most recent call last):
ValueError: cannot use non-qualified names with default_namespace option
"""
def bug_200709_register_namespace():
"""
>>> ET.tostring(ET.Element("{http://namespace.invalid/does/not/exist/}title"))
'<ns0:title xmlns:ns0="http://namespace.invalid/does/not/exist/" />'
>>> ET.register_namespace("foo", "http://namespace.invalid/does/not/exist/")
>>> ET.tostring(ET.Element("{http://namespace.invalid/does/not/exist/}title"))
'<foo:title xmlns:foo="http://namespace.invalid/does/not/exist/" />'
And the Dublin Core namespace is in the default list:
>>> ET.tostring(ET.Element("{http://purl.org/dc/elements/1.1/}title"))
'<dc:title xmlns:dc="http://purl.org/dc/elements/1.1/" />'
"""
def bug_200709_element_comment():
"""
Not sure if this can be fixed, really (since the serializer needs
ET.Comment, not cET.comment).
>>> a = ET.Element('a')
>>> a.append(ET.Comment('foo'))
>>> a[0].tag == ET.Comment
True
>>> a = ET.Element('a')
>>> a.append(ET.PI('foo'))
>>> a[0].tag == ET.PI
True
"""
def bug_200709_element_insert():
"""
>>> a = ET.Element('a')
>>> b = ET.SubElement(a, 'b')
>>> c = ET.SubElement(a, 'c')
>>> d = ET.Element('d')
>>> a.insert(0, d)
>>> summarize_list(a)
['d', 'b', 'c']
>>> a.insert(-1, d)
>>> summarize_list(a)
['d', 'b', 'd', 'c']
"""
def bug_200709_iter_comment():
"""
>>> a = ET.Element('a')
>>> b = ET.SubElement(a, 'b')
>>> comment_b = ET.Comment("TEST-b")
>>> b.append(comment_b)
>>> summarize_list(a.iter(ET.Comment))
['<Comment>']
"""
# --------------------------------------------------------------------
# reported on bugs.python.org
def bug_1534630():
"""
>>> bob = ET.TreeBuilder()
>>> e = bob.data("data")
>>> e = bob.start("tag", {})
>>> e = bob.end("tag")
>>> e = bob.close()
>>> serialize(e)
'<tag />'
"""
def check_issue6233():
"""
>>> e = ET.XML("<?xml version='1.0' encoding='utf-8'?><body>t\\xc3\\xa3g</body>")
>>> ET.tostring(e, 'ascii')
"<?xml version='1.0' encoding='ascii'?>\\n<body>tãg</body>"
>>> e = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><body>t\\xe3g</body>")
>>> ET.tostring(e, 'ascii')
"<?xml version='1.0' encoding='ascii'?>\\n<body>tãg</body>"
"""
def check_issue3151():
"""
>>> e = ET.XML('<prefix:localname xmlns:prefix="${stuff}"/>')
>>> e.tag
'{${stuff}}localname'
>>> t = ET.ElementTree(e)
>>> ET.tostring(e)
'<ns0:localname xmlns:ns0="${stuff}" />'
"""
def check_issue6565():
"""
>>> elem = ET.XML("<body><tag/></body>")
>>> summarize_list(elem)
['tag']
>>> newelem = ET.XML(SAMPLE_XML)
>>> elem[:] = newelem[:]
>>> summarize_list(elem)
['tag', 'tag', 'section']
"""
# --------------------------------------------------------------------
class CleanContext(object):
"""Provide default namespace mapping and path cache."""
checkwarnings = None
def __init__(self, quiet=False):
if sys.flags.optimize >= 2:
# under -OO, doctests cannot be run and therefore not all warnings
# will be emitted
quiet = True
deprecations = (
# Search behaviour is broken if search path starts with "/".
("This search is broken in 1.3 and earlier, and will be fixed "
"in a future version. If you rely on the current behaviour, "
"change it to '.+'", FutureWarning),
# Element.getchildren() and Element.getiterator() are deprecated.
("This method will be removed in future versions. "
"Use .+ instead.", DeprecationWarning),
("This method will be removed in future versions. "
"Use .+ instead.", PendingDeprecationWarning),
# XMLParser.doctype() is deprecated.
("This method of XMLParser is deprecated. Define doctype.. "
"method on the TreeBuilder target.", DeprecationWarning))
self.checkwarnings = test_support.check_warnings(*deprecations,
quiet=quiet)
def __enter__(self):
from xml.etree import ElementTree
self._nsmap = ElementTree._namespace_map
self._path_cache = ElementTree.ElementPath._cache
# Copy the default namespace mapping
ElementTree._namespace_map = self._nsmap.copy()
# Copy the path cache (should be empty)
ElementTree.ElementPath._cache = self._path_cache.copy()
self.checkwarnings.__enter__()
def __exit__(self, *args):
from xml.etree import ElementTree
# Restore mapping and path cache
ElementTree._namespace_map = self._nsmap
ElementTree.ElementPath._cache = self._path_cache
self.checkwarnings.__exit__(*args)
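# Runs the shared doctests; test_xml_etree_c reuses this entry point with a
# different module_name to exercise the C accelerator against the same suite.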
def test_main(module_name='xml.etree.ElementTree'):
from test import test_xml_etree
use_py_module = (module_name == 'xml.etree.ElementTree')
# The same doctests are used for both the Python and the C implementations
assert test_xml_etree.ET.__name__ == module_name
# XXX the C module should give the same warnings as the Python module
with CleanContext(quiet=not use_py_module):
test_support.run_doctest(test_xml_etree, verbosity=True)
# The module should not be changed by the tests
assert test_xml_etree.ET.__name__ == module_name
if __name__ == '__main__':
test_main()
| mit | 9,105,284,904,654,779,000 | 27.175504 | 129 | 0.564745 | false |
tim-janik/beast | yapps2_deb/yapps/grammar.py | 42 | 8821 | # grammar.py, part of Yapps 2 - yet another python parser system
# Copyright 1999-2003 by Amit J. Patel <[email protected]>
#
# This version of the Yapps 2 grammar can be distributed under the
# terms of the MIT open source license, either found in the LICENSE
# file included with the Yapps distribution
# <http://theory.stanford.edu/~amitp/yapps/> or at
# <http://www.opensource.org/licenses/mit-license.php>
#
"""Parser for Yapps grammars.
This file defines the grammar of Yapps grammars. Naturally, it is
implemented in Yapps. The grammar.py module needed by Yapps is built
by running Yapps on yapps_grammar.g. (Holy circularity, Batman!)
"""
import sys, re
from yapps import parsetree
######################################################################
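# Helper actions invoked from the generated parser below to build parse-tree
# nodes out of the recognized clauses.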
def cleanup_choice(rule, lst):
    if len(lst) == 0: return parsetree.Sequence(rule)
if len(lst) == 1: return lst[0]
return parsetree.Choice(rule, *tuple(lst))
def cleanup_sequence(rule, lst):
if len(lst) == 1: return lst[0]
return parsetree.Sequence(rule, *tuple(lst))
def resolve_name(rule, tokens, id, args):
if id in [x[0] for x in tokens]:
# It's a token
if args:
print 'Warning: ignoring parameters on TOKEN %s<<%s>>' % (id, args)
return parsetree.Terminal(rule, id)
else:
# It's a name, so assume it's a nonterminal
return parsetree.NonTerminal(rule, id, args)
# Begin -- grammar generated by Yapps
import sys, re
from yapps import runtime
class ParserDescriptionScanner(runtime.Scanner):
patterns = [
('"rule"', re.compile('rule')),
('"ignore"', re.compile('ignore')),
('"token"', re.compile('token')),
('"option"', re.compile('option')),
('":"', re.compile(':')),
('"parser"', re.compile('parser')),
('[ \t\r\n]+', re.compile('[ \t\r\n]+')),
('#.*?\r?\n', re.compile('#.*?\r?\n')),
('EOF', re.compile('$')),
('ATTR', re.compile('<<.+?>>')),
('STMT', re.compile('{{.+?}}')),
('ID', re.compile('[a-zA-Z_][a-zA-Z_0-9]*')),
('STR', re.compile('[rR]?\'([^\\n\'\\\\]|\\\\.)*\'|[rR]?"([^\\n"\\\\]|\\\\.)*"')),
('LP', re.compile('\\(')),
('RP', re.compile('\\)')),
('LB', re.compile('\\[')),
('RB', re.compile('\\]')),
('OR', re.compile('[|]')),
('STAR', re.compile('[*]')),
('PLUS', re.compile('[+]')),
('QUEST', re.compile('[?]')),
('COLON', re.compile(':')),
]
def __init__(self, str,*args,**kw):
runtime.Scanner.__init__(self,None,{'[ \t\r\n]+':None,'#.*?\r?\n':None,},str,*args,**kw)
class ParserDescription(runtime.Parser):
Context = runtime.Context
def Parser(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'Parser', [])
self._scan('"parser"', context=_context)
ID = self._scan('ID', context=_context)
self._scan('":"', context=_context)
Options = self.Options(_context)
Tokens = self.Tokens(_context)
Rules = self.Rules(Tokens, _context)
EOF = self._scan('EOF', context=_context)
return parsetree.Generator(ID,Options,Tokens,Rules)
def Options(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'Options', [])
opt = {}
while self._peek('"option"', '"token"', '"ignore"', 'EOF', '"rule"', context=_context) == '"option"':
self._scan('"option"', context=_context)
self._scan('":"', context=_context)
Str = self.Str(_context)
opt[Str] = 1
return opt
def Tokens(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'Tokens', [])
tok = []
while self._peek('"token"', '"ignore"', 'EOF', '"rule"', context=_context) in ['"token"', '"ignore"']:
_token = self._peek('"token"', '"ignore"', context=_context)
if _token == '"token"':
self._scan('"token"', context=_context)
ID = self._scan('ID', context=_context)
self._scan('":"', context=_context)
Str = self.Str(_context)
tok.append( (ID,Str) )
else: # == '"ignore"'
self._scan('"ignore"', context=_context)
self._scan('":"', context=_context)
Str = self.Str(_context)
ign = ('#ignore',Str)
if self._peek('STMT', '"token"', '"ignore"', 'EOF', '"rule"', context=_context) == 'STMT':
STMT = self._scan('STMT', context=_context)
ign = ign + (STMT[2:-2],)
tok.append( ign )
return tok
def Rules(self, tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'Rules', [tokens])
rul = []
while self._peek('"rule"', 'EOF', context=_context) == '"rule"':
self._scan('"rule"', context=_context)
ID = self._scan('ID', context=_context)
OptParam = self.OptParam(_context)
self._scan('":"', context=_context)
ClauseA = self.ClauseA(ID, tokens, _context)
rul.append( (ID, OptParam, ClauseA) )
return rul
def ClauseA(self, rule, tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'ClauseA', [rule, tokens])
ClauseB = self.ClauseB(rule,tokens, _context)
v = [ClauseB]
while self._peek('OR', 'RP', 'RB', '"rule"', 'EOF', context=_context) == 'OR':
OR = self._scan('OR', context=_context)
ClauseB = self.ClauseB(rule,tokens, _context)
v.append(ClauseB)
return cleanup_choice(rule,v)
def ClauseB(self, rule,tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'ClauseB', [rule,tokens])
v = []
while self._peek('STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'EOF', context=_context) in ['STR', 'ID', 'LP', 'LB', 'STMT']:
ClauseC = self.ClauseC(rule,tokens, _context)
v.append(ClauseC)
return cleanup_sequence(rule, v)
def ClauseC(self, rule,tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'ClauseC', [rule,tokens])
ClauseD = self.ClauseD(rule,tokens, _context)
_token = self._peek('PLUS', 'STAR', 'QUEST', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'EOF', context=_context)
if _token == 'PLUS':
PLUS = self._scan('PLUS', context=_context)
return parsetree.Plus(rule, ClauseD)
elif _token == 'STAR':
STAR = self._scan('STAR', context=_context)
return parsetree.Star(rule, ClauseD)
elif _token == 'QUEST':
QUEST = self._scan('QUEST', context=_context)
return parsetree.Option(rule, ClauseD)
else:
return ClauseD
def ClauseD(self, rule,tokens, _parent=None):
_context = self.Context(_parent, self._scanner, 'ClauseD', [rule,tokens])
_token = self._peek('STR', 'ID', 'LP', 'LB', 'STMT', context=_context)
if _token == 'STR':
STR = self._scan('STR', context=_context)
t = (STR, eval(STR,{},{}))
if t not in tokens: tokens.insert( 0, t )
return parsetree.Terminal(rule, STR)
elif _token == 'ID':
ID = self._scan('ID', context=_context)
OptParam = self.OptParam(_context)
return resolve_name(rule,tokens, ID, OptParam)
elif _token == 'LP':
LP = self._scan('LP', context=_context)
ClauseA = self.ClauseA(rule,tokens, _context)
RP = self._scan('RP', context=_context)
return ClauseA
elif _token == 'LB':
LB = self._scan('LB', context=_context)
ClauseA = self.ClauseA(rule,tokens, _context)
RB = self._scan('RB', context=_context)
return parsetree.Option(rule, ClauseA)
else: # == 'STMT'
STMT = self._scan('STMT', context=_context)
return parsetree.Eval(rule, STMT[2:-2])
def OptParam(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'OptParam', [])
if self._peek('ATTR', '":"', 'PLUS', 'STAR', 'QUEST', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'EOF', context=_context) == 'ATTR':
ATTR = self._scan('ATTR', context=_context)
return ATTR[2:-2]
return ''
def Str(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'Str', [])
STR = self._scan('STR', context=_context)
return eval(STR,{},{})
def parse(rule, text):
P = ParserDescription(ParserDescriptionScanner(text))
return runtime.wrap_error_reporter(P, rule)
# End -- grammar generated by Yapps
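# Illustrative note (added by hand, not generated by Yapps): assuming the full
# generated module -- including ParserDescriptionScanner and the yapps runtime --
# is importable, a grammar description is parsed by handing the start rule name
# and the source text to parse(), e.g.
#
#     generator = parse('Parser', open('grammar.g').read())
#
# where 'grammar.g' is a placeholder filename for a Yapps grammar file.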
| lgpl-2.1 | -7,380,445,027,330,821,000 | 40.805687 | 158 | 0.534973 | false |
nhicher/ansible | lib/ansible/modules/cloud/ovirt/ovirt_storage_template_facts.py | 16 | 4457 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_storage_template_facts
short_description: Retrieve facts about one or more oVirt/RHV templates related to a storage domain.
author: "Maor Lipchuk"
version_added: "2.4"
description:
- "Retrieve facts about one or more oVirt/RHV templates relate to a storage domain."
notes:
- "This module creates a new top-level C(ovirt_storage_templates) fact, which
contains a list of templates."
options:
unregistered:
description:
- "Flag which indicates whether to get unregistered templates which contain one or more
disks which reside on a storage domain or diskless templates."
type: bool
default: false
max:
description:
- "Sets the maximum number of templates to return. If not specified all the templates are returned."
storage_domain:
description:
- "The storage domain name where the templates should be listed."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all Templates which relate to a storage domain and
# are unregistered:
- ovirt_storage_template_facts:
unregistered=True
- debug:
var: ovirt_storage_templates
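# Additional illustrative example (not part of the original documentation):
# "mystorage" and the limit of 10 are placeholder values; storage_domain and
# max are the documented module options.
- ovirt_storage_template_facts:
    storage_domain: mystorage
    max: 10
- debug:
    var: ovirt_storage_templates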
'''
RETURN = '''
ovirt_storage_templates:
description: "List of dictionaries describing the Templates. Template attributes are mapped to dictionary keys,
all Templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
get_id_by_name
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
storage_domain=dict(default=None),
max=dict(default=None, type='int'),
unregistered=dict(default=False, type='bool'),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
storage_domains_service = connection.system_service().storage_domains_service()
sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
templates_service = storage_domain_service.templates_service()
# Find the unregistered Template we want to register:
if module.params.get('unregistered'):
templates = templates_service.list(unregistered=True)
else:
templates = templates_service.list(max=module.params['max'])
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_storage_templates=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in templates
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| gpl-3.0 | 9,221,037,693,799,865,000 | 33.550388 | 144 | 0.660085 | false |
bakhtout/odoo-educ | addons/product_expiry/__init__.py | 442 | 1053 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_expiry
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,407,138,796,067,826,000 | 44.782609 | 79 | 0.615385 | false |
kartikgupta0909/gaia | test/unittest/test_parser.py | 3 | 5700 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Gaia
#
# Gaia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from gaia2 import *
import unittest
import testdata
def testValidPoint(dataset, clause, fromList = None):
# search the point using the clause:
# if we have a result, the clause was true
# if we have no result, the clause was false
v = View(dataset)
dist = MetricFactory.create('null', dataset.layout())
filtr = 'WHERE ' + clause
if fromList:
filtr = 'FROM ' + fromList + ' ' + filtr
result = v.nnSearch(dataset.samplePoint(), dist, filtr).get(1)
if len(result) == 1:
return True
return False
def testClause(clause):
ds = testdata.createSimpleDataSet()
return testValidPoint(ds, clause)
class TestParser(unittest.TestCase):
def setUp(self):
cvar.verbose = False
def tearDown(self):
testdata.resetSettings()
def testSimpleExpression(self):
listTests = {
'10 < 20': True,
' 23.34 == 45': False,
' 23.34 = 45': False,
'12 ==12.0': True,
'23>23': False,
'23\t > 23 ': False,
'1<2 and 2<3': True,
'1<2 and 2>3': False,
'NOT (True)': False,
'1<2 and not (2>3)': True,
'12 in ( 1, 2, 3, 4, 5 )': False,
'23 in (1, 23, 37, 42, 5)': True,
'"ABC" IN ("FG")': False,
'"acid" in ("this", "is", "an", "acid", "test")': True,
'"smarties" NOT IN ("my pocket", "your pocket")': True,
'2.3 BETWEEN 1e-4 AND 2e7': True,
'2 between 2 and 3': True,
'2 between 3 and 2': True,
'4 between 3 and 2': False,
'true': True
}
for clause in listTests:
result = listTests[clause]
self.assertEqual(testClause(clause), result, clause)
# also test mix of these (1 OR 2, 1 AND 2)
for clauseA in listTests:
for clauseB in listTests:
self.assertEqual(testClause('(' + clauseA + ') AND (' + clauseB + ')'),
listTests[clauseA] and listTests[clauseB])
self.assertEqual(testClause('(' + clauseA + ') OR (' + clauseB + ')'),
listTests[clauseA] or listTests[clauseB])
def testParserStillInValidStateAfterParserError(self):
'''ticket #20: parser is in invalid state after parser error'''
ds = testdata.createSimpleDataSet()
dist = MetricFactory.create('null', ds.layout())
v = View(ds)
result = v.nnSearch(ds.samplePoint(), dist, 'WHERE true').get(1)
clause = 'WHERE label.tonal_key_mode.value = \\"major"'
try: result = v.nnSearch(ds.samplePoint(), dist, clause).get(1)
except: pass # filter correctly failed to compile
result = v.nnSearch(ds.samplePoint(), dist, 'WHERE true').get(1)
def testVariables(self):
d = testdata.createSimpleDataSet()
def test(clause, expected):
self.assertEqual(testValidPoint(d, clause), expected)
d.point('p')['d'] = 'Hello'
test('label.d = "Hello"', True)
test('label.d = "goodbye"', False)
d.point('p')['b'] = 23.
test('value.b < 23', False)
test('value.b <= 23', True)
test('value.b != 23', False)
test('value.b > 23', False)
test('value.b == 23', True)
test('value.b = 23', True)
test('value.b <= 23', True)
d.point('p')['e'] = [23.0, 24.0, 25.0]
test('value.e[0] < 23', False)
test('value.e[1] > 23', True)
test('value.e[2] > 24.3 and value.e[2] <= 25', True)
test('value.b = 23.0 and label.d = "Hello"', True)
test('value.b = 23.0 or label.d = "Ho ho"', True)
test('value.b < 23.0 and label.d = "Hello"', False)
test('value.b = 23.0 and label.d = "Hell"', False)
d.point('p')['a.1'] = 17
test('value.a.1 == 17', True)
test('value.a.1 < 20 and value.b > 20 and label.d != "ooh yeah"', True)
test('point.id IN ("c", "a", "t")', False)
test('point.id NOT IN ("a", "p", "u")', False)
test('point.id NOT IN ("s", "u", "n")', True)
test('point.id == "p"', True)
test('point.id != "rock\'n\'roll"', True)
def testFixLength(self):
testdata.useFixedLength = True
self.testSimpleExpression()
self.testParserStillInValidStateAfterParserError()
self.testVariables()
def testEnumerate(self):
testdata.useEnumerate = True
self.testSimpleExpression()
self.testParserStillInValidStateAfterParserError()
self.testVariables()
def testEnumerateFixLength(self):
testdata.useEnumerate = True
self.testFixLength()
suite = unittest.TestLoader().loadTestsFromTestCase(TestParser)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 | -1,018,737,581,797,000,100 | 33.969325 | 87 | 0.570877 | false |
backmari/moose | python/peacock/tests/input_tab/InputFileEditorWithMesh/test_InputFileEditorWithMesh.py | 1 | 9015 | #!/usr/bin/env python
from peacock.Input.InputFileEditorWithMesh import InputFileEditorWithMesh
from PyQt5.QtWidgets import QMainWindow, QMessageBox
from peacock.Input.ExecutableInfo import ExecutableInfo
from peacock.utils import Testing
import argparse, os
from mock import patch
class BaseTests(Testing.PeacockTester):
def setUp(self):
super(BaseTests, self).setUp()
self.input_file = "../../common/transient.i"
self.highlight_left = "meshrender_highlight_left.png"
self.highlight_right = "meshrender_highlight_right.png"
self.highlight_all = "meshrender_highlight_all.png"
self.basic_mesh = "meshrender_basic.png"
#Testing.remove_file(self.highlight_all)
#Testing.remove_file(self.highlight_right)
#Testing.remove_file(self.highlight_left)
#Testing.remove_file(self.basic_mesh)
self.num_time_steps = None
self.time_step_changed_count = 0
def newWidget(self):
main_win = QMainWindow()
w = InputFileEditorWithMesh(size=[640,640])
main_win.setCentralWidget(w)
app_info = ExecutableInfo()
app_info.setPath(Testing.find_moose_test_exe())
self.assertTrue(app_info.valid())
w.onExecutableInfoChanged(app_info)
menubar = main_win.menuBar()
menubar.setNativeMenuBar(False)
w.addToMainMenu(menubar)
w.initialize()
w.setEnabled(True)
main_win.show()
self.assertEqual(w.vtkwin.isVisible(), False)
w.numTimeStepsChanged.connect(self.timeStepChanged)
return main_win, w
def timeStepChanged(self, num_steps):
self.num_time_steps = num_steps
self.time_step_changed_count += 1
def compareGold(self, w, filename):
w.input_filename = filename
Testing.remove_file(filename)
w.InputFileEditorPlugin.writeInputFile(filename)
self.compareFiles(filename)
def compareFiles(self, test_file):
gold_file = "gold/%s" % test_file
test_data = ""
gold_data = ""
with open(test_file, "r") as f:
test_data = f.read()
with open(gold_file, "r") as f:
gold_data = f.read()
self.assertEqual(test_data, gold_data)
class Tests(BaseTests):
def testBasic(self):
main_win, w = self.newWidget()
self.assertEqual(w.vtkwin.isVisible(), False)
w.setInputFile(self.input_file)
self.assertEqual(w.vtkwin.isVisible(), True)
Testing.set_window_size(w.vtkwin)
w.vtkwin.onWrite(self.basic_mesh)
self.assertFalse(Testing.gold_diff(self.basic_mesh))
def testHighlight(self):
main_win, w = self.newWidget()
w.setInputFile(self.input_file)
self.assertEqual(w.vtkwin.isVisible(), True)
tree = w.InputFileEditorPlugin.tree
b = tree.getBlockInfo("/BCs/left")
self.assertNotEqual(b, None)
self.assertEqual(b.getParamInfo("boundary").value, '3')
w.blockChanged(b)
Testing.set_window_size(w.vtkwin)
w.vtkwin.onWrite(self.highlight_left)
self.assertFalse(Testing.gold_diff(self.highlight_left))
b = tree.getBlockInfo("/BCs/right")
self.assertNotEqual(b, None)
self.assertEqual(b.getParamInfo("boundary").value, '1')
w.blockChanged(b)
Testing.set_window_size(w.vtkwin)
w.vtkwin.onWrite(self.highlight_right)
self.assertFalse(Testing.gold_diff(self.highlight_right))
b = tree.getBlockInfo("/BCs/all")
self.assertNotEqual(b, None)
self.assertEqual(b.getParamInfo("boundary").value, '0 1 2 3')
w.blockChanged(b)
Testing.set_window_size(w.vtkwin)
w.vtkwin.onWrite(self.highlight_all)
self.assertFalse(Testing.gold_diff(self.highlight_all))
b= tree.getBlockInfo("/Executioner")
self.assertNotEqual(b, None)
w.highlightChanged(b)
Testing.set_window_size(w.vtkwin)
w.vtkwin.onWrite(self.basic_mesh)
self.assertFalse(Testing.gold_diff(self.basic_mesh))
def testHighlightDiffusion(self):
self.input_file = "../../common/simple_diffusion.i"
main_win, w = self.newWidget()
w.setInputFile(self.input_file)
self.assertEqual(w.vtkwin.isVisible(), True)
tree = w.InputFileEditorPlugin.tree
b = tree.getBlockInfo("/BCs/left")
self.assertNotEqual(b, None)
self.assertEqual(b.getParamInfo("boundary").value, 'left')
w.highlightChanged(b)
Testing.set_window_size(w.vtkwin)
w.vtkwin.onWrite(self.highlight_left)
self.assertFalse(Testing.gold_diff(self.highlight_left))
b = tree.getBlockInfo("/BCs/right")
self.assertNotEqual(b, None)
self.assertEqual(b.getParamInfo("boundary").value, 'right')
w.highlightChanged(b)
Testing.set_window_size(w.vtkwin)
w.vtkwin.onWrite(self.highlight_right)
self.assertFalse(Testing.gold_diff(self.highlight_right))
def testFromOptions(self):
parser = argparse.ArgumentParser()
parser.add_argument('arguments',
type=str,
metavar="N",
nargs="*",
)
InputFileEditorWithMesh.commandLineArgs(parser)
main_win, w = self.newWidget()
opts = {"cmd_line_options": parser.parse_args([])}
self.assertEqual(w.vtkwin.isVisible(), False)
abs_input_file = os.path.abspath(self.input_file)
opts = {"cmd_line_options": parser.parse_args(['-i', self.input_file])}
w.initialize(**opts)
self.assertEqual(w.vtkwin.isVisible(), True)
self.assertEqual(w.InputFileEditorPlugin.tree.input_filename, abs_input_file)
w.InputFileEditorPlugin._clearInputFile()
self.assertEqual(w.vtkwin.isVisible(), False)
self.assertEqual(w.InputFileEditorPlugin.tree.input_filename, None)
opts = {"cmd_line_options": parser.parse_args([self.input_file])}
w.initialize(**opts)
self.assertEqual(w.vtkwin.isVisible(), True)
self.assertEqual(w.InputFileEditorPlugin.tree.input_filename, abs_input_file)
def testBlockChanged(self):
main_win, w = self.newWidget()
w.setInputFile(self.input_file)
tree = w.InputFileEditorPlugin.tree
b = tree.getBlockInfo("/Mesh")
self.assertNotEqual(b, None)
w.blockChanged(b)
b = tree.getBlockInfo("/Executioner")
self.assertNotEqual(b, None)
self.assertEqual(self.time_step_changed_count, 1)
self.assertEqual(self.num_time_steps, 8)
w.blockChanged(b)
self.assertEqual(self.time_step_changed_count, 2)
self.assertEqual(self.num_time_steps, 8)
num_steps_param = b.getParamInfo("num_steps")
self.assertNotEqual(num_steps_param, None)
# was 5, add 5 more
num_steps_param.value = 10
w.blockChanged(b)
self.assertEqual(self.time_step_changed_count, 3)
self.assertEqual(self.num_time_steps, 13)
b.included = False
w.blockChanged(b)
self.assertEqual(self.time_step_changed_count, 4)
self.assertEqual(self.num_time_steps, 0)
@patch.object(QMessageBox, "question")
def testCanClose(self, mock_q):
mock_q.return_value = QMessageBox.No
main_win, w = self.newWidget()
self.assertEqual(w.canClose(), True)
tree = w.InputFileEditorPlugin.tree
w.setInputFile(self.input_file)
self.assertEqual(w.canClose(), True)
b = tree.getBlockInfo("/Mesh")
self.assertNotEqual(b, None)
w.InputFileEditorPlugin.blockChanged.emit(b, w.InputFileEditorPlugin.tree)
self.assertEqual(w.InputFileEditorPlugin.has_changed, True)
self.assertEqual(w.canClose(), False)
w.setInputFile(self.input_file)
self.assertEqual(w.canClose(), True)
@patch.object(QMessageBox, "question")
def testAddBlock(self, mock_q):
"""
Tests to make sure adding a block to the InputFileEditor
actually updates the input file.
"""
mock_q.return_value = QMessageBox.No # The user doesn't want to ignore changes
main_win, w = self.newWidget()
tree = w.InputFileEditorPlugin.tree
self.assertEqual(w.canClose(), True)
b = tree.getBlockInfo("/AuxVariables")
self.assertNotEqual(b, None)
w.InputFileEditorPlugin.block_tree.copyBlock(b)
self.assertEqual(w.InputFileEditorPlugin.has_changed, True)
self.assertEqual(w.canClose(), False)
mock_q.return_value = QMessageBox.Yes # The user wants to ignore changes
self.assertEqual(w.canClose(), True)
s = tree.getInputFileString()
self.assertEqual("", s) # AuxVariables isn't included
b.included = True
s = tree.getInputFileString()
self.assertEqual("[AuxVariables]\n [./New_0]\n [../]\n[]\n\n", s)
if __name__ == '__main__':
Testing.run_tests()
| lgpl-2.1 | -819,407,972,654,807,900 | 37.037975 | 86 | 0.641043 | false |
petewarden/tensorflow | tensorflow/python/debug/wrappers/disk_usage_test.py | 11 | 4453 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugger Wrapper Session Consisting of a Local Curses-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.python.client import session
from tensorflow.python.debug.wrappers import dumping_wrapper
from tensorflow.python.debug.wrappers import hooks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
@test_util.run_v1_only("Sessions are not available in TF 2.x")
class DumpingDebugWrapperDiskUsageLimitTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
# For efficient testing, set the disk usage bytes limit to a small
# number (10).
os.environ["TFDBG_DISK_BYTES_LIMIT"] = "10"
def setUp(self):
self.session_root = tempfile.mkdtemp()
self.v = variables.Variable(10.0, dtype=dtypes.float32, name="v")
self.delta = constant_op.constant(1.0, dtype=dtypes.float32, name="delta")
self.eta = constant_op.constant(-1.4, dtype=dtypes.float32, name="eta")
self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
self.dec_v = state_ops.assign_add(self.v, self.eta, name="dec_v")
self.sess = session.Session()
self.sess.run(self.v.initializer)
def testWrapperSessionNotExceedingLimit(self):
def _watch_fn(fetches, feeds):
del fetches, feeds
return "DebugIdentity", r"(.*delta.*|.*inc_v.*)", r".*"
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root,
watch_fn=_watch_fn, log_usage=False)
sess.run(self.inc_v)
def testWrapperSessionExceedingLimit(self):
def _watch_fn(fetches, feeds):
del fetches, feeds
return "DebugIdentity", r".*delta.*", r".*"
sess = dumping_wrapper.DumpingDebugWrapperSession(
self.sess, session_root=self.session_root,
watch_fn=_watch_fn, log_usage=False)
# Due to the watch function, each run should dump only 1 tensor,
# which has a size of 4 bytes, which corresponds to the dumped 'delta:0'
# tensor of scalar shape and float32 dtype.
# 1st run should pass, after which the disk usage is at 4 bytes.
sess.run(self.inc_v)
# 2nd run should also pass, after which 8 bytes are used.
sess.run(self.inc_v)
# 3rd run should fail, because the total byte count (12) exceeds the
# limit (10)
with self.assertRaises(ValueError):
sess.run(self.inc_v)
def testHookNotExceedingLimit(self):
def _watch_fn(fetches, feeds):
del fetches, feeds
return "DebugIdentity", r".*delta.*", r".*"
dumping_hook = hooks.DumpingDebugHook(
self.session_root, watch_fn=_watch_fn, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
mon_sess.run(self.inc_v)
def testHookExceedingLimit(self):
def _watch_fn(fetches, feeds):
del fetches, feeds
return "DebugIdentity", r".*delta.*", r".*"
dumping_hook = hooks.DumpingDebugHook(
self.session_root, watch_fn=_watch_fn, log_usage=False)
mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook])
# Like in `testWrapperSessionExceedingLimit`, the first two calls
# should be within the byte limit, but the third one should error
# out due to exceeding the limit.
mon_sess.run(self.inc_v)
mon_sess.run(self.inc_v)
with self.assertRaises(ValueError):
mon_sess.run(self.inc_v)
if __name__ == "__main__":
googletest.main()
| apache-2.0 | 2,000,309,103,865,044,200 | 39.481818 | 80 | 0.702448 | false |
danlrobertson/servo | tests/wpt/web-platform-tests/tools/third_party/py/py/_code/_assertionnew.py | 60 | 11450 | """
Find intermediate evaluation results in assert statements through builtin AST.
This should replace _assertionold.py eventually.
"""
import sys
import ast
import py
from py._code.assertion import _format_explanation, BuiltinAssertionError
def _is_ast_expr(node):
return isinstance(node, ast.expr)
def _is_ast_stmt(node):
return isinstance(node, ast.stmt)
class Failure(Exception):
"""Error found while interpreting AST."""
def __init__(self, explanation=""):
self.cause = sys.exc_info()
self.explanation = explanation
def interpret(source, frame, should_fail=False):
mod = ast.parse(source)
visitor = DebugInterpreter(frame)
try:
visitor.visit(mod)
except Failure:
failure = sys.exc_info()[1]
return getfailure(failure)
if should_fail:
return ("(assertion failed, but when it was re-run for "
"printing intermediate values, it did not fail. Suggestions: "
"compute assert expression before the assert or use --no-assert)")
def run(offending_line, frame=None):
if frame is None:
frame = py.code.Frame(sys._getframe(1))
return interpret(offending_line, frame)
def getfailure(failure):
explanation = _format_explanation(failure.explanation)
value = failure.cause[1]
if str(value):
lines = explanation.splitlines()
if not lines:
lines.append("")
lines[0] += " << %s" % (value,)
explanation = "\n".join(lines)
text = "%s: %s" % (failure.cause[0].__name__, explanation)
if text.startswith("AssertionError: assert "):
text = text[16:]
return text
operator_map = {
ast.BitOr : "|",
ast.BitXor : "^",
ast.BitAnd : "&",
ast.LShift : "<<",
ast.RShift : ">>",
ast.Add : "+",
ast.Sub : "-",
ast.Mult : "*",
ast.Div : "/",
ast.FloorDiv : "//",
ast.Mod : "%",
ast.Eq : "==",
ast.NotEq : "!=",
ast.Lt : "<",
ast.LtE : "<=",
ast.Gt : ">",
ast.GtE : ">=",
ast.Pow : "**",
ast.Is : "is",
ast.IsNot : "is not",
ast.In : "in",
ast.NotIn : "not in"
}
unary_map = {
ast.Not : "not %s",
ast.Invert : "~%s",
ast.USub : "-%s",
ast.UAdd : "+%s"
}
class DebugInterpreter(ast.NodeVisitor):
"""Interpret AST nodes to gleam useful debugging information. """
def __init__(self, frame):
self.frame = frame
def generic_visit(self, node):
# Fallback when we don't have a special implementation.
if _is_ast_expr(node):
mod = ast.Expression(node)
co = self._compile(mod)
try:
result = self.frame.eval(co)
except Exception:
raise Failure()
explanation = self.frame.repr(result)
return explanation, result
elif _is_ast_stmt(node):
mod = ast.Module([node])
co = self._compile(mod, "exec")
try:
self.frame.exec_(co)
except Exception:
raise Failure()
return None, None
else:
raise AssertionError("can't handle %s" %(node,))
def _compile(self, source, mode="eval"):
return compile(source, "<assertion interpretation>", mode)
def visit_Expr(self, expr):
return self.visit(expr.value)
def visit_Module(self, mod):
for stmt in mod.body:
self.visit(stmt)
def visit_Name(self, name):
explanation, result = self.generic_visit(name)
# See if the name is local.
source = "%r in locals() is not globals()" % (name.id,)
co = self._compile(source)
try:
local = self.frame.eval(co)
except Exception:
# have to assume it isn't
local = False
if not local:
return name.id, result
return explanation, result
def visit_Compare(self, comp):
left = comp.left
left_explanation, left_result = self.visit(left)
for op, next_op in zip(comp.ops, comp.comparators):
next_explanation, next_result = self.visit(next_op)
op_symbol = operator_map[op.__class__]
explanation = "%s %s %s" % (left_explanation, op_symbol,
next_explanation)
source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_left=left_result,
__exprinfo_right=next_result)
except Exception:
raise Failure(explanation)
try:
if not result:
break
except KeyboardInterrupt:
raise
except:
break
left_explanation, left_result = next_explanation, next_result
rcomp = py.code._reprcompare
if rcomp:
res = rcomp(op_symbol, left_result, next_result)
if res:
explanation = res
return explanation, result
def visit_BoolOp(self, boolop):
is_or = isinstance(boolop.op, ast.Or)
explanations = []
for operand in boolop.values:
explanation, result = self.visit(operand)
explanations.append(explanation)
if result == is_or:
break
name = is_or and " or " or " and "
explanation = "(" + name.join(explanations) + ")"
return explanation, result
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_explanation, operand_result = self.visit(unary.operand)
explanation = pattern % (operand_explanation,)
co = self._compile(pattern % ("__exprinfo_expr",))
try:
result = self.frame.eval(co, __exprinfo_expr=operand_result)
except Exception:
raise Failure(explanation)
return explanation, result
def visit_BinOp(self, binop):
left_explanation, left_result = self.visit(binop.left)
right_explanation, right_result = self.visit(binop.right)
symbol = operator_map[binop.op.__class__]
explanation = "(%s %s %s)" % (left_explanation, symbol,
right_explanation)
source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_left=left_result,
__exprinfo_right=right_result)
except Exception:
raise Failure(explanation)
return explanation, result
def visit_Call(self, call):
func_explanation, func = self.visit(call.func)
arg_explanations = []
ns = {"__exprinfo_func" : func}
arguments = []
for arg in call.args:
arg_explanation, arg_result = self.visit(arg)
arg_name = "__exprinfo_%s" % (len(ns),)
ns[arg_name] = arg_result
arguments.append(arg_name)
arg_explanations.append(arg_explanation)
for keyword in call.keywords:
arg_explanation, arg_result = self.visit(keyword.value)
arg_name = "__exprinfo_%s" % (len(ns),)
ns[arg_name] = arg_result
keyword_source = "%s=%%s" % (keyword.arg)
arguments.append(keyword_source % (arg_name,))
arg_explanations.append(keyword_source % (arg_explanation,))
if call.starargs:
arg_explanation, arg_result = self.visit(call.starargs)
arg_name = "__exprinfo_star"
ns[arg_name] = arg_result
arguments.append("*%s" % (arg_name,))
arg_explanations.append("*%s" % (arg_explanation,))
if call.kwargs:
arg_explanation, arg_result = self.visit(call.kwargs)
arg_name = "__exprinfo_kwds"
ns[arg_name] = arg_result
arguments.append("**%s" % (arg_name,))
arg_explanations.append("**%s" % (arg_explanation,))
args_explained = ", ".join(arg_explanations)
explanation = "%s(%s)" % (func_explanation, args_explained)
args = ", ".join(arguments)
source = "__exprinfo_func(%s)" % (args,)
co = self._compile(source)
try:
result = self.frame.eval(co, **ns)
except Exception:
raise Failure(explanation)
pattern = "%s\n{%s = %s\n}"
rep = self.frame.repr(result)
explanation = pattern % (rep, rep, explanation)
return explanation, result
def _is_builtin_name(self, name):
pattern = "%r not in globals() and %r not in locals()"
source = pattern % (name.id, name.id)
co = self._compile(source)
try:
return self.frame.eval(co)
except Exception:
return False
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
source_explanation, source_result = self.visit(attr.value)
explanation = "%s.%s" % (source_explanation, attr.attr)
source = "__exprinfo_expr.%s" % (attr.attr,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_expr=source_result)
except Exception:
raise Failure(explanation)
explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
self.frame.repr(result),
source_explanation, attr.attr)
# Check if the attr is from an instance.
source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
source = source % (attr.attr,)
co = self._compile(source)
try:
from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
except Exception:
from_instance = True
if from_instance:
rep = self.frame.repr(result)
pattern = "%s\n{%s = %s\n}"
explanation = pattern % (rep, rep, explanation)
return explanation, result
def visit_Assert(self, assrt):
test_explanation, test_result = self.visit(assrt.test)
if test_explanation.startswith("False\n{False =") and \
test_explanation.endswith("\n"):
test_explanation = test_explanation[15:-2]
explanation = "assert %s" % (test_explanation,)
if not test_result:
try:
raise BuiltinAssertionError
except Exception:
raise Failure(explanation)
return explanation, test_result
def visit_Assign(self, assign):
value_explanation, value_result = self.visit(assign.value)
explanation = "... = %s" % (value_explanation,)
name = ast.Name("__exprinfo_expr", ast.Load(),
lineno=assign.value.lineno,
col_offset=assign.value.col_offset)
new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
col_offset=assign.col_offset)
mod = ast.Module([new_assign])
co = self._compile(mod, "exec")
try:
self.frame.exec_(co, __exprinfo_expr=value_result)
except Exception:
raise Failure(explanation)
return explanation, value_result
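# --- illustrative usage (an added sketch, not part of the original module) ---
# Running this file directly re-evaluates a failing assertion in the calling
# frame via run() above and prints the resulting explanation text.
if __name__ == "__main__":
    def _demo():
        x = 1
        y = 2
        # run() builds a py.code.Frame for this function's frame and
        # interprets the assertion source against it.
        print(run("assert x == y"))
    _demo()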
| mpl-2.0 | 4,941,451,064,539,613,000 | 34.559006 | 82 | 0.549607 | false |
marc-sensenich/ansible | test/units/modules/network/netvisor/test_pn_user.py | 9 | 3099 | # Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.netvisor import pn_user
from units.modules.utils import set_module_args
from .nvos_module import TestNvosModule, load_fixture
class TestUserModule(TestNvosModule):
module = pn_user
def setUp(self):
self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_user.run_cli')
self.run_nvos_commands = self.mock_run_nvos_commands.start()
self.mock_run_check_cli = patch('ansible.modules.network.netvisor.pn_user.check_cli')
self.run_check_cli = self.mock_run_check_cli.start()
def tearDown(self):
self.mock_run_nvos_commands.stop()
self.run_check_cli.stop()
def run_cli_patch(self, module, cli, state_map):
if state_map['present'] == 'user-create':
results = dict(
changed=True,
cli_cmd=cli
)
elif state_map['absent'] == 'user-delete':
results = dict(
changed=True,
cli_cmd=cli
)
elif state_map['update'] == 'user-modify':
results = dict(
changed=True,
cli_cmd=cli
)
module.exit_json(**results)
def load_fixtures(self, commands=None, state=None, transport='cli'):
self.run_nvos_commands.side_effect = self.run_cli_patch
if state == 'present':
self.run_check_cli.return_value = False
if state == 'absent':
self.run_check_cli.return_value = True
if state == 'update':
self.run_check_cli.return_value = True
def test_user_create(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_name': 'foo',
'pn_scope': 'local', 'pn_password': 'test123', 'state': 'present'})
result = self.execute_module(changed=True, state='present')
expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 user-create name foo scope local password test123'
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_user_delete(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_name': 'foo',
'state': 'absent'})
result = self.execute_module(changed=True, state='absent')
expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 user-delete name foo '
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_user_modify(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_name': 'foo',
'pn_password': 'test1234', 'state': 'update'})
result = self.execute_module(changed=True, state='update')
expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 user-modify name foo password test1234'
self.assertEqual(result['cli_cmd'], expected_cmd)
| gpl-3.0 | 3,382,621,277,372,873,700 | 39.776316 | 130 | 0.605034 | false |
nwjs/chromium.src | third_party/blink/web_tests/external/wpt/tools/third_party/pytest/testing/test_parseopt.py | 30 | 13227 | from __future__ import absolute_import, division, print_function
import argparse
import sys
import os
import py
import pytest
from _pytest.config import argparsing as parseopt
@pytest.fixture
def parser():
return parseopt.Parser()
class TestParser(object):
def test_no_help_by_default(self, capsys):
parser = parseopt.Parser(usage="xyz")
pytest.raises(SystemExit, lambda: parser.parse(["-h"]))
out, err = capsys.readouterr()
assert err.find("error: unrecognized arguments") != -1
def test_argument(self):
with pytest.raises(parseopt.ArgumentError):
# need a short or long option
argument = parseopt.Argument()
argument = parseopt.Argument("-t")
assert argument._short_opts == ["-t"]
assert argument._long_opts == []
assert argument.dest == "t"
argument = parseopt.Argument("-t", "--test")
assert argument._short_opts == ["-t"]
assert argument._long_opts == ["--test"]
assert argument.dest == "test"
argument = parseopt.Argument("-t", "--test", dest="abc")
assert argument.dest == "abc"
assert (
str(argument)
== ("Argument(_short_opts: ['-t'], _long_opts: ['--test'], dest: 'abc')")
)
def test_argument_type(self):
argument = parseopt.Argument("-t", dest="abc", type=int)
assert argument.type is int
argument = parseopt.Argument("-t", dest="abc", type=str)
assert argument.type is str
argument = parseopt.Argument("-t", dest="abc", type=float)
assert argument.type is float
with pytest.warns(DeprecationWarning):
with pytest.raises(KeyError):
argument = parseopt.Argument("-t", dest="abc", type="choice")
argument = parseopt.Argument(
"-t", dest="abc", type=str, choices=["red", "blue"]
)
assert argument.type is str
def test_argument_processopt(self):
argument = parseopt.Argument("-t", type=int)
argument.default = 42
argument.dest = "abc"
res = argument.attrs()
assert res["default"] == 42
assert res["dest"] == "abc"
def test_group_add_and_get(self, parser):
group = parser.getgroup("hello", description="desc")
assert group.name == "hello"
assert group.description == "desc"
def test_getgroup_simple(self, parser):
group = parser.getgroup("hello", description="desc")
assert group.name == "hello"
assert group.description == "desc"
group2 = parser.getgroup("hello")
assert group2 is group
def test_group_ordering(self, parser):
parser.getgroup("1")
parser.getgroup("2")
parser.getgroup("3", after="1")
groups = parser._groups
groups_names = [x.name for x in groups]
assert groups_names == list("132")
def test_group_addoption(self):
group = parseopt.OptionGroup("hello")
group.addoption("--option1", action="store_true")
assert len(group.options) == 1
assert isinstance(group.options[0], parseopt.Argument)
def test_group_addoption_conflict(self):
group = parseopt.OptionGroup("hello again")
group.addoption("--option1", "--option-1", action="store_true")
with pytest.raises(ValueError) as err:
group.addoption("--option1", "--option-one", action="store_true")
assert str({"--option1"}) in str(err.value)
def test_group_shortopt_lowercase(self, parser):
group = parser.getgroup("hello")
pytest.raises(
ValueError,
"""
group.addoption("-x", action="store_true")
""",
)
assert len(group.options) == 0
group._addoption("-x", action="store_true")
assert len(group.options) == 1
def test_parser_addoption(self, parser):
group = parser.getgroup("custom options")
assert len(group.options) == 0
group.addoption("--option1", action="store_true")
assert len(group.options) == 1
def test_parse(self, parser):
parser.addoption("--hello", dest="hello", action="store")
args = parser.parse(["--hello", "world"])
assert args.hello == "world"
assert not getattr(args, parseopt.FILE_OR_DIR)
def test_parse2(self, parser):
args = parser.parse([py.path.local()])
assert getattr(args, parseopt.FILE_OR_DIR)[0] == py.path.local()
def test_parse_known_args(self, parser):
parser.parse_known_args([py.path.local()])
parser.addoption("--hello", action="store_true")
ns = parser.parse_known_args(["x", "--y", "--hello", "this"])
assert ns.hello
assert ns.file_or_dir == ["x"]
def test_parse_known_and_unknown_args(self, parser):
parser.addoption("--hello", action="store_true")
ns, unknown = parser.parse_known_and_unknown_args(
["x", "--y", "--hello", "this"]
)
assert ns.hello
assert ns.file_or_dir == ["x"]
assert unknown == ["--y", "this"]
def test_parse_will_set_default(self, parser):
parser.addoption("--hello", dest="hello", default="x", action="store")
option = parser.parse([])
assert option.hello == "x"
del option.hello
parser.parse_setoption([], option)
assert option.hello == "x"
def test_parse_setoption(self, parser):
parser.addoption("--hello", dest="hello", action="store")
parser.addoption("--world", dest="world", default=42)
class A(object):
pass
option = A()
args = parser.parse_setoption(["--hello", "world"], option)
assert option.hello == "world"
assert option.world == 42
assert not args
def test_parse_special_destination(self, parser):
parser.addoption("--ultimate-answer", type=int)
args = parser.parse(["--ultimate-answer", "42"])
assert args.ultimate_answer == 42
def test_parse_split_positional_arguments(self, parser):
parser.addoption("-R", action="store_true")
parser.addoption("-S", action="store_false")
args = parser.parse(["-R", "4", "2", "-S"])
assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"]
args = parser.parse(["-R", "-S", "4", "2", "-R"])
assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"]
assert args.R is True
assert args.S is False
args = parser.parse(["-R", "4", "-S", "2"])
assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"]
assert args.R is True
assert args.S is False
def test_parse_defaultgetter(self):
def defaultget(option):
if not hasattr(option, "type"):
return
if option.type is int:
option.default = 42
elif option.type is str:
option.default = "world"
parser = parseopt.Parser(processopt=defaultget)
parser.addoption("--this", dest="this", type=int, action="store")
parser.addoption("--hello", dest="hello", type=str, action="store")
parser.addoption("--no", dest="no", action="store_true")
option = parser.parse([])
assert option.hello == "world"
assert option.this == 42
assert option.no is False
def test_drop_short_helper(self):
parser = argparse.ArgumentParser(
formatter_class=parseopt.DropShorterLongHelpFormatter
)
parser.add_argument(
"-t", "--twoword", "--duo", "--two-word", "--two", help="foo"
).map_long_option = {
"two": "two-word"
}
# throws error on --deux only!
parser.add_argument(
"-d", "--deuxmots", "--deux-mots", action="store_true", help="foo"
).map_long_option = {
"deux": "deux-mots"
}
parser.add_argument("-s", action="store_true", help="single short")
parser.add_argument("--abc", "-a", action="store_true", help="bar")
parser.add_argument("--klm", "-k", "--kl-m", action="store_true", help="bar")
parser.add_argument(
"-P", "--pq-r", "-p", "--pqr", action="store_true", help="bar"
)
parser.add_argument(
"--zwei-wort", "--zweiwort", "--zweiwort", action="store_true", help="bar"
)
parser.add_argument(
"-x", "--exit-on-first", "--exitfirst", action="store_true", help="spam"
).map_long_option = {
"exitfirst": "exit-on-first"
}
parser.add_argument("files_and_dirs", nargs="*")
args = parser.parse_args(["-k", "--duo", "hallo", "--exitfirst"])
assert args.twoword == "hallo"
assert args.klm is True
assert args.zwei_wort is False
assert args.exit_on_first is True
assert args.s is False
args = parser.parse_args(["--deux-mots"])
with pytest.raises(AttributeError):
assert args.deux_mots is True
assert args.deuxmots is True
args = parser.parse_args(["file", "dir"])
assert "|".join(args.files_and_dirs) == "file|dir"
def test_drop_short_0(self, parser):
parser.addoption("--funcarg", "--func-arg", action="store_true")
parser.addoption("--abc-def", "--abc-def", action="store_true")
parser.addoption("--klm-hij", action="store_true")
args = parser.parse(["--funcarg", "--k"])
assert args.funcarg is True
assert args.abc_def is False
assert args.klm_hij is True
def test_drop_short_2(self, parser):
parser.addoption("--func-arg", "--doit", action="store_true")
args = parser.parse(["--doit"])
assert args.func_arg is True
def test_drop_short_3(self, parser):
parser.addoption("--func-arg", "--funcarg", "--doit", action="store_true")
args = parser.parse(["abcd"])
assert args.func_arg is False
assert args.file_or_dir == ["abcd"]
def test_drop_short_help0(self, parser, capsys):
parser.addoption("--func-args", "--doit", help="foo", action="store_true")
parser.parse([])
help = parser.optparser.format_help()
assert "--func-args, --doit foo" in help
# testing would be more helpful with all help generated
def test_drop_short_help1(self, parser, capsys):
group = parser.getgroup("general")
group.addoption("--doit", "--func-args", action="store_true", help="foo")
group._addoption(
"-h",
"--help",
action="store_true",
dest="help",
help="show help message and configuration info",
)
parser.parse(["-h"])
help = parser.optparser.format_help()
assert "-doit, --func-args foo" in help
def test_multiple_metavar_help(self, parser):
"""
Help text for options with a metavar tuple should display help
in the form "--preferences=value1 value2 value3" (#2004).
"""
group = parser.getgroup("general")
group.addoption(
"--preferences", metavar=("value1", "value2", "value3"), nargs=3
)
group._addoption("-h", "--help", action="store_true", dest="help")
parser.parse(["-h"])
help = parser.optparser.format_help()
assert "--preferences=value1 value2 value3" in help
def test_argcomplete(testdir, monkeypatch):
if not py.path.local.sysfind("bash"):
pytest.skip("bash not available")
script = str(testdir.tmpdir.join("test_argcomplete"))
pytest_bin = sys.argv[0]
if "pytest" not in os.path.basename(pytest_bin):
pytest.skip("need to be run with pytest executable, not %s" % (pytest_bin,))
with open(str(script), "w") as fp:
        # redirecting output from argcomplete to stdin and stderr is not trivial
# http://stackoverflow.com/q/12589419/1307905
# so we use bash
fp.write('COMP_WORDBREAKS="$COMP_WORDBREAKS" %s 8>&1 9>&2' % pytest_bin)
    # an alternative would be to extend Testdir.{run(),_run(),popen()} to be able
    # to handle a keyword argument env that replaces os.environ in popen or
    # extends a copy of it; the advantage is that one could not forget to restore it
monkeypatch.setenv("_ARGCOMPLETE", "1")
monkeypatch.setenv("_ARGCOMPLETE_IFS", "\x0b")
monkeypatch.setenv("COMP_WORDBREAKS", " \\t\\n\"\\'><=;|&(:")
arg = "--fu"
monkeypatch.setenv("COMP_LINE", "pytest " + arg)
monkeypatch.setenv("COMP_POINT", str(len("pytest " + arg)))
result = testdir.run("bash", str(script), arg)
if result.ret == 255:
# argcomplete not found
pytest.skip("argcomplete not available")
elif not result.stdout.str():
pytest.skip("bash provided no output, argcomplete not available?")
else:
result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"])
os.mkdir("test_argcomplete.d")
arg = "test_argc"
monkeypatch.setenv("COMP_LINE", "pytest " + arg)
monkeypatch.setenv("COMP_POINT", str(len("pytest " + arg)))
result = testdir.run("bash", str(script), arg)
result.stdout.fnmatch_lines(["test_argcomplete", "test_argcomplete.d/"])
| bsd-3-clause | -373,345,755,749,597,440 | 38.366071 | 86 | 0.581311 | false |
bufferx/tornado | tornado/test/web_test.py | 1 | 68872 | from __future__ import absolute_import, division, print_function, with_statement
from tornado import gen
from tornado.escape import json_decode, utf8, to_unicode, recursive_unicode, native_str, to_basestring
from tornado.httputil import format_timestamp
from tornado.iostream import IOStream
from tornado.log import app_log, gen_log
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.template import DictLoader
from tornado.testing import AsyncHTTPTestCase, ExpectLog
from tornado.test.util import unittest
from tornado.util import u, bytes_type, ObjectDict, unicode_type
from tornado.web import RequestHandler, authenticated, Application, asynchronous, url, HTTPError, StaticFileHandler, _create_signature, create_signed_value, ErrorHandler, UIModule, MissingArgumentError
import binascii
import datetime
import email.utils
import logging
import os
import re
import socket
import sys
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
wsgi_safe_tests = []
relpath = lambda *a: os.path.join(os.path.dirname(__file__), *a)
def wsgi_safe(cls):
wsgi_safe_tests.append(cls)
return cls
class WebTestCase(AsyncHTTPTestCase):
"""Base class for web tests that also supports WSGI mode.
Override get_handlers and get_app_kwargs instead of get_app.
Append to wsgi_safe to have it run in wsgi_test as well.
"""
def get_app(self):
self.app = Application(self.get_handlers(), **self.get_app_kwargs())
return self.app
def get_handlers(self):
raise NotImplementedError()
def get_app_kwargs(self):
return {}
class SimpleHandlerTestCase(WebTestCase):
"""Simplified base class for tests that work with a single handler class.
To use, define a nested class named ``Handler``.
"""
def get_handlers(self):
return [('/', self.Handler)]
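# Illustrative sketch (not part of the original test suite): a minimal use of
# SimpleHandlerTestCase supplies the handler under test as the nested
# ``Handler`` class, e.g.
#
#     class ExampleHandlerTest(SimpleHandlerTestCase):
#         class Handler(RequestHandler):
#             def get(self):
#                 self.write("ok")
#
#         def test_get(self):
#             response = self.fetch("/")
#             self.assertEqual(response.body, b"ok")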
class HelloHandler(RequestHandler):
def get(self):
self.write('hello')
class CookieTestRequestHandler(RequestHandler):
# stub out enough methods to make the secure_cookie functions work
def __init__(self):
# don't call super.__init__
self._cookies = {}
self.application = ObjectDict(settings=dict(cookie_secret='0123456789'))
def get_cookie(self, name):
return self._cookies.get(name)
def set_cookie(self, name, value, expires_days=None):
self._cookies[name] = value
class SecureCookieTest(unittest.TestCase):
def test_round_trip(self):
handler = CookieTestRequestHandler()
handler.set_secure_cookie('foo', b'bar')
self.assertEqual(handler.get_secure_cookie('foo'), b'bar')
def test_cookie_tampering_future_timestamp(self):
handler = CookieTestRequestHandler()
# this string base64-encodes to '12345678'
handler.set_secure_cookie('foo', binascii.a2b_hex(b'd76df8e7aefc'))
cookie = handler._cookies['foo']
match = re.match(br'12345678\|([0-9]+)\|([0-9a-f]+)', cookie)
self.assertTrue(match)
timestamp = match.group(1)
sig = match.group(2)
self.assertEqual(
_create_signature(handler.application.settings["cookie_secret"],
'foo', '12345678', timestamp),
sig)
# shifting digits from payload to timestamp doesn't alter signature
# (this is not desirable behavior, just confirming that that's how it
# works)
self.assertEqual(
_create_signature(handler.application.settings["cookie_secret"],
'foo', '1234', b'5678' + timestamp),
sig)
# tamper with the cookie
handler._cookies['foo'] = utf8('1234|5678%s|%s' % (
to_basestring(timestamp), to_basestring(sig)))
# it gets rejected
with ExpectLog(gen_log, "Cookie timestamp in future"):
self.assertTrue(handler.get_secure_cookie('foo') is None)
def test_arbitrary_bytes(self):
# Secure cookies accept arbitrary data (which is base64 encoded).
# Note that normal cookies accept only a subset of ascii.
handler = CookieTestRequestHandler()
handler.set_secure_cookie('foo', b'\xe9')
self.assertEqual(handler.get_secure_cookie('foo'), b'\xe9')
class CookieTest(WebTestCase):
def get_handlers(self):
class SetCookieHandler(RequestHandler):
def get(self):
# Try setting cookies with different argument types
# to ensure that everything gets encoded correctly
self.set_cookie("str", "asdf")
self.set_cookie("unicode", u("qwer"))
self.set_cookie("bytes", b"zxcv")
class GetCookieHandler(RequestHandler):
def get(self):
self.write(self.get_cookie("foo", "default"))
class SetCookieDomainHandler(RequestHandler):
def get(self):
# unicode domain and path arguments shouldn't break things
# either (see bug #285)
self.set_cookie("unicode_args", "blah", domain=u("foo.com"),
path=u("/foo"))
class SetCookieSpecialCharHandler(RequestHandler):
def get(self):
self.set_cookie("equals", "a=b")
self.set_cookie("semicolon", "a;b")
self.set_cookie("quote", 'a"b')
class SetCookieOverwriteHandler(RequestHandler):
def get(self):
self.set_cookie("a", "b", domain="example.com")
self.set_cookie("c", "d", domain="example.com")
# A second call with the same name clobbers the first.
# Attributes from the first call are not carried over.
self.set_cookie("a", "e")
return [("/set", SetCookieHandler),
("/get", GetCookieHandler),
("/set_domain", SetCookieDomainHandler),
("/special_char", SetCookieSpecialCharHandler),
("/set_overwrite", SetCookieOverwriteHandler),
]
def test_set_cookie(self):
response = self.fetch("/set")
self.assertEqual(sorted(response.headers.get_list("Set-Cookie")),
["bytes=zxcv; Path=/",
"str=asdf; Path=/",
"unicode=qwer; Path=/",
])
def test_get_cookie(self):
response = self.fetch("/get", headers={"Cookie": "foo=bar"})
self.assertEqual(response.body, b"bar")
response = self.fetch("/get", headers={"Cookie": 'foo="bar"'})
self.assertEqual(response.body, b"bar")
response = self.fetch("/get", headers={"Cookie": "/=exception;"})
self.assertEqual(response.body, b"default")
def test_set_cookie_domain(self):
response = self.fetch("/set_domain")
self.assertEqual(response.headers.get_list("Set-Cookie"),
["unicode_args=blah; Domain=foo.com; Path=/foo"])
def test_cookie_special_char(self):
response = self.fetch("/special_char")
headers = sorted(response.headers.get_list("Set-Cookie"))
self.assertEqual(len(headers), 3)
self.assertEqual(headers[0], 'equals="a=b"; Path=/')
self.assertEqual(headers[1], 'quote="a\\"b"; Path=/')
# python 2.7 octal-escapes the semicolon; older versions leave it alone
self.assertTrue(headers[2] in ('semicolon="a;b"; Path=/',
'semicolon="a\\073b"; Path=/'),
headers[2])
data = [('foo=a=b', 'a=b'),
('foo="a=b"', 'a=b'),
('foo="a;b"', 'a;b'),
# ('foo=a\\073b', 'a;b'), # even encoded, ";" is a delimiter
('foo="a\\073b"', 'a;b'),
('foo="a\\"b"', 'a"b'),
]
for header, expected in data:
logging.debug("trying %r", header)
response = self.fetch("/get", headers={"Cookie": header})
self.assertEqual(response.body, utf8(expected))
def test_set_cookie_overwrite(self):
response = self.fetch("/set_overwrite")
headers = response.headers.get_list("Set-Cookie")
self.assertEqual(sorted(headers),
["a=e; Path=/", "c=d; Domain=example.com; Path=/"])
class AuthRedirectRequestHandler(RequestHandler):
def initialize(self, login_url):
self.login_url = login_url
def get_login_url(self):
return self.login_url
@authenticated
def get(self):
# we'll never actually get here because the test doesn't follow redirects
self.send_error(500)
class AuthRedirectTest(WebTestCase):
def get_handlers(self):
return [('/relative', AuthRedirectRequestHandler,
dict(login_url='/login')),
('/absolute', AuthRedirectRequestHandler,
dict(login_url='http://example.com/login'))]
def test_relative_auth_redirect(self):
self.http_client.fetch(self.get_url('/relative'), self.stop,
follow_redirects=False)
response = self.wait()
self.assertEqual(response.code, 302)
self.assertEqual(response.headers['Location'], '/login?next=%2Frelative')
def test_absolute_auth_redirect(self):
self.http_client.fetch(self.get_url('/absolute'), self.stop,
follow_redirects=False)
response = self.wait()
self.assertEqual(response.code, 302)
self.assertTrue(re.match(
'http://example.com/login\?next=http%3A%2F%2Flocalhost%3A[0-9]+%2Fabsolute',
response.headers['Location']), response.headers['Location'])
class ConnectionCloseHandler(RequestHandler):
def initialize(self, test):
self.test = test
@asynchronous
def get(self):
self.test.on_handler_waiting()
def on_connection_close(self):
self.test.on_connection_close()
class ConnectionCloseTest(WebTestCase):
def get_handlers(self):
return [('/', ConnectionCloseHandler, dict(test=self))]
def test_connection_close(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.connect(("localhost", self.get_http_port()))
self.stream = IOStream(s, io_loop=self.io_loop)
self.stream.write(b"GET / HTTP/1.0\r\n\r\n")
self.wait()
def on_handler_waiting(self):
logging.debug('handler waiting')
self.stream.close()
def on_connection_close(self):
logging.debug('connection closed')
self.stop()
class EchoHandler(RequestHandler):
def get(self, *path_args):
# Type checks: web.py interfaces convert argument values to
# unicode strings (by default, but see also decode_argument).
# In httpserver.py (i.e. self.request.arguments), they're left
# as bytes. Keys are always native strings.
for key in self.request.arguments:
if type(key) != str:
raise Exception("incorrect type for key: %r" % type(key))
for value in self.request.arguments[key]:
if type(value) != bytes_type:
raise Exception("incorrect type for value: %r" %
type(value))
for value in self.get_arguments(key):
if type(value) != unicode_type:
raise Exception("incorrect type for value: %r" %
type(value))
for arg in path_args:
if type(arg) != unicode_type:
raise Exception("incorrect type for path arg: %r" % type(arg))
self.write(dict(path=self.request.path,
path_args=path_args,
args=recursive_unicode(self.request.arguments)))
class RequestEncodingTest(WebTestCase):
def get_handlers(self):
return [("/group/(.*)", EchoHandler),
("/slashes/([^/]*)/([^/]*)", EchoHandler),
]
def fetch_json(self, path):
return json_decode(self.fetch(path).body)
def test_group_question_mark(self):
# Ensure that url-encoded question marks are handled properly
self.assertEqual(self.fetch_json('/group/%3F'),
dict(path='/group/%3F', path_args=['?'], args={}))
self.assertEqual(self.fetch_json('/group/%3F?%3F=%3F'),
dict(path='/group/%3F', path_args=['?'], args={'?': ['?']}))
def test_group_encoding(self):
# Path components and query arguments should be decoded the same way
self.assertEqual(self.fetch_json('/group/%C3%A9?arg=%C3%A9'),
{u("path"): u("/group/%C3%A9"),
u("path_args"): [u("\u00e9")],
u("args"): {u("arg"): [u("\u00e9")]}})
def test_slashes(self):
# Slashes may be escaped to appear as a single "directory" in the path,
# but they are then unescaped when passed to the get() method.
self.assertEqual(self.fetch_json('/slashes/foo/bar'),
dict(path="/slashes/foo/bar",
path_args=["foo", "bar"],
args={}))
self.assertEqual(self.fetch_json('/slashes/a%2Fb/c%2Fd'),
dict(path="/slashes/a%2Fb/c%2Fd",
path_args=["a/b", "c/d"],
args={}))
class TypeCheckHandler(RequestHandler):
def prepare(self):
self.errors = {}
self.check_type('status', self.get_status(), int)
# get_argument is an exception from the general rule of using
# type str for non-body data mainly for historical reasons.
self.check_type('argument', self.get_argument('foo'), unicode_type)
self.check_type('cookie_key', list(self.cookies.keys())[0], str)
self.check_type('cookie_value', list(self.cookies.values())[0].value, str)
# Secure cookies return bytes because they can contain arbitrary
# data, but regular cookies are native strings.
if list(self.cookies.keys()) != ['asdf']:
raise Exception("unexpected values for cookie keys: %r" %
self.cookies.keys())
self.check_type('get_secure_cookie', self.get_secure_cookie('asdf'), bytes_type)
self.check_type('get_cookie', self.get_cookie('asdf'), str)
self.check_type('xsrf_token', self.xsrf_token, bytes_type)
self.check_type('xsrf_form_html', self.xsrf_form_html(), str)
self.check_type('reverse_url', self.reverse_url('typecheck', 'foo'), str)
self.check_type('request_summary', self._request_summary(), str)
def get(self, path_component):
# path_component uses type unicode instead of str for consistency
# with get_argument()
self.check_type('path_component', path_component, unicode_type)
self.write(self.errors)
def post(self, path_component):
self.check_type('path_component', path_component, unicode_type)
self.write(self.errors)
def check_type(self, name, obj, expected_type):
actual_type = type(obj)
if expected_type != actual_type:
self.errors[name] = "expected %s, got %s" % (expected_type,
actual_type)
class DecodeArgHandler(RequestHandler):
def decode_argument(self, value, name=None):
if type(value) != bytes_type:
raise Exception("unexpected type for value: %r" % type(value))
# use self.request.arguments directly to avoid recursion
if 'encoding' in self.request.arguments:
return value.decode(to_unicode(self.request.arguments['encoding'][0]))
else:
return value
def get(self, arg):
def describe(s):
if type(s) == bytes_type:
return ["bytes", native_str(binascii.b2a_hex(s))]
elif type(s) == unicode_type:
return ["unicode", s]
raise Exception("unknown type")
self.write({'path': describe(arg),
'query': describe(self.get_argument("foo")),
})
class LinkifyHandler(RequestHandler):
def get(self):
self.render("linkify.html", message="http://example.com")
class UIModuleResourceHandler(RequestHandler):
def get(self):
self.render("page.html", entries=[1, 2])
class OptionalPathHandler(RequestHandler):
def get(self, path):
self.write({"path": path})
class FlowControlHandler(RequestHandler):
    # These writes are too small to demonstrate real flow control,
    # but at least they show that the callbacks get run.
@asynchronous
def get(self):
self.write("1")
self.flush(callback=self.step2)
def step2(self):
self.write("2")
self.flush(callback=self.step3)
def step3(self):
self.write("3")
self.finish()
class MultiHeaderHandler(RequestHandler):
def get(self):
self.set_header("x-overwrite", "1")
self.set_header("X-Overwrite", 2)
self.add_header("x-multi", 3)
self.add_header("X-Multi", "4")
class RedirectHandler(RequestHandler):
def get(self):
if self.get_argument('permanent', None) is not None:
self.redirect('/', permanent=int(self.get_argument('permanent')))
elif self.get_argument('status', None) is not None:
self.redirect('/', status=int(self.get_argument('status')))
else:
raise Exception("didn't get permanent or status arguments")
class EmptyFlushCallbackHandler(RequestHandler):
@gen.engine
@asynchronous
def get(self):
# Ensure that the flush callback is run whether or not there
# was any output.
yield gen.Task(self.flush) # "empty" flush, but writes headers
yield gen.Task(self.flush) # empty flush
self.write("o")
yield gen.Task(self.flush) # flushes the "o"
yield gen.Task(self.flush) # empty flush
self.finish("k")
class HeaderInjectionHandler(RequestHandler):
def get(self):
try:
self.set_header("X-Foo", "foo\r\nX-Bar: baz")
raise Exception("Didn't get expected exception")
except ValueError as e:
if "Unsafe header value" in str(e):
self.finish(b"ok")
else:
raise
class GetArgumentHandler(RequestHandler):
def prepare(self):
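        # Dispatch on the ?source= argument to exercise get_query_argument,
        # get_body_argument, or the combined get_argument.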
if self.get_argument('source', None) == 'query':
method = self.get_query_argument
elif self.get_argument('source', None) == 'body':
method = self.get_body_argument
else:
method = self.get_argument
self.finish(method("foo", "default"))
class GetArgumentsHandler(RequestHandler):
def prepare(self):
self.finish(dict(default=self.get_arguments("foo"),
query=self.get_query_arguments("foo"),
body=self.get_body_arguments("foo")))
# This test is shared with wsgi_test.py
@wsgi_safe
class WSGISafeWebTest(WebTestCase):
COOKIE_SECRET = "WebTest.COOKIE_SECRET"
def get_app_kwargs(self):
loader = DictLoader({
"linkify.html": "{% module linkify(message) %}",
"page.html": """\
<html><head></head><body>
{% for e in entries %}
{% module Template("entry.html", entry=e) %}
{% end %}
</body></html>""",
"entry.html": """\
{{ set_resources(embedded_css=".entry { margin-bottom: 1em; }", embedded_javascript="js_embed()", css_files=["/base.css", "/foo.css"], javascript_files="/common.js", html_head="<meta>", html_body='<script src="/analytics.js"/>') }}
<div class="entry">...</div>""",
})
return dict(template_loader=loader,
autoescape="xhtml_escape",
cookie_secret=self.COOKIE_SECRET)
def tearDown(self):
super(WSGISafeWebTest, self).tearDown()
RequestHandler._template_loaders.clear()
def get_handlers(self):
urls = [
url("/typecheck/(.*)", TypeCheckHandler, name='typecheck'),
url("/decode_arg/(.*)", DecodeArgHandler, name='decode_arg'),
url("/decode_arg_kw/(?P<arg>.*)", DecodeArgHandler),
url("/linkify", LinkifyHandler),
url("/uimodule_resources", UIModuleResourceHandler),
url("/optional_path/(.+)?", OptionalPathHandler),
url("/multi_header", MultiHeaderHandler),
url("/redirect", RedirectHandler),
url("/header_injection", HeaderInjectionHandler),
url("/get_argument", GetArgumentHandler),
url("/get_arguments", GetArgumentsHandler),
]
return urls
def fetch_json(self, *args, **kwargs):
response = self.fetch(*args, **kwargs)
response.rethrow()
return json_decode(response.body)
def test_types(self):
cookie_value = to_unicode(create_signed_value(self.COOKIE_SECRET,
"asdf", "qwer"))
response = self.fetch("/typecheck/asdf?foo=bar",
headers={"Cookie": "asdf=" + cookie_value})
data = json_decode(response.body)
self.assertEqual(data, {})
response = self.fetch("/typecheck/asdf?foo=bar", method="POST",
headers={"Cookie": "asdf=" + cookie_value},
body="foo=bar")
def test_decode_argument(self):
# These urls all decode to the same thing
urls = ["/decode_arg/%C3%A9?foo=%C3%A9&encoding=utf-8",
"/decode_arg/%E9?foo=%E9&encoding=latin1",
"/decode_arg_kw/%E9?foo=%E9&encoding=latin1",
]
for url in urls:
response = self.fetch(url)
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {u('path'): [u('unicode'), u('\u00e9')],
u('query'): [u('unicode'), u('\u00e9')],
})
response = self.fetch("/decode_arg/%C3%A9?foo=%C3%A9")
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {u('path'): [u('bytes'), u('c3a9')],
u('query'): [u('bytes'), u('c3a9')],
})
def test_decode_argument_plus(self):
# These urls are all equivalent.
urls = ["/decode_arg/1%20%2B%201?foo=1%20%2B%201&encoding=utf-8",
"/decode_arg/1%20+%201?foo=1+%2B+1&encoding=utf-8"]
for url in urls:
response = self.fetch(url)
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {u('path'): [u('unicode'), u('1 + 1')],
u('query'): [u('unicode'), u('1 + 1')],
})
def test_reverse_url(self):
self.assertEqual(self.app.reverse_url('decode_arg', 'foo'),
'/decode_arg/foo')
self.assertEqual(self.app.reverse_url('decode_arg', 42),
'/decode_arg/42')
self.assertEqual(self.app.reverse_url('decode_arg', b'\xe9'),
'/decode_arg/%E9')
self.assertEqual(self.app.reverse_url('decode_arg', u('\u00e9')),
'/decode_arg/%C3%A9')
self.assertEqual(self.app.reverse_url('decode_arg', '1 + 1'),
'/decode_arg/1%20%2B%201')
def test_uimodule_unescaped(self):
response = self.fetch("/linkify")
self.assertEqual(response.body,
b"<a href=\"http://example.com\">http://example.com</a>")
def test_uimodule_resources(self):
response = self.fetch("/uimodule_resources")
self.assertEqual(response.body, b"""\
<html><head><link href="/base.css" type="text/css" rel="stylesheet"/><link href="/foo.css" type="text/css" rel="stylesheet"/>
<style type="text/css">
.entry { margin-bottom: 1em; }
</style>
<meta>
</head><body>
<div class="entry">...</div>
<div class="entry">...</div>
<script src="/common.js" type="text/javascript"></script>
<script type="text/javascript">
//<![CDATA[
js_embed()
//]]>
</script>
<script src="/analytics.js"/>
</body></html>""")
def test_optional_path(self):
self.assertEqual(self.fetch_json("/optional_path/foo"),
{u("path"): u("foo")})
self.assertEqual(self.fetch_json("/optional_path/"),
{u("path"): None})
def test_multi_header(self):
response = self.fetch("/multi_header")
self.assertEqual(response.headers["x-overwrite"], "2")
self.assertEqual(response.headers.get_list("x-multi"), ["3", "4"])
def test_redirect(self):
response = self.fetch("/redirect?permanent=1", follow_redirects=False)
self.assertEqual(response.code, 301)
response = self.fetch("/redirect?permanent=0", follow_redirects=False)
self.assertEqual(response.code, 302)
response = self.fetch("/redirect?status=307", follow_redirects=False)
self.assertEqual(response.code, 307)
def test_header_injection(self):
response = self.fetch("/header_injection")
self.assertEqual(response.body, b"ok")
def test_get_argument(self):
response = self.fetch("/get_argument?foo=bar")
self.assertEqual(response.body, b"bar")
response = self.fetch("/get_argument?foo=")
self.assertEqual(response.body, b"")
response = self.fetch("/get_argument")
self.assertEqual(response.body, b"default")
# Test merging of query and body arguments.
# In singular form, body arguments take precedence over query arguments.
body = urllib_parse.urlencode(dict(foo="hello"))
response = self.fetch("/get_argument?foo=bar", method="POST", body=body)
self.assertEqual(response.body, b"hello")
# In plural methods they are merged.
response = self.fetch("/get_arguments?foo=bar",
method="POST", body=body)
self.assertEqual(json_decode(response.body),
dict(default=['bar', 'hello'],
query=['bar'],
body=['hello']))
def test_get_query_arguments(self):
# send as a post so we can ensure the separation between query
# string and body arguments.
body = urllib_parse.urlencode(dict(foo="hello"))
response = self.fetch("/get_argument?source=query&foo=bar",
method="POST", body=body)
self.assertEqual(response.body, b"bar")
response = self.fetch("/get_argument?source=query&foo=",
method="POST", body=body)
self.assertEqual(response.body, b"")
response = self.fetch("/get_argument?source=query",
method="POST", body=body)
self.assertEqual(response.body, b"default")
def test_get_body_arguments(self):
body = urllib_parse.urlencode(dict(foo="bar"))
response = self.fetch("/get_argument?source=body&foo=hello",
method="POST", body=body)
self.assertEqual(response.body, b"bar")
body = urllib_parse.urlencode(dict(foo=""))
response = self.fetch("/get_argument?source=body&foo=hello",
method="POST", body=body)
self.assertEqual(response.body, b"")
body = urllib_parse.urlencode(dict())
response = self.fetch("/get_argument?source=body&foo=hello",
method="POST", body=body)
self.assertEqual(response.body, b"default")
def test_no_gzip(self):
response = self.fetch('/get_argument')
self.assertNotIn('Accept-Encoding', response.headers.get('Vary', ''))
self.assertNotIn('gzip', response.headers.get('Content-Encoding', ''))
class NonWSGIWebTests(WebTestCase):
def get_handlers(self):
return [("/flow_control", FlowControlHandler),
("/empty_flush", EmptyFlushCallbackHandler),
]
def test_flow_control(self):
self.assertEqual(self.fetch("/flow_control").body, b"123")
def test_empty_flush(self):
response = self.fetch("/empty_flush")
self.assertEqual(response.body, b"ok")
@wsgi_safe
class ErrorResponseTest(WebTestCase):
def get_handlers(self):
class DefaultHandler(RequestHandler):
def get(self):
if self.get_argument("status", None):
raise HTTPError(int(self.get_argument("status")))
1 / 0
class WriteErrorHandler(RequestHandler):
def get(self):
if self.get_argument("status", None):
self.send_error(int(self.get_argument("status")))
else:
1 / 0
def write_error(self, status_code, **kwargs):
self.set_header("Content-Type", "text/plain")
if "exc_info" in kwargs:
self.write("Exception: %s" % kwargs["exc_info"][0].__name__)
else:
self.write("Status: %d" % status_code)
class GetErrorHtmlHandler(RequestHandler):
def get(self):
if self.get_argument("status", None):
self.send_error(int(self.get_argument("status")))
else:
1 / 0
def get_error_html(self, status_code, **kwargs):
self.set_header("Content-Type", "text/plain")
if "exception" in kwargs:
self.write("Exception: %s" % sys.exc_info()[0].__name__)
else:
self.write("Status: %d" % status_code)
class FailedWriteErrorHandler(RequestHandler):
def get(self):
1 / 0
def write_error(self, status_code, **kwargs):
raise Exception("exception in write_error")
return [url("/default", DefaultHandler),
url("/write_error", WriteErrorHandler),
url("/get_error_html", GetErrorHtmlHandler),
url("/failed_write_error", FailedWriteErrorHandler),
]
def test_default(self):
with ExpectLog(app_log, "Uncaught exception"):
response = self.fetch("/default")
self.assertEqual(response.code, 500)
self.assertTrue(b"500: Internal Server Error" in response.body)
response = self.fetch("/default?status=503")
self.assertEqual(response.code, 503)
self.assertTrue(b"503: Service Unavailable" in response.body)
def test_write_error(self):
with ExpectLog(app_log, "Uncaught exception"):
response = self.fetch("/write_error")
self.assertEqual(response.code, 500)
self.assertEqual(b"Exception: ZeroDivisionError", response.body)
response = self.fetch("/write_error?status=503")
self.assertEqual(response.code, 503)
self.assertEqual(b"Status: 503", response.body)
def test_get_error_html(self):
with ExpectLog(app_log, "Uncaught exception"):
response = self.fetch("/get_error_html")
self.assertEqual(response.code, 500)
self.assertEqual(b"Exception: ZeroDivisionError", response.body)
response = self.fetch("/get_error_html?status=503")
self.assertEqual(response.code, 503)
self.assertEqual(b"Status: 503", response.body)
def test_failed_write_error(self):
with ExpectLog(app_log, "Uncaught exception"):
response = self.fetch("/failed_write_error")
self.assertEqual(response.code, 500)
self.assertEqual(b"", response.body)
@wsgi_safe
class StaticFileTest(WebTestCase):
# The expected MD5 hash of robots.txt, used in tests that call
# StaticFileHandler.get_version
robots_txt_hash = b"f71d20196d4caf35b6a670db8c70b03d"
static_dir = os.path.join(os.path.dirname(__file__), 'static')
def get_handlers(self):
class StaticUrlHandler(RequestHandler):
def get(self, path):
with_v = int(self.get_argument('include_version', 1))
self.write(self.static_url(path, include_version=with_v))
class AbsoluteStaticUrlHandler(StaticUrlHandler):
include_host = True
class OverrideStaticUrlHandler(RequestHandler):
def get(self, path):
                do_include = bool(int(self.get_argument("include_host")))
self.include_host = not do_include
regular_url = self.static_url(path)
override_url = self.static_url(path, include_host=do_include)
if override_url == regular_url:
return self.write(str(False))
protocol = self.request.protocol + "://"
protocol_length = len(protocol)
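                # find() limited to the prefix length returns 0 when the URL starts with
                # "protocol://" (absolute) and -1 when it does not (relative).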
check_regular = regular_url.find(protocol, 0, protocol_length)
check_override = override_url.find(protocol, 0, protocol_length)
if do_include:
result = (check_override == 0 and check_regular == -1)
else:
result = (check_override == -1 and check_regular == 0)
self.write(str(result))
return [('/static_url/(.*)', StaticUrlHandler),
('/abs_static_url/(.*)', AbsoluteStaticUrlHandler),
('/override_static_url/(.*)', OverrideStaticUrlHandler)]
def get_app_kwargs(self):
return dict(static_path=relpath('static'))
def test_static_files(self):
response = self.fetch('/robots.txt')
self.assertTrue(b"Disallow: /" in response.body)
response = self.fetch('/static/robots.txt')
self.assertTrue(b"Disallow: /" in response.body)
def test_static_url(self):
response = self.fetch("/static_url/robots.txt")
self.assertEqual(response.body,
b"/static/robots.txt?v=" + self.robots_txt_hash)
def test_absolute_static_url(self):
response = self.fetch("/abs_static_url/robots.txt")
self.assertEqual(response.body, (
utf8(self.get_url("/")) +
b"static/robots.txt?v=" +
self.robots_txt_hash
))
def test_relative_version_exclusion(self):
response = self.fetch("/static_url/robots.txt?include_version=0")
self.assertEqual(response.body, b"/static/robots.txt")
def test_absolute_version_exclusion(self):
response = self.fetch("/abs_static_url/robots.txt?include_version=0")
self.assertEqual(response.body,
utf8(self.get_url("/") + "static/robots.txt"))
def test_include_host_override(self):
self._trigger_include_host_check(False)
self._trigger_include_host_check(True)
def _trigger_include_host_check(self, include_host):
path = "/override_static_url/robots.txt?include_host=%s"
response = self.fetch(path % int(include_host))
self.assertEqual(response.body, utf8(str(True)))
def test_static_304_if_modified_since(self):
response1 = self.fetch("/static/robots.txt")
response2 = self.fetch("/static/robots.txt", headers={
'If-Modified-Since': response1.headers['Last-Modified']})
self.assertEqual(response2.code, 304)
self.assertTrue('Content-Length' not in response2.headers)
self.assertTrue('Last-Modified' not in response2.headers)
def test_static_304_if_none_match(self):
response1 = self.fetch("/static/robots.txt")
response2 = self.fetch("/static/robots.txt", headers={
'If-None-Match': response1.headers['Etag']})
self.assertEqual(response2.code, 304)
def test_static_if_modified_since_pre_epoch(self):
        # On Windows, the functions that work with time_t do not accept
# negative values, and at least one client (processing.js) seems
# to use if-modified-since 1/1/1960 as a cache-busting technique.
response = self.fetch("/static/robots.txt", headers={
'If-Modified-Since': 'Fri, 01 Jan 1960 00:00:00 GMT'})
self.assertEqual(response.code, 200)
def test_static_if_modified_since_time_zone(self):
# Instead of the value from Last-Modified, make requests with times
# chosen just before and after the known modification time
# of the file to ensure that the right time zone is being used
# when parsing If-Modified-Since.
stat = os.stat(relpath('static/robots.txt'))
response = self.fetch('/static/robots.txt', headers={
'If-Modified-Since': format_timestamp(stat.st_mtime - 1)})
self.assertEqual(response.code, 200)
response = self.fetch('/static/robots.txt', headers={
'If-Modified-Since': format_timestamp(stat.st_mtime + 1)})
self.assertEqual(response.code, 304)
def test_static_etag(self):
response = self.fetch('/static/robots.txt')
self.assertEqual(utf8(response.headers.get("Etag")),
b'"' + self.robots_txt_hash + b'"')
def test_static_with_range(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=0-9'})
self.assertEqual(response.code, 206)
self.assertEqual(response.body, b"User-agent")
self.assertEqual(utf8(response.headers.get("Etag")),
b'"' + self.robots_txt_hash + b'"')
self.assertEqual(response.headers.get("Content-Length"), "10")
self.assertEqual(response.headers.get("Content-Range"),
"bytes 0-9/26")
def test_static_with_range_full_file(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=0-'})
# Note: Chrome refuses to play audio if it gets an HTTP 206 in response
# to ``Range: bytes=0-`` :(
self.assertEqual(response.code, 200)
robots_file_path = os.path.join(self.static_dir, "robots.txt")
with open(robots_file_path) as f:
self.assertEqual(response.body, utf8(f.read()))
self.assertEqual(response.headers.get("Content-Length"), "26")
self.assertEqual(response.headers.get("Content-Range"), None)
def test_static_with_range_full_past_end(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=0-10000000'})
self.assertEqual(response.code, 200)
robots_file_path = os.path.join(self.static_dir, "robots.txt")
with open(robots_file_path) as f:
self.assertEqual(response.body, utf8(f.read()))
self.assertEqual(response.headers.get("Content-Length"), "26")
self.assertEqual(response.headers.get("Content-Range"), None)
def test_static_with_range_partial_past_end(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=1-10000000'})
self.assertEqual(response.code, 206)
robots_file_path = os.path.join(self.static_dir, "robots.txt")
with open(robots_file_path) as f:
self.assertEqual(response.body, utf8(f.read()[1:]))
self.assertEqual(response.headers.get("Content-Length"), "25")
self.assertEqual(response.headers.get("Content-Range"), "bytes 1-25/26")
def test_static_with_range_end_edge(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=22-'})
self.assertEqual(response.body, b": /\n")
self.assertEqual(response.headers.get("Content-Length"), "4")
self.assertEqual(response.headers.get("Content-Range"),
"bytes 22-25/26")
def test_static_with_range_neg_end(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=-4'})
self.assertEqual(response.body, b": /\n")
self.assertEqual(response.headers.get("Content-Length"), "4")
self.assertEqual(response.headers.get("Content-Range"),
"bytes 22-25/26")
def test_static_invalid_range(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'asdf'})
self.assertEqual(response.code, 200)
def test_static_unsatisfiable_range_zero_suffix(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=-0'})
self.assertEqual(response.headers.get("Content-Range"),
"bytes */26")
self.assertEqual(response.code, 416)
def test_static_unsatisfiable_range_invalid_start(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=26'})
self.assertEqual(response.code, 416)
self.assertEqual(response.headers.get("Content-Range"),
"bytes */26")
def test_static_head(self):
response = self.fetch('/static/robots.txt', method='HEAD')
self.assertEqual(response.code, 200)
# No body was returned, but we did get the right content length.
self.assertEqual(response.body, b'')
self.assertEqual(response.headers['Content-Length'], '26')
self.assertEqual(utf8(response.headers['Etag']),
b'"' + self.robots_txt_hash + b'"')
def test_static_head_range(self):
response = self.fetch('/static/robots.txt', method='HEAD',
headers={'Range': 'bytes=1-4'})
self.assertEqual(response.code, 206)
self.assertEqual(response.body, b'')
self.assertEqual(response.headers['Content-Length'], '4')
self.assertEqual(utf8(response.headers['Etag']),
b'"' + self.robots_txt_hash + b'"')
def test_static_range_if_none_match(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=1-4',
'If-None-Match': b'"' + self.robots_txt_hash + b'"'})
self.assertEqual(response.code, 304)
self.assertEqual(response.body, b'')
self.assertTrue('Content-Length' not in response.headers)
self.assertEqual(utf8(response.headers['Etag']),
b'"' + self.robots_txt_hash + b'"')
def test_static_404(self):
response = self.fetch('/static/blarg')
self.assertEqual(response.code, 404)
@wsgi_safe
class StaticDefaultFilenameTest(WebTestCase):
def get_app_kwargs(self):
return dict(static_path=relpath('static'),
static_handler_args=dict(default_filename='index.html'))
def get_handlers(self):
return []
def test_static_default_filename(self):
response = self.fetch('/static/dir/', follow_redirects=False)
self.assertEqual(response.code, 200)
self.assertEqual(b'this is the index\n', response.body)
def test_static_default_redirect(self):
response = self.fetch('/static/dir', follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertTrue(response.headers['Location'].endswith('/static/dir/'))
@wsgi_safe
class StaticFileWithPathTest(WebTestCase):
def get_app_kwargs(self):
return dict(static_path=relpath('static'),
static_handler_args=dict(default_filename='index.html'))
def get_handlers(self):
return [("/foo/(.*)", StaticFileHandler, {
"path": relpath("templates/"),
})]
def test_serve(self):
response = self.fetch("/foo/utf8.html")
self.assertEqual(response.body, b"H\xc3\xa9llo\n")
@wsgi_safe
class CustomStaticFileTest(WebTestCase):
def get_handlers(self):
class MyStaticFileHandler(StaticFileHandler):
@classmethod
def make_static_url(cls, settings, path):
version_hash = cls.get_version(settings, path)
extension_index = path.rindex('.')
before_version = path[:extension_index]
after_version = path[(extension_index + 1):]
return '/static/%s.%s.%s' % (before_version, version_hash,
after_version)
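            # parse_url_path (below) strips the version segment that make_static_url
            # inserted before the file extension.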
def parse_url_path(self, url_path):
extension_index = url_path.rindex('.')
version_index = url_path.rindex('.', 0, extension_index)
return '%s%s' % (url_path[:version_index],
url_path[extension_index:])
@classmethod
def get_absolute_path(cls, settings, path):
return 'CustomStaticFileTest:' + path
def validate_absolute_path(self, root, absolute_path):
return absolute_path
@classmethod
            def get_content(cls, path, start=None, end=None):
assert start is None and end is None
if path == 'CustomStaticFileTest:foo.txt':
return b'bar'
raise Exception("unexpected path %r" % path)
def get_modified_time(self):
return None
@classmethod
def get_version(cls, settings, path):
return "42"
class StaticUrlHandler(RequestHandler):
def get(self, path):
self.write(self.static_url(path))
self.static_handler_class = MyStaticFileHandler
return [("/static_url/(.*)", StaticUrlHandler)]
def get_app_kwargs(self):
return dict(static_path="dummy",
static_handler_class=self.static_handler_class)
def test_serve(self):
response = self.fetch("/static/foo.42.txt")
self.assertEqual(response.body, b"bar")
def test_static_url(self):
with ExpectLog(gen_log, "Could not open static file", required=False):
response = self.fetch("/static_url/foo.txt")
self.assertEqual(response.body, b"/static/foo.42.txt")
@wsgi_safe
class HostMatchingTest(WebTestCase):
class Handler(RequestHandler):
def initialize(self, reply):
self.reply = reply
def get(self):
self.write(self.reply)
def get_handlers(self):
return [("/foo", HostMatchingTest.Handler, {"reply": "wildcard"})]
def test_host_matching(self):
self.app.add_handlers("www.example.com",
[("/foo", HostMatchingTest.Handler, {"reply": "[0]"})])
self.app.add_handlers(r"www\.example\.com",
[("/bar", HostMatchingTest.Handler, {"reply": "[1]"})])
self.app.add_handlers("www.example.com",
[("/baz", HostMatchingTest.Handler, {"reply": "[2]"})])
response = self.fetch("/foo")
self.assertEqual(response.body, b"wildcard")
response = self.fetch("/bar")
self.assertEqual(response.code, 404)
response = self.fetch("/baz")
self.assertEqual(response.code, 404)
response = self.fetch("/foo", headers={'Host': 'www.example.com'})
self.assertEqual(response.body, b"[0]")
response = self.fetch("/bar", headers={'Host': 'www.example.com'})
self.assertEqual(response.body, b"[1]")
response = self.fetch("/baz", headers={'Host': 'www.example.com'})
self.assertEqual(response.body, b"[2]")
@wsgi_safe
class NamedURLSpecGroupsTest(WebTestCase):
def get_handlers(self):
class EchoHandler(RequestHandler):
def get(self, path):
self.write(path)
return [("/str/(?P<path>.*)", EchoHandler),
(u("/unicode/(?P<path>.*)"), EchoHandler)]
def test_named_urlspec_groups(self):
response = self.fetch("/str/foo")
self.assertEqual(response.body, b"foo")
response = self.fetch("/unicode/bar")
self.assertEqual(response.body, b"bar")
@wsgi_safe
class ClearHeaderTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.set_header("h1", "foo")
self.set_header("h2", "bar")
self.clear_header("h1")
self.clear_header("nonexistent")
def test_clear_header(self):
response = self.fetch("/")
self.assertTrue("h1" not in response.headers)
self.assertEqual(response.headers["h2"], "bar")
@wsgi_safe
class Header304Test(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.set_header("Content-Language", "en_US")
self.write("hello")
def test_304_headers(self):
response1 = self.fetch('/')
self.assertEqual(response1.headers["Content-Length"], "5")
self.assertEqual(response1.headers["Content-Language"], "en_US")
response2 = self.fetch('/', headers={
'If-None-Match': response1.headers["Etag"]})
self.assertEqual(response2.code, 304)
self.assertTrue("Content-Length" not in response2.headers)
self.assertTrue("Content-Language" not in response2.headers)
# Not an entity header, but should not be added to 304s by chunking
self.assertTrue("Transfer-Encoding" not in response2.headers)
@wsgi_safe
class StatusReasonTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
reason = self.request.arguments.get('reason', [])
self.set_status(int(self.get_argument('code')),
reason=reason[0] if reason else None)
def get_http_client(self):
# simple_httpclient only: curl doesn't expose the reason string
return SimpleAsyncHTTPClient(io_loop=self.io_loop)
def test_status(self):
response = self.fetch("/?code=304")
self.assertEqual(response.code, 304)
self.assertEqual(response.reason, "Not Modified")
response = self.fetch("/?code=304&reason=Foo")
self.assertEqual(response.code, 304)
self.assertEqual(response.reason, "Foo")
response = self.fetch("/?code=682&reason=Bar")
self.assertEqual(response.code, 682)
self.assertEqual(response.reason, "Bar")
with ExpectLog(app_log, 'Uncaught exception'):
response = self.fetch("/?code=682")
self.assertEqual(response.code, 500)
@wsgi_safe
class DateHeaderTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.write("hello")
def test_date_header(self):
response = self.fetch('/')
header_date = datetime.datetime(
*email.utils.parsedate(response.headers['Date'])[:6])
self.assertTrue(header_date - datetime.datetime.utcnow() <
datetime.timedelta(seconds=2))
@wsgi_safe
class RaiseWithReasonTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
raise HTTPError(682, reason="Foo")
def get_http_client(self):
# simple_httpclient only: curl doesn't expose the reason string
return SimpleAsyncHTTPClient(io_loop=self.io_loop)
def test_raise_with_reason(self):
response = self.fetch("/")
self.assertEqual(response.code, 682)
self.assertEqual(response.reason, "Foo")
self.assertIn(b'682: Foo', response.body)
def test_httperror_str(self):
self.assertEqual(str(HTTPError(682, reason="Foo")), "HTTP 682: Foo")
@wsgi_safe
class ErrorHandlerXSRFTest(WebTestCase):
def get_handlers(self):
# note that if the handlers list is empty we get the default_host
# redirect fallback instead of a 404, so test with both an
# explicitly defined error handler and an implicit 404.
return [('/error', ErrorHandler, dict(status_code=417))]
def get_app_kwargs(self):
return dict(xsrf_cookies=True)
def test_error_xsrf(self):
response = self.fetch('/error', method='POST', body='')
self.assertEqual(response.code, 417)
def test_404_xsrf(self):
response = self.fetch('/404', method='POST', body='')
self.assertEqual(response.code, 404)
class GzipTestCase(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
if self.get_argument('vary', None):
self.set_header('Vary', self.get_argument('vary'))
self.write('hello world')
def get_app_kwargs(self):
return dict(gzip=True)
def test_gzip(self):
response = self.fetch('/')
self.assertEqual(response.headers['Content-Encoding'], 'gzip')
self.assertEqual(response.headers['Vary'], 'Accept-Encoding')
def test_gzip_not_requested(self):
response = self.fetch('/', use_gzip=False)
self.assertNotIn('Content-Encoding', response.headers)
self.assertEqual(response.headers['Vary'], 'Accept-Encoding')
def test_vary_already_present(self):
response = self.fetch('/?vary=Accept-Language')
self.assertEqual(response.headers['Vary'],
'Accept-Language, Accept-Encoding')
@wsgi_safe
class PathArgsInPrepareTest(WebTestCase):
class Handler(RequestHandler):
def prepare(self):
self.write(dict(args=self.path_args, kwargs=self.path_kwargs))
def get(self, path):
assert path == 'foo'
self.finish()
def get_handlers(self):
return [('/pos/(.*)', self.Handler),
('/kw/(?P<path>.*)', self.Handler)]
def test_pos(self):
response = self.fetch('/pos/foo')
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {'args': ['foo'], 'kwargs': {}})
def test_kw(self):
response = self.fetch('/kw/foo')
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {'args': [], 'kwargs': {'path': 'foo'}})
@wsgi_safe
class ClearAllCookiesTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.clear_all_cookies()
self.write('ok')
def test_clear_all_cookies(self):
response = self.fetch('/', headers={'Cookie': 'foo=bar; baz=xyzzy'})
set_cookies = sorted(response.headers.get_list('Set-Cookie'))
self.assertTrue(set_cookies[0].startswith('baz=;'))
self.assertTrue(set_cookies[1].startswith('foo=;'))
class PermissionError(Exception):
pass
@wsgi_safe
class ExceptionHandlerTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
exc = self.get_argument('exc')
if exc == 'http':
raise HTTPError(410, "no longer here")
elif exc == 'zero':
1 / 0
elif exc == 'permission':
raise PermissionError('not allowed')
def write_error(self, status_code, **kwargs):
if 'exc_info' in kwargs:
typ, value, tb = kwargs['exc_info']
if isinstance(value, PermissionError):
self.set_status(403)
self.write('PermissionError')
return
RequestHandler.write_error(self, status_code, **kwargs)
def log_exception(self, typ, value, tb):
if isinstance(value, PermissionError):
app_log.warning('custom logging for PermissionError: %s',
value.args[0])
else:
RequestHandler.log_exception(self, typ, value, tb)
def test_http_error(self):
# HTTPErrors are logged as warnings with no stack trace.
# TODO: extend ExpectLog to test this more precisely
with ExpectLog(gen_log, '.*no longer here'):
response = self.fetch('/?exc=http')
self.assertEqual(response.code, 410)
def test_unknown_error(self):
# Unknown errors are logged as errors with a stack trace.
with ExpectLog(app_log, 'Uncaught exception'):
response = self.fetch('/?exc=zero')
self.assertEqual(response.code, 500)
def test_known_error(self):
# log_exception can override logging behavior, and write_error
# can override the response.
with ExpectLog(app_log,
'custom logging for PermissionError: not allowed'):
response = self.fetch('/?exc=permission')
self.assertEqual(response.code, 403)
@wsgi_safe
class UIMethodUIModuleTest(SimpleHandlerTestCase):
"""Test that UI methods and modules are created correctly and
associated with the handler.
"""
class Handler(RequestHandler):
def get(self):
self.render('foo.html')
def value(self):
return self.get_argument("value")
def get_app_kwargs(self):
def my_ui_method(handler, x):
return "In my_ui_method(%s) with handler value %s." % (
x, handler.value())
class MyModule(UIModule):
def render(self, x):
return "In MyModule(%s) with handler value %s." % (
x, self.handler.value())
loader = DictLoader({
'foo.html': '{{ my_ui_method(42) }} {% module MyModule(123) %}',
})
return dict(template_loader=loader,
ui_methods={'my_ui_method': my_ui_method},
ui_modules={'MyModule': MyModule})
def tearDown(self):
super(UIMethodUIModuleTest, self).tearDown()
# TODO: fix template loader caching so this isn't necessary.
RequestHandler._template_loaders.clear()
def test_ui_method(self):
response = self.fetch('/?value=asdf')
self.assertEqual(response.body,
b'In my_ui_method(42) with handler value asdf. '
b'In MyModule(123) with handler value asdf.')
@wsgi_safe
class GetArgumentErrorTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
try:
self.get_argument('foo')
self.write({})
except MissingArgumentError as e:
self.write({'arg_name': e.arg_name,
'log_message': e.log_message})
def test_catch_error(self):
response = self.fetch('/')
self.assertEqual(json_decode(response.body),
{'arg_name': 'foo',
'log_message': 'Missing argument foo'})
class MultipleExceptionTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
exc_count = 0
@asynchronous
def get(self):
from tornado.ioloop import IOLoop
IOLoop.current().add_callback(lambda: 1 / 0)
IOLoop.current().add_callback(lambda: 1 / 0)
def log_exception(self, typ, value, tb):
MultipleExceptionTest.Handler.exc_count += 1
def test_multi_exception(self):
# This test verifies that multiple exceptions raised into the same
# ExceptionStackContext do not generate extraneous log entries
# due to "Cannot send error response after headers written".
# log_exception is called, but it does not proceed to send_error.
response = self.fetch('/')
self.assertEqual(response.code, 500)
response = self.fetch('/')
self.assertEqual(response.code, 500)
# Each of our two requests generated two exceptions, we should have
# seen at least three of them by now (the fourth may still be
# in the queue).
self.assertGreater(MultipleExceptionTest.Handler.exc_count, 2)
@wsgi_safe
class SetCurrentUserTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def prepare(self):
self.current_user = 'Ben'
def get(self):
self.write('Hello %s' % self.current_user)
def test_set_current_user(self):
# Ensure that current_user can be assigned to normally for apps
# that want to forgo the lazy get_current_user property
response = self.fetch('/')
self.assertEqual(response.body, b'Hello Ben')
@wsgi_safe
class GetCurrentUserTest(WebTestCase):
def get_app_kwargs(self):
class WithoutUserModule(UIModule):
def render(self):
return ''
class WithUserModule(UIModule):
def render(self):
return str(self.current_user)
loader = DictLoader({
'without_user.html': '',
'with_user.html': '{{ current_user }}',
'without_user_module.html': '{% module WithoutUserModule() %}',
'with_user_module.html': '{% module WithUserModule() %}',
})
return dict(template_loader=loader,
ui_modules={'WithUserModule': WithUserModule,
'WithoutUserModule': WithoutUserModule})
def tearDown(self):
super(GetCurrentUserTest, self).tearDown()
RequestHandler._template_loaders.clear()
def get_handlers(self):
class CurrentUserHandler(RequestHandler):
def prepare(self):
self.has_loaded_current_user = False
def get_current_user(self):
self.has_loaded_current_user = True
return ''
class WithoutUserHandler(CurrentUserHandler):
def get(self):
self.render_string('without_user.html')
self.finish(str(self.has_loaded_current_user))
class WithUserHandler(CurrentUserHandler):
def get(self):
self.render_string('with_user.html')
self.finish(str(self.has_loaded_current_user))
class CurrentUserModuleHandler(CurrentUserHandler):
def get_template_namespace(self):
# If RequestHandler.get_template_namespace is called, then
# get_current_user is evaluated. Until #820 is fixed, this
# is a small hack to circumvent the issue.
return self.ui
class WithoutUserModuleHandler(CurrentUserModuleHandler):
def get(self):
self.render_string('without_user_module.html')
self.finish(str(self.has_loaded_current_user))
class WithUserModuleHandler(CurrentUserModuleHandler):
def get(self):
self.render_string('with_user_module.html')
self.finish(str(self.has_loaded_current_user))
return [('/without_user', WithoutUserHandler),
('/with_user', WithUserHandler),
('/without_user_module', WithoutUserModuleHandler),
('/with_user_module', WithUserModuleHandler)]
@unittest.skip('needs fix')
def test_get_current_user_is_lazy(self):
# TODO: Make this test pass. See #820.
response = self.fetch('/without_user')
self.assertEqual(response.body, b'False')
def test_get_current_user_works(self):
response = self.fetch('/with_user')
self.assertEqual(response.body, b'True')
def test_get_current_user_from_ui_module_is_lazy(self):
response = self.fetch('/without_user_module')
self.assertEqual(response.body, b'False')
def test_get_current_user_from_ui_module_works(self):
response = self.fetch('/with_user_module')
self.assertEqual(response.body, b'True')
@wsgi_safe
class UnimplementedHTTPMethodsTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
pass
def test_unimplemented_standard_methods(self):
for method in ['HEAD', 'GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/', method=method)
self.assertEqual(response.code, 405)
for method in ['POST', 'PUT']:
response = self.fetch('/', method=method, body=b'')
self.assertEqual(response.code, 405)
class UnimplementedNonStandardMethodsTest(SimpleHandlerTestCase):
# wsgiref.validate complains about unknown methods in a way that makes
# this test not wsgi_safe.
class Handler(RequestHandler):
def other(self):
# Even though this method exists, it won't get called automatically
# because it is not in SUPPORTED_METHODS.
self.write('other')
def test_unimplemented_patch(self):
# PATCH is recently standardized; Tornado supports it by default
# but wsgiref.validate doesn't like it.
response = self.fetch('/', method='PATCH', body=b'')
self.assertEqual(response.code, 405)
def test_unimplemented_other(self):
response = self.fetch('/', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.code, 405)
@wsgi_safe
class AllHTTPMethodsTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def method(self):
self.write(self.request.method)
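        # Alias the main verbs to the same implementation; each response echoes the method name.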
get = delete = options = post = put = method
def test_standard_methods(self):
response = self.fetch('/', method='HEAD')
self.assertEqual(response.body, b'')
for method in ['GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/', method=method)
self.assertEqual(response.body, utf8(method))
for method in ['POST', 'PUT']:
response = self.fetch('/', method=method, body=b'')
self.assertEqual(response.body, utf8(method))
class PatchMethodTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
def patch(self):
self.write('patch')
def other(self):
self.write('other')
def test_patch(self):
response = self.fetch('/', method='PATCH', body=b'')
self.assertEqual(response.body, b'patch')
def test_other(self):
response = self.fetch('/', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.body, b'other')
@wsgi_safe
class FinishInPrepareTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def prepare(self):
self.finish('done')
def get(self):
# It's difficult to assert for certain that a method did not
# or will not be called in an asynchronous context, but this
# will be logged noisily if it is reached.
raise Exception('should not reach this method')
def test_finish_in_prepare(self):
response = self.fetch('/')
self.assertEqual(response.body, b'done')
@wsgi_safe
class Default404Test(WebTestCase):
def get_handlers(self):
# If there are no handlers at all a default redirect handler gets added.
return [('/foo', RequestHandler)]
def test_404(self):
response = self.fetch('/')
self.assertEqual(response.code, 404)
self.assertEqual(response.body,
b'<html><title>404: Not Found</title>'
b'<body>404: Not Found</body></html>')
@wsgi_safe
class Custom404Test(WebTestCase):
def get_handlers(self):
return [('/foo', RequestHandler)]
def get_app_kwargs(self):
class Custom404Handler(RequestHandler):
def get(self):
self.set_status(404)
self.write('custom 404 response')
return dict(default_handler_class=Custom404Handler)
def test_404(self):
response = self.fetch('/')
self.assertEqual(response.code, 404)
self.assertEqual(response.body, b'custom 404 response')
@wsgi_safe
class DefaultHandlerArgumentsTest(WebTestCase):
def get_handlers(self):
return [('/foo', RequestHandler)]
def get_app_kwargs(self):
return dict(default_handler_class=ErrorHandler,
default_handler_args=dict(status_code=403))
def test_403(self):
response = self.fetch('/')
self.assertEqual(response.code, 403)
@wsgi_safe
class HandlerByNameTest(WebTestCase):
def get_handlers(self):
# All three are equivalent.
return [('/hello1', HelloHandler),
('/hello2', 'tornado.test.web_test.HelloHandler'),
url('/hello3', 'tornado.test.web_test.HelloHandler'),
]
def test_handler_by_name(self):
resp = self.fetch('/hello1')
self.assertEqual(resp.body, b'hello')
resp = self.fetch('/hello2')
self.assertEqual(resp.body, b'hello')
resp = self.fetch('/hello3')
self.assertEqual(resp.body, b'hello')
| apache-2.0 | 4,746,392,921,405,410,000 | 37.583754 | 231 | 0.589877 | false |
jgraham/servo | tests/wpt/harness/wptrunner/update/base.py | 196 | 2148 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
exit_unclean = object()
exit_clean = object()
class Step(object):
provides = []
def __init__(self, logger):
self.logger = logger
def run(self, step_index, state):
"""Base class for state-creating steps.
When a Step is run() the current state is checked to see
if the state from this step has already been created. If it
has the restore() method is invoked. Otherwise the create()
method is invoked with the state object. This is expected to
add items with all the keys in __class__.provides to the state
object.
"""
name = self.__class__.__name__
try:
stored_step = state.steps[step_index]
except IndexError:
stored_step = None
if stored_step == name:
self.restore(state)
elif stored_step is None:
self.create(state)
assert set(self.provides).issubset(set(state.keys()))
state.steps = state.steps + [name]
else:
raise ValueError("Expected a %s step, got a %s step" % (name, stored_step))
def create(self, data):
raise NotImplementedError
def restore(self, state):
self.logger.debug("Step %s using stored state" % (self.__class__.__name__,))
for key in self.provides:
assert key in state
class StepRunner(object):
steps = []
def __init__(self, logger, state):
"""Class that runs a specified series of Steps with a common State"""
self.state = state
self.logger = logger
if "steps" not in state:
state.steps = []
def run(self):
rv = None
for step_index, step in enumerate(self.steps):
self.logger.debug("Starting step %s" % step.__name__)
rv = step(self.logger).run(step_index, self.state)
if rv in (exit_clean, exit_unclean):
break
return rv
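# A minimal usage sketch (hypothetical step and helper names, not part of this module):
#
#   class FetchLogs(Step):
#       provides = ["logs"]
#       def create(self, state):
#           state.logs = load_logs()  # load_logs() is an assumed helper
#
#   class MyRunner(StepRunner):
#       steps = [FetchLogs]
#
#   exit_status = MyRunner(logger, state).run()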
| mpl-2.0 | -216,479,394,361,351,000 | 30.130435 | 87 | 0.585661 | false |
Pal3love/otRebuilder | Package/otRebuilder/Dep/fontTools/misc/xmlReader.py | 1 | 4302 | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools import ttLib
from fontTools.misc.textTools import safeEval
from fontTools.ttLib.tables.DefaultTable import DefaultTable
import sys
import os
import logging
log = logging.getLogger(__name__)
class TTXParseError(Exception): pass
BUFSIZE = 0x4000
class XMLReader(object):
def __init__(self, fileOrPath, ttFont, progress=None, quiet=None):
if fileOrPath == '-':
fileOrPath = sys.stdin
if not hasattr(fileOrPath, "read"):
self.file = open(fileOrPath, "rb")
self._closeStream = True
else:
# assume readable file object
self.file = fileOrPath
self._closeStream = False
self.ttFont = ttFont
self.progress = progress
if quiet is not None:
from fontTools.misc.loggingTools import deprecateArgument
deprecateArgument("quiet", "configure logging instead")
self.quiet = quiet
self.root = None
self.contentStack = []
self.stackSize = 0
def read(self, rootless=False):
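		# rootless=True parses a document fragment without requiring the enclosing <ttFont> root element.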
if rootless:
self.stackSize += 1
if self.progress:
self.file.seek(0, 2)
fileSize = self.file.tell()
self.progress.set(0, fileSize // 100 or 1)
self.file.seek(0)
self._parseFile(self.file)
if self._closeStream:
self.close()
if rootless:
self.stackSize -= 1
def close(self):
self.file.close()
def _parseFile(self, file):
from xml.parsers.expat import ParserCreate
parser = ParserCreate()
parser.StartElementHandler = self._startElementHandler
parser.EndElementHandler = self._endElementHandler
parser.CharacterDataHandler = self._characterDataHandler
pos = 0
while True:
chunk = file.read(BUFSIZE)
if not chunk:
parser.Parse(chunk, 1)
break
pos = pos + len(chunk)
if self.progress:
self.progress.set(pos // 100)
parser.Parse(chunk, 0)
def _startElementHandler(self, name, attrs):
stackSize = self.stackSize
self.stackSize = stackSize + 1
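		# stackSize 0: expect the <ttFont> root; 1: a table element (or an external src include);
		# 2: the table's top-level content element; deeper levels are nested content.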
if not stackSize:
if name != "ttFont":
raise TTXParseError("illegal root tag: %s" % name)
sfntVersion = attrs.get("sfntVersion")
if sfntVersion is not None:
if len(sfntVersion) != 4:
sfntVersion = safeEval('"' + sfntVersion + '"')
self.ttFont.sfntVersion = sfntVersion
self.contentStack.append([])
elif stackSize == 1:
subFile = attrs.get("src")
if subFile is not None:
if hasattr(self.file, 'name'):
# if file has a name, get its parent directory
dirname = os.path.dirname(self.file.name)
else:
# else fall back to using the current working directory
dirname = os.getcwd()
subFile = os.path.join(dirname, subFile)
subReader = XMLReader(subFile, self.ttFont, self.progress)
subReader.read()
self.contentStack.append([])
return
tag = ttLib.xmlToTag(name)
msg = "Parsing '%s' table..." % tag
if self.progress:
self.progress.setLabel(msg)
log.info(msg)
if tag == "GlyphOrder":
tableClass = ttLib.GlyphOrder
elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):
tableClass = DefaultTable
else:
tableClass = ttLib.getTableClass(tag)
if tableClass is None:
tableClass = DefaultTable
if tag == 'loca' and tag in self.ttFont:
# Special-case the 'loca' table as we need the
# original if the 'glyf' table isn't recompiled.
self.currentTable = self.ttFont[tag]
else:
self.currentTable = tableClass(tag)
self.ttFont[tag] = self.currentTable
self.contentStack.append([])
elif stackSize == 2:
self.contentStack.append([])
self.root = (name, attrs, self.contentStack[-1])
else:
l = []
self.contentStack[-1].append((name, attrs, l))
self.contentStack.append(l)
def _characterDataHandler(self, data):
if self.stackSize > 1:
self.contentStack[-1].append(data)
def _endElementHandler(self, name):
self.stackSize = self.stackSize - 1
del self.contentStack[-1]
if self.stackSize == 1:
self.root = None
elif self.stackSize == 2:
name, attrs, content = self.root
self.currentTable.fromXML(name, attrs, content, self.ttFont)
self.root = None
class ProgressPrinter(object):
def __init__(self, title, maxval=100):
print(title)
def set(self, val, maxval=None):
pass
def increment(self, val=1):
pass
def setLabel(self, text):
print(text)
| mit | 5,323,565,127,717,191,000 | 26.401274 | 72 | 0.688517 | false |
ganeshrn/ansible | test/units/galaxy/test_collection_install.py | 15 | 43234 | # -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import json
import os
import pytest
import re
import shutil
import stat
import tarfile
import yaml
from io import BytesIO, StringIO
from units.compat.mock import MagicMock
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible import context
from ansible.cli.galaxy import GalaxyCLI
from ansible.errors import AnsibleError
from ansible.galaxy import collection, api, dependency_resolution
from ansible.galaxy.dependency_resolution.dataclasses import Candidate, Requirement
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils import context_objects as co
from ansible.utils.display import Display
class RequirementCandidates():
def __init__(self):
self.candidates = []
def func_wrapper(self, func):
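        # Wrap a resolver function so the candidate list it returns is also stored
        # on this object for later assertions in tests.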
def run(*args, **kwargs):
self.candidates = func(*args, **kwargs)
return self.candidates
return run
def call_galaxy_cli(args):
orig = co.GlobalCLIArgs._Singleton__instance
co.GlobalCLIArgs._Singleton__instance = None
try:
GalaxyCLI(args=['ansible-galaxy', 'collection'] + args).run()
finally:
co.GlobalCLIArgs._Singleton__instance = orig
def artifact_json(namespace, name, version, dependencies, server):
json_str = json.dumps({
'artifact': {
'filename': '%s-%s-%s.tar.gz' % (namespace, name, version),
'sha256': '2d76f3b8c4bab1072848107fb3914c345f71a12a1722f25c08f5d3f51f4ab5fd',
'size': 1234,
},
'download_url': '%s/download/%s-%s-%s.tar.gz' % (server, namespace, name, version),
'metadata': {
'namespace': namespace,
'name': name,
'dependencies': dependencies,
},
'version': version
})
return to_text(json_str)
def artifact_versions_json(namespace, name, versions, galaxy_api, available_api_versions=None):
results = []
available_api_versions = available_api_versions or {}
api_version = 'v2'
if 'v3' in available_api_versions:
api_version = 'v3'
for version in versions:
results.append({
'href': '%s/api/%s/%s/%s/versions/%s/' % (galaxy_api.api_server, api_version, namespace, name, version),
'version': version,
})
if api_version == 'v2':
json_str = json.dumps({
'count': len(versions),
'next': None,
'previous': None,
'results': results
})
if api_version == 'v3':
response = {'meta': {'count': len(versions)},
'data': results,
'links': {'first': None,
'last': None,
'next': None,
'previous': None},
}
json_str = json.dumps(response)
return to_text(json_str)
def error_json(galaxy_api, errors_to_return=None, available_api_versions=None):
errors_to_return = errors_to_return or []
available_api_versions = available_api_versions or {}
response = {}
api_version = 'v2'
if 'v3' in available_api_versions:
api_version = 'v3'
if api_version == 'v2':
assert len(errors_to_return) <= 1
if errors_to_return:
response = errors_to_return[0]
if api_version == 'v3':
response['errors'] = errors_to_return
json_str = json.dumps(response)
return to_text(json_str)
@pytest.fixture(autouse='function')
def reset_cli_args():
co.GlobalCLIArgs._Singleton__instance = None
yield
co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture()
def collection_artifact(request, tmp_path_factory):
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
namespace = 'ansible_namespace'
collection = 'collection'
skeleton_path = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton')
collection_path = os.path.join(test_dir, namespace, collection)
call_galaxy_cli(['init', '%s.%s' % (namespace, collection), '-c', '--init-path', test_dir,
'--collection-skeleton', skeleton_path])
dependencies = getattr(request, 'param', None)
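    # Parametrized tests can supply a dependencies dict via indirect parametrization
    # (request.param); when present it is written into the collection's galaxy.yml below.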
if dependencies:
galaxy_yml = os.path.join(collection_path, 'galaxy.yml')
with open(galaxy_yml, 'rb+') as galaxy_obj:
existing_yaml = yaml.safe_load(galaxy_obj)
existing_yaml['dependencies'] = dependencies
galaxy_obj.seek(0)
galaxy_obj.write(to_bytes(yaml.safe_dump(existing_yaml)))
galaxy_obj.truncate()
# Create a file with +x in the collection so we can test the permissions
execute_path = os.path.join(collection_path, 'runme.sh')
with open(execute_path, mode='wb') as fd:
fd.write(b"echo hi")
os.chmod(execute_path, os.stat(execute_path).st_mode | stat.S_IEXEC)
call_galaxy_cli(['build', collection_path, '--output-path', test_dir])
collection_tar = os.path.join(test_dir, '%s-%s-0.1.0.tar.gz' % (namespace, collection))
return to_bytes(collection_path), to_bytes(collection_tar)
@pytest.fixture()
def galaxy_server():
context.CLIARGS._store = {'ignore_certs': False}
galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com')
return galaxy_api
def test_build_requirement_from_path(collection_artifact):
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
assert actual.namespace == u'ansible_namespace'
assert actual.name == u'collection'
assert actual.src == collection_artifact[0]
assert actual.ver == u'0.1.0'
@pytest.mark.parametrize('version', ['1.1.1', '1.1.0', '1.0.0'])
def test_build_requirement_from_path_with_manifest(version, collection_artifact):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
manifest_value = json.dumps({
'collection_info': {
'namespace': 'namespace',
'name': 'name',
'version': version,
'dependencies': {
'ansible_namespace.collection': '*'
}
}
})
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(manifest_value))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
# While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
assert actual.namespace == u'namespace'
assert actual.name == u'name'
assert actual.src == collection_artifact[0]
assert actual.ver == to_text(version)
def test_build_requirement_from_path_invalid_manifest(collection_artifact):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(b"not json")
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
with pytest.raises(AnsibleError, match=expected):
Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
def test_build_artifact_from_path_no_version(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
# a collection artifact should always contain a valid version
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
manifest_value = json.dumps({
'collection_info': {
'namespace': 'namespace',
'name': 'name',
'version': '',
'dependencies': {}
}
})
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(manifest_value))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
expected = (
'^Collection metadata file `.*` at `.*` is expected to have a valid SemVer '
'version value but got {empty_unicode_string!r}$'.
format(empty_unicode_string=u'')
)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
def test_build_requirement_from_path_no_version(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
# version may be falsey/arbitrary strings for collections in development
manifest_path = os.path.join(collection_artifact[0], b'galaxy.yml')
metadata = {
'authors': ['Ansible'],
'readme': 'README.md',
'namespace': 'namespace',
'name': 'name',
'version': '',
'dependencies': {},
}
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(yaml.safe_dump(metadata)))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
    # While the folder name suggests a different collection, we treat galaxy.yml as the source of truth here.
assert actual.namespace == u'namespace'
assert actual.name == u'name'
assert actual.src == collection_artifact[0]
assert actual.ver == u'*'
def test_build_requirement_from_tar(collection_artifact):
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_requirement_dict({'name': to_text(collection_artifact[1])}, concrete_artifact_cm)
assert actual.namespace == u'ansible_namespace'
assert actual.name == u'collection'
assert actual.src == to_text(collection_artifact[1])
assert actual.ver == u'0.1.0'
def test_build_requirement_from_tar_fail_not_tar(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
test_file = os.path.join(test_dir, b'fake.tar.gz')
with open(test_file, 'wb') as test_obj:
test_obj.write(b"\x00\x01\x02\x03")
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection artifact at '%s' is not a valid tar file." % to_native(test_file)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(test_file)}, concrete_artifact_cm)
def test_build_requirement_from_tar_no_manifest(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = to_bytes(json.dumps(
{
'files': [],
'format': 1,
}
))
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('FILES.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection at '%s' does not contain the required file MANIFEST.json." % to_native(tar_path)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_tar_no_files(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = to_bytes(json.dumps(
{
'collection_info': {},
}
))
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
with pytest.raises(KeyError, match='namespace'):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_tar_invalid_manifest(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = b"not a json"
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_name(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.1.9', '2.1.10']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_version_metadata = MagicMock(
namespace='namespace', name='collection',
version='2.1.10', artifact_sha256='', dependencies={}
)
monkeypatch.setattr(api.GalaxyAPI, 'get_collection_version_metadata', mock_version_metadata)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
collections = ['namespace.collection']
requirements_file = None
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', collections[0]])
requirements = cli._require_one_of_collections_requirements(
collections, requirements_file, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.ver == u'2.1.10'
assert actual.src == galaxy_server
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1'
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease_explicit(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1-beta.1', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:2.0.1-beta.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:2.0.1-beta.1'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1-beta.1'
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1-beta.1')
def test_build_requirement_from_name_second_server(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '1.0.3', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
broken_server = copy.copy(galaxy_server)
broken_server.api_server = 'https://broken.com/'
mock_version_list = MagicMock()
mock_version_list.return_value = []
monkeypatch.setattr(broken_server, 'get_collection_versions', mock_version_list)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:>1.0.1'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(
requirements, [broken_server, galaxy_server], concrete_artifact_cm, None, True, False, False
)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'1.0.3'
assert mock_version_list.call_count == 1
assert mock_version_list.mock_calls[0][1] == ('namespace', 'collection')
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_missing(galaxy_server, monkeypatch, tmp_path_factory):
mock_open = MagicMock()
mock_open.return_value = []
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n* namespace.collection:* (direct request)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server, galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_build_requirement_from_name_401_unauthorized(galaxy_server, monkeypatch, tmp_path_factory):
mock_open = MagicMock()
mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 401, 'msg', {},
StringIO()), "error")
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "error (HTTP Code: 401, Message: msg)"
with pytest.raises(api.GalaxyError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server, galaxy_server], concrete_artifact_cm, None, False, False, False)
def test_build_requirement_from_name_single_version(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.0']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.0', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:==2.0.0'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:==2.0.0'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.0'
assert [c.ver for c in matches.candidates] == [u'2.0.0']
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.0')
def test_build_requirement_from_name_multiple_versions_one_match(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>=2.0.1,<2.0.2'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:>=2.0.1,<2.0.2'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1'
assert [c.ver for c in matches.candidates] == [u'2.0.1']
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1')
def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2', '2.0.3', '2.0.4', '2.0.5']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.2'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:!=2.0.2'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.5'
# should be ordered latest to earliest
assert [c.ver for c in matches.candidates] == [u'2.0.5', u'2.0.4', u'2.0.3', u'2.0.1', u'2.0.0']
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_candidate_with_conflict(monkeypatch, tmp_path_factory, galaxy_server):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.5']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.5'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:!=2.0.5'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n"
expected += "* namespace.collection:!=2.0.5 (direct request)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_dep_candidate_with_conflict(monkeypatch, tmp_path_factory, galaxy_server):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
mock_get_info_return = [
api.CollectionVersionMetadata('parent', 'collection', '2.0.5', None, None, {'namespace.collection': '!=1.0.0'}),
api.CollectionVersionMetadata('namespace', 'collection', '1.0.0', None, None, {}),
]
mock_get_info = MagicMock(side_effect=mock_get_info_return)
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock(side_effect=[['2.0.5'], ['1.0.0']])
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'parent.collection:2.0.5'])
requirements = cli._require_one_of_collections_requirements(
['parent.collection:2.0.5'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n"
expected += "* namespace.collection:!=1.0.0 (dependency of parent.collection:2.0.5)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_install_installed_collection(monkeypatch, tmp_path_factory, galaxy_server):
mock_installed_collections = MagicMock(return_value=[Candidate('namespace.collection', '1.2.3', None, 'dir')])
monkeypatch.setattr(collection, 'find_existing_collections', mock_installed_collections)
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '1.2.3', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock(return_value=['1.2.3', '1.3.0'])
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection'])
cli.run()
expected = "Nothing to do. All requested collections are already installed. If you want to reinstall them, consider using `--force`."
assert mock_display.mock_calls[1][1][0] == expected
def test_install_collection(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
collection_tar = collection_artifact[1]
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
output_path = os.path.join(os.path.split(collection_tar)[0])
collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
os.makedirs(os.path.join(collection_path, b'delete_me')) # Create a folder to verify the install cleans out the dir
candidate = Candidate('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')
collection.install(candidate, to_text(output_path), concrete_artifact_cm)
# Ensure the temp directory is empty, nothing is left behind
assert os.listdir(temp_path) == []
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'plugins')).st_mode) == 0o0755
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'README.md')).st_mode) == 0o0644
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'runme.sh')).st_mode) == 0o0755
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection:0.1.0 was installed successfully"
def test_install_collection_with_download(galaxy_server, collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
shutil.rmtree(collection_path)
collections_dir = ('%s' % os.path.sep).join(to_text(collection_path).split('%s' % os.path.sep)[:-2])
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
mock_download = MagicMock()
mock_download.return_value = collection_tar
monkeypatch.setattr(concrete_artifact_cm, 'get_galaxy_artifact_path', mock_download)
req = Requirement('ansible_namespace.collection', '0.1.0', 'https://downloadme.com', 'galaxy')
collection.install(req, to_text(collections_dir), concrete_artifact_cm)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection:0.1.0 was installed successfully"
assert mock_download.call_count == 1
assert mock_download.mock_calls[0][1][0].src == 'https://downloadme.com'
assert mock_download.mock_calls[0][1][0].type == 'galaxy'
def test_install_collections_from_tar(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 4
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
def test_install_collections_existing_without_force(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
assert os.path.isdir(collection_path)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'README.md', b'docs', b'galaxy.yml', b'playbooks', b'plugins', b'roles', b'runme.sh']
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 1
assert display_msgs[0] == 'Nothing to do. All requested collections are already installed. If you want to reinstall them, consider using `--force`.'
for msg in display_msgs:
assert 'WARNING' not in msg
def test_install_missing_metadata_warning(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
for file in [b'MANIFEST.json', b'galaxy.yml']:
b_path = os.path.join(collection_path, file)
if os.path.isfile(b_path):
os.unlink(b_path)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert 'WARNING' in display_msgs[0]
# Makes sure we don't get stuck in some recursive loop
@pytest.mark.parametrize('collection_artifact', [
{'ansible_namespace.collection': '>=0.0.1'},
], indirect=True)
def test_install_collection_with_circular_dependency(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 4
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
assert display_msgs[3] == "ansible_namespace.collection:0.1.0 was installed successfully"
| gpl-3.0 | -5,942,629,006,617,492,000 | 45.86413 | 155 | 0.692914 | false |
hnakamur/django | django/contrib/gis/gdal/srs.py | 366 | 12043 | """
The Spatial Reference class, represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
def __init__(self, srs_input='', srs_type='user'):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
        The input may be a string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
if srs_type == 'wkt':
self.ptr = capi.new_srs(c_char_p(b''))
self.import_wkt(srs_input)
return
elif isinstance(srs_input, six.string_types):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, six.text_type):
srs_input = srs_input.encode('ascii')
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, six.integer_types):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
if self._ptr and capi:
capi.release_srs(self._ptr)
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
>>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
        >>> print(srs['UNIT|AUTHORITY']) # For the units authority, you have to use the pipe symbol.
EPSG
>>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
# #### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, six.string_types) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Returns the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
# #### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected:
return self.attr_value('PROJCS')
elif self.geographic:
return self.attr_value('GEOGCS')
elif self.local:
return self.attr_value('LOCAL_CS')
else:
return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
# #### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
        Returns a 2-tuple of the units value and the units name,
        automatically determining whether to return the linear or
        angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
name = force_text(name)
return (units, name)
# #### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
# #### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
# #### Import Routines #####
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Imports the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, force_bytes(user_input))
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
# #### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Returns the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Returns the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
if self._ptr and capi:
capi.destroy_ct(self._ptr)
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
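# A minimal usage sketch for CoordTransform (illustrative only; assumes the
# OGRGeometry class from this same GDAL wrapper package is available):
#
#   >>> from django.contrib.gis.gdal import CoordTransform, OGRGeometry, SpatialReference
#   >>> ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(32140))
#   >>> pnt = OGRGeometry('POINT(-95.36 29.76)', srs='WGS84')
#   >>> pnt.transform(ct)  # reprojects the point in place to EPSG:32140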
| bsd-3-clause | 3,933,069,457,942,029,300 | 33.606322 | 97 | 0.603836 | false |
pmghalvorsen/gramps_branch | gramps/plugins/gramplet/topsurnamesgramplet.py | 2 | 3999 | # Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2009 Douglas S. Blank <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from collections import defaultdict
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.plug import Gramplet
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.config import config
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
_YIELD_INTERVAL = 350
#------------------------------------------------------------------------
#
# TopSurnamesGramplet class
#
#------------------------------------------------------------------------
class TopSurnamesGramplet(Gramplet):
def init(self):
self.set_tooltip(_("Double-click surname for details"))
self.top_size = 10 # will be overwritten in load
self.set_text(_("No Family Tree loaded."))
def db_changed(self):
self.dbstate.db.connect('person-add', self.update)
self.dbstate.db.connect('person-delete', self.update)
self.dbstate.db.connect('person-update', self.update)
self.dbstate.db.connect('person-rebuild', self.update)
self.dbstate.db.connect('family-rebuild', self.update)
def on_load(self):
if len(self.gui.data) > 0:
self.top_size = int(self.gui.data[0])
def on_save(self):
self.gui.data = [self.top_size]
def main(self):
self.set_text(_("Processing...") + "\n")
surnames = defaultdict(int)
representative_handle = {}
cnt = 0
for person in self.dbstate.db.iter_people():
allnames = [person.get_primary_name()] + person.get_alternate_names()
allnames = set([name.get_group_name().strip() for name in allnames])
for surname in allnames:
surnames[surname] += 1
representative_handle[surname] = person.handle
cnt += 1
if not cnt % _YIELD_INTERVAL:
yield True
total_people = cnt
surname_sort = []
total = 0
cnt = 0
for surname in surnames:
surname_sort.append( (surnames[surname], surname) )
total += surnames[surname]
cnt += 1
if not cnt % _YIELD_INTERVAL:
yield True
total_surnames = cnt
surname_sort.sort(reverse=True)
line = 0
### All done!
self.set_text("")
nosurname = config.get('preferences.no-surname-text')
for (count, surname) in surname_sort:
text = "%s, " % (surname if surname else nosurname)
text += "%d%% (%d)\n" % (int((float(count)/total) * 100), count)
self.append_text(" %d. " % (line + 1))
self.link(text, 'Surname', representative_handle[surname])
line += 1
if line >= self.top_size:
break
self.append_text(("\n" + _("Total unique surnames") + ": %d\n") %
total_surnames)
self.append_text((_("Total people") + ": %d") % total_people, "begin")
| gpl-2.0 | -2,794,664,892,853,630,000 | 36.027778 | 81 | 0.544136 | false |
daoluan/decode-Django | Django-1.5.1/tests/regressiontests/fixtures_regress/models.py | 60 | 5752 | from __future__ import absolute_import, unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Animal(models.Model):
name = models.CharField(max_length=150)
latin_name = models.CharField(max_length=150)
count = models.IntegerField()
weight = models.FloatField()
# use a non-default name for the default manager
specimens = models.Manager()
def __str__(self):
return self.name
class Plant(models.Model):
name = models.CharField(max_length=150)
class Meta:
        # For testing when there is an upper case letter in the app name; regression for #4057
db_table = "Fixtures_regress_plant"
@python_2_unicode_compatible
class Stuff(models.Model):
name = models.CharField(max_length=20, null=True)
owner = models.ForeignKey(User, null=True)
def __str__(self):
return six.text_type(self.name) + ' is owned by ' + six.text_type(self.owner)
class Absolute(models.Model):
name = models.CharField(max_length=40)
load_count = 0
def __init__(self, *args, **kwargs):
super(Absolute, self).__init__(*args, **kwargs)
Absolute.load_count += 1
class Parent(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ('id',)
class Child(Parent):
data = models.CharField(max_length=10)
# Models to regression test #7572
class Channel(models.Model):
name = models.CharField(max_length=255)
class Article(models.Model):
title = models.CharField(max_length=255)
channels = models.ManyToManyField(Channel)
class Meta:
ordering = ('id',)
# Models to regression test #11428
@python_2_unicode_compatible
class Widget(models.Model):
name = models.CharField(max_length=255)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class WidgetProxy(Widget):
class Meta:
proxy = True
# Check for forward references in FKs and M2Ms with natural keys
class TestManager(models.Manager):
def get_by_natural_key(self, key):
return self.get(name=key)
@python_2_unicode_compatible
class Store(models.Model):
objects = TestManager()
name = models.CharField(max_length=255)
main = models.ForeignKey('self', null=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
@python_2_unicode_compatible
class Person(models.Model):
objects = TestManager()
name = models.CharField(max_length=255)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
# Person doesn't actually have a dependency on store, but we need to define
# one to test the behavior of the dependency resolution algorithm.
def natural_key(self):
return (self.name,)
natural_key.dependencies = ['fixtures_regress.store']
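    # The dependency declared above is consumed by the serializer's dependency
    # sorting; a rough sketch of the effect (illustrative only, not used by the
    # tests themselves):
    #
    #   from django.core import serializers
    #   ordered = serializers.sort_dependencies([('fixtures_regress', [Person, Store])])
    #   # 'ordered' lists Store before Person, because Person declares a
    #   # natural_key dependency on fixtures_regress.store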
@python_2_unicode_compatible
class Book(models.Model):
name = models.CharField(max_length=255)
author = models.ForeignKey(Person)
stores = models.ManyToManyField(Store)
class Meta:
ordering = ('name',)
def __str__(self):
return '%s by %s (available at %s)' % (
self.name,
self.author.name,
', '.join(s.name for s in self.stores.all())
)
class NKManager(models.Manager):
def get_by_natural_key(self, data):
return self.get(data=data)
@python_2_unicode_compatible
class NKChild(Parent):
data = models.CharField(max_length=10, unique=True)
objects = NKManager()
def natural_key(self):
return self.data
def __str__(self):
return 'NKChild %s:%s' % (self.name, self.data)
@python_2_unicode_compatible
class RefToNKChild(models.Model):
text = models.CharField(max_length=10)
nk_fk = models.ForeignKey(NKChild, related_name='ref_fks')
nk_m2m = models.ManyToManyField(NKChild, related_name='ref_m2ms')
def __str__(self):
return '%s: Reference to %s [%s]' % (
self.text,
self.nk_fk,
', '.join(str(o) for o in self.nk_m2m.all())
)
# Some models with pathological circular dependencies
class Circle1(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle2']
class Circle2(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle1']
class Circle3(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle3']
class Circle4(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle5']
class Circle5(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle6']
class Circle6(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle4']
class ExternalDependency(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.book']
# Model for regression test of #11101
class Thingy(models.Model):
name = models.CharField(max_length=255)
| gpl-2.0 | 3,504,672,174,795,047,000 | 23.270042 | 85 | 0.660814 | false |
zanderle/django | django/contrib/gis/gdal/envelope.py | 477 | 7009 | """
The GDAL/OGR library uses an Envelope structure to hold the bounding
box information for a geometry. The envelope (bounding box) contains
two pairs of coordinates, one for the lower left coordinate and one
for the upper right coordinate:
                            +----------o Upper right; (max_x, max_y)
                            |          |
                            |          |
                            |          |
  Lower left (min_x, min_y) o----------+
"""
from ctypes import Structure, c_double
from django.contrib.gis.gdal.error import GDALException
# The OGR definition of an Envelope is a C structure containing four doubles.
# See the 'ogr_core.h' source file for more information:
# http://www.gdal.org/ogr/ogr__core_8h-source.html
class OGREnvelope(Structure):
"Represents the OGREnvelope C Structure."
_fields_ = [("MinX", c_double),
("MaxX", c_double),
("MinY", c_double),
("MaxY", c_double),
]
class Envelope(object):
"""
The Envelope object is a C structure that contains the minimum and
maximum X, Y coordinates for a rectangle bounding box. The naming
of the variables is compatible with the OGR Envelope structure.
"""
def __init__(self, *args):
"""
The initialization function may take an OGREnvelope structure, 4-element
tuple or list, or 4 individual arguments.
"""
if len(args) == 1:
if isinstance(args[0], OGREnvelope):
# OGREnvelope (a ctypes Structure) was passed in.
self._envelope = args[0]
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) != 4:
raise GDALException('Incorrect number of tuple elements (%d).' % len(args[0]))
else:
self._from_sequence(args[0])
else:
raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
elif len(args) == 4:
# Individual parameters passed in.
# Thanks to ww for the help
self._from_sequence([float(a) for a in args])
else:
raise GDALException('Incorrect number (%d) of arguments.' % len(args))
# Checking the x,y coordinates
if self.min_x > self.max_x:
raise GDALException('Envelope minimum X > maximum X.')
if self.min_y > self.max_y:
raise GDALException('Envelope minimum Y > maximum Y.')
def __eq__(self, other):
"""
Returns True if the envelopes are equivalent; can compare against
other Envelopes and 4-tuples.
"""
if isinstance(other, Envelope):
return (self.min_x == other.min_x) and (self.min_y == other.min_y) and \
(self.max_x == other.max_x) and (self.max_y == other.max_y)
elif isinstance(other, tuple) and len(other) == 4:
return (self.min_x == other[0]) and (self.min_y == other[1]) and \
(self.max_x == other[2]) and (self.max_y == other[3])
else:
raise GDALException('Equivalence testing only works with other Envelopes.')
def __str__(self):
"Returns a string representation of the tuple."
return str(self.tuple)
def _from_sequence(self, seq):
"Initializes the C OGR Envelope structure from the given sequence."
self._envelope = OGREnvelope()
self._envelope.MinX = seq[0]
self._envelope.MinY = seq[1]
self._envelope.MaxX = seq[2]
self._envelope.MaxY = seq[3]
def expand_to_include(self, *args):
"""
Modifies the envelope to expand to include the boundaries of
the passed-in 2-tuple (a point), 4-tuple (an extent) or
envelope.
"""
# We provide a number of different signatures for this method,
# and the logic here is all about converting them into a
# 4-tuple single parameter which does the actual work of
# expanding the envelope.
if len(args) == 1:
if isinstance(args[0], Envelope):
return self.expand_to_include(args[0].tuple)
elif hasattr(args[0], 'x') and hasattr(args[0], 'y'):
return self.expand_to_include(args[0].x, args[0].y, args[0].x, args[0].y)
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) == 2:
return self.expand_to_include((args[0][0], args[0][1], args[0][0], args[0][1]))
elif len(args[0]) == 4:
(minx, miny, maxx, maxy) = args[0]
if minx < self._envelope.MinX:
self._envelope.MinX = minx
if miny < self._envelope.MinY:
self._envelope.MinY = miny
if maxx > self._envelope.MaxX:
self._envelope.MaxX = maxx
if maxy > self._envelope.MaxY:
self._envelope.MaxY = maxy
else:
raise GDALException('Incorrect number of tuple elements (%d).' % len(args[0]))
else:
raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
elif len(args) == 2:
# An x and an y parameter were passed in
return self.expand_to_include((args[0], args[1], args[0], args[1]))
elif len(args) == 4:
# Individual parameters passed in.
return self.expand_to_include(args)
else:
raise GDALException('Incorrect number (%d) of arguments.' % len(args[0]))
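    # The signatures accepted above are equivalent ways of growing the envelope,
    # e.g. (illustrative only):
    #
    #   env.expand_to_include(12.0, 13.0)                        # x, y point
    #   env.expand_to_include((12.0, 13.0))                      # 2-tuple point
    #   env.expand_to_include((-1.0, -1.0, 12.0, 13.0))          # 4-tuple extent
    #   env.expand_to_include(Envelope(-1.0, -1.0, 12.0, 13.0))  # another Envelope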
@property
def min_x(self):
"Returns the value of the minimum X coordinate."
return self._envelope.MinX
@property
def min_y(self):
"Returns the value of the minimum Y coordinate."
return self._envelope.MinY
@property
def max_x(self):
"Returns the value of the maximum X coordinate."
return self._envelope.MaxX
@property
def max_y(self):
"Returns the value of the maximum Y coordinate."
return self._envelope.MaxY
@property
def ur(self):
"Returns the upper-right coordinate."
return (self.max_x, self.max_y)
@property
def ll(self):
"Returns the lower-left coordinate."
return (self.min_x, self.min_y)
@property
def tuple(self):
"Returns a tuple representing the envelope."
return (self.min_x, self.min_y, self.max_x, self.max_y)
@property
def wkt(self):
"Returns WKT representing a Polygon for this envelope."
# TODO: Fix significant figures.
return 'POLYGON((%s %s,%s %s,%s %s,%s %s,%s %s))' % \
(self.min_x, self.min_y, self.min_x, self.max_y,
self.max_x, self.max_y, self.max_x, self.min_y,
self.min_x, self.min_y)
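# A short usage sketch (added for illustration; not part of the original
# module). The coordinate values are arbitrary assumptions, but the calls
# follow the constructor forms and expand_to_include() overloads documented
# in the docstrings above:
#
#   >>> env = Envelope(0.0, 0.0, 5.0, 5.0)      # four individual arguments
#   >>> env == (0.0, 0.0, 5.0, 5.0)             # comparison against a 4-tuple
#   True
#   >>> env.expand_to_include((10.0, 10.0))     # a 2-tuple is treated as a point
#   >>> env.tuple
#   (0.0, 0.0, 10.0, 10.0)
#   >>> env.wkt
#   'POLYGON((0.0 0.0,0.0 10.0,10.0 10.0,10.0 0.0,0.0 0.0))'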
| bsd-3-clause | 5,974,041,769,423,588,000 | 38.376404 | 99 | 0.55015 | false |
Hellenn/doctors_joomla | templates/doctors/node_modules/node-gyp/gyp/buildbot/buildbot_run.py | 1467 | 4228 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import os
import shutil
import subprocess
import sys
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')
CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def CallSubProcess(*args, **kwargs):
"""Wrapper around subprocess.call which treats errors as build exceptions."""
with open(os.devnull) as devnull_fd:
retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs)
if retcode != 0:
print '@@@STEP_EXCEPTION@@@'
sys.exit(1)
def PrepareCmake():
"""Build CMake 2.8.8 since the version in Precise is 2.8.7."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber CMake checkout@@@'
shutil.rmtree(CMAKE_DIR)
# We always build CMake 2.8.8, so no need to do anything
# if the directory already exists.
if os.path.isdir(CMAKE_DIR):
return
print '@@@BUILD_STEP Initialize CMake checkout@@@'
os.mkdir(CMAKE_DIR)
print '@@@BUILD_STEP Sync CMake@@@'
CallSubProcess(
['git', 'clone',
'--depth', '1',
'--single-branch',
'--branch', 'v2.8.8',
'--',
'git://cmake.org/cmake.git',
CMAKE_DIR],
cwd=CMAKE_DIR)
print '@@@BUILD_STEP Build CMake@@@'
CallSubProcess(
['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
cwd=CMAKE_DIR)
  CallSubProcess(['make', 'cmake'], cwd=CMAKE_DIR)
def GypTestFormat(title, format=None, msvs_version=None, tests=[]):
"""Run the gyp tests for a given format, emitting annotator tags.
See annotator docs at:
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
Args:
format: gyp format to test.
Returns:
    0 for success, 1 for failure.
"""
if not format:
format = title
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
command = ' '.join(
[sys.executable, 'gyp/gyptest.py',
'--all',
'--passed',
'--format', format,
'--path', CMAKE_BIN_DIR,
'--chdir', 'gyp'] + tests)
retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
def GypBuild():
# Dump out/ directory.
print '@@@BUILD_STEP cleanup@@@'
print 'Removing %s...' % OUT_DIR
shutil.rmtree(OUT_DIR, ignore_errors=True)
print 'Done.'
retcode = 0
if sys.platform.startswith('linux'):
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('make')
PrepareCmake()
retcode += GypTestFormat('cmake')
elif sys.platform == 'darwin':
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('xcode')
retcode += GypTestFormat('make')
elif sys.platform == 'win32':
retcode += GypTestFormat('ninja')
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja',
msvs_version='2013',
tests=[
r'test\generator-output\gyptest-actions.py',
r'test\generator-output\gyptest-relocate.py',
r'test\generator-output\gyptest-rules.py'])
retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013')
else:
raise Exception('Unknown platform')
if retcode:
# TODO(bradnelson): once the annotator supports a postscript (section for
# after the build proper that could be used for cumulative failures),
# use that instead of this. This isolates the final return value so
# that it isn't misattributed to the last stage.
print '@@@BUILD_STEP failures@@@'
sys.exit(retcode)
if __name__ == '__main__':
GypBuild()
| gpl-2.0 | -8,050,265,196,797,189,000 | 30.088235 | 117 | 0.626064 | false |
cloudbase/cinder | cinder/tests/unit/group/test_groups_manager.py | 4 | 31202 | # Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_utils import importutils
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import test
from cinder.tests.unit import conf_fixture
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import utils as tests_utils
from cinder.volume import api as volume_api
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume import utils as volutils
GROUP_QUOTAS = quota.GROUP_QUOTAS
CONF = cfg.CONF
class GroupManagerTestCase(test.TestCase):
def setUp(self):
super(GroupManagerTestCase, self).setUp()
self.volume = importutils.import_object(CONF.volume_manager)
self.configuration = mock.Mock(conf.Configuration)
self.context = context.get_admin_context()
self.context.user_id = fake.USER_ID
self.project_id = fake.PROJECT3_ID
self.context.project_id = self.project_id
self.volume.driver.set_initialized()
self.volume.stats = {'allocated_capacity_gb': 0,
'pools': {}}
self.volume_api = volume_api.API()
def test_delete_volume_in_group(self):
"""Test deleting a volume that's tied to a group fails."""
volume_params = {'status': 'available',
'group_id': fake.GROUP_ID}
volume = tests_utils.create_volume(self.context, **volume_params)
self.assertRaises(exception.InvalidVolume,
self.volume_api.delete, self.context, volume)
@mock.patch.object(GROUP_QUOTAS, "reserve",
return_value=["RESERVATION"])
@mock.patch.object(GROUP_QUOTAS, "commit")
@mock.patch.object(GROUP_QUOTAS, "rollback")
@mock.patch.object(driver.VolumeDriver,
"delete_group",
return_value=({'status': (
fields.GroupStatus.DELETED)}, []))
def test_create_delete_group(self, fake_delete_grp,
fake_rollback,
fake_commit, fake_reserve):
"""Test group can be created and deleted."""
def fake_driver_create_grp(context, group):
"""Make sure that the pool is part of the host."""
self.assertIn('host', group)
host = group.host
pool = volutils.extract_host(host, level='pool')
self.assertEqual('fakepool', pool)
return {'status': fields.GroupStatus.AVAILABLE}
self.mock_object(self.volume.driver, 'create_group',
fake_driver_create_grp)
group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type_ids=[fake.VOLUME_TYPE_ID],
host='fakehost@fakedrv#fakepool',
group_type_id=fake.GROUP_TYPE_ID)
group = objects.Group.get_by_id(self.context, group.id)
self.assertEqual(0, len(self.notifier.notifications),
self.notifier.notifications)
self.volume.create_group(self.context, group)
self.assertEqual(2, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[0]
self.assertEqual('group.create.start', msg['event_type'])
expected = {
'status': fields.GroupStatus.AVAILABLE,
'name': 'test_group',
'availability_zone': 'nova',
'tenant_id': self.context.project_id,
'created_at': 'DONTCARE',
'user_id': fake.USER_ID,
'group_id': group.id,
'group_type': fake.GROUP_TYPE_ID
}
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[1]
self.assertEqual('group.create.end', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
self.assertEqual(
group.id,
objects.Group.get_by_id(context.get_admin_context(),
group.id).id)
self.volume.delete_group(self.context, group)
grp = objects.Group.get_by_id(
context.get_admin_context(read_deleted='yes'), group.id)
self.assertEqual(fields.GroupStatus.DELETED, grp.status)
self.assertEqual(4, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[2]
self.assertEqual('group.delete.start', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[3]
self.assertEqual('group.delete.end', msg['event_type'])
expected['status'] = fields.GroupStatus.DELETED
self.assertDictMatch(expected, msg['payload'])
self.assertRaises(exception.NotFound,
objects.Group.get_by_id,
self.context,
group.id)
@mock.patch.object(GROUP_QUOTAS, "reserve",
return_value=["RESERVATION"])
@mock.patch.object(GROUP_QUOTAS, "commit")
@mock.patch.object(GROUP_QUOTAS, "rollback")
@mock.patch.object(driver.VolumeDriver,
"create_group",
return_value={'status': 'available'})
@mock.patch.object(driver.VolumeDriver,
"update_group")
def test_update_group(self, fake_update_grp,
fake_create_grp, fake_rollback,
fake_commit, fake_reserve):
"""Test group can be updated."""
group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID,
host=CONF.host)
self.volume.create_group(self.context, group)
volume = tests_utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID,
status='available',
host=group.host)
self.volume.create_volume(self.context, volume)
volume2 = tests_utils.create_volume(
self.context,
group_id=None,
volume_type_id=fake.VOLUME_TYPE_ID,
status='available',
host=group.host)
self.volume.create_volume(self.context, volume)
fake_update_grp.return_value = (
{'status': fields.GroupStatus.AVAILABLE},
[{'id': volume2.id, 'status': 'available'}],
[{'id': volume.id, 'status': 'available'}])
self.volume.update_group(self.context, group,
add_volumes=volume2.id,
remove_volumes=volume.id)
grp = objects.Group.get_by_id(self.context, group.id)
expected = {
'status': fields.GroupStatus.AVAILABLE,
'name': 'test_group',
'availability_zone': 'nova',
'tenant_id': self.context.project_id,
'created_at': 'DONTCARE',
'user_id': fake.USER_ID,
'group_id': group.id,
'group_type': fake.GROUP_TYPE_ID
}
self.assertEqual(fields.GroupStatus.AVAILABLE, grp.status)
self.assertEqual(10, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[6]
self.assertEqual('group.update.start', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[8]
self.assertEqual('group.update.end', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
grpvolumes = db.volume_get_all_by_generic_group(self.context, group.id)
grpvol_ids = [grpvol['id'] for grpvol in grpvolumes]
# Verify volume is removed.
self.assertNotIn(volume.id, grpvol_ids)
# Verify volume is added.
self.assertIn(volume2.id, grpvol_ids)
volume3 = tests_utils.create_volume(
self.context,
group_id=None,
host=group.host,
volume_type_id=fake.VOLUME_TYPE_ID,
status='wrong-status')
volume_id3 = volume3['id']
volume_get_orig = self.volume.db.volume_get
self.volume.db.volume_get = mock.Mock(
return_value={'status': 'wrong_status',
'id': volume_id3})
# Try to add a volume in wrong status
self.assertRaises(exception.InvalidVolume,
self.volume.update_group,
self.context,
group,
add_volumes=volume_id3,
remove_volumes=None)
self.volume.db.volume_get.reset_mock()
self.volume.db.volume_get = volume_get_orig
@mock.patch.object(driver.VolumeDriver,
"create_group",
return_value={'status': 'available'})
@mock.patch.object(driver.VolumeDriver,
"delete_group",
return_value=({'status': 'deleted'}, []))
@mock.patch.object(driver.VolumeDriver,
"create_group_snapshot",
return_value={'status': 'available'})
@mock.patch.object(driver.VolumeDriver,
"delete_group_snapshot",
return_value=({'status': 'deleted'}, []))
@mock.patch.object(driver.VolumeDriver,
"create_group_from_src",
return_value=(None, None))
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_volume_from_snapshot')
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_cloned_volume')
def test_create_group_from_src(self,
mock_create_cloned_vol,
mock_create_vol_from_snap,
mock_create_from_src,
mock_delete_grpsnap,
mock_create_grpsnap,
mock_delete_grp,
mock_create_grp):
"""Test group can be created and deleted."""
group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
status=fields.GroupStatus.AVAILABLE,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID,
host=CONF.host)
volume = tests_utils.create_volume(
self.context,
group_id=group.id,
status='available',
host=group.host,
volume_type_id=fake.VOLUME_TYPE_ID,
size=1)
volume_id = volume['id']
group_snapshot_returns = self._create_group_snapshot(group.id,
[volume_id])
group_snapshot = group_snapshot_returns[0]
snapshot_id = group_snapshot_returns[1][0]['id']
# Create group from source group snapshot.
group2 = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
group_snapshot_id=group_snapshot.id,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID,
host=CONF.host)
group2 = objects.Group.get_by_id(self.context, group2.id)
volume2 = tests_utils.create_volume(
self.context,
group_id=group2.id,
snapshot_id=snapshot_id,
status='available',
host=group2.host,
volume_type_id=fake.VOLUME_TYPE_ID)
self.volume.create_volume(self.context, volume2)
self.volume.create_group_from_src(
self.context, group2, group_snapshot=group_snapshot)
grp2 = objects.Group.get_by_id(self.context, group2.id)
expected = {
'status': fields.GroupStatus.AVAILABLE,
'name': 'test_group',
'availability_zone': 'nova',
'tenant_id': self.context.project_id,
'created_at': 'DONTCARE',
'user_id': fake.USER_ID,
'group_id': group2.id,
'group_type': fake.GROUP_TYPE_ID,
}
self.assertEqual(fields.GroupStatus.AVAILABLE, grp2.status)
self.assertEqual(group2.id, grp2['id'])
self.assertEqual(group_snapshot.id, grp2['group_snapshot_id'])
self.assertIsNone(grp2['source_group_id'])
msg = self.notifier.notifications[2]
self.assertEqual('group.create.start', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[4]
self.assertEqual('group.create.end', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
if len(self.notifier.notifications) > 6:
self.assertFalse(self.notifier.notifications[6],
self.notifier.notifications)
self.assertEqual(6, len(self.notifier.notifications),
self.notifier.notifications)
self.volume.delete_group(self.context, group2)
if len(self.notifier.notifications) > 9:
self.assertFalse(self.notifier.notifications[10],
self.notifier.notifications)
self.assertEqual(9, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[6]
self.assertEqual('group.delete.start', msg['event_type'])
expected['status'] = fields.GroupStatus.AVAILABLE
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[8]
self.assertEqual('group.delete.end', msg['event_type'])
expected['status'] = fields.GroupStatus.DELETED
self.assertDictMatch(expected, msg['payload'])
grp2 = objects.Group.get_by_id(
context.get_admin_context(read_deleted='yes'), group2.id)
self.assertEqual(fields.GroupStatus.DELETED, grp2.status)
self.assertRaises(exception.NotFound,
objects.Group.get_by_id,
self.context,
group2.id)
# Create group from source group
group3 = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
source_group_id=group.id,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID,
host=CONF.host)
volume3 = tests_utils.create_volume(
self.context,
group_id=group3.id,
source_volid=volume_id,
status='available',
host=group3.host,
volume_type_id=fake.VOLUME_TYPE_ID)
self.volume.create_volume(self.context, volume3)
self.volume.create_group_from_src(
self.context, group3, source_group=group)
grp3 = objects.Group.get_by_id(self.context, group3.id)
self.assertEqual(fields.GroupStatus.AVAILABLE, grp3.status)
self.assertEqual(group3.id, grp3.id)
self.assertEqual(group.id, grp3.source_group_id)
self.assertIsNone(grp3.group_snapshot_id)
self.volume.delete_group_snapshot(self.context, group_snapshot)
self.volume.delete_group(self.context, group)
def test_sort_snapshots(self):
vol1 = {'id': fake.VOLUME_ID, 'name': 'volume 1',
'snapshot_id': fake.SNAPSHOT_ID,
'group_id': fake.GROUP_ID}
vol2 = {'id': fake.VOLUME2_ID, 'name': 'volume 2',
'snapshot_id': fake.SNAPSHOT2_ID,
'group_id': fake.GROUP_ID}
vol3 = {'id': fake.VOLUME3_ID, 'name': 'volume 3',
'snapshot_id': fake.SNAPSHOT3_ID,
'group_id': fake.GROUP_ID}
snp1 = {'id': fake.SNAPSHOT_ID, 'name': 'snap 1',
'group_snapshot_id': fake.GROUP_ID}
snp2 = {'id': fake.SNAPSHOT2_ID, 'name': 'snap 2',
'group_snapshot_id': fake.GROUP_ID}
snp3 = {'id': fake.SNAPSHOT3_ID, 'name': 'snap 3',
'group_snapshot_id': fake.GROUP_ID}
snp1_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp1)
snp2_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp2)
snp3_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp3)
volumes = []
snapshots = []
volumes.append(vol1)
volumes.append(vol2)
volumes.append(vol3)
snapshots.append(snp2_obj)
snapshots.append(snp3_obj)
snapshots.append(snp1_obj)
i = 0
for vol in volumes:
snap = snapshots[i]
i += 1
self.assertNotEqual(vol['snapshot_id'], snap.id)
sorted_snaps = self.volume._sort_snapshots(volumes, snapshots)
i = 0
for vol in volumes:
snap = sorted_snaps[i]
i += 1
self.assertEqual(vol['snapshot_id'], snap.id)
snapshots[2]['id'] = fake.WILL_NOT_BE_FOUND_ID
self.assertRaises(exception.SnapshotNotFound,
self.volume._sort_snapshots,
volumes, snapshots)
self.assertRaises(exception.InvalidInput,
self.volume._sort_snapshots,
volumes, [])
def test_sort_source_vols(self):
vol1 = {'id': '1', 'name': 'volume 1',
'source_volid': '1',
'group_id': '2'}
vol2 = {'id': '2', 'name': 'volume 2',
'source_volid': '2',
'group_id': '2'}
vol3 = {'id': '3', 'name': 'volume 3',
'source_volid': '3',
'group_id': '2'}
src_vol1 = {'id': '1', 'name': 'source vol 1',
'group_id': '1'}
src_vol2 = {'id': '2', 'name': 'source vol 2',
'group_id': '1'}
src_vol3 = {'id': '3', 'name': 'source vol 3',
'group_id': '1'}
volumes = []
src_vols = []
volumes.append(vol1)
volumes.append(vol2)
volumes.append(vol3)
src_vols.append(src_vol2)
src_vols.append(src_vol3)
src_vols.append(src_vol1)
i = 0
for vol in volumes:
src_vol = src_vols[i]
i += 1
self.assertNotEqual(vol['source_volid'], src_vol['id'])
sorted_src_vols = self.volume._sort_source_vols(volumes, src_vols)
i = 0
for vol in volumes:
src_vol = sorted_src_vols[i]
i += 1
self.assertEqual(vol['source_volid'], src_vol['id'])
src_vols[2]['id'] = '9999'
self.assertRaises(exception.VolumeNotFound,
self.volume._sort_source_vols,
volumes, src_vols)
self.assertRaises(exception.InvalidInput,
self.volume._sort_source_vols,
volumes, [])
def _create_group_snapshot(self, group_id, volume_ids, size='0'):
"""Create a group_snapshot object."""
grpsnap = objects.GroupSnapshot(self.context)
grpsnap.user_id = fake.USER_ID
grpsnap.project_id = fake.PROJECT_ID
grpsnap.group_id = group_id
grpsnap.status = fields.GroupStatus.CREATING
grpsnap.create()
# Create snapshot list
for volume_id in volume_ids:
snaps = []
snap = objects.Snapshot(context.get_admin_context())
snap.volume_size = size
snap.user_id = fake.USER_ID
snap.project_id = fake.PROJECT_ID
snap.volume_id = volume_id
snap.status = fields.SnapshotStatus.AVAILABLE
snap.group_snapshot_id = grpsnap.id
snap.create()
snaps.append(snap)
return grpsnap, snaps
@mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
@mock.patch('cinder.volume.driver.VolumeDriver.create_group',
autospec=True,
return_value={'status': 'available'})
@mock.patch('cinder.volume.driver.VolumeDriver.delete_group',
autospec=True,
return_value=({'status': 'deleted'}, []))
@mock.patch('cinder.volume.driver.VolumeDriver.create_group_snapshot',
autospec=True,
return_value=({'status': 'available'}, []))
@mock.patch('cinder.volume.driver.VolumeDriver.delete_group_snapshot',
autospec=True,
return_value=({'status': 'deleted'}, []))
def test_create_delete_group_snapshot(self,
mock_del_grpsnap,
mock_create_grpsnap,
mock_del_grp,
_mock_create_grp,
mock_notify):
"""Test group_snapshot can be created and deleted."""
group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID,
host=CONF.host)
volume = tests_utils.create_volume(
self.context,
group_id=group.id,
host=group.host,
volume_type_id=fake.VOLUME_TYPE_ID)
self.volume.create_volume(self.context, volume)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.create.start'],
['INFO', 'volume.create.end']))
group_snapshot_returns = self._create_group_snapshot(group.id,
[volume.id])
group_snapshot = group_snapshot_returns[0]
self.volume.create_group_snapshot(self.context, group_snapshot)
self.assertEqual(group_snapshot.id,
objects.GroupSnapshot.get_by_id(
context.get_admin_context(),
group_snapshot.id).id)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.create.start'],
['INFO', 'volume.create.end'],
['INFO', 'group_snapshot.create.start'],
['INFO', 'snapshot.create.start'],
['INFO', 'group_snapshot.create.end'],
['INFO', 'snapshot.create.end']))
self.volume.delete_group_snapshot(self.context, group_snapshot)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.create.start'],
['INFO', 'volume.create.end'],
['INFO', 'group_snapshot.create.start'],
['INFO', 'snapshot.create.start'],
['INFO', 'group_snapshot.create.end'],
['INFO', 'snapshot.create.end'],
['INFO', 'group_snapshot.delete.start'],
['INFO', 'snapshot.delete.start'],
['INFO', 'group_snapshot.delete.end'],
['INFO', 'snapshot.delete.end']))
grpsnap = objects.GroupSnapshot.get_by_id(
context.get_admin_context(read_deleted='yes'),
group_snapshot.id)
self.assertEqual('deleted', grpsnap.status)
self.assertRaises(exception.NotFound,
objects.GroupSnapshot.get_by_id,
self.context,
group_snapshot.id)
self.volume.delete_group(self.context, group)
self.assertTrue(mock_create_grpsnap.called)
self.assertTrue(mock_del_grpsnap.called)
self.assertTrue(mock_del_grp.called)
@mock.patch('cinder.volume.driver.VolumeDriver.create_group',
return_value={'status': 'available'})
@mock.patch('cinder.volume.driver.VolumeDriver.delete_group',
return_value=({'status': 'deleted'}, []))
def test_delete_group_correct_host(self,
mock_del_grp,
_mock_create_grp):
"""Test group can be deleted.
Test group can be deleted when volumes are on
the correct volume node.
"""
group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID)
volume = tests_utils.create_volume(
self.context,
group_id=group.id,
host='host1@backend1#pool1',
status='creating',
volume_type_id=fake.VOLUME_TYPE_ID,
size=1)
self.volume.host = 'host1@backend1'
self.volume.create_volume(self.context, volume)
self.volume.delete_group(self.context, group)
grp = objects.Group.get_by_id(
context.get_admin_context(read_deleted='yes'),
group.id)
self.assertEqual(fields.GroupStatus.DELETED, grp.status)
self.assertRaises(exception.NotFound,
objects.Group.get_by_id,
self.context,
group.id)
self.assertTrue(mock_del_grp.called)
@mock.patch('cinder.volume.driver.VolumeDriver.create_group',
return_value={'status': 'available'})
def test_delete_group_wrong_host(self, *_mock_create_grp):
"""Test group cannot be deleted.
Test group cannot be deleted when volumes in the
group are not local to the volume node.
"""
group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID)
volume = tests_utils.create_volume(
self.context,
group_id=group.id,
host='host1@backend1#pool1',
status='creating',
volume_type_id=fake.VOLUME_TYPE_ID,
size=1)
self.volume.host = 'host1@backend2'
self.volume.create_volume(self.context, volume)
self.assertRaises(exception.InvalidVolume,
self.volume.delete_group,
self.context,
group)
grp = objects.Group.get_by_id(self.context, group.id)
# Group is not deleted
self.assertEqual(fields.GroupStatus.AVAILABLE, grp.status)
def test_create_volume_with_group_invalid_type(self):
"""Test volume creation with group & invalid volume type."""
vol_type = db.volume_type_create(
context.get_admin_context(),
dict(name=conf_fixture.def_vol_type, extra_specs={})
)
db_vol_type = db.volume_type_get(context.get_admin_context(),
vol_type.id)
grp = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
status=fields.GroupStatus.AVAILABLE,
volume_type_ids=[db_vol_type['id']],
group_type_id=fake.GROUP_TYPE_ID,
host=CONF.host)
fake_type = {
'id': '9999',
'name': 'fake',
}
# Volume type must be provided when creating a volume in a
# group.
self.assertRaises(exception.InvalidInput,
self.volume_api.create,
self.context, 1, 'vol1', 'volume 1',
group=grp)
# Volume type must be valid.
self.assertRaises(exception.InvalidInput,
self.volume_api.create,
self.context, 1, 'vol1', 'volume 1',
volume_type=fake_type,
group=grp)
@mock.patch('cinder.volume.driver.VolumeDriver.create_group_snapshot',
autospec=True,
return_value=({'status': 'available'}, []))
def test_create_group_snapshot_with_bootable_volumes(self,
mock_create_grpsnap):
"""Test group_snapshot can be created and deleted."""
group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID,
host=CONF.host)
volume = tests_utils.create_volume(
self.context,
group_id=group.id,
host=group.host,
volume_type_id=fake.VOLUME_TYPE_ID)
self.volume.create_volume(self.context, volume)
# Create a bootable volume
bootable_vol_params = {'status': 'creating', 'host': CONF.host,
'size': 1, 'bootable': True}
bootable_vol = tests_utils.create_volume(self.context,
group_id=group.id,
**bootable_vol_params)
        # Create the bootable volume on the volume manager
self.volume.create_volume(self.context, bootable_vol)
volume_ids = [volume.id, bootable_vol.id]
group_snapshot_returns = self._create_group_snapshot(group.id,
volume_ids)
group_snapshot = group_snapshot_returns[0]
self.volume.create_group_snapshot(self.context, group_snapshot)
self.assertEqual(group_snapshot.id,
objects.GroupSnapshot.get_by_id(
context.get_admin_context(),
group_snapshot.id).id)
self.assertTrue(mock_create_grpsnap.called)
| apache-2.0 | 4,636,621,172,082,472,000 | 42.037241 | 79 | 0.547882 | false |
CTSRD-SOAAP/chromium-42.0.2311.135 | build/android/pylib/valgrind_tools.py | 40 | 9131 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Classes in this file define additional actions that need to be taken to run a
test under some kind of runtime error detection tool.
The interface is intended to be used as follows.
1. For tests that simply run a native process (i.e. no activity is spawned):
Call tool.CopyFiles(device).
Prepend test command line with tool.GetTestWrapper().
2. For tests that spawn an activity:
Call tool.CopyFiles(device).
Call tool.SetupEnvironment().
Run the test as usual.
Call tool.CleanUpEnvironment().
"""
# pylint: disable=R0201
import glob
import logging
import os.path
import subprocess
import sys
from pylib.constants import DIR_SOURCE_ROOT
from pylib.device import device_errors
def SetChromeTimeoutScale(device, scale):
"""Sets the timeout scale in /data/local/tmp/chrome_timeout_scale to scale."""
path = '/data/local/tmp/chrome_timeout_scale'
if not scale or scale == 1.0:
# Delete if scale is None/0.0/1.0 since the default timeout scale is 1.0
device.RunShellCommand('rm %s' % path)
else:
device.WriteFile(path, '%f' % scale, as_root=True)
class BaseTool(object):
"""A tool that does nothing."""
def __init__(self):
"""Does nothing."""
pass
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ''
def GetUtilWrapper(self):
"""Returns the wrapper name for the utilities.
Returns:
A string that is to be prepended to the command line of utility
processes (forwarder, etc.).
"""
return ''
@classmethod
def CopyFiles(cls, device):
"""Copies tool-specific files to the device, create directories, etc."""
pass
def SetupEnvironment(self):
"""Sets up the system environment for a test.
This is a good place to set system properties.
"""
pass
def CleanUpEnvironment(self):
"""Cleans up environment."""
pass
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 1.0
def NeedsDebugInfo(self):
"""Whether this tool requires debug info.
Returns:
True if this tool can not work with stripped binaries.
"""
return False
class AddressSanitizerTool(BaseTool):
"""AddressSanitizer tool."""
WRAPPER_NAME = '/system/bin/asanwrapper'
  # Disable memcmp overlap check. There are blobs (gl drivers)
# on some android devices that use memcmp on overlapping regions,
# nothing we can do about that.
EXTRA_OPTIONS = 'strict_memcmp=0,use_sigaltstack=1'
def __init__(self, device):
super(AddressSanitizerTool, self).__init__()
self._device = device
# Configure AndroidCommands to run utils (such as md5sum_bin) under ASan.
# This is required because ASan is a compiler-based tool, and md5sum
# includes instrumented code from base.
device.old_interface.SetUtilWrapper(self.GetUtilWrapper())
@classmethod
def CopyFiles(cls, device):
"""Copies ASan tools to the device."""
libs = glob.glob(os.path.join(DIR_SOURCE_ROOT,
'third_party/llvm-build/Release+Asserts/',
'lib/clang/*/lib/linux/',
'libclang_rt.asan-arm-android.so'))
assert len(libs) == 1
subprocess.call(
[os.path.join(
DIR_SOURCE_ROOT,
'tools/android/asan/third_party/asan_device_setup.sh'),
'--device', str(device),
'--lib', libs[0],
'--extra-options', AddressSanitizerTool.EXTRA_OPTIONS])
device.WaitUntilFullyBooted()
def GetTestWrapper(self):
return AddressSanitizerTool.WRAPPER_NAME
def GetUtilWrapper(self):
"""Returns the wrapper for utilities, such as forwarder.
AddressSanitizer wrapper must be added to all instrumented binaries,
including forwarder and the like. This can be removed if such binaries
were built without instrumentation. """
return self.GetTestWrapper()
def SetupEnvironment(self):
try:
self._device.EnableRoot()
except device_errors.CommandFailedError as e:
# Try to set the timeout scale anyway.
# TODO(jbudorick) Handle this exception appropriately after interface
# conversions are finished.
logging.error(str(e))
SetChromeTimeoutScale(self._device, self.GetTimeoutScale())
def CleanUpEnvironment(self):
SetChromeTimeoutScale(self._device, None)
def GetTimeoutScale(self):
# Very slow startup.
return 20.0
class ValgrindTool(BaseTool):
"""Base abstract class for Valgrind tools."""
VG_DIR = '/data/local/tmp/valgrind'
VGLOGS_DIR = '/data/local/tmp/vglogs'
def __init__(self, device):
super(ValgrindTool, self).__init__()
self._device = device
# exactly 31 chars, SystemProperties::PROP_NAME_MAX
self._wrap_properties = ['wrap.com.google.android.apps.ch',
'wrap.org.chromium.native_test']
@classmethod
def CopyFiles(cls, device):
"""Copies Valgrind tools to the device."""
device.RunShellCommand(
'rm -r %s; mkdir %s' % (ValgrindTool.VG_DIR, ValgrindTool.VG_DIR))
device.RunShellCommand(
'rm -r %s; mkdir %s' % (ValgrindTool.VGLOGS_DIR,
ValgrindTool.VGLOGS_DIR))
files = cls.GetFilesForTool()
    device.PushChangedFiles(
        [(os.path.join(DIR_SOURCE_ROOT, f),
          os.path.join(ValgrindTool.VG_DIR, os.path.basename(f)))
         for f in files])
def SetupEnvironment(self):
"""Sets up device environment."""
self._device.RunShellCommand('chmod 777 /data/local/tmp')
self._device.RunShellCommand('setenforce 0')
for prop in self._wrap_properties:
self._device.RunShellCommand(
'setprop %s "logwrapper %s"' % (prop, self.GetTestWrapper()))
SetChromeTimeoutScale(self._device, self.GetTimeoutScale())
def CleanUpEnvironment(self):
"""Cleans up device environment."""
for prop in self._wrap_properties:
self._device.RunShellCommand('setprop %s ""' % (prop,))
SetChromeTimeoutScale(self._device, None)
@staticmethod
def GetFilesForTool():
"""Returns a list of file names for the tool."""
raise NotImplementedError()
def NeedsDebugInfo(self):
"""Whether this tool requires debug info.
Returns:
True if this tool can not work with stripped binaries.
"""
return True
class MemcheckTool(ValgrindTool):
"""Memcheck tool."""
def __init__(self, device):
super(MemcheckTool, self).__init__(device)
@staticmethod
def GetFilesForTool():
"""Returns a list of file names for the tool."""
return ['tools/valgrind/android/vg-chrome-wrapper.sh',
'tools/valgrind/memcheck/suppressions.txt',
'tools/valgrind/memcheck/suppressions_android.txt']
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ValgrindTool.VG_DIR + '/' + 'vg-chrome-wrapper.sh'
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 30
class TSanTool(ValgrindTool):
"""ThreadSanitizer tool. See http://code.google.com/p/data-race-test ."""
def __init__(self, device):
super(TSanTool, self).__init__(device)
@staticmethod
def GetFilesForTool():
"""Returns a list of file names for the tool."""
return ['tools/valgrind/android/vg-chrome-wrapper-tsan.sh',
'tools/valgrind/tsan/suppressions.txt',
'tools/valgrind/tsan/suppressions_android.txt',
'tools/valgrind/tsan/ignores.txt']
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ValgrindTool.VG_DIR + '/' + 'vg-chrome-wrapper-tsan.sh'
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 30.0
TOOL_REGISTRY = {
'memcheck': MemcheckTool,
'memcheck-renderer': MemcheckTool,
'tsan': TSanTool,
'tsan-renderer': TSanTool,
'asan': AddressSanitizerTool,
}
def CreateTool(tool_name, device):
"""Creates a tool with the specified tool name.
Args:
tool_name: Name of the tool to create.
device: A DeviceUtils instance.
Returns:
A tool for the specified tool_name.
"""
if not tool_name:
return BaseTool()
ctor = TOOL_REGISTRY.get(tool_name)
if ctor:
return ctor(device)
else:
print 'Unknown tool %s, available tools: %s' % (
tool_name, ', '.join(sorted(TOOL_REGISTRY.keys())))
sys.exit(1)
def PushFilesForTool(tool_name, device):
"""Pushes the files required for |tool_name| to |device|.
Args:
tool_name: Name of the tool to create.
device: A DeviceUtils instance.
"""
if not tool_name:
return
clazz = TOOL_REGISTRY.get(tool_name)
if clazz:
clazz.CopyFiles(device)
else:
print 'Unknown tool %s, available tools: %s' % (
tool_name, ', '.join(sorted(TOOL_REGISTRY.keys())))
sys.exit(1)
| bsd-3-clause | -5,759,298,061,306,272,000 | 29.036184 | 80 | 0.668601 | false |
globau/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/testdata/handlers/blank_wsh.py | 499 | 1557 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# intentionally left blank
| mpl-2.0 | 5,047,748,141,726,140,000 | 49.225806 | 72 | 0.782274 | false |
wbc2010/django1.2.5 | django/core/mail/backends/filebased.py | 394 | 2485 | """Email backend that writes messages to a file."""
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import EmailBackend as ConsoleEmailBackend
class EmailBackend(ConsoleEmailBackend):
def __init__(self, *args, **kwargs):
self._fname = None
if 'file_path' in kwargs:
self.file_path = kwargs.pop('file_path')
else:
            self.file_path = getattr(settings, 'EMAIL_FILE_PATH', None)
# Make sure self.file_path is a string.
if not isinstance(self.file_path, basestring):
raise ImproperlyConfigured('Path for saving emails is invalid: %r' % self.file_path)
self.file_path = os.path.abspath(self.file_path)
        # Make sure that self.file_path is a directory if it exists.
if os.path.exists(self.file_path) and not os.path.isdir(self.file_path):
raise ImproperlyConfigured('Path for saving email messages exists, but is not a directory: %s' % self.file_path)
        # Try to create it, if it does not exist.
elif not os.path.exists(self.file_path):
try:
os.makedirs(self.file_path)
except OSError, err:
raise ImproperlyConfigured('Could not create directory for saving email messages: %s (%s)' % (self.file_path, err))
# Make sure that self.file_path is writable.
if not os.access(self.file_path, os.W_OK):
raise ImproperlyConfigured('Could not write to directory: %s' % self.file_path)
# Finally, call super().
# Since we're using the console-based backend as a base,
# force the stream to be None, so we don't default to stdout
kwargs['stream'] = None
super(EmailBackend, self).__init__(*args, **kwargs)
def _get_filename(self):
"""Return a unique file name."""
if self._fname is None:
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
fname = "%s-%s.log" % (timestamp, abs(id(self)))
self._fname = os.path.join(self.file_path, fname)
return self._fname
def open(self):
if self.stream is None:
self.stream = open(self._get_filename(), 'a')
return True
return False
def close(self):
try:
if self.stream is not None:
self.stream.close()
finally:
self.stream = None
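# A minimal configuration sketch (not part of the original module). In a
# project's settings the file backend is typically selected like this; the
# directory shown is an arbitrary example:
#
#   EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
#   EMAIL_FILE_PATH = '/tmp/app-messages'
#
# Each connection then appends its outgoing messages to a timestamped
# "<YYYYmmdd-HHMMSS>-<id>.log" file inside EMAIL_FILE_PATH.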
| bsd-3-clause | -7,158,368,009,309,340,000 | 41.118644 | 131 | 0.614889 | false |
brainelectronics/towerdefense | _build/lib/pyglet/gl/glu.py | 45 | 25679 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for /usr/include/GL/glu.h
Generated by tools/gengl.py.
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
from pyglet.gl.lib import link_GLU as _link_function
from pyglet.gl.lib import c_ptrdiff_t
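# A small usage sketch (comment only, so the generated bindings below are not
# altered): a typical perspective/camera setup through these wrappers. The
# numeric values and the `width`/`height` names are arbitrary assumptions.
#
#   from pyglet.gl import *
#   glMatrixMode(GL_PROJECTION)
#   glLoadIdentity()
#   gluPerspective(60.0, width / float(height), 0.1, 1000.0)
#   glMatrixMode(GL_MODELVIEW)
#   glLoadIdentity()
#   gluLookAt(0.0, 0.0, 10.0,   # eye position
#             0.0, 0.0, 0.0,    # look-at point
#             0.0, 1.0, 0.0)    # up vector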
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for /usr/include/GL/glu.h
GLU_EXT_object_space_tess = 1 # /usr/include/GL/glu.h:71
GLU_EXT_nurbs_tessellator = 1 # /usr/include/GL/glu.h:72
GLU_FALSE = 0 # /usr/include/GL/glu.h:75
GLU_TRUE = 1 # /usr/include/GL/glu.h:76
GLU_VERSION_1_1 = 1 # /usr/include/GL/glu.h:79
GLU_VERSION_1_2 = 1 # /usr/include/GL/glu.h:80
GLU_VERSION_1_3 = 1 # /usr/include/GL/glu.h:81
GLU_VERSION = 100800 # /usr/include/GL/glu.h:84
GLU_EXTENSIONS = 100801 # /usr/include/GL/glu.h:85
GLU_INVALID_ENUM = 100900 # /usr/include/GL/glu.h:88
GLU_INVALID_VALUE = 100901 # /usr/include/GL/glu.h:89
GLU_OUT_OF_MEMORY = 100902 # /usr/include/GL/glu.h:90
GLU_INCOMPATIBLE_GL_VERSION = 100903 # /usr/include/GL/glu.h:91
GLU_INVALID_OPERATION = 100904 # /usr/include/GL/glu.h:92
GLU_OUTLINE_POLYGON = 100240 # /usr/include/GL/glu.h:96
GLU_OUTLINE_PATCH = 100241 # /usr/include/GL/glu.h:97
GLU_NURBS_ERROR = 100103 # /usr/include/GL/glu.h:100
GLU_ERROR = 100103 # /usr/include/GL/glu.h:101
GLU_NURBS_BEGIN = 100164 # /usr/include/GL/glu.h:102
GLU_NURBS_BEGIN_EXT = 100164 # /usr/include/GL/glu.h:103
GLU_NURBS_VERTEX = 100165 # /usr/include/GL/glu.h:104
GLU_NURBS_VERTEX_EXT = 100165 # /usr/include/GL/glu.h:105
GLU_NURBS_NORMAL = 100166 # /usr/include/GL/glu.h:106
GLU_NURBS_NORMAL_EXT = 100166 # /usr/include/GL/glu.h:107
GLU_NURBS_COLOR = 100167 # /usr/include/GL/glu.h:108
GLU_NURBS_COLOR_EXT = 100167 # /usr/include/GL/glu.h:109
GLU_NURBS_TEXTURE_COORD = 100168 # /usr/include/GL/glu.h:110
GLU_NURBS_TEX_COORD_EXT = 100168 # /usr/include/GL/glu.h:111
GLU_NURBS_END = 100169 # /usr/include/GL/glu.h:112
GLU_NURBS_END_EXT = 100169 # /usr/include/GL/glu.h:113
GLU_NURBS_BEGIN_DATA = 100170 # /usr/include/GL/glu.h:114
GLU_NURBS_BEGIN_DATA_EXT = 100170 # /usr/include/GL/glu.h:115
GLU_NURBS_VERTEX_DATA = 100171 # /usr/include/GL/glu.h:116
GLU_NURBS_VERTEX_DATA_EXT = 100171 # /usr/include/GL/glu.h:117
GLU_NURBS_NORMAL_DATA = 100172 # /usr/include/GL/glu.h:118
GLU_NURBS_NORMAL_DATA_EXT = 100172 # /usr/include/GL/glu.h:119
GLU_NURBS_COLOR_DATA = 100173 # /usr/include/GL/glu.h:120
GLU_NURBS_COLOR_DATA_EXT = 100173 # /usr/include/GL/glu.h:121
GLU_NURBS_TEXTURE_COORD_DATA = 100174 # /usr/include/GL/glu.h:122
GLU_NURBS_TEX_COORD_DATA_EXT = 100174 # /usr/include/GL/glu.h:123
GLU_NURBS_END_DATA = 100175 # /usr/include/GL/glu.h:124
GLU_NURBS_END_DATA_EXT = 100175 # /usr/include/GL/glu.h:125
GLU_NURBS_ERROR1 = 100251 # /usr/include/GL/glu.h:128
GLU_NURBS_ERROR2 = 100252 # /usr/include/GL/glu.h:129
GLU_NURBS_ERROR3 = 100253 # /usr/include/GL/glu.h:130
GLU_NURBS_ERROR4 = 100254 # /usr/include/GL/glu.h:131
GLU_NURBS_ERROR5 = 100255 # /usr/include/GL/glu.h:132
GLU_NURBS_ERROR6 = 100256 # /usr/include/GL/glu.h:133
GLU_NURBS_ERROR7 = 100257 # /usr/include/GL/glu.h:134
GLU_NURBS_ERROR8 = 100258 # /usr/include/GL/glu.h:135
GLU_NURBS_ERROR9 = 100259 # /usr/include/GL/glu.h:136
GLU_NURBS_ERROR10 = 100260 # /usr/include/GL/glu.h:137
GLU_NURBS_ERROR11 = 100261 # /usr/include/GL/glu.h:138
GLU_NURBS_ERROR12 = 100262 # /usr/include/GL/glu.h:139
GLU_NURBS_ERROR13 = 100263 # /usr/include/GL/glu.h:140
GLU_NURBS_ERROR14 = 100264 # /usr/include/GL/glu.h:141
GLU_NURBS_ERROR15 = 100265 # /usr/include/GL/glu.h:142
GLU_NURBS_ERROR16 = 100266 # /usr/include/GL/glu.h:143
GLU_NURBS_ERROR17 = 100267 # /usr/include/GL/glu.h:144
GLU_NURBS_ERROR18 = 100268 # /usr/include/GL/glu.h:145
GLU_NURBS_ERROR19 = 100269 # /usr/include/GL/glu.h:146
GLU_NURBS_ERROR20 = 100270 # /usr/include/GL/glu.h:147
GLU_NURBS_ERROR21 = 100271 # /usr/include/GL/glu.h:148
GLU_NURBS_ERROR22 = 100272 # /usr/include/GL/glu.h:149
GLU_NURBS_ERROR23 = 100273 # /usr/include/GL/glu.h:150
GLU_NURBS_ERROR24 = 100274 # /usr/include/GL/glu.h:151
GLU_NURBS_ERROR25 = 100275 # /usr/include/GL/glu.h:152
GLU_NURBS_ERROR26 = 100276 # /usr/include/GL/glu.h:153
GLU_NURBS_ERROR27 = 100277 # /usr/include/GL/glu.h:154
GLU_NURBS_ERROR28 = 100278 # /usr/include/GL/glu.h:155
GLU_NURBS_ERROR29 = 100279 # /usr/include/GL/glu.h:156
GLU_NURBS_ERROR30 = 100280 # /usr/include/GL/glu.h:157
GLU_NURBS_ERROR31 = 100281 # /usr/include/GL/glu.h:158
GLU_NURBS_ERROR32 = 100282 # /usr/include/GL/glu.h:159
GLU_NURBS_ERROR33 = 100283 # /usr/include/GL/glu.h:160
GLU_NURBS_ERROR34 = 100284 # /usr/include/GL/glu.h:161
GLU_NURBS_ERROR35 = 100285 # /usr/include/GL/glu.h:162
GLU_NURBS_ERROR36 = 100286 # /usr/include/GL/glu.h:163
GLU_NURBS_ERROR37 = 100287 # /usr/include/GL/glu.h:164
GLU_AUTO_LOAD_MATRIX = 100200 # /usr/include/GL/glu.h:167
GLU_CULLING = 100201 # /usr/include/GL/glu.h:168
GLU_SAMPLING_TOLERANCE = 100203 # /usr/include/GL/glu.h:169
GLU_DISPLAY_MODE = 100204 # /usr/include/GL/glu.h:170
GLU_PARAMETRIC_TOLERANCE = 100202 # /usr/include/GL/glu.h:171
GLU_SAMPLING_METHOD = 100205 # /usr/include/GL/glu.h:172
GLU_U_STEP = 100206 # /usr/include/GL/glu.h:173
GLU_V_STEP = 100207 # /usr/include/GL/glu.h:174
GLU_NURBS_MODE = 100160 # /usr/include/GL/glu.h:175
GLU_NURBS_MODE_EXT = 100160 # /usr/include/GL/glu.h:176
GLU_NURBS_TESSELLATOR = 100161 # /usr/include/GL/glu.h:177
GLU_NURBS_TESSELLATOR_EXT = 100161 # /usr/include/GL/glu.h:178
GLU_NURBS_RENDERER = 100162 # /usr/include/GL/glu.h:179
GLU_NURBS_RENDERER_EXT = 100162 # /usr/include/GL/glu.h:180
GLU_OBJECT_PARAMETRIC_ERROR = 100208 # /usr/include/GL/glu.h:183
GLU_OBJECT_PARAMETRIC_ERROR_EXT = 100208 # /usr/include/GL/glu.h:184
GLU_OBJECT_PATH_LENGTH = 100209 # /usr/include/GL/glu.h:185
GLU_OBJECT_PATH_LENGTH_EXT = 100209 # /usr/include/GL/glu.h:186
GLU_PATH_LENGTH = 100215 # /usr/include/GL/glu.h:187
GLU_PARAMETRIC_ERROR = 100216 # /usr/include/GL/glu.h:188
GLU_DOMAIN_DISTANCE = 100217 # /usr/include/GL/glu.h:189
GLU_MAP1_TRIM_2 = 100210 # /usr/include/GL/glu.h:192
GLU_MAP1_TRIM_3 = 100211 # /usr/include/GL/glu.h:193
GLU_POINT = 100010 # /usr/include/GL/glu.h:196
GLU_LINE = 100011 # /usr/include/GL/glu.h:197
GLU_FILL = 100012 # /usr/include/GL/glu.h:198
GLU_SILHOUETTE = 100013 # /usr/include/GL/glu.h:199
GLU_SMOOTH = 100000 # /usr/include/GL/glu.h:205
GLU_FLAT = 100001 # /usr/include/GL/glu.h:206
GLU_NONE = 100002 # /usr/include/GL/glu.h:207
GLU_OUTSIDE = 100020 # /usr/include/GL/glu.h:210
GLU_INSIDE = 100021 # /usr/include/GL/glu.h:211
GLU_TESS_BEGIN = 100100 # /usr/include/GL/glu.h:214
GLU_BEGIN = 100100 # /usr/include/GL/glu.h:215
GLU_TESS_VERTEX = 100101 # /usr/include/GL/glu.h:216
GLU_VERTEX = 100101 # /usr/include/GL/glu.h:217
GLU_TESS_END = 100102 # /usr/include/GL/glu.h:218
GLU_END = 100102 # /usr/include/GL/glu.h:219
GLU_TESS_ERROR = 100103 # /usr/include/GL/glu.h:220
GLU_TESS_EDGE_FLAG = 100104 # /usr/include/GL/glu.h:221
GLU_EDGE_FLAG = 100104 # /usr/include/GL/glu.h:222
GLU_TESS_COMBINE = 100105 # /usr/include/GL/glu.h:223
GLU_TESS_BEGIN_DATA = 100106 # /usr/include/GL/glu.h:224
GLU_TESS_VERTEX_DATA = 100107 # /usr/include/GL/glu.h:225
GLU_TESS_END_DATA = 100108 # /usr/include/GL/glu.h:226
GLU_TESS_ERROR_DATA = 100109 # /usr/include/GL/glu.h:227
GLU_TESS_EDGE_FLAG_DATA = 100110 # /usr/include/GL/glu.h:228
GLU_TESS_COMBINE_DATA = 100111 # /usr/include/GL/glu.h:229
GLU_CW = 100120 # /usr/include/GL/glu.h:232
GLU_CCW = 100121 # /usr/include/GL/glu.h:233
GLU_INTERIOR = 100122 # /usr/include/GL/glu.h:234
GLU_EXTERIOR = 100123 # /usr/include/GL/glu.h:235
GLU_UNKNOWN = 100124 # /usr/include/GL/glu.h:236
GLU_TESS_WINDING_RULE = 100140 # /usr/include/GL/glu.h:239
GLU_TESS_BOUNDARY_ONLY = 100141 # /usr/include/GL/glu.h:240
GLU_TESS_TOLERANCE = 100142 # /usr/include/GL/glu.h:241
GLU_TESS_ERROR1 = 100151 # /usr/include/GL/glu.h:244
GLU_TESS_ERROR2 = 100152 # /usr/include/GL/glu.h:245
GLU_TESS_ERROR3 = 100153 # /usr/include/GL/glu.h:246
GLU_TESS_ERROR4 = 100154 # /usr/include/GL/glu.h:247
GLU_TESS_ERROR5 = 100155 # /usr/include/GL/glu.h:248
GLU_TESS_ERROR6 = 100156 # /usr/include/GL/glu.h:249
GLU_TESS_ERROR7 = 100157 # /usr/include/GL/glu.h:250
GLU_TESS_ERROR8 = 100158 # /usr/include/GL/glu.h:251
GLU_TESS_MISSING_BEGIN_POLYGON = 100151 # /usr/include/GL/glu.h:252
GLU_TESS_MISSING_BEGIN_CONTOUR = 100152 # /usr/include/GL/glu.h:253
GLU_TESS_MISSING_END_POLYGON = 100153 # /usr/include/GL/glu.h:254
GLU_TESS_MISSING_END_CONTOUR = 100154 # /usr/include/GL/glu.h:255
GLU_TESS_COORD_TOO_LARGE = 100155 # /usr/include/GL/glu.h:256
GLU_TESS_NEED_COMBINE_CALLBACK = 100156 # /usr/include/GL/glu.h:257
GLU_TESS_WINDING_ODD = 100130 # /usr/include/GL/glu.h:260
GLU_TESS_WINDING_NONZERO = 100131 # /usr/include/GL/glu.h:261
GLU_TESS_WINDING_POSITIVE = 100132 # /usr/include/GL/glu.h:262
GLU_TESS_WINDING_NEGATIVE = 100133 # /usr/include/GL/glu.h:263
GLU_TESS_WINDING_ABS_GEQ_TWO = 100134 # /usr/include/GL/glu.h:264
class struct_GLUnurbs(Structure):
__slots__ = [
]
struct_GLUnurbs._fields_ = [
('_opaque_struct', c_int)
]
class struct_GLUnurbs(Structure):
__slots__ = [
]
struct_GLUnurbs._fields_ = [
('_opaque_struct', c_int)
]
GLUnurbs = struct_GLUnurbs # /usr/include/GL/glu.h:274
class struct_GLUquadric(Structure):
__slots__ = [
]
struct_GLUquadric._fields_ = [
('_opaque_struct', c_int)
]
class struct_GLUquadric(Structure):
__slots__ = [
]
struct_GLUquadric._fields_ = [
('_opaque_struct', c_int)
]
GLUquadric = struct_GLUquadric # /usr/include/GL/glu.h:275
class struct_GLUtesselator(Structure):
__slots__ = [
]
struct_GLUtesselator._fields_ = [
('_opaque_struct', c_int)
]
class struct_GLUtesselator(Structure):
__slots__ = [
]
struct_GLUtesselator._fields_ = [
('_opaque_struct', c_int)
]
GLUtesselator = struct_GLUtesselator # /usr/include/GL/glu.h:276
GLUnurbsObj = GLUnurbs # /usr/include/GL/glu.h:279
GLUquadricObj = GLUquadric # /usr/include/GL/glu.h:280
GLUtesselatorObj = GLUtesselator # /usr/include/GL/glu.h:281
GLUtriangulatorObj = GLUtesselator # /usr/include/GL/glu.h:282
GLU_TESS_MAX_COORD = 9.9999999999999998e+149 # /usr/include/GL/glu.h:284
_GLUfuncptr = CFUNCTYPE(None) # /usr/include/GL/glu.h:287
# /usr/include/GL/glu.h:289
gluBeginCurve = _link_function('gluBeginCurve', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:290
gluBeginPolygon = _link_function('gluBeginPolygon', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:291
gluBeginSurface = _link_function('gluBeginSurface', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:292
gluBeginTrim = _link_function('gluBeginTrim', None, [POINTER(GLUnurbs)], None)
GLint = c_int # /usr/include/GL/gl.h:159
GLenum = c_uint # /usr/include/GL/gl.h:153
GLsizei = c_int # /usr/include/GL/gl.h:163
# /usr/include/GL/glu.h:293
gluBuild1DMipmapLevels = _link_function('gluBuild1DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:294
gluBuild1DMipmaps = _link_function('gluBuild1DMipmaps', GLint, [GLenum, GLint, GLsizei, GLenum, GLenum, POINTER(None)], None)
# /usr/include/GL/glu.h:295
gluBuild2DMipmapLevels = _link_function('gluBuild2DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:296
gluBuild2DMipmaps = _link_function('gluBuild2DMipmaps', GLint, [GLenum, GLint, GLsizei, GLsizei, GLenum, GLenum, POINTER(None)], None)
# /usr/include/GL/glu.h:297
gluBuild3DMipmapLevels = _link_function('gluBuild3DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:298
gluBuild3DMipmaps = _link_function('gluBuild3DMipmaps', GLint, [GLenum, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(None)], None)
GLboolean = c_ubyte # /usr/include/GL/gl.h:154
GLubyte = c_ubyte # /usr/include/GL/gl.h:160
# /usr/include/GL/glu.h:299
gluCheckExtension = _link_function('gluCheckExtension', GLboolean, [POINTER(GLubyte), POINTER(GLubyte)], None)
GLdouble = c_double # /usr/include/GL/gl.h:166
# /usr/include/GL/glu.h:300
gluCylinder = _link_function('gluCylinder', None, [POINTER(GLUquadric), GLdouble, GLdouble, GLdouble, GLint, GLint], None)
# /usr/include/GL/glu.h:301
gluDeleteNurbsRenderer = _link_function('gluDeleteNurbsRenderer', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:302
gluDeleteQuadric = _link_function('gluDeleteQuadric', None, [POINTER(GLUquadric)], None)
# /usr/include/GL/glu.h:303
gluDeleteTess = _link_function('gluDeleteTess', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:304
gluDisk = _link_function('gluDisk', None, [POINTER(GLUquadric), GLdouble, GLdouble, GLint, GLint], None)
# /usr/include/GL/glu.h:305
gluEndCurve = _link_function('gluEndCurve', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:306
gluEndPolygon = _link_function('gluEndPolygon', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:307
gluEndSurface = _link_function('gluEndSurface', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:308
gluEndTrim = _link_function('gluEndTrim', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:309
gluErrorString = _link_function('gluErrorString', POINTER(GLubyte), [GLenum], None)
GLfloat = c_float # /usr/include/GL/gl.h:164
# /usr/include/GL/glu.h:310
gluGetNurbsProperty = _link_function('gluGetNurbsProperty', None, [POINTER(GLUnurbs), GLenum, POINTER(GLfloat)], None)
# /usr/include/GL/glu.h:311
gluGetString = _link_function('gluGetString', POINTER(GLubyte), [GLenum], None)
# /usr/include/GL/glu.h:312
gluGetTessProperty = _link_function('gluGetTessProperty', None, [POINTER(GLUtesselator), GLenum, POINTER(GLdouble)], None)
# /usr/include/GL/glu.h:313
gluLoadSamplingMatrices = _link_function('gluLoadSamplingMatrices', None, [POINTER(GLUnurbs), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLint)], None)
# /usr/include/GL/glu.h:314
gluLookAt = _link_function('gluLookAt', None, [GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:315
gluNewNurbsRenderer = _link_function('gluNewNurbsRenderer', POINTER(GLUnurbs), [], None)
# /usr/include/GL/glu.h:316
gluNewQuadric = _link_function('gluNewQuadric', POINTER(GLUquadric), [], None)
# /usr/include/GL/glu.h:317
gluNewTess = _link_function('gluNewTess', POINTER(GLUtesselator), [], None)
# /usr/include/GL/glu.h:318
gluNextContour = _link_function('gluNextContour', None, [POINTER(GLUtesselator), GLenum], None)
# /usr/include/GL/glu.h:319
gluNurbsCallback = _link_function('gluNurbsCallback', None, [POINTER(GLUnurbs), GLenum, _GLUfuncptr], None)
GLvoid = None # /usr/include/GL/gl.h:156
# /usr/include/GL/glu.h:320
gluNurbsCallbackData = _link_function('gluNurbsCallbackData', None, [POINTER(GLUnurbs), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:321
gluNurbsCallbackDataEXT = _link_function('gluNurbsCallbackDataEXT', None, [POINTER(GLUnurbs), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:322
gluNurbsCurve = _link_function('gluNurbsCurve', None, [POINTER(GLUnurbs), GLint, POINTER(GLfloat), GLint, POINTER(GLfloat), GLint, GLenum], None)
# /usr/include/GL/glu.h:323
gluNurbsProperty = _link_function('gluNurbsProperty', None, [POINTER(GLUnurbs), GLenum, GLfloat], None)
# /usr/include/GL/glu.h:324
gluNurbsSurface = _link_function('gluNurbsSurface', None, [POINTER(GLUnurbs), GLint, POINTER(GLfloat), GLint, POINTER(GLfloat), GLint, GLint, POINTER(GLfloat), GLint, GLint, GLenum], None)
# /usr/include/GL/glu.h:325
gluOrtho2D = _link_function('gluOrtho2D', None, [GLdouble, GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:326
gluPartialDisk = _link_function('gluPartialDisk', None, [POINTER(GLUquadric), GLdouble, GLdouble, GLint, GLint, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:327
gluPerspective = _link_function('gluPerspective', None, [GLdouble, GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:328
gluPickMatrix = _link_function('gluPickMatrix', None, [GLdouble, GLdouble, GLdouble, GLdouble, POINTER(GLint)], None)
# /usr/include/GL/glu.h:329
gluProject = _link_function('gluProject', GLint, [GLdouble, GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLint), POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble)], None)
# /usr/include/GL/glu.h:330
gluPwlCurve = _link_function('gluPwlCurve', None, [POINTER(GLUnurbs), GLint, POINTER(GLfloat), GLint, GLenum], None)
# /usr/include/GL/glu.h:331
gluQuadricCallback = _link_function('gluQuadricCallback', None, [POINTER(GLUquadric), GLenum, _GLUfuncptr], None)
# /usr/include/GL/glu.h:332
gluQuadricDrawStyle = _link_function('gluQuadricDrawStyle', None, [POINTER(GLUquadric), GLenum], None)
# /usr/include/GL/glu.h:333
gluQuadricNormals = _link_function('gluQuadricNormals', None, [POINTER(GLUquadric), GLenum], None)
# /usr/include/GL/glu.h:334
gluQuadricOrientation = _link_function('gluQuadricOrientation', None, [POINTER(GLUquadric), GLenum], None)
# /usr/include/GL/glu.h:335
gluQuadricTexture = _link_function('gluQuadricTexture', None, [POINTER(GLUquadric), GLboolean], None)
# /usr/include/GL/glu.h:336
gluScaleImage = _link_function('gluScaleImage', GLint, [GLenum, GLsizei, GLsizei, GLenum, POINTER(None), GLsizei, GLsizei, GLenum, POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:337
gluSphere = _link_function('gluSphere', None, [POINTER(GLUquadric), GLdouble, GLint, GLint], None)
# /usr/include/GL/glu.h:338
gluTessBeginContour = _link_function('gluTessBeginContour', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:339
gluTessBeginPolygon = _link_function('gluTessBeginPolygon', None, [POINTER(GLUtesselator), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:340
gluTessCallback = _link_function('gluTessCallback', None, [POINTER(GLUtesselator), GLenum, _GLUfuncptr], None)
# /usr/include/GL/glu.h:341
gluTessEndContour = _link_function('gluTessEndContour', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:342
gluTessEndPolygon = _link_function('gluTessEndPolygon', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:343
gluTessNormal = _link_function('gluTessNormal', None, [POINTER(GLUtesselator), GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:344
gluTessProperty = _link_function('gluTessProperty', None, [POINTER(GLUtesselator), GLenum, GLdouble], None)
# /usr/include/GL/glu.h:345
gluTessVertex = _link_function('gluTessVertex', None, [POINTER(GLUtesselator), POINTER(GLdouble), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:346
gluUnProject = _link_function('gluUnProject', GLint, [GLdouble, GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLint), POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble)], None)
# /usr/include/GL/glu.h:347
gluUnProject4 = _link_function('gluUnProject4', GLint, [GLdouble, GLdouble, GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLint), GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble)], None)
__all__ = ['GLU_EXT_object_space_tess', 'GLU_EXT_nurbs_tessellator',
'GLU_FALSE', 'GLU_TRUE', 'GLU_VERSION_1_1', 'GLU_VERSION_1_2',
'GLU_VERSION_1_3', 'GLU_VERSION', 'GLU_EXTENSIONS', 'GLU_INVALID_ENUM',
'GLU_INVALID_VALUE', 'GLU_OUT_OF_MEMORY', 'GLU_INCOMPATIBLE_GL_VERSION',
'GLU_INVALID_OPERATION', 'GLU_OUTLINE_POLYGON', 'GLU_OUTLINE_PATCH',
'GLU_NURBS_ERROR', 'GLU_ERROR', 'GLU_NURBS_BEGIN', 'GLU_NURBS_BEGIN_EXT',
'GLU_NURBS_VERTEX', 'GLU_NURBS_VERTEX_EXT', 'GLU_NURBS_NORMAL',
'GLU_NURBS_NORMAL_EXT', 'GLU_NURBS_COLOR', 'GLU_NURBS_COLOR_EXT',
'GLU_NURBS_TEXTURE_COORD', 'GLU_NURBS_TEX_COORD_EXT', 'GLU_NURBS_END',
'GLU_NURBS_END_EXT', 'GLU_NURBS_BEGIN_DATA', 'GLU_NURBS_BEGIN_DATA_EXT',
'GLU_NURBS_VERTEX_DATA', 'GLU_NURBS_VERTEX_DATA_EXT', 'GLU_NURBS_NORMAL_DATA',
'GLU_NURBS_NORMAL_DATA_EXT', 'GLU_NURBS_COLOR_DATA',
'GLU_NURBS_COLOR_DATA_EXT', 'GLU_NURBS_TEXTURE_COORD_DATA',
'GLU_NURBS_TEX_COORD_DATA_EXT', 'GLU_NURBS_END_DATA',
'GLU_NURBS_END_DATA_EXT', 'GLU_NURBS_ERROR1', 'GLU_NURBS_ERROR2',
'GLU_NURBS_ERROR3', 'GLU_NURBS_ERROR4', 'GLU_NURBS_ERROR5',
'GLU_NURBS_ERROR6', 'GLU_NURBS_ERROR7', 'GLU_NURBS_ERROR8',
'GLU_NURBS_ERROR9', 'GLU_NURBS_ERROR10', 'GLU_NURBS_ERROR11',
'GLU_NURBS_ERROR12', 'GLU_NURBS_ERROR13', 'GLU_NURBS_ERROR14',
'GLU_NURBS_ERROR15', 'GLU_NURBS_ERROR16', 'GLU_NURBS_ERROR17',
'GLU_NURBS_ERROR18', 'GLU_NURBS_ERROR19', 'GLU_NURBS_ERROR20',
'GLU_NURBS_ERROR21', 'GLU_NURBS_ERROR22', 'GLU_NURBS_ERROR23',
'GLU_NURBS_ERROR24', 'GLU_NURBS_ERROR25', 'GLU_NURBS_ERROR26',
'GLU_NURBS_ERROR27', 'GLU_NURBS_ERROR28', 'GLU_NURBS_ERROR29',
'GLU_NURBS_ERROR30', 'GLU_NURBS_ERROR31', 'GLU_NURBS_ERROR32',
'GLU_NURBS_ERROR33', 'GLU_NURBS_ERROR34', 'GLU_NURBS_ERROR35',
'GLU_NURBS_ERROR36', 'GLU_NURBS_ERROR37', 'GLU_AUTO_LOAD_MATRIX',
'GLU_CULLING', 'GLU_SAMPLING_TOLERANCE', 'GLU_DISPLAY_MODE',
'GLU_PARAMETRIC_TOLERANCE', 'GLU_SAMPLING_METHOD', 'GLU_U_STEP', 'GLU_V_STEP',
'GLU_NURBS_MODE', 'GLU_NURBS_MODE_EXT', 'GLU_NURBS_TESSELLATOR',
'GLU_NURBS_TESSELLATOR_EXT', 'GLU_NURBS_RENDERER', 'GLU_NURBS_RENDERER_EXT',
'GLU_OBJECT_PARAMETRIC_ERROR', 'GLU_OBJECT_PARAMETRIC_ERROR_EXT',
'GLU_OBJECT_PATH_LENGTH', 'GLU_OBJECT_PATH_LENGTH_EXT', 'GLU_PATH_LENGTH',
'GLU_PARAMETRIC_ERROR', 'GLU_DOMAIN_DISTANCE', 'GLU_MAP1_TRIM_2',
'GLU_MAP1_TRIM_3', 'GLU_POINT', 'GLU_LINE', 'GLU_FILL', 'GLU_SILHOUETTE',
'GLU_SMOOTH', 'GLU_FLAT', 'GLU_NONE', 'GLU_OUTSIDE', 'GLU_INSIDE',
'GLU_TESS_BEGIN', 'GLU_BEGIN', 'GLU_TESS_VERTEX', 'GLU_VERTEX',
'GLU_TESS_END', 'GLU_END', 'GLU_TESS_ERROR', 'GLU_TESS_EDGE_FLAG',
'GLU_EDGE_FLAG', 'GLU_TESS_COMBINE', 'GLU_TESS_BEGIN_DATA',
'GLU_TESS_VERTEX_DATA', 'GLU_TESS_END_DATA', 'GLU_TESS_ERROR_DATA',
'GLU_TESS_EDGE_FLAG_DATA', 'GLU_TESS_COMBINE_DATA', 'GLU_CW', 'GLU_CCW',
'GLU_INTERIOR', 'GLU_EXTERIOR', 'GLU_UNKNOWN', 'GLU_TESS_WINDING_RULE',
'GLU_TESS_BOUNDARY_ONLY', 'GLU_TESS_TOLERANCE', 'GLU_TESS_ERROR1',
'GLU_TESS_ERROR2', 'GLU_TESS_ERROR3', 'GLU_TESS_ERROR4', 'GLU_TESS_ERROR5',
'GLU_TESS_ERROR6', 'GLU_TESS_ERROR7', 'GLU_TESS_ERROR8',
'GLU_TESS_MISSING_BEGIN_POLYGON', 'GLU_TESS_MISSING_BEGIN_CONTOUR',
'GLU_TESS_MISSING_END_POLYGON', 'GLU_TESS_MISSING_END_CONTOUR',
'GLU_TESS_COORD_TOO_LARGE', 'GLU_TESS_NEED_COMBINE_CALLBACK',
'GLU_TESS_WINDING_ODD', 'GLU_TESS_WINDING_NONZERO',
'GLU_TESS_WINDING_POSITIVE', 'GLU_TESS_WINDING_NEGATIVE',
'GLU_TESS_WINDING_ABS_GEQ_TWO', 'GLUnurbs', 'GLUquadric', 'GLUtesselator',
'GLUnurbsObj', 'GLUquadricObj', 'GLUtesselatorObj', 'GLUtriangulatorObj',
'GLU_TESS_MAX_COORD', '_GLUfuncptr', 'gluBeginCurve', 'gluBeginPolygon',
'gluBeginSurface', 'gluBeginTrim', 'gluBuild1DMipmapLevels',
'gluBuild1DMipmaps', 'gluBuild2DMipmapLevels', 'gluBuild2DMipmaps',
'gluBuild3DMipmapLevels', 'gluBuild3DMipmaps', 'gluCheckExtension',
'gluCylinder', 'gluDeleteNurbsRenderer', 'gluDeleteQuadric', 'gluDeleteTess',
'gluDisk', 'gluEndCurve', 'gluEndPolygon', 'gluEndSurface', 'gluEndTrim',
'gluErrorString', 'gluGetNurbsProperty', 'gluGetString', 'gluGetTessProperty',
'gluLoadSamplingMatrices', 'gluLookAt', 'gluNewNurbsRenderer',
'gluNewQuadric', 'gluNewTess', 'gluNextContour', 'gluNurbsCallback',
'gluNurbsCallbackData', 'gluNurbsCallbackDataEXT', 'gluNurbsCurve',
'gluNurbsProperty', 'gluNurbsSurface', 'gluOrtho2D', 'gluPartialDisk',
'gluPerspective', 'gluPickMatrix', 'gluProject', 'gluPwlCurve',
'gluQuadricCallback', 'gluQuadricDrawStyle', 'gluQuadricNormals',
'gluQuadricOrientation', 'gluQuadricTexture', 'gluScaleImage', 'gluSphere',
'gluTessBeginContour', 'gluTessBeginPolygon', 'gluTessCallback',
'gluTessEndContour', 'gluTessEndPolygon', 'gluTessNormal', 'gluTessProperty',
'gluTessVertex', 'gluUnProject', 'gluUnProject4']
# END GENERATED CONTENT (do not edit above this line)
| bsd-3-clause | -3,376,340,563,036,097,000 | 48.765504 | 252 | 0.721329 | false |
analyseuc3m/ANALYSE-v1 | lms/djangoapps/course_api/blocks/transformers/tests/test_block_counts.py | 12 | 2030 | """
Tests for BlockCountsTransformer.
"""
# pylint: disable=protected-access
from openedx.core.lib.block_structure.factory import BlockStructureFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import SampleCourseFactory
from ..block_counts import BlockCountsTransformer
class TestBlockCountsTransformer(ModuleStoreTestCase):
"""
Test behavior of BlockCountsTransformer
"""
def setUp(self):
super(TestBlockCountsTransformer, self).setUp()
self.course_key = SampleCourseFactory.create().id
self.course_usage_key = self.store.make_course_usage_key(self.course_key)
self.block_structure = BlockStructureFactory.create_from_modulestore(self.course_usage_key, self.store)
def test_transform(self):
# collect phase
BlockCountsTransformer.collect(self.block_structure)
self.block_structure._collect_requested_xblock_fields()
# transform phase
BlockCountsTransformer(['problem', 'chapter']).transform(usage_info=None, block_structure=self.block_structure)
# block_counts
chapter_x_key = self.course_key.make_usage_key('chapter', 'chapter_x')
block_counts_for_chapter_x = self.block_structure.get_transformer_block_data(
chapter_x_key, BlockCountsTransformer,
)
block_counts_for_course = self.block_structure.get_transformer_block_data(
self.course_usage_key, BlockCountsTransformer,
)
# verify count of chapters
self.assertEquals(block_counts_for_course['chapter'], 2)
# verify count of problems
self.assertEquals(block_counts_for_course['problem'], 6)
self.assertEquals(block_counts_for_chapter_x['problem'], 3)
# verify other block types are not counted
for block_type in ['course', 'html', 'video']:
self.assertNotIn(block_type, block_counts_for_course)
self.assertNotIn(block_type, block_counts_for_chapter_x)
| agpl-3.0 | -1,705,462,066,008,102,400 | 39.6 | 119 | 0.704926 | false |
pombreda/libforensics | code/lf/win/shell/link/dtypes.py | 13 | 5891 | # Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Data structures to work with shell link files"""
# local imports
from lf.dtypes import LERecord, BitTypeU32, bit, raw
from lf.win.dtypes import (
FILETIME_LE, COLORREF, DWORD, WORD, BYTE, CLSID_LE, GUID_LE,
LCID_LE
)
from lf.win.con.dtypes import COORD_LE
__docformat__ = "restructuredtext en"
__all__ = [
"HotKey", "ShellLinkHeader", "LinkInfoHeader", "VolumeIDHeader",
"CNRLHeader", "DataBlockHeader", "ConsoleDataBlock", "ConsoleFEDataBlock",
"DarwinDataBlock", "EnvironmentVariableDataBlock",
"IconEnvironmentDataBlock", "KnownFolderDataBlock",
"SpecialFolderDataBlock", "TrackerDataBlock", "TrackerDataBlockFooter",
"DomainRelativeObjId"
]
class LinkFlagsBits(BitTypeU32):
has_idlist = bit
has_link_info = bit
has_name = bit
has_relative_path = bit
has_working_dir = bit
has_args = bit
has_icon_location = bit
is_unicode = bit
force_no_link_info = bit
has_exp_string = bit
run_in_separate_proc = bit
has_logo3_id = bit
has_darwin_id = bit
run_as_user = bit
has_exp_icon = bit
no_pidl_alias = bit
force_unc_name = bit
run_with_shim_layer = bit
force_no_link_track = bit
enable_target_metadata = bit
disable_link_path_tracking = bit
disable_known_folder_rel_tracking = bit
no_kf_alias = bit
allow_link_to_link = bit
unalias_on_save = bit
prefer_environment_path = bit
keep_local_idlist_for_unc_target = bit
# end class LinkFlagsBits
class LinkFlags(LERecord):
field = LinkFlagsBits
# end class LinkFlags
class FileAttributesBits(BitTypeU32):
read_only = bit
hidden = bit
system = bit
reserved1 = bit
directory = bit
archive = bit
reserved2 = bit
normal = bit
temp = bit
sparse = bit
reparse_point = bit
compressed = bit
offline = bit
not_content_indexed = bit
encrypted = bit
# end class FileAttributesBits
class FileAttributes(LERecord):
field = FileAttributesBits
# end class FileAttributes
class HotKey(LERecord):
vkcode = BYTE
vkmod = BYTE
# end class HotKey
class ShellLinkHeader(LERecord):
size = DWORD
clsid = CLSID_LE
flags = LinkFlags
attrs = FileAttributes
btime = FILETIME_LE
atime = FILETIME_LE
mtime = FILETIME_LE
target_size = DWORD
icon_index = DWORD
show_cmd = DWORD
hotkey = HotKey
reserved1 = raw(2)
reserved2 = raw(4)
reserved3 = raw(4)
# end class ShellLinkHeader
class LinkInfoFlags(BitTypeU32):
has_vol_id_and_local_base_path = bit
has_cnrl_and_path_suffix = bit
# end class LinkInfoFlags
class LinkInfoHeader(LERecord):
size = DWORD
header_size = DWORD
flags = LinkInfoFlags
vol_id_offset = DWORD
local_base_path_offset = DWORD
cnrl_offset = DWORD
path_suffix_offset = DWORD
# end class LinkInfoHeader
class VolumeIDHeader(LERecord):
size = DWORD
type = DWORD
serial_num = DWORD
vol_label_offset = DWORD
# end class VolumeIDHeader
class CNRLFlags(BitTypeU32):
valid_device = bit
valid_net_type = bit
# end class CNRLFlags
class CNRLHeader(LERecord):
size = DWORD
flags = CNRLFlags
net_name_offset = DWORD
device_name_offset = DWORD
net_type = DWORD
# end class CNRLHeader
class DataBlockHeader(LERecord):
size = DWORD
sig = DWORD
# end class DataBlockHeader
class ConsoleDataBlock(DataBlockHeader):
fill_attributes = WORD
popup_fill_attributes = WORD
screen_buffer_size = COORD_LE
window_size = COORD_LE
window_origin = COORD_LE
font = DWORD
input_buf_size = DWORD
font_size = DWORD
font_family = DWORD
font_weight = DWORD
face_name = raw(64)
cursor_size = DWORD
full_screen = DWORD
quick_edit = DWORD
insert_mode = DWORD
auto_position = DWORD
history_buf_size = DWORD
history_buf_count = DWORD
history_no_dup = DWORD
color_table = [COLORREF] * 16
# end class ConsoleDataBlock
class ConsoleFEDataBlock(DataBlockHeader):
code_page = LCID_LE
# end class ConsoleFEDataBlock
class DarwinDataBlock(DataBlockHeader):
darwin_data_ansi = raw(260)
darwin_data_uni = raw(520)
# end class DarwinDataBlock
class ExpandableStringsDataBlock(DataBlockHeader):
target_ansi = raw(260)
target_uni = raw(520)
# end class ExpandableStringsDataBlock
class EnvironmentVariableDataBlock(ExpandableStringsDataBlock):
pass
# end class EnvironmentVariableDataBlock
class IconEnvironmentDataBlock(ExpandableStringsDataBlock):
pass
# end class IconEnvironmentDataBlock
class KnownFolderDataBlock(DataBlockHeader):
kf_id = GUID_LE
offset = DWORD
# end class KnownFolderDataBlock
class SpecialFolderDataBlock(DataBlockHeader):
sf_id = DWORD
offset = DWORD
# end class SpecialFolderDataBlock
class DomainRelativeObjId(LERecord):
volume = GUID_LE
object = GUID_LE
# end class DomainRelativeObjId
class TrackerDataBlock(DataBlockHeader):
length = DWORD
version = DWORD
# end class TrackerDataBlock
class TrackerDataBlockFooter(LERecord):
droid = DomainRelativeObjId
droid_birth = DomainRelativeObjId
# end class TrackerDataBlockFooter
| gpl-3.0 | -113,635,050,440,121,060 | 25.299107 | 78 | 0.708708 | false |
lishensan/xbmc | lib/gtest/test/gtest_output_test.py | 184 | 12027 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO([email protected]): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
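# Illustrative example (not part of the original test script): both Windows and
# old-Mac line endings collapse to plain '\n'.
#   >>> ToUnixLineEnding('a\r\nb\rc')
#   'a\nb\nc'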
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)
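# Illustrative example (added for clarity, not in the original): a failure line
# such as '/home/user/gtest_output_test_.cc:123: Failure' becomes
# 'gtest_output_test_.cc:#: Failure', so golden-file comparison ignores
# directories and line numbers.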
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
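# Illustrative example (not part of the original): '[ OK ] Foo.Bar (3 ms)'
# normalizes to '[ OK ] Foo.Bar (? ms)', making timings comparable across runs.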
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
pattern, pattern),
'',
test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
"""Normalizes output (the output of gtest_output_test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
Returns:
A string with the command's combined standard and diagnostic output.
"""
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ, capture_stderr=False)
return p.output
def GetCommandOutput(env_cmd):
"""Runs a command and returns its output with all file location
info stripped off.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
"""
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'rb')
# A mis-configured source control system can cause \r appear in EOL
# sequences when we read the golden file irrespective of an operating
# system used. Therefore, we need to strip those \r's from newlines
# unconditionally.
golden = ToUnixLineEnding(golden_file.read())
golden_file.close()
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual)
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_actual.txt'), 'wb').write(
normalized_actual)
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_golden.txt'), 'wb').write(
normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
| gpl-2.0 | 6,393,144,828,064,978,000 | 34.901493 | 80 | 0.675896 | false |
atul-bhouraskar/django | tests/model_inheritance_regress/models.py | 243 | 5863 | from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
class Meta:
ordering = ('name',)
def __str__(self):
return "%s the place" % self.name
@python_2_unicode_compatible
class Restaurant(Place):
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return "%s the restaurant" % self.name
@python_2_unicode_compatible
class ItalianRestaurant(Restaurant):
serves_gnocchi = models.BooleanField(default=False)
def __str__(self):
return "%s the italian restaurant" % self.name
@python_2_unicode_compatible
class ParkingLot(Place):
# An explicit link to the parent (we can control the attribute name).
parent = models.OneToOneField(Place, models.CASCADE, primary_key=True, parent_link=True)
capacity = models.IntegerField()
def __str__(self):
return "%s the parking lot" % self.name
class ParkingLot2(Place):
# In lieu of any other connector, an existing OneToOneField will be
# promoted to the primary key.
parent = models.OneToOneField(Place, models.CASCADE)
class ParkingLot3(Place):
# The parent_link connector need not be the pk on the model.
primary_key = models.AutoField(primary_key=True)
parent = models.OneToOneField(Place, models.CASCADE, parent_link=True)
class ParkingLot4(models.Model):
# Test parent_link connector can be discovered in abstract classes.
parent = models.OneToOneField(Place, models.CASCADE, parent_link=True)
class Meta:
abstract = True
class ParkingLot4A(ParkingLot4, Place):
pass
class ParkingLot4B(Place, ParkingLot4):
pass
@python_2_unicode_compatible
class Supplier(models.Model):
name = models.CharField(max_length=50)
restaurant = models.ForeignKey(Restaurant, models.CASCADE)
def __str__(self):
return self.name
class Wholesaler(Supplier):
retailer = models.ForeignKey(Supplier, models.CASCADE, related_name='wholesale_supplier')
class Parent(models.Model):
created = models.DateTimeField(default=datetime.datetime.now)
class Child(Parent):
name = models.CharField(max_length=10)
class SelfRefParent(models.Model):
parent_data = models.IntegerField()
self_data = models.ForeignKey('self', models.SET_NULL, null=True)
class SelfRefChild(SelfRefParent):
child_data = models.IntegerField()
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
ordering = ('-pub_date', 'headline')
def __str__(self):
return self.headline
class ArticleWithAuthor(Article):
author = models.CharField(max_length=100)
class M2MBase(models.Model):
articles = models.ManyToManyField(Article)
class M2MChild(M2MBase):
name = models.CharField(max_length=50)
class Evaluation(Article):
quality = models.IntegerField()
class Meta:
abstract = True
class QualityControl(Evaluation):
assignee = models.CharField(max_length=50)
@python_2_unicode_compatible
class BaseM(models.Model):
base_name = models.CharField(max_length=100)
def __str__(self):
return self.base_name
@python_2_unicode_compatible
class DerivedM(BaseM):
customPK = models.IntegerField(primary_key=True)
derived_name = models.CharField(max_length=100)
def __str__(self):
return "PK = %d, base_name = %s, derived_name = %s" % (
self.customPK, self.base_name, self.derived_name)
class AuditBase(models.Model):
planned_date = models.DateField()
class Meta:
abstract = True
verbose_name_plural = 'Audits'
class CertificationAudit(AuditBase):
class Meta(AuditBase.Meta):
abstract = True
class InternalCertificationAudit(CertificationAudit):
auditing_dept = models.CharField(max_length=20)
# Check that abstract classes don't get m2m tables autocreated.
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class AbstractEvent(models.Model):
name = models.CharField(max_length=100)
attendees = models.ManyToManyField(Person, related_name="%(class)s_set")
class Meta:
abstract = True
ordering = ('name',)
def __str__(self):
return self.name
class BirthdayParty(AbstractEvent):
pass
class BachelorParty(AbstractEvent):
pass
class MessyBachelorParty(BachelorParty):
pass
# Check concrete -> abstract -> concrete inheritance
class SearchableLocation(models.Model):
keywords = models.CharField(max_length=256)
class Station(SearchableLocation):
name = models.CharField(max_length=128)
class Meta:
abstract = True
class BusStation(Station):
bus_routes = models.CommaSeparatedIntegerField(max_length=128)
inbound = models.BooleanField(default=False)
class TrainStation(Station):
zone = models.IntegerField()
class User(models.Model):
username = models.CharField(max_length=30, unique=True)
class Profile(User):
profile_id = models.AutoField(primary_key=True)
extra = models.CharField(max_length=30, blank=True)
# Check concrete + concrete -> concrete -> concrete
class Politician(models.Model):
politician_id = models.AutoField(primary_key=True)
title = models.CharField(max_length=50)
class Congressman(Person, Politician):
state = models.CharField(max_length=2)
class Senator(Congressman):
pass
| bsd-3-clause | -3,870,764,939,350,760,000 | 22.082677 | 93 | 0.704418 | false |
Adnn/django | tests/responses/tests.py | 226 | 4171 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.http import HttpResponse
from django.http.response import HttpResponseBase
from django.test import SimpleTestCase
UTF8 = 'utf-8'
ISO88591 = 'iso-8859-1'
class HttpResponseBaseTests(SimpleTestCase):
def test_closed(self):
r = HttpResponseBase()
self.assertIs(r.closed, False)
r.close()
self.assertIs(r.closed, True)
def test_write(self):
r = HttpResponseBase()
self.assertIs(r.writable(), False)
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance is not writable'):
r.write('asdf')
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance is not writable'):
r.writelines(['asdf\n', 'qwer\n'])
def test_tell(self):
r = HttpResponseBase()
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance cannot tell its position'):
r.tell()
def test_setdefault(self):
"""
HttpResponseBase.setdefault() should not change an existing header
and should be case insensitive.
"""
r = HttpResponseBase()
r['Header'] = 'Value'
r.setdefault('header', 'changed')
self.assertEqual(r['header'], 'Value')
r.setdefault('x-header', 'DefaultValue')
self.assertEqual(r['X-Header'], 'DefaultValue')
class HttpResponseTests(SimpleTestCase):
def test_status_code(self):
resp = HttpResponse(status=503)
self.assertEqual(resp.status_code, 503)
self.assertEqual(resp.reason_phrase, "Service Unavailable")
def test_change_status_code(self):
resp = HttpResponse()
resp.status_code = 503
self.assertEqual(resp.status_code, 503)
self.assertEqual(resp.reason_phrase, "Service Unavailable")
def test_reason_phrase(self):
reason = "I'm an anarchist coffee pot on crack."
resp = HttpResponse(status=814, reason=reason)
self.assertEqual(resp.status_code, 814)
self.assertEqual(resp.reason_phrase, reason)
def test_charset_detection(self):
""" HttpResponse should parse charset from content_type."""
response = HttpResponse('ok')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
response = HttpResponse(charset=ISO88591)
self.assertEqual(response.charset, ISO88591)
self.assertEqual(response['Content-Type'], 'text/html; charset=%s' % ISO88591)
response = HttpResponse(content_type='text/plain; charset=%s' % UTF8, charset=ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset=%s' % ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset="%s"' % ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset=')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
response = HttpResponse(content_type='text/plain')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
def test_response_content_charset(self):
"""HttpResponse should encode based on charset."""
content = "Café :)"
utf8_content = content.encode(UTF8)
iso_content = content.encode(ISO88591)
response = HttpResponse(utf8_content)
self.assertContains(response, utf8_content)
response = HttpResponse(iso_content, content_type='text/plain; charset=%s' % ISO88591)
self.assertContains(response, iso_content)
response = HttpResponse(iso_content)
self.assertContains(response, iso_content)
response = HttpResponse(iso_content, content_type='text/plain')
self.assertContains(response, iso_content)
def test_repr(self):
response = HttpResponse(content="Café :)".encode(UTF8), status=201)
expected = '<HttpResponse status_code=201, "text/html; charset=utf-8">'
self.assertEqual(repr(response), expected)
| bsd-3-clause | -5,156,841,790,988,676,000 | 35.570175 | 106 | 0.667546 | false |
0Chencc/CTFCrackTools | Lib/Lib/distutils/util.py | 6 | 22225 | """distutils.util
Miscellaneous utility functions -- anything that doesn't fit into
one of the other *util.py modules.
"""
__revision__ = "$Id: util.py 83588 2010-08-02 21:35:06Z ezio.melotti $"
import sys, os, string, re
from distutils.errors import DistutilsPlatformError
from distutils.dep_util import newer
from distutils.spawn import spawn
from distutils import log
from distutils.errors import DistutilsByteCompileError
if sys.platform.startswith('java'):
import _imp
def get_platform ():
"""Return a string that identifies the current platform. This is used
mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = string.find(sys.version, prefix)
if i == -1:
return sys.platform
j = string.find(sys.version, ")", i)
look = sys.version[i+len(prefix):j].lower()
if look=='amd64':
return 'win-amd64'
if look=='itanium':
return 'win-ia64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
(osname, host, release, version, machine) = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = string.lower(osname)
osname = string.replace(osname, '/', '')
machine = string.replace(machine, ' ', '_')
machine = string.replace(machine, '/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile (r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
from distutils.sysconfig import get_config_vars
cfgvars = get_config_vars()
macver = os.environ.get('MACOSX_DEPLOYMENT_TARGET')
if not macver:
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if 1:
# Always calculate the release of the running machine,
# needed to determine if we can build fat binaries or not.
macrelease = macver
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
m = re.search(
r'<key>ProductUserVisibleVersion</key>\s*' +
r'<string>(.*?)</string>', f.read())
f.close()
if m is not None:
macrelease = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
if not macver:
macver = macrelease
if macver:
from distutils.sysconfig import get_config_vars
release = macver
osname = "macosx"
if (macrelease + '.') >= '10.4.' and \
'-arch' in get_config_vars().get('CFLAGS', '').strip():
# The universal build will build fat binaries, but not on
# systems before 10.4
#
# Try to detect 4-way universal builds, those have machine-type
# 'universal' instead of 'fat'.
machine = 'fat'
cflags = get_config_vars().get('CFLAGS')
archs = re.findall('-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r"%(archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxint >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
machine = 'ppc'
# See 'i386' case
if sys.maxint >= 2**32:
machine = 'ppc64'
return "%s-%s-%s" % (osname, release, machine)
# get_platform ()
def convert_path (pathname):
"""Return 'pathname' as a name that will work on the native filesystem,
i.e. split it on '/' and put it back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError, "path '%s' cannot be absolute" % pathname
if pathname[-1] == '/':
raise ValueError, "path '%s' cannot end with '/'" % pathname
paths = string.split(pathname, '/')
while '.' in paths:
paths.remove('.')
if not paths:
return os.curdir
return os.path.join(*paths)
# convert_path ()
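# Illustrative example (not part of the original module): on a platform where
# os.sep is '\\', convert_path('pkg/data/file.txt') returns 'pkg\\data\\file.txt';
# on POSIX the pathname is returned unchanged.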
def change_root (new_root, pathname):
"""Return 'pathname' with 'new_root' prepended. If 'pathname' is
relative, this is equivalent to "os.path.join(new_root,pathname)".
Otherwise, it requires making 'pathname' relative and then joining the
two, which is tricky on DOS/Windows and Mac OS.
"""
os_name = os._name if sys.platform.startswith('java') else os.name
if os_name == 'posix':
if not os.path.isabs(pathname):
return os.path.join(new_root, pathname)
else:
return os.path.join(new_root, pathname[1:])
elif os_name == 'nt':
(drive, path) = os.path.splitdrive(pathname)
if path[0] == '\\':
path = path[1:]
return os.path.join(new_root, path)
elif os_name == 'os2':
(drive, path) = os.path.splitdrive(pathname)
if path[0] == os.sep:
path = path[1:]
return os.path.join(new_root, path)
elif os_name == 'mac':
if not os.path.isabs(pathname):
return os.path.join(new_root, pathname)
else:
# Chop off volume name from start of path
elements = string.split(pathname, ":", 1)
pathname = ":" + elements[1]
return os.path.join(new_root, pathname)
else:
raise DistutilsPlatformError, \
"nothing known about platform '%s'" % os_name
_environ_checked = 0
def check_environ ():
"""Ensure that 'os.environ' has all the environment variables we
guarantee that users can use in config files, command-line options,
etc. Currently this includes:
HOME - user's home directory (Unix only)
PLAT - description of the current platform, including hardware
and OS (see 'get_platform()')
"""
global _environ_checked
if _environ_checked:
return
if os.name == 'posix' and 'HOME' not in os.environ:
import pwd
os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
if 'PLAT' not in os.environ:
os.environ['PLAT'] = get_platform()
_environ_checked = 1
def subst_vars (s, local_vars):
"""Perform shell/Perl-style variable substitution on 'string'. Every
occurrence of '$' followed by a name is considered a variable, and
    the variable is substituted by the value found in the 'local_vars'
dictionary, or in 'os.environ' if it's not in 'local_vars'.
'os.environ' is first checked/augmented to guarantee that it contains
certain values: see 'check_environ()'. Raise ValueError for any
variables not found in either 'local_vars' or 'os.environ'.
"""
check_environ()
def _subst (match, local_vars=local_vars):
var_name = match.group(1)
if var_name in local_vars:
return str(local_vars[var_name])
else:
return os.environ[var_name]
try:
return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
except KeyError, var:
raise ValueError, "invalid variable '$%s'" % var
# subst_vars ()
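# Illustrative example (not part of the original module); 'prefix' is an
# arbitrary placeholder variable name:
#   >>> subst_vars('--prefix=$prefix', {'prefix': '/usr/local'})
#   '--prefix=/usr/local'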
def grok_environment_error (exc, prefix="error: "):
"""Generate a useful error message from an EnvironmentError (IOError or
OSError) exception object. Handles Python 1.5.1 and 1.5.2 styles, and
does what it can to deal with exception objects that don't have a
filename (which happens when the error is due to a two-file operation,
such as 'rename()' or 'link()'. Returns the error message as a string
prefixed with 'prefix'.
"""
# check for Python 1.5.2-style {IO,OS}Error exception objects
if hasattr(exc, 'filename') and hasattr(exc, 'strerror'):
if exc.filename:
error = prefix + "%s: %s" % (exc.filename, exc.strerror)
else:
# two-argument functions in posix module don't
# include the filename in the exception object!
error = prefix + "%s" % exc.strerror
else:
error = prefix + str(exc[-1])
return error
# Needed by 'split_quoted()'
_wordchars_re = _squote_re = _dquote_re = None
def _init_regex():
global _wordchars_re, _squote_re, _dquote_re
_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
def split_quoted (s):
"""Split a string up according to Unix shell-like rules for quotes and
backslashes. In short: words are delimited by spaces, as long as those
spaces are not escaped by a backslash, or inside a quoted string.
Single and double quotes are equivalent, and the quote characters can
be backslash-escaped. The backslash is stripped from any two-character
escape sequence, leaving only the escaped character. The quote
characters are stripped from any quoted string. Returns a list of
words.
"""
# This is a nice algorithm for splitting up a single string, since it
# doesn't require character-by-character examination. It was a little
# bit of a brain-bender to get it working right, though...
if _wordchars_re is None: _init_regex()
s = string.strip(s)
words = []
pos = 0
while s:
m = _wordchars_re.match(s, pos)
end = m.end()
if end == len(s):
words.append(s[:end])
break
if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
words.append(s[:end]) # we definitely have a word delimiter
s = string.lstrip(s[end:])
pos = 0
elif s[end] == '\\': # preserve whatever is being escaped;
# will become part of the current word
s = s[:end] + s[end+1:]
pos = end+1
else:
if s[end] == "'": # slurp singly-quoted string
m = _squote_re.match(s, end)
elif s[end] == '"': # slurp doubly-quoted string
m = _dquote_re.match(s, end)
else:
raise RuntimeError, \
"this can't happen (bad char '%c')" % s[end]
if m is None:
raise ValueError, \
"bad string (mismatched %s quotes?)" % s[end]
(beg, end) = m.span()
s = s[:beg] + s[beg+1:end-1] + s[end:]
pos = m.end() - 2
if pos >= len(s):
words.append(s)
break
return words
# split_quoted ()
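# Illustrative example (not part of the original module):
#   >>> split_quoted('this is "a single word"')
#   ['this', 'is', 'a single word']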
def execute (func, args, msg=None, verbose=0, dry_run=0):
"""Perform some action that affects the outside world (eg. by
writing to the filesystem). Such actions are special because they
are disabled by the 'dry_run' flag. This method takes care of all
that bureaucracy for you; all you have to do is supply the
function to call and an argument tuple for it (to embody the
"external action" being performed), and an optional message to
print.
"""
if msg is None:
msg = "%s%r" % (func.__name__, args)
if msg[-2:] == ',)': # correct for singleton tuple
msg = msg[0:-2] + ')'
log.info(msg)
if not dry_run:
func(*args)
def strtobool (val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
"""
val = string.lower(val)
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return 0
else:
raise ValueError, "invalid truth value %r" % (val,)
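# Illustrative example (not part of the original module):
#   >>> strtobool('Yes'), strtobool('off')
#   (1, 0)
# Any other value, e.g. strtobool('maybe'), raises ValueError.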
def byte_compile (py_files,
optimize=0, force=0,
prefix=None, base_dir=None,
verbose=1, dry_run=0,
direct=None):
"""Byte-compile a collection of Python source files to either .pyc
or .pyo files in the same directory. 'py_files' is a list of files
to compile; any files that don't end in ".py" are silently skipped.
'optimize' must be one of the following:
0 - don't optimize (generate .pyc)
1 - normal optimization (like "python -O")
2 - extra optimization (like "python -OO")
If 'force' is true, all files are recompiled regardless of
timestamps.
The source filename encoded in each bytecode file defaults to the
filenames listed in 'py_files'; you can modify these with 'prefix' and
'basedir'. 'prefix' is a string that will be stripped off of each
source filename, and 'base_dir' is a directory name that will be
prepended (after 'prefix' is stripped). You can supply either or both
(or neither) of 'prefix' and 'base_dir', as you wish.
If 'dry_run' is true, doesn't actually do anything that would
affect the filesystem.
Byte-compilation is either done directly in this interpreter process
with the standard py_compile module, or indirectly by writing a
temporary script and executing it. Normally, you should let
'byte_compile()' figure out to use direct compilation or not (see
the source for details). The 'direct' flag is used by the script
generated in indirect mode; unless you know what you're doing, leave
it set to None.
"""
# nothing is done if sys.dont_write_bytecode is True
if sys.dont_write_bytecode:
raise DistutilsByteCompileError('byte-compiling is disabled.')
# First, if the caller didn't force us into direct or indirect mode,
# figure out which mode we should be in. We take a conservative
# approach: choose direct mode *only* if the current interpreter is
# in debug mode and optimize is 0. If we're not in debug mode (-O
# or -OO), we don't know which level of optimization this
# interpreter is running with, so we can't do direct
# byte-compilation and be certain that it's the right thing. Thus,
# always compile indirectly if the current interpreter is in either
# optimize mode, or if either optimization level was requested by
# the caller.
if direct is None:
direct = (__debug__ and optimize == 0)
# "Indirect" byte-compilation: write a temporary script and then
# run it with the appropriate flags.
if not direct:
try:
from tempfile import mkstemp
(script_fd, script_name) = mkstemp(".py")
except ImportError:
from tempfile import mktemp
(script_fd, script_name) = None, mktemp(".py")
log.info("writing byte-compilation script '%s'", script_name)
if not dry_run:
if script_fd is not None:
script = os.fdopen(script_fd, "w")
else:
script = open(script_name, "w")
script.write("""\
from distutils.util import byte_compile
files = [
""")
# XXX would be nice to write absolute filenames, just for
# safety's sake (script should be more robust in the face of
# chdir'ing before running it). But this requires abspath'ing
# 'prefix' as well, and that breaks the hack in build_lib's
# 'byte_compile()' method that carefully tacks on a trailing
# slash (os.sep really) to make sure the prefix here is "just
# right". This whole prefix business is rather delicate -- the
# problem is that it's really a directory, but I'm treating it
# as a dumb string, so trailing slashes and so forth matter.
#py_files = map(os.path.abspath, py_files)
#if prefix:
# prefix = os.path.abspath(prefix)
script.write(string.join(map(repr, py_files), ",\n") + "]\n")
script.write("""
byte_compile(files, optimize=%r, force=%r,
prefix=%r, base_dir=%r,
verbose=%r, dry_run=0,
direct=1)
""" % (optimize, force, prefix, base_dir, verbose))
script.close()
cmd = [sys.executable, script_name]
if optimize == 1:
cmd.insert(1, "-O")
elif optimize == 2:
cmd.insert(1, "-OO")
spawn(cmd, dry_run=dry_run)
execute(os.remove, (script_name,), "removing %s" % script_name,
dry_run=dry_run)
# "Direct" byte-compilation: use the py_compile module to compile
# right here, right now. Note that the script generated in indirect
# mode simply calls 'byte_compile()' in direct mode, a weird sort of
# cross-process recursion. Hey, it works!
else:
import py_compile
for file in py_files:
if file[-3:] != ".py":
# This lets us be lazy and not filter filenames in
# the "install_lib" command.
continue
# Terminology from the py_compile module:
# cfile - byte-compiled file
# dfile - purported source filename (same as 'file' by default)
if sys.platform.startswith('java'):
cfile = _imp.makeCompiledFilename(file)
else:
cfile = file + (__debug__ and "c" or "o")
dfile = file
if prefix:
if file[:len(prefix)] != prefix:
raise ValueError, \
("invalid prefix: filename %r doesn't start with %r"
% (file, prefix))
dfile = dfile[len(prefix):]
if base_dir:
dfile = os.path.join(base_dir, dfile)
cfile_base = os.path.basename(cfile)
if direct:
if force or newer(file, cfile):
log.info("byte-compiling %s to %s", file, cfile_base)
if not dry_run:
py_compile.compile(file, cfile, dfile)
else:
log.debug("skipping byte-compilation of %s to %s",
file, cfile_base)
# byte_compile ()
def rfc822_escape (header):
"""Return a version of the string escaped for inclusion in an
    RFC-822 header, by ensuring there are 8 spaces after each newline.
"""
lines = string.split(header, '\n')
header = string.join(lines, '\n' + 8*' ')
return header
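# Illustrative example (not part of the original module): a two-line description
# 'first line\nsecond line' becomes 'first line\n        second line', i.e. the
# continuation line is indented by eight spaces so it folds as one RFC-822 header.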
| gpl-3.0 | -8,828,049,446,129,417,000 | 37.187285 | 79 | 0.576828 | false |
chrisdev/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/pages/migrations/0014_auto_20170817_1705.py | 2 | 2045 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-17 17:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0019_delete_filter'),
('wagtailcore', '0039_collectionviewrestriction'),
('wagtailsearchpromotions', '0002_capitalizeverbose'),
('wagtailredirects', '0005_capitalizeverbose'),
('wagtailforms', '0003_capitalizeverbose'),
('pages', '0013_videopage_videopagecarouselitem'),
]
operations = [
migrations.CreateModel(
name='VideoGalleryPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.RenameModel(
old_name='VideoPageCarouselItem',
new_name='VideoGalleryPageCarouselItem',
),
migrations.RemoveField(
model_name='videopage',
name='feed_image',
),
migrations.RemoveField(
model_name='videopage',
name='page_ptr',
),
migrations.AlterField(
model_name='videogallerypagecarouselitem',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='carousel_items', to='pages.VideoGalleryPage'),
),
migrations.DeleteModel(
name='VideoPage',
),
]
| mit | 1,442,260,757,259,978,500 | 36.181818 | 191 | 0.607824 | false |
alanlhutchison/empirical-JTK_CYCLE-with-asymmetry | previous_files/jtk7.py | 1 | 19118 | #!/usr/bin/env python
"""
Created on April 20 2014
@author: Alan L. Hutchison, [email protected], Aaron R. Dinner Group, University of Chicago
This script is one in a series of scripts for running empirical JTK_CYCLE analysis as described in
Hutchison, Maienschein-Cline, and Chiang et al. Improved statistical methods enable greater sensitivity in rhythm detection for genome-wide data, PLoS Computational Biology 2015 11(3): e 1004094. doi:10.1371/journal.pcbi.1004094
Please use ./jtk7.py -h to see the help screen for further instructions on running this script.
"""
VERSION="1.1"
from scipy.stats import kendalltau
from operator import itemgetter
import numpy as np
import sys
import argparse
import itertools as it
import time
#import matplotlib.pyplot as plt
#import matplotlib.cm as cm
from scipy.stats import norm
import os.path
def main(args):
fn = args.filename
prefix = args.prefix
fn_waveform = args.waveform
fn_period = args.period
fn_phase = args.phase
fn_width = args.width
fn_out = args.output
if fn_out == "DEFAULT":
if ".txt" in fn:
fn_out=fn.replace(".txt","_"+prefix+"_jtkout.txt")
else:
fn_out = fn+"_" +prefix + "_jtkout.txt"
print fn
add_on = 1
while os.path.isfile(fn_out):
print fn_out, "already exists, take evasive action!!!"
endstr = '.'+fn_out.split('.')[-1]
mid = '_'+str(add_on)+endstr
if add_on ==1:
fn_out = fn_out.replace(endstr,mid)
else:
midendstr = '_'+fn_out.split('_')[-1]
fn_out = fn_out.replace(midendstr,mid)
add_on = add_on + 1
waveforms = read_in_list(fn_waveform)
periods = read_in_list(fn_period)
phases = read_in_list(fn_phase)
widths = read_in_list(fn_width)
#fn_out = "\t".join(fn.replace("jtkprepared","").split(".")[0:-1])+"_jtkout_emprical.txt"
header,data = read_in(fn)
header,series = organize_data(header,data)
RealKen = KendallTauP()
#output = ["ID\tWaveform\tPeriod\tPhase\tAsymmetry\tMean\tStd_Dev\tMax\tMin\tMax_Amp\tFC\tIQR_FC\tTau\tempP"]
Ps = []
with open(fn_out,'w') as g:
g.write("ID\tWaveform\tPeriod\tPhase\tAsymmetry\tMean\tStd_Dev\tMax\tMaxLoc\tMin\tMinLoc\tMax_Amp\tFC\tIQR_FC\tTau\tP\n")
for serie in series:
if [s for s in serie[1:] if s!="NA"]==[]:
name = [serie[0]]+["All_NA"]+[-10000]*10+[np.nan,np.nan]
else:
mmax,mmaxloc,mmin,mminloc,MAX_AMP=series_char(serie,header)
sIQR_FC=IQR_FC(serie)
smean = series_mean(serie)
sstd = series_std(serie)
sFC = FC(serie)
local_ps = []
for waveform in waveforms:
for period in periods:
for phase in phases:
for width in widths:
reference = generate_base_reference(header,waveform,period,phase,width)
geneID,tau,p = generate_mod_series(reference,serie,RealKen)
out_line = [geneID,waveform,period,phase,width,smean,sstd,mmax,mmaxloc,mmin,mminloc,MAX_AMP,sFC,sIQR_FC,tau,p]
out_line = [str(l) for l in out_line]
g.write("\t".join(out_line)+"\n")
#local_ps = sorted(local_ps)
#best = min(local_ps)
#Ps.append(best)
#append_out(fn_out,best)
#name = [geneID,waveform,period,phase,width,smean,sstd,mmax,mmin,MAX_AMP,sFC,sIQR_FC,tau,empirical_p]
#name = [str(n) for n in name]
#print "\t".join(name)
#print time.asctime( time.localtime(time.time()) )
#output.append("\t".join(name))
#write_out(fn_out,Ps)
def append_out(fn_out,line):
line = [str(l) for l in line]
with open(fn_out,'a') as g:
g.write("\t".join(line)+"\n")
def write_out(fn_out,output):
with open(fn_out,'w') as g:
for line in output:
g.write(str(line)+"\n")
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def read_in_list(fn):
with open(fn,'r') as f:
lines = f.read().splitlines()
return lines
def read_in(fn):
"""Read in data to header and data"""
with open(fn,'r') as f:
data=[]
start_right=0
for line in f:
words = line.strip().split()
words = [word.strip() for word in words]
if words[0] == "#":
start_right = 1
header = words[1:]
else:
if start_right == 0:
print "Please enter file with header starting with #"
elif start_right == 1:
data.append(words)
return header, data
def organize_data(header,data):
"""
    Organize the list of lists such that genes with similar time-series holes match (for null distribution calc).
Return a header ['#','ZTX','ZTY'...] and a list of lists [ lists with similar holes (identical null distribution) , [],[],[]]
"""
L = data
for i in xrange(1,len(header)):
L=sorted(L, key=itemgetter(i))
return header,L
def generate_base_reference(header,waveform="cosine",period=24,phase=0,width=12):
"""
    This will generate a waveform with a given phase and period based on the header.
"""
tpoints = []
ZTs = header
coef = 2.0 * np.pi / float(period)
w = float(width) * coef
for ZT in ZTs:
z = ZT[2:].split("_")[0]
tpoints.append( (float(z)-float(phase) ) * coef)
def trough(x,w):
x = x % (2*np.pi)
w = w % (2*np.pi)
if x <= w:
y = 1 + -x/w
elif x > w:
y = (x-w)/(2*np.pi - w)
return y
def cosine(x,w):
x = x % (2*np.pi)
w = w % (2*np.pi)
if x <= w:
y = np.cos(x/(w/np.pi))
elif x > w:
y = np.cos( (x+2.*(np.pi-w))*np.pi/ (2*np.pi - w) )
return y
if waveform == "cosine":
reference=[cosine(tpoint,w) for tpoint in tpoints]
elif waveform == "trough":
reference=[trough(tpoint,w) for tpoint in tpoints]
return reference
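# Illustrative call (hypothetical header; real waveforms, periods, phases and
# widths come from the files passed on the command line):
#   generate_base_reference(['ZT0', 'ZT4', 'ZT8', 'ZT12', 'ZT16', 'ZT20'],
#                           waveform='cosine', period=24, phase=0, width=12)
# returns the reference curve sampled at those six times; a width of half the
# period gives a symmetric cosine, while smaller widths skew the peak to model
# asymmetric waveforms.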
def IQR_FC(series):
qlo = __score_at_percentile__(series, 25)
qhi = __score_at_percentile__(series, 75)
if (qlo=="NA" or qhi=="NA"):
return "NA"
elif (qhi==0):
return 0
elif ( qlo==0):
return "NA"
else:
iqr = qhi/qlo
return iqr
def FC(series):
series=[float(s) if s!="NA" else 0 for s in series[1:] if s!="NA" ]
if series!=[]:
mmax = max(series)
mmin = min(series)
if mmin==0:
sFC = -10000
else:
sFC = mmax / mmin
else:
sFC = "NA"
return sFC
def series_char(fullseries,header):
"""Uses interquartile range to estimate amplitude of a time series."""
series=[float(s) for s in fullseries[1:] if s!="NA"]
head = [header[i] for i,s in enumerate(fullseries[1:]) if s!="NA"]
if series!=[]:
mmax = max(series)
#print series.index(mmax)
mmaxloc = head[series.index(mmax)]
mmin = min(series)
#print series.index(mmin)
mminloc = head[series.index(mmin)]
diff=mmax-mmin
else:
mmax = "NA"
mmaxloc = "NA"
mmin = "NA"
mminloc = "NA"
diff = "NA"
return mmax,mmaxloc,mmin,mminloc,diff
def series_mean(series):
"""Finds the mean of a timeseries"""
series = [float(s) for s in series[1:] if s!="NA"]
return np.mean(series)
def series_std(series):
"""Finds the std dev of a timeseries"""
series = [float(s) for s in series[1:] if s!="NA"]
return np.std(series)
def __score_at_percentile__(ser, per):
ser = [float(se) for se in ser[1:] if se!="NA"]
if len(ser)<5:
score ="NA"
return score
else:
ser = np.sort(ser)
i = (per/100. * len(ser))
if (i % 1 == 0):
score = ser[i]
else:
interpolate = lambda a,b,frac: a + (b - a)*frac
score = interpolate(ser[int(i)], ser[int(i) + 1], i % 1)
return float(score)
def generate_mod_series(reference,series,RealKen):
"""
Takes the series from generate_base_null, takes the list from data, and makes a null
for each gene in data or uses the one previously calculated.
Then it runs Kendall's Tau on the exp. series against the null
"""
geneID = series[0]
values = series[1:]
binary = np.array([1.0 if value!="NA" else np.nan for value in values])
reference = np.array(reference)
temp = reference*binary
mod_reference = [value for value in temp if not np.isnan(value)]
mod_values = [float(value) for value in values if value!='NA']
if len(mod_values) < 3:
tau,p = np.nan,np.nan
elif mod_values.count(np.nan) == len(mod_values):
tau,p = np.nan,np.nan
elif mod_values.count(0) == len(mod_values):
tau,p = np.nan,np.nan
else:
tau,p=kendalltau(mod_values,mod_reference)
if not np.isnan(tau):
if len(mod_values) < 150:
pk = RealKen.pval(tau,len(mod_values))
if pk is not None:
p=pk
else:
p = p / 2.0
if tau < 0:
p = 1-p
return geneID,tau,p
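# Illustrative call (hypothetical gene series; RealKen is the precomputed
# KendallTauP instance built in main()):
#   generate_mod_series([1.0, 0.5, -0.5, -1.0],
#                       ['geneA', '3.2', 'NA', '1.1', '0.4'], RealKen)
# drops the 'NA' time point from both the data and the reference, runs Kendall's
# tau on the remaining pairs, and swaps in the exact one-tailed null distribution
# whenever fewer than 150 points are available.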
def __create_parser__():
p = argparse.ArgumentParser(
description="Python script for running empirical JTK_CYCLE with asymmetry search as described in Hutchison, Maienschein-Cline, and Chiang et al. Improved statistical methods enable greater sensitivity in rhythm detection for genome-wide data, PLoS Computational Biology 2015 11(3): e1004094. This script was written by Alan L. Hutchison, [email protected], Aaron R. Dinner Group, University of Chicago.",
epilog="Please contact the correpsonding author if you have any questions.",
version=VERSION
)
#p.add_argument("-t", "--test",
# action='store_true',
# default=False,
# help="run the Python unittest testing suite")
p.add_argument("-o", "--output",
dest="output",
action='store',
metavar="filename string",
type=str,
default = "DEFAULT",
help="You want to output something. If you leave this blank, _jtkout.txt will be appended to your filename")
analysis = p.add_argument_group(title="JTK_CYCLE analysis options")
analysis.add_argument("-f", "--filename",
dest="filename",
action='store',
metavar="filename string",
type=str,
help='This is the filename of the data series you wish to analyze.\
                          The data should be tab-spaced. The first row should contain a # sign followed by the time points with either CT or ZT preceding the time point (such as ZT0 or ZT4). Longer or shorter prefixes will not work. The following rows should contain the gene/series ID followed by the values for every time point. Where values are not available NA should be put in its place.')
analysis.add_argument("-x","--prefix",
dest="prefix",
type=str,
metavar="string",
action='store',
default="",
help="string to be inserted in the output filename for this run")
analysis.add_argument("--waveform",
dest="waveform",
type=str,
metavar="filename string",
action='store',
default="cosine",
#choices=["waveform_cosine.txt","waveform_rampup.txt","waveform_rampdown.txt","waveform_step.txt","waveform_impulse.txt","waveform_trough.txt"],
help='Should be a file with waveforms you wish to search for listed in a single column separated by newlines.\
Options include cosine (dflt), trough')
analysis.add_argument("-w", "--width", "-a", "--asymmetry",
dest="width",
type=str,
metavar="filename string",
action='store',
default="widths_02-22.txt",
#choices=["widths_02-22.txt","widths_04-20_by4.txt","widths_04-12-20.txt","widths_08-16.txt","width_12.txt"]
help='Should be a file with asymmetries (widths) you wish to search for listed in a single column separated by newlines.\
Provided files include files like "widths_02-22.txt","widths_04-20_by4.txt","widths_04-12-20.txt","widths_08-16.txt","width_12.txt"\nasymmetries=widths')
analysis.add_argument("-ph", "--phase",
dest="phase",
metavar="filename string",
type=str,
default="phases_00-22_by2.txt",
help='Should be a file with phases you wish to search for listed in a single column separated by newlines.\
Example files include "phases_00-22_by2.txt" or "phases_00-22_by4.txt" or "phases_00-20_by4.txt"')
analysis.add_argument("-p","--period",
dest="period",
metavar="filename string",
type=str,
action='store',
default="period_24.txt",
                          help='Should be a file with periods you wish to search for listed in a single column separated by newlines.\
Provided file is "period_24.txt"')
distribution = analysis.add_mutually_exclusive_group(required=False)
distribution.add_argument("-e", "--exact",
dest="harding",
action='store_true',
default=False,
help="use Harding's exact null distribution (dflt)")
distribution.add_argument("-n", "--normal",
dest="normal",
action='store_true',
default=False,
help="use normal approximation to null distribution")
return p
# instantiate class to precalculate distribution
# usage:
# K = KendallTauP()
# pval = K.pval(tau,n,two_tailed=True)
class KendallTauP:
def __init__(self,N=150):
# largest number of samples to precompute
self.N = N
Nint = self.N*(self.N-1)/2
# first allocate freq slots for largest sample array
# as we fill this in we'll save the results for smaller samples
# total possible number of inversions is Nint + 1
freqN = np.zeros(Nint + 1)
freqN[0] = 1.0
# save results at each step in freqs array
self.freqs = [np.array([1.0])]
for i in xrange(1,self.N):
last = np.copy(freqN)
for j in xrange(Nint+1):
# update each entry by summing over i entries to the left
freqN[j] += sum(last[max(0,j-i):j])
# copy current state into freqs array
# the kth entry of freqs should have 1+k*(k-1)/2 entries
self.freqs.append(np.copy(freqN[0:(1+(i+1)*i/2)]))
# turn freqs into cdfs
# distributions still with respect to number of inversions
self.cdfs = []
for i in xrange(self.N):
self.cdfs.append(np.copy(self.freqs[i]))
# turn into cumulative frequencies
for j in xrange(1,len(self.freqs[i])):
self.cdfs[i][j] += self.cdfs[i][j-1]
# convert freqs to probs
self.cdfs[i] = self.cdfs[i]/sum(self.freqs[i])
# plot exact distribution compared to normal approx
def plot(self,nlist):
colors = cm.Set1(np.linspace(0,1,len(nlist)))
# for plotting gaussian
x = np.linspace(-1.2,1.2,300)
# plot pdfs
plt.figure()
for i in xrange(len(nlist)):
ntot = len(self.freqs[nlist[i]-1])-1
tauvals = (ntot - 2.0*np.arange(len(self.freqs[nlist[i]-1])))/ntot
probs = ((ntot+1.0)/2.0)*self.freqs[nlist[i]-1]/sum(self.freqs[nlist[i]-1])
plt.scatter(tauvals,probs,color=colors[i])
# now plot gaussian comparison
var = 2.0*(2.0*nlist[i]+5.0)/(nlist[i]*(nlist[i]-1)*9.0)
plt.plot(x,norm.pdf(x,0.0,np.sqrt(var)),color=colors[i])
plt.legend(nlist,loc='best')
# plt.savefig('pdfs.png')
plt.show()
# now plot cdfs
plt.figure()
for i in xrange(len(nlist)):
ntot = len(self.freqs[nlist[i]-1])-1
tauvals = -1.0*(ntot - 2.0*np.arange(len(self.freqs[nlist[i]-1])))/ntot
probs = self.cdfs[nlist[i]-1]
plt.scatter(tauvals,probs,color=colors[i])
# now plot gaussian comparison
var = 2.0*(2.0*nlist[i]+5.0)/(nlist[i]*(nlist[i]-1)*9.0)
plt.plot(x,norm.cdf(x,0.0,np.sqrt(var)),color=colors[i])
plt.legend(nlist,loc='best')
# plt.savefig('cdfs.png')
plt.show()
# use cdfs to return pval
# default to return two tailed pval
def pval(self,tau,n,two_tailed=False):
# enforce tau is between -1 and 1
if tau <= -1.000001 or tau >= 1.000001:
sys.stderr.write(str(type(tau))+"\n")
sys.stderr.write(str(tau)+"\n")
sys.stderr.write("invalid tau\n")
#print 'invalid tau'
return None
# enforce n is less than our precomputed quantities
if n > self.N:
#print 'n is too large'
sys.stderr.write("n is too large/n")
return None
# convert tau to value in terms of number of inversions
ntot = n*(n-1)/2
inv_score = int(round((ntot - tau * ntot)/2.0))
# I'm a little worried about the precision of this,
# but probably not enough to be really worried for reasonable n
# since we really only need precision to resolve ntot points
# if two tailed, we're getting a tail from a symmetric dist
min_inv_score = min(inv_score,ntot-inv_score)
if two_tailed:
pval = self.cdfs[n-1][min_inv_score]*2.0
else:
# if one tailed return prob of getting that or fewer inversions
pval = self.cdfs[n-1][inv_score]
# if inv_score is 0, might have larger than 0.5 prob
return min(pval,1.0)
if __name__=="__main__":
parser = __create_parser__()
args = parser.parse_args()
main(args)
| mit | -671,654,815,076,224,900 | 36.782609 | 422 | 0.546187 | false |
blooparksystems/odoo | addons/mass_mailing/controllers/main.py | 18 | 1211 | # -*- coding: utf-8 -*-
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
class MassMailController(http.Controller):
@http.route('/mail/track/<int:mail_id>/blank.gif', type='http', auth='none')
def track_mail_open(self, mail_id, **post):
""" Email tracking. """
mail_mail_stats = request.registry.get('mail.mail.statistics')
mail_mail_stats.set_opened(request.cr, SUPERUSER_ID, mail_mail_ids=[mail_id])
response = werkzeug.wrappers.Response()
response.mimetype = 'image/gif'
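        # The base64 payload below decodes to a 1x1 GIF: the tracking pixel
        # whose download marks the mail as opened via set_opened() above.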
response.data = 'R0lGODlhAQABAIAAANvf7wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=='.decode('base64')
return response
@http.route('/r/<string:code>/m/<int:stat_id>', type='http', auth="none")
def full_url_redirect(self, code, stat_id, **post):
cr, uid, context = request.cr, request.uid, request.context
request.registry['link.tracker.click'].add_click(cr, uid, code, request.httprequest.remote_addr, request.session['geoip'].get('country_code'), stat_id=stat_id, context=context)
return werkzeug.utils.redirect(request.registry['link.tracker'].get_url_from_code(cr, uid, code, context=context), 301)
| gpl-3.0 | 2,881,848,456,850,798,000 | 45.576923 | 184 | 0.685384 | false |
benpatterson/edx-platform | common/djangoapps/track/migrations/0001_initial.py | 189 | 2527 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TrackingLog'
db.create_table('track_trackinglog', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('dtcreated', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('username', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('ip', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('event_source', self.gf('django.db.models.fields.CharField')(max_length=32)),
('event_type', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('event', self.gf('django.db.models.fields.TextField')(blank=True)),
('agent', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
('page', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('time', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('track', ['TrackingLog'])
def backwards(self, orm):
# Deleting model 'TrackingLog'
db.delete_table('track_trackinglog')
models = {
'track.trackinglog': {
'Meta': {'object_name': 'TrackingLog'},
'agent': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'event_source': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'page': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
}
}
complete_apps = ['track']
| agpl-3.0 | -953,182,245,877,072,800 | 51.645833 | 117 | 0.582905 | false |
happyleavesaoc/home-assistant | tests/components/camera/test_init.py | 15 | 3572 | """The tests for the camera component."""
import asyncio
from unittest.mock import patch
import pytest
from homeassistant.setup import setup_component
from homeassistant.const import ATTR_ENTITY_PICTURE
import homeassistant.components.camera as camera
import homeassistant.components.http as http
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util.async import run_coroutine_threadsafe
from tests.common import (
get_test_home_assistant, get_test_instance_port, assert_setup_component)
class TestSetupCamera(object):
"""Test class for setup camera."""
def setup_method(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_component(self):
"""Setup demo platfrom on camera component."""
config = {
camera.DOMAIN: {
'platform': 'demo'
}
}
with assert_setup_component(1, camera.DOMAIN):
setup_component(self.hass, camera.DOMAIN, config)
class TestGetImage(object):
"""Test class for camera."""
def setup_method(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
setup_component(
self.hass, http.DOMAIN,
{http.DOMAIN: {http.CONF_SERVER_PORT: get_test_instance_port()}})
config = {
camera.DOMAIN: {
'platform': 'demo'
}
}
setup_component(self.hass, camera.DOMAIN, config)
state = self.hass.states.get('camera.demo_camera')
self.url = "{0}{1}".format(
self.hass.config.api.base_url,
state.attributes.get(ATTR_ENTITY_PICTURE))
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
@patch('homeassistant.components.camera.demo.DemoCamera.camera_image',
autospec=True, return_value=b'Test')
def test_get_image_from_camera(self, mock_camera):
"""Grab a image from camera entity."""
self.hass.start()
image = run_coroutine_threadsafe(camera.async_get_image(
self.hass, 'camera.demo_camera'), self.hass.loop).result()
assert mock_camera.called
assert image == b'Test'
def test_get_image_without_exists_camera(self):
"""Try to get image without exists camera."""
self.hass.states.remove('camera.demo_camera')
with pytest.raises(HomeAssistantError):
run_coroutine_threadsafe(camera.async_get_image(
self.hass, 'camera.demo_camera'), self.hass.loop).result()
def test_get_image_with_timeout(self, aioclient_mock):
"""Try to get image with timeout."""
aioclient_mock.get(self.url, exc=asyncio.TimeoutError())
with pytest.raises(HomeAssistantError):
run_coroutine_threadsafe(camera.async_get_image(
self.hass, 'camera.demo_camera'), self.hass.loop).result()
assert len(aioclient_mock.mock_calls) == 1
def test_get_image_with_bad_http_state(self, aioclient_mock):
"""Try to get image with bad http status."""
aioclient_mock.get(self.url, status=400)
with pytest.raises(HomeAssistantError):
run_coroutine_threadsafe(camera.async_get_image(
self.hass, 'camera.demo_camera'), self.hass.loop).result()
assert len(aioclient_mock.mock_calls) == 1
| apache-2.0 | -237,461,647,704,012,670 | 32.383178 | 77 | 0.636618 | false |
quinot/ansible | lib/ansible/modules/network/cloudengine/ce_switchport.py | 20 | 27667 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_switchport
version_added: "2.4"
short_description: Manages Layer 2 switchport interfaces on HUAWEI CloudEngine switches.
description:
- Manages Layer 2 switchport interfaces on HUAWEI CloudEngine switches.
author: QijunPan (@CloudEngine-Ansible)
notes:
- When C(state=absent), VLANs can be added/removed from trunk links and
the existing access VLAN can be 'unconfigured' to just having VLAN 1
on that interface.
- When working with trunks VLANs the keywords add/remove are always sent
in the C(port trunk allow-pass vlan) command. Use verbose mode to see
commands sent.
    - When C(state=unconfigured), the interface will result in having a default
Layer 2 interface, i.e. vlan 1 in access mode.
options:
interface:
description:
            - Full name of the interface, e.g. 40GE1/0/22.
required: true
default: null
mode:
description:
- The link type of an interface.
required: false
default: null
choices: ['access','trunk']
access_vlan:
description:
- If C(mode=access), used as the access VLAN ID, in the range from 1 to 4094.
required: false
default: null
native_vlan:
description:
- If C(mode=trunk), used as the trunk native VLAN ID, in the range from 1 to 4094.
required: false
default: null
trunk_vlans:
description:
- If C(mode=trunk), used as the VLAN range to ADD or REMOVE
from the trunk, such as 2-10 or 2,5,10-15, etc.
required: false
default: null
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present', 'absent', 'unconfigured']
'''
EXAMPLES = '''
- name: switchport module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Ensure 10GE1/0/22 is in its default switchport state
ce_switchport:
interface: 10GE1/0/22
state: unconfigured
provider: '{{ cli }}'
- name: Ensure 10GE1/0/22 is configured for access vlan 20
ce_switchport:
interface: 10GE1/0/22
mode: access
access_vlan: 20
provider: '{{ cli }}'
- name: Ensure 10GE1/0/22 only has vlans 5-10 as trunk vlans
ce_switchport:
interface: 10GE1/0/22
mode: trunk
native_vlan: 10
trunk_vlans: 5-10
provider: '{{ cli }}'
- name: Ensure 10GE1/0/22 is a trunk port and ensure 2-50 are being tagged (doesn't mean others aren't also being tagged)
ce_switchport:
interface: 10GE1/0/22
mode: trunk
native_vlan: 10
trunk_vlans: 2-50
provider: '{{ cli }}'
- name: Ensure these VLANs are not being tagged on the trunk
ce_switchport:
interface: 10GE1/0/22
mode: trunk
trunk_vlans: 51-4000
state: absent
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"access_vlan": "20", "interface": "10GE1/0/22", "mode": "access"}
existing:
description: k/v pairs of existing switchport
returned: always
type: dict
sample: {"access_vlan": "10", "interface": "10GE1/0/22",
"mode": "access", "switchport": "enable"}
end_state:
description: k/v pairs of switchport after module execution
returned: always
type: dict
sample: {"access_vlan": "20", "interface": "10GE1/0/22",
"mode": "access", "switchport": "enable"}
updates:
description: command string sent to the device
returned: always
type: list
sample: ["10GE1/0/22", "port default vlan 20"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_INTF = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName>%s</ifName>
<isL2SwitchPort></isL2SwitchPort>
</interface>
</interfaces>
</ifm>
</filter>
"""
CE_NC_GET_PORT_ATTR = """
<filter type="subtree">
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf>
<ifName>%s</ifName>
<l2Enable></l2Enable>
<l2Attribute>
<linkType></linkType>
<pvid></pvid>
<trunkVlans></trunkVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
</filter>
"""
CE_NC_SET_ACCESS_PORT = """
<config>
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>access</linkType>
<pvid>%s</pvid>
<trunkVlans></trunkVlans>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
</config>
"""
CE_NC_SET_TRUNK_PORT_MODE = """
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>trunk</linkType>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
"""
CE_NC_SET_TRUNK_PORT_PVID = """
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>trunk</linkType>
<pvid>%s</pvid>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
"""
CE_NC_SET_TRUNK_PORT_VLANS = """
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>trunk</linkType>
<trunkVlans>%s:%s</trunkVlans>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
"""
CE_NC_SET_DEFAULT_PORT = """
<config>
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>access</linkType>
<pvid>1</pvid>
<trunkVlans></trunkVlans>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
</config>
"""
SWITCH_PORT_TYPE = ('ge', '10ge', '25ge',
'4x10ge', '40ge', '100ge', 'eth-trunk')
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
iftype = 'loopback'
elif interface.upper().startswith('METH'):
iftype = 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
iftype = 'vbdif'
elif interface.upper().startswith('NVE'):
iftype = 'nve'
elif interface.upper().startswith('TUNNEL'):
iftype = 'tunnel'
elif interface.upper().startswith('ETHERNET'):
iftype = 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
iftype = 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
iftype = 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
iftype = 'stack-port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
def is_portswitch_enalbed(iftype):
""""[undo] portswitch"""
return bool(iftype in SWITCH_PORT_TYPE)
def vlan_bitmap_undo(bitmap):
"""convert vlan bitmap to undo bitmap"""
vlan_bit = ['F'] * 1024
if not bitmap or len(bitmap) == 0:
return ''.join(vlan_bit)
bit_len = len(bitmap)
for num in range(bit_len):
undo = (~int(bitmap[num], 16)) & 0xF
vlan_bit[num] = hex(undo)[2]
return ''.join(vlan_bit)
def is_vlan_bitmap_empty(bitmap):
"""check vlan bitmap empty"""
if not bitmap or len(bitmap) == 0:
return True
bit_len = len(bitmap)
for num in range(bit_len):
if bitmap[num] != '0':
return False
return True
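# Note on the bitmap format used below: the device exchanges trunk VLAN sets as
# 1024-character hex strings, one nibble per four consecutive VLAN IDs (4 x 1024
# = 4096 bits), with the highest bit of a nibble standing for the lowest VLAN ID
# in its group. For example (hypothetical value), a bitmap of '4' followed by
# 1023 '0' characters marks only VLAN 1 as tagged; an all-'0' bitmap is empty.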
class SwitchPort(object):
"""
Manages Layer 2 switchport interfaces.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# interface and vlan info
self.interface = self.module.params['interface']
self.mode = self.module.params['mode']
self.state = self.module.params['state']
self.access_vlan = self.module.params['access_vlan']
self.native_vlan = self.module.params['native_vlan']
self.trunk_vlans = self.module.params['trunk_vlans']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
self.intf_info = dict() # interface vlan info
self.intf_type = None # loopback tunnel ...
def init_module(self):
""" init module """
required_if = [('state', 'absent', ['mode']), ('state', 'present', ['mode'])]
self.module = AnsibleModule(
argument_spec=self.spec, required_if=required_if, supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def get_interface_dict(self, ifname):
""" get one interface attributes dict."""
intf_info = dict()
conf_str = CE_NC_GET_PORT_ATTR % ifname
rcv_xml = get_nc_config(self.module, conf_str)
if "<data/>" in rcv_xml:
return intf_info
intf = re.findall(
r'.*<ifName>(.*)</ifName>.*\s*<l2Enable>(.*)</l2Enable>.*', rcv_xml)
if intf:
intf_info = dict(ifName=intf[0][0],
l2Enable=intf[0][1],
linkType="",
pvid="",
trunkVlans="")
if intf_info["l2Enable"] == "enable":
attr = re.findall(
r'.*<linkType>(.*)</linkType>.*.*\s*<pvid>(.*)'
r'</pvid>.*\s*<trunkVlans>(.*)</trunkVlans>.*', rcv_xml)
if attr:
intf_info["linkType"] = attr[0][0]
intf_info["pvid"] = attr[0][1]
intf_info["trunkVlans"] = attr[0][2]
return intf_info
def is_l2switchport(self):
"""Check layer2 switch port"""
return bool(self.intf_info["l2Enable"] == "enable")
def merge_access_vlan(self, ifname, access_vlan):
"""Merge access interface vlan"""
change = False
conf_str = ""
self.updates_cmd.append("interface %s" % ifname)
if self.state == "present":
if self.intf_info["linkType"] == "access":
if access_vlan and self.intf_info["pvid"] != access_vlan:
self.updates_cmd.append(
"port default vlan %s" % access_vlan)
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, access_vlan)
change = True
else: # not access
self.updates_cmd.append("port link-type access")
if access_vlan:
self.updates_cmd.append(
"port default vlan %s" % access_vlan)
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, access_vlan)
else:
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, "1")
change = True
elif self.state == "absent":
if self.intf_info["linkType"] == "access":
if access_vlan and self.intf_info["pvid"] == access_vlan and access_vlan != "1":
self.updates_cmd.append(
"undo port default vlan %s" % access_vlan)
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, "1")
change = True
else: # not access
self.updates_cmd.append("port link-type access")
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, "1")
change = True
if not change:
self.updates_cmd.pop() # remove interface
return
rcv_xml = set_nc_config(self.module, conf_str)
self.check_response(rcv_xml, "MERGE_ACCESS_PORT")
self.changed = True
def merge_trunk_vlan(self, ifname, native_vlan, trunk_vlans):
"""Merge trunk interface vlan"""
change = False
xmlstr = ""
self.updates_cmd.append("interface %s" % ifname)
if trunk_vlans:
vlan_list = self.vlan_range_to_list(trunk_vlans)
vlan_map = self.vlan_list_to_bitmap(vlan_list)
if self.state == "present":
if self.intf_info["linkType"] == "trunk":
if native_vlan and self.intf_info["pvid"] != native_vlan:
self.updates_cmd.append(
"port trunk pvid vlan %s" % native_vlan)
xmlstr += CE_NC_SET_TRUNK_PORT_PVID % (ifname, native_vlan)
change = True
if trunk_vlans:
add_vlans = self.vlan_bitmap_add(
self.intf_info["trunkVlans"], vlan_map)
if not is_vlan_bitmap_empty(add_vlans):
self.updates_cmd.append(
"port trunk allow-pass %s"
% trunk_vlans.replace(',', ' ').replace('-', ' to '))
xmlstr += CE_NC_SET_TRUNK_PORT_VLANS % (
ifname, add_vlans, add_vlans)
change = True
else: # not trunk
self.updates_cmd.append("port link-type trunk")
change = True
if native_vlan:
self.updates_cmd.append(
"port trunk pvid vlan %s" % native_vlan)
xmlstr += CE_NC_SET_TRUNK_PORT_PVID % (ifname, native_vlan)
if trunk_vlans:
self.updates_cmd.append(
"port trunk allow-pass %s"
% trunk_vlans.replace(',', ' ').replace('-', ' to '))
xmlstr += CE_NC_SET_TRUNK_PORT_VLANS % (
ifname, vlan_map, vlan_map)
if not native_vlan and not trunk_vlans:
xmlstr += CE_NC_SET_TRUNK_PORT_MODE % ifname
self.updates_cmd.append(
"undo port trunk allow-pass vlan 1")
elif self.state == "absent":
if self.intf_info["linkType"] == "trunk":
if native_vlan and self.intf_info["pvid"] == native_vlan and native_vlan != '1':
self.updates_cmd.append(
"undo port trunk pvid vlan %s" % native_vlan)
xmlstr += CE_NC_SET_TRUNK_PORT_PVID % (ifname, 1)
change = True
if trunk_vlans:
del_vlans = self.vlan_bitmap_del(
self.intf_info["trunkVlans"], vlan_map)
if not is_vlan_bitmap_empty(del_vlans):
self.updates_cmd.append(
"undo port trunk allow-pass %s"
% trunk_vlans.replace(',', ' ').replace('-', ' to '))
undo_map = vlan_bitmap_undo(del_vlans)
xmlstr += CE_NC_SET_TRUNK_PORT_VLANS % (
ifname, undo_map, del_vlans)
change = True
else: # not trunk
self.updates_cmd.append("port link-type trunk")
self.updates_cmd.append("undo port trunk allow-pass vlan 1")
xmlstr += CE_NC_SET_TRUNK_PORT_MODE % ifname
change = True
if not change:
self.updates_cmd.pop()
return
conf_str = "<config>" + xmlstr + "</config>"
rcv_xml = set_nc_config(self.module, conf_str)
self.check_response(rcv_xml, "MERGE_TRUNK_PORT")
self.changed = True
def default_switchport(self, ifname):
"""Set interface default or unconfigured"""
change = False
if self.intf_info["linkType"] != "access":
self.updates_cmd.append("interface %s" % ifname)
self.updates_cmd.append("port link-type access")
self.updates_cmd.append("port default vlan 1")
change = True
else:
if self.intf_info["pvid"] != "1":
self.updates_cmd.append("interface %s" % ifname)
self.updates_cmd.append("port default vlan 1")
change = True
if not change:
return
conf_str = CE_NC_SET_DEFAULT_PORT % ifname
rcv_xml = set_nc_config(self.module, conf_str)
self.check_response(rcv_xml, "DEFAULT_INTF_VLAN")
self.changed = True
def vlan_series(self, vlanid_s):
""" convert vlan range to vlan list """
vlan_list = []
peerlistlen = len(vlanid_s)
if peerlistlen != 2:
self.module.fail_json(msg='Error: Format of vlanid is invalid.')
for num in range(peerlistlen):
if not vlanid_s[num].isdigit():
self.module.fail_json(
msg='Error: Format of vlanid is invalid.')
if int(vlanid_s[0]) > int(vlanid_s[1]):
self.module.fail_json(msg='Error: Format of vlanid is invalid.')
elif int(vlanid_s[0]) == int(vlanid_s[1]):
vlan_list.append(str(vlanid_s[0]))
return vlan_list
for num in range(int(vlanid_s[0]), int(vlanid_s[1])):
vlan_list.append(str(num))
vlan_list.append(vlanid_s[1])
return vlan_list
def vlan_region(self, vlanid_list):
""" convert vlan range to vlan list """
vlan_list = []
peerlistlen = len(vlanid_list)
for num in range(peerlistlen):
if vlanid_list[num].isdigit():
vlan_list.append(vlanid_list[num])
else:
vlan_s = self.vlan_series(vlanid_list[num].split('-'))
vlan_list.extend(vlan_s)
return vlan_list
def vlan_range_to_list(self, vlan_range):
""" convert vlan range to vlan list """
vlan_list = self.vlan_region(vlan_range.split(','))
return vlan_list
def vlan_list_to_bitmap(self, vlanlist):
""" convert vlan list to vlan bitmap """
vlan_bit = ['0'] * 1024
bit_int = [0] * 1024
vlan_list_len = len(vlanlist)
for num in range(vlan_list_len):
tagged_vlans = int(vlanlist[num])
if tagged_vlans <= 0 or tagged_vlans > 4094:
self.module.fail_json(
msg='Error: Vlan id is not in the range from 1 to 4094.')
j = tagged_vlans / 4
bit_int[j] |= 0x8 >> (tagged_vlans % 4)
vlan_bit[j] = hex(bit_int[j])[2]
vlan_xml = ''.join(vlan_bit)
return vlan_xml
def vlan_bitmap_add(self, oldmap, newmap):
"""vlan add bitmap"""
vlan_bit = ['0'] * 1024
if len(newmap) != 1024:
self.module.fail_json(msg='Error: New vlan bitmap is invalid.')
if len(oldmap) != 1024 and len(oldmap) != 0:
self.module.fail_json(msg='Error: old vlan bitmap is invalid.')
if len(oldmap) == 0:
return newmap
for num in range(1024):
new_tmp = int(newmap[num], 16)
old_tmp = int(oldmap[num], 16)
add = (~(new_tmp & old_tmp)) & new_tmp
vlan_bit[num] = hex(add)[2]
vlan_xml = ''.join(vlan_bit)
return vlan_xml
def vlan_bitmap_del(self, oldmap, delmap):
"""vlan del bitmap"""
vlan_bit = ['0'] * 1024
if not oldmap or len(oldmap) == 0:
return ''.join(vlan_bit)
if len(oldmap) != 1024 or len(delmap) != 1024:
self.module.fail_json(msg='Error: vlan bitmap is invalid.')
for num in range(1024):
tmp = int(delmap[num], 16) & int(oldmap[num], 16)
vlan_bit[num] = hex(tmp)[2]
vlan_xml = ''.join(vlan_bit)
return vlan_xml
def check_params(self):
"""Check all input params"""
# interface type check
if self.interface:
self.intf_type = get_interface_type(self.interface)
if not self.intf_type:
self.module.fail_json(
                    msg='Error: Interface name %s is invalid.' % self.interface)
if not self.intf_type or not is_portswitch_enalbed(self.intf_type):
            self.module.fail_json(msg='Error: Interface %s is invalid.' % self.interface)
# check access_vlan
if self.access_vlan:
if not self.access_vlan.isdigit():
self.module.fail_json(msg='Error: Access vlan id is invalid.')
if int(self.access_vlan) <= 0 or int(self.access_vlan) > 4094:
self.module.fail_json(
msg='Error: Access vlan id is not in the range from 1 to 4094.')
# check native_vlan
if self.native_vlan:
if not self.native_vlan.isdigit():
self.module.fail_json(msg='Error: Native vlan id is invalid.')
if int(self.native_vlan) <= 0 or int(self.native_vlan) > 4094:
self.module.fail_json(
msg='Error: Native vlan id is not in the range from 1 to 4094.')
# get interface info
self.intf_info = self.get_interface_dict(self.interface)
if not self.intf_info:
            self.module.fail_json(msg='Error: Interface does not exist.')
if not self.is_l2switchport():
self.module.fail_json(
                msg='Error: Interface is not a layer 2 switch port.')
def get_proposed(self):
"""get proposed info"""
self.proposed['state'] = self.state
self.proposed['interface'] = self.interface
self.proposed['mode'] = self.mode
self.proposed['access_vlan'] = self.access_vlan
self.proposed['native_vlan'] = self.native_vlan
self.proposed['trunk_vlans'] = self.trunk_vlans
def get_existing(self):
"""get existing info"""
if self.intf_info:
self.existing["interface"] = self.intf_info["ifName"]
self.existing["mode"] = self.intf_info["linkType"]
self.existing["switchport"] = self.intf_info["l2Enable"]
self.existing['access_vlan'] = self.intf_info["pvid"]
self.existing['native_vlan'] = self.intf_info["pvid"]
self.existing['trunk_vlans'] = self.intf_info["trunkVlans"]
def get_end_state(self):
"""get end state info"""
if self.intf_info:
end_info = self.get_interface_dict(self.interface)
if end_info:
self.end_state["interface"] = end_info["ifName"]
self.end_state["mode"] = end_info["linkType"]
self.end_state["switchport"] = end_info["l2Enable"]
self.end_state['access_vlan'] = end_info["pvid"]
self.end_state['native_vlan'] = end_info["pvid"]
self.end_state['trunk_vlans'] = end_info["trunkVlans"]
def work(self):
"""worker"""
self.check_params()
if not self.intf_info:
            self.module.fail_json(msg='Error: Interface does not exist.')
self.get_existing()
self.get_proposed()
# present or absent
if self.state == "present" or self.state == "absent":
if self.mode == "access":
self.merge_access_vlan(self.interface, self.access_vlan)
elif self.mode == "trunk":
self.merge_trunk_vlan(
self.interface, self.native_vlan, self.trunk_vlans)
# unconfigured
else:
self.default_switchport(self.interface)
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
interface=dict(required=True, type='str'),
mode=dict(choices=['access', 'trunk'], required=False),
access_vlan=dict(type='str', required=False),
native_vlan=dict(type='str', required=False),
trunk_vlans=dict(type='str', required=False),
state=dict(choices=['absent', 'present', 'unconfigured'],
default='present')
)
argument_spec.update(ce_argument_spec)
switchport = SwitchPort(argument_spec)
switchport.work()
if __name__ == '__main__':
main()
| gpl-3.0 | 7,935,151,372,722,523,000 | 32.947239 | 123 | 0.553728 | false |
batxes/4Cin | SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/SHH_WT_models47488.py | 2 | 17589 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((-1417.69, 7999.35, 7862.76), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((-255.789, 8001.16, 6498.04), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((1043.94, 6568.7, 6558.27), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((-1105.23, 6466.69, 7375.34), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((-941.162, 5006.39, 7850.1), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((1250.29, 4004.55, 7630.89), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((2884.02, 3624.42, 8062.42), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2110.46, 3500.1, 8396.99), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((4715.72, 3921.63, 8075.45), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((5603.13, 4104.1, 9479.44), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((7067.6, 4794.48, 8494.14), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((6688.59, 5676.66, 7930.28), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((6795.76, 7228.32, 7499.21), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((5634.72, 7607.97, 8385.17), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((5975.56, 9776.47, 8813.62), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((5649.95, 12276.5, 6970.73), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((5172.84, 11330.3, 5316.72), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((6551.64, 10831.2, 5332.18), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((7004.08, 9405.61, 6160.73), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((8213.17, 8881.32, 6942.09), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((7202.72, 6696.88, 6542.3), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((7962.53, 8540.03, 6089.45), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((8096.11, 8006.02, 4986.79), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((8011.78, 9155.23, 4371.13), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((6949.51, 10034.4, 4743.02), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((6646.02, 11608.3, 4814.11), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((7058.86, 10192.8, 5233.91), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((6239.48, 8380.45, 6155.62), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((7425.37, 7808.2, 5585.9), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((7734.98, 6616.85, 5947.09), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((7707.83, 6311.34, 5104.46), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((7397.39, 5899.65, 6742.1), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((8786.72, 6445.96, 5776.58), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((8002.09, 7254.54, 4818.09), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((8091.62, 8572.28, 5129.98), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((8481.41, 9896.28, 5385.73), (0.7, 0.7, 0.7), 693.382)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((7830, 7510.32, 5729.58), (0.7, 0.7, 0.7), 804.038)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((8838.78, 8494.92, 4532.17), (0.7, 0.7, 0.7), 816.178)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((7846.44, 8018.73, 4088.37), (0.7, 0.7, 0.7), 776.628)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((9360.31, 7795.61, 4522.41), (0.7, 0.7, 0.7), 750.656)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((8797.64, 6169.54, 4562.66), (0.7, 0.7, 0.7), 709.625)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((9629.03, 4740.16, 5396.91), (0.7, 0.7, 0.7), 927.681)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((11563.6, 4238.41, 3551.8), (0.7, 0.7, 0.7), 1088.21)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((10427, 3109.14, 4639.27), (0.7, 0.7, 0.7), 736.147)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((9909.56, 4653.57, 4218.64), (0.7, 0.7, 0.7), 861.101)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((8122.37, 4792.28, 4962.55), (0.7, 0.7, 0.7), 924.213)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((7840.32, 4749.12, 3040.13), (0.7, 0.7, 0.7), 881.828)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((9854.48, 4998.02, 2711.31), (0.7, 0.7, 0.7), 927.681)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((8132.93, 4317.58, 2877.28), (0.7, 0.7, 0.7), 831.576)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((6638.64, 3828.94, 3974.42), (0.7, 0.7, 0.7), 859.494)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((7608.62, 2749.54, 3802.01), (0.7, 0.7, 0.7), 704.845)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((6754.89, 3424.53, 5098.69), (0.7, 0.7, 0.7), 804.461)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((5639.85, 4421.52, 6128.29), (0.7, 0.7, 0.7), 934.111)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((5360.29, 3177.05, 7008.75), (0.7, 0.7, 0.7), 988.339)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((5545.02, 2580.77, 6606.94), (1, 0.7, 0), 803.7)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((5877.52, 3639.15, 4788.19), (0.7, 0.7, 0.7), 812.118)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((3879.74, 3905.18, 3928.14), (0.7, 0.7, 0.7), 1177.93)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((2837.81, 4577.98, 1724.25), (0.7, 0.7, 0.7), 1038.21)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((2662.59, 4759.64, 1181.39), (1, 0.7, 0), 758.016)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((1989.69, 4429.33, 1533.68), (0.7, 0.7, 0.7), 824.046)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((2989.63, 4262.46, 1724.71), (0.7, 0.7, 0.7), 793.379)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((2622.24, 3482.56, 1346.33), (0.7, 0.7, 0.7), 1011.56)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((3423.06, 4206.64, 2896.65), (0.7, 0.7, 0.7), 1097.01)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((2220.22, 3217.21, 1844.37), (0.7, 0.7, 0.7), 851.626)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((1013.07, 2718.64, 254.321), (0.7, 0.7, 0.7), 869.434)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((2635.84, 1944.75, 412.557), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((2741.68, 2678.39, -1078.41), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((3156.76, 3007.48, 1318.91), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((1418.87, 3169.47, 203.221), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((2004.23, 2953.28, -1245.3), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((3098.33, 3965.17, -962.568), (0.7, 0.7, 0.7), 764.488)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| gpl-3.0 | 1,244,318,513,115,747,800 | 46.155496 | 75 | 0.699756 | false |
craigderington/studentloan5 | studentloan5/Lib/encodings/cp874.py | 272 | 12595 | """ Python Character Mapping Codec cp874 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP874.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp874',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\ufffe' # 0x82 -> UNDEFINED
'\ufffe' # 0x83 -> UNDEFINED
'\ufffe' # 0x84 -> UNDEFINED
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\ufffe' # 0x86 -> UNDEFINED
'\ufffe' # 0x87 -> UNDEFINED
'\ufffe' # 0x88 -> UNDEFINED
'\ufffe' # 0x89 -> UNDEFINED
'\ufffe' # 0x8A -> UNDEFINED
'\ufffe' # 0x8B -> UNDEFINED
'\ufffe' # 0x8C -> UNDEFINED
'\ufffe' # 0x8D -> UNDEFINED
'\ufffe' # 0x8E -> UNDEFINED
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\ufffe' # 0x99 -> UNDEFINED
'\ufffe' # 0x9A -> UNDEFINED
'\ufffe' # 0x9B -> UNDEFINED
'\ufffe' # 0x9C -> UNDEFINED
'\ufffe' # 0x9D -> UNDEFINED
'\ufffe' # 0x9E -> UNDEFINED
'\ufffe' # 0x9F -> UNDEFINED
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
'\u0e24' # 0xC4 -> THAI CHARACTER RU
'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
'\u0e26' # 0xC6 -> THAI CHARACTER LU
'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
'\ufffe' # 0xDB -> UNDEFINED
'\ufffe' # 0xDC -> UNDEFINED
'\ufffe' # 0xDD -> UNDEFINED
'\ufffe' # 0xDE -> UNDEFINED
'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
'\u0e50' # 0xF0 -> THAI DIGIT ZERO
'\u0e51' # 0xF1 -> THAI DIGIT ONE
'\u0e52' # 0xF2 -> THAI DIGIT TWO
'\u0e53' # 0xF3 -> THAI DIGIT THREE
'\u0e54' # 0xF4 -> THAI DIGIT FOUR
'\u0e55' # 0xF5 -> THAI DIGIT FIVE
'\u0e56' # 0xF6 -> THAI DIGIT SIX
'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
'\u0e59' # 0xF9 -> THAI DIGIT NINE
'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
'\ufffe' # 0xFC -> UNDEFINED
'\ufffe' # 0xFD -> UNDEFINED
'\ufffe' # 0xFE -> UNDEFINED
'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| bsd-3-clause | -1,250,610,034,300,649,700 | 40.026059 | 117 | 0.52759 | false |
pchauncey/ansible | lib/ansible/modules/cloud/rackspace/rax_files.py | 33 | 11750 | #!/usr/bin/python
# (c) 2013, Paul Durivage <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_files
short_description: Manipulate Rackspace Cloud Files Containers
description:
- Manipulate Rackspace Cloud Files Containers
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing containers.
Selecting this option is only appropriate when setting type=meta
choices:
- "yes"
- "no"
default: "no"
container:
description:
- The container to use for container or metadata operations.
required: true
meta:
description:
- A hash of items to set as metadata values on a container
private:
description:
- Used to set a container as private, removing it from the CDN. B(Warning!)
Private containers, if previously made public, can have live objects
available until the TTL on cached objects expires
public:
description:
- Used to set a container as public, available via the Cloud Files CDN
region:
description:
- Region to create an instance in
default: DFW
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
ttl:
description:
- In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
Setting a TTL is only appropriate for containers that are public
type:
description:
- Type of object to do work on, i.e. metadata object or a container object
choices:
- file
- meta
default: file
web_error:
description:
- Sets an object to be presented as the HTTP error page when accessed by the CDN URL
web_index:
description:
- Sets an object to be presented as the HTTP index page when accessed by the CDN URL
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: "Test Cloud Files Containers"
hosts: local
gather_facts: no
tasks:
- name: "List all containers"
rax_files:
state: list
- name: "Create container called 'mycontainer'"
rax_files:
container: mycontainer
- name: "Create container 'mycontainer2' with metadata"
rax_files:
container: mycontainer2
meta:
key: value
file_for: [email protected]
- name: "Set a container's web index page"
rax_files:
container: mycontainer
web_index: index.html
- name: "Set a container's web error page"
rax_files:
container: mycontainer
web_error: error.html
- name: "Make container public"
rax_files:
container: mycontainer
public: yes
- name: "Make container public with a 24 hour TTL"
rax_files:
container: mycontainer
public: yes
ttl: 86400
- name: "Make container private"
rax_files:
container: mycontainer
private: yes
- name: "Test Cloud Files Containers Metadata Storage"
hosts: local
gather_facts: no
tasks:
- name: "Get mycontainer2 metadata"
rax_files:
container: mycontainer2
type: meta
- name: "Set mycontainer2 metadata"
rax_files:
container: mycontainer2
type: meta
meta:
uploaded_by: [email protected]
- name: "Remove mycontainer2 metadata"
rax_files:
container: "mycontainer2"
type: meta
state: absent
meta:
key: ""
file_for: ""
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError as e:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
EXIT_DICT = dict(success=True)
META_PREFIX = 'x-container-meta-'
def _get_container(module, cf, container):
try:
return cf.get_container(container)
except pyrax.exc.NoSuchContainer as e:
module.fail_json(msg=e.message)
def _fetch_meta(module, container):
EXIT_DICT['meta'] = dict()
try:
for k, v in container.get_metadata().items():
split_key = k.split(META_PREFIX)[-1]
EXIT_DICT['meta'][split_key] = v
except Exception as e:
module.fail_json(msg=e.message)
def meta(cf, module, container_, state, meta_, clear_meta):
c = _get_container(module, cf, container_)
if meta_ and state == 'present':
try:
meta_set = c.set_metadata(meta_, clear=clear_meta)
except Exception as e:
module.fail_json(msg=e.message)
elif meta_ and state == 'absent':
remove_results = []
for k, v in meta_.items():
c.remove_metadata_key(k)
remove_results.append(k)
EXIT_DICT['deleted_meta_keys'] = remove_results
elif state == 'absent':
remove_results = []
for k, v in c.get_metadata().items():
c.remove_metadata_key(k)
remove_results.append(k)
EXIT_DICT['deleted_meta_keys'] = remove_results
_fetch_meta(module, c)
_locals = locals().keys()
EXIT_DICT['container'] = c.name
if 'meta_set' in _locals or 'remove_results' in _locals:
EXIT_DICT['changed'] = True
module.exit_json(**EXIT_DICT)
def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
private, web_index, web_error):
if public and private:
module.fail_json(msg='container cannot be simultaneously '
'set to public and private')
if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error):
module.fail_json(msg='state cannot be omitted when setting/removing '
'attributes on a container')
if state == 'list':
# We don't care if attributes are specified, let's list containers
EXIT_DICT['containers'] = cf.list_containers()
module.exit_json(**EXIT_DICT)
try:
c = cf.get_container(container_)
except pyrax.exc.NoSuchContainer as e:
# Make the container if state=present, otherwise bomb out
if state == 'present':
try:
c = cf.create_container(container_)
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['changed'] = True
EXIT_DICT['created'] = True
else:
module.fail_json(msg=e.message)
else:
# Successfully grabbed a container object
# Delete if state is absent
if state == 'absent':
try:
cont_deleted = c.delete()
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['deleted'] = True
if meta_:
try:
meta_set = c.set_metadata(meta_, clear=clear_meta)
except Exception as e:
module.fail_json(msg=e.message)
finally:
_fetch_meta(module, c)
if ttl:
try:
c.cdn_ttl = ttl
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['ttl'] = c.cdn_ttl
if public:
try:
cont_public = c.make_public()
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['container_urls'] = dict(url=c.cdn_uri,
ssl_url=c.cdn_ssl_uri,
streaming_url=c.cdn_streaming_uri,
ios_uri=c.cdn_ios_uri)
if private:
try:
cont_private = c.make_private()
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['set_private'] = True
if web_index:
try:
cont_web_index = c.set_web_index_page(web_index)
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['set_index'] = True
finally:
_fetch_meta(module, c)
if web_error:
try:
cont_err_index = c.set_web_error_page(web_error)
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['set_error'] = True
finally:
_fetch_meta(module, c)
EXIT_DICT['container'] = c.name
EXIT_DICT['objs_in_container'] = c.object_count
EXIT_DICT['total_bytes'] = c.total_bytes
_locals = locals().keys()
if ('cont_deleted' in _locals
or 'meta_set' in _locals
or 'cont_public' in _locals
or 'cont_private' in _locals
or 'cont_web_index' in _locals
or 'cont_err_index' in _locals):
EXIT_DICT['changed'] = True
module.exit_json(**EXIT_DICT)
def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
private, web_index, web_error):
""" Dispatch from here to work with metadata or file objects """
cf = pyrax.cloudfiles
if cf is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if typ == "container":
container(cf, module, container_, state, meta_, clear_meta, ttl,
public, private, web_index, web_error)
else:
meta(cf, module, container_, state, meta_, clear_meta)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
container=dict(),
state=dict(choices=['present', 'absent', 'list'],
default='present'),
meta=dict(type='dict', default=dict()),
clear_meta=dict(default=False, type='bool'),
type=dict(choices=['container', 'meta'], default='container'),
ttl=dict(type='int'),
public=dict(default=False, type='bool'),
private=dict(default=False, type='bool'),
web_index=dict(),
web_error=dict()
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
container_ = module.params.get('container')
state = module.params.get('state')
meta_ = module.params.get('meta')
clear_meta = module.params.get('clear_meta')
typ = module.params.get('type')
ttl = module.params.get('ttl')
public = module.params.get('public')
private = module.params.get('private')
web_index = module.params.get('web_index')
web_error = module.params.get('web_error')
if state in ['present', 'absent'] and not container_:
module.fail_json(msg='please specify a container name')
if clear_meta and not typ == 'meta':
module.fail_json(msg='clear_meta can only be used when setting '
'metadata')
setup_rax_module(module, pyrax)
cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
private, web_index, web_error)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,000,722,813,322,906,000 | 29.361757 | 98 | 0.584766 | false |
wanghao524151/scrapy_joy | open_insurance/models.py | 1 | 2916 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.db import models
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.db.models.signals import pre_save
from scrapy.contrib.djangoitem import DjangoItem
from dynamic_scraper.models import Scraper, SchedulerRuntime
class InsuranceWebsite(models.Model):
site = models.CharField(u'平台名称', max_length=20)
name = models.CharField(u'产品名称', max_length=200)
area = models.CharField(u'地域', max_length=20, default='', null=True, blank=True)
category = models.CharField(u'产品类型', max_length=20, default='', null=True, blank=True)
url = models.URLField(u'爬取链接')
scraper = models.ForeignKey(Scraper, verbose_name=u'爬虫', blank=True, null=True, on_delete=models.SET_NULL)
scraper_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
def __unicode__(self):
return self.site + self.name + '[' + str(self.category) + ' ' + str(self.area) + ']'
class Meta:
verbose_name = u'保险平台'
verbose_name_plural = u'保险平台'
class Insurance(models.Model):
TERM_UNIT_CHOICES = (
('day', u'天'),
('month', u'月'),
)
insurance_website = models.ForeignKey(InsuranceWebsite, verbose_name=u'保险平台')
title = models.CharField(u'借款标题', max_length=200)
amount = models.FloatField(u'金额(元)', default=0)
year_rate = models.FloatField(u'年利率%', default=0)
duration = models.IntegerField(u'期限(天)', default=0)
term = models.IntegerField(u'期限', default=0)
term_unit = models.CharField(u'期限单位', max_length=10, choices=TERM_UNIT_CHOICES)
url = models.URLField(u'链接地址', blank=True)
checker_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
created = models.DateTimeField(u'录入时间', auto_now_add=True)
def __unicode__(self):
return self.title
class Meta:
verbose_name = u'保险'
verbose_name_plural = u'保险'
class InsuranceItem(DjangoItem):
django_model = Insurance
@receiver(pre_delete)
def pre_delete_handler(sender, instance, using, **kwargs):
if isinstance(instance, InsuranceWebsite):
if instance.scraper_runtime:
instance.scraper_runtime.delete()
if isinstance(instance, Insurance):
if instance.checker_runtime:
instance.checker_runtime.delete()
pre_delete.connect(pre_delete_handler)
@receiver(pre_save, sender=Insurance, dispatch_uid='open_insurance.insurance')
def loan_push_product(sender, **kwargs):
instance = kwargs.get('instance')
if instance.term_unit == 'day':
instance.duration = instance.term
elif instance.term_unit == 'month':
instance.duration = int(instance.term)*30 | apache-2.0 | 9,013,831,063,802,531,000 | 35.194805 | 110 | 0.679828 | false |
craftytrickster/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_http_header_util.py | 496 | 3372 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for http_header_util module."""
import unittest
from mod_pywebsocket import http_header_util
class UnitTest(unittest.TestCase):
"""A unittest for http_header_util module."""
def test_parse_relative_uri(self):
host, port, resource = http_header_util.parse_uri('/ws/test')
self.assertEqual(None, host)
self.assertEqual(None, port)
self.assertEqual('/ws/test', resource)
def test_parse_absolute_uri(self):
host, port, resource = http_header_util.parse_uri(
'ws://localhost:10080/ws/test')
self.assertEqual('localhost', host)
self.assertEqual(10080, port)
self.assertEqual('/ws/test', resource)
host, port, resource = http_header_util.parse_uri(
'ws://example.com/ws/test')
self.assertEqual('example.com', host)
self.assertEqual(80, port)
self.assertEqual('/ws/test', resource)
host, port, resource = http_header_util.parse_uri(
'wss://example.com/')
self.assertEqual('example.com', host)
self.assertEqual(443, port)
self.assertEqual('/', resource)
host, port, resource = http_header_util.parse_uri(
'ws://example.com:8080')
self.assertEqual('example.com', host)
self.assertEqual(8080, port)
self.assertEqual('/', resource)
def test_parse_invalid_uri(self):
host, port, resource = http_header_util.parse_uri('ws:///')
self.assertEqual(None, resource)
host, port, resource = http_header_util.parse_uri('ws://localhost:')
self.assertEqual(None, resource)
host, port, resource = http_header_util.parse_uri('ws://localhost:/ws')
self.assertEqual(None, resource)
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 | 4,985,388,599,685,385,000 | 36.466667 | 79 | 0.692467 | false |
redhat-openstack/nova | nova/db/sqlalchemy/utils.py | 14 | 5568 | # Copyright (c) 2013 Boris Pavlovic ([email protected]).
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.db import exception as db_exc
from oslo.db.sqlalchemy import utils as oslodbutils
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import MetaData
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy import Table
from sqlalchemy.types import NullType
from nova.db.sqlalchemy import api as db
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class DeleteFromSelect(UpdateBase):
def __init__(self, table, select, column):
self.table = table
self.select = select
self.column = column
# NOTE(guochbo): some versions of MySQL don't yet support subqueries with
# 'LIMIT & IN/ALL/ANY/SOME'. We need to work around this with a nested select.
@compiles(DeleteFromSelect)
def visit_delete_from_select(element, compiler, **kw):
return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.column),
element.column.name,
compiler.process(element.select))
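# Illustrative note (added; the names below are hypothetical, not from this
# module): for a statement such as
#   DeleteFromSelect(instances, limited_select, instances.c.id)
# the compiler hook above renders SQL of the form
#   DELETE FROM instances WHERE instances.id in (SELECT T1.id FROM (<limited select>) as T1)
# so any LIMIT lives inside the nested select rather than directly inside IN (...).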
def check_shadow_table(migrate_engine, table_name):
"""This method checks that table with ``table_name`` and
corresponding shadow table have same columns.
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
autoload=True)
columns = dict([(c.name, c) for c in table.columns])
shadow_columns = dict([(c.name, c) for c in shadow_table.columns])
for name, column in columns.iteritems():
if name not in shadow_columns:
raise exception.NovaException(
_("Missing column %(table)s.%(column)s in shadow table")
% {'column': name, 'table': shadow_table.name})
shadow_column = shadow_columns[name]
if not isinstance(shadow_column.type, type(column.type)):
raise exception.NovaException(
_("Different types in %(table)s.%(column)s and shadow table: "
"%(c_type)s %(shadow_c_type)s")
% {'column': name, 'table': table.name,
'c_type': column.type,
'shadow_c_type': shadow_column.type})
for name, column in shadow_columns.iteritems():
if name not in columns:
raise exception.NovaException(
_("Extra column %(table)s.%(column)s in shadow table")
% {'column': name, 'table': shadow_table.name})
return True
def create_shadow_table(migrate_engine, table_name=None, table=None,
**col_name_col_instance):
"""This method create shadow table for table with name ``table_name``
or table instance ``table``.
:param table_name: Autoload table with this name and create shadow table
:param table: Autoloaded table, so just create corresponding shadow table.
:param col_name_col_instance: contains pair column_name=column_instance.
column_instance is instance of Column. These params are required only for
columns that have unsupported types by sqlite. For example BigInteger.
:returns: The created shadow_table object.
"""
meta = MetaData(bind=migrate_engine)
if table_name is None and table is None:
raise exception.NovaException(_("Specify `table_name` or `table` "
"param"))
if not (table_name is None or table is None):
raise exception.NovaException(_("Specify only one param `table_name` "
"`table`"))
if table is None:
table = Table(table_name, meta, autoload=True)
columns = []
for column in table.columns:
if isinstance(column.type, NullType):
new_column = oslodbutils._get_not_supported_column(
col_name_col_instance, column.name)
columns.append(new_column)
else:
columns.append(column.copy())
shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name
shadow_table = Table(shadow_table_name, meta, *columns,
mysql_engine='InnoDB')
try:
shadow_table.create()
return shadow_table
except (db_exc.DBError, OperationalError):
# NOTE(ekudryashova): At the moment there is a case in oslo.db code,
# which raises unwrapped OperationalError, so we should catch it until
# oslo.db would wraps all such exceptions
LOG.info(repr(shadow_table))
LOG.exception(_('Exception while creating table.'))
raise exception.ShadowTableExists(name=shadow_table_name)
except Exception:
LOG.info(repr(shadow_table))
LOG.exception(_('Exception while creating table.'))
| apache-2.0 | -7,538,963,703,526,424,000 | 39.941176 | 78 | 0.648886 | false |
krieger-od/nwjs_chromium.src | tools/telemetry/telemetry/timeline/inspector_importer.py | 12 | 2626 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Imports event data obtained from the inspector's timeline.'''
import telemetry.timeline.slice as tracing_slice
import telemetry.timeline.thread as timeline_thread
from telemetry.timeline import importer
from telemetry.timeline import trace_data as trace_data_module
class InspectorTimelineImporter(importer.TimelineImporter):
def __init__(self, model, trace_data):
super(InspectorTimelineImporter, self).__init__(model,
trace_data,
import_order=1)
self._events = trace_data.GetEventsFor(
trace_data_module.INSPECTOR_TRACE_PART)
@staticmethod
def GetSupportedPart():
return trace_data_module.INSPECTOR_TRACE_PART
def ImportEvents(self):
render_process = self._model.GetOrCreateProcess(0)
for raw_event in self._events:
thread = render_process.GetOrCreateThread(raw_event.get('thread', 0))
InspectorTimelineImporter.AddRawEventToThreadRecursive(thread, raw_event)
def FinalizeImport(self):
pass
@staticmethod
def AddRawEventToThreadRecursive(thread, raw_inspector_event):
pending_slice = None
if ('startTime' in raw_inspector_event and
'type' in raw_inspector_event):
args = {}
for x in raw_inspector_event:
if x in ('startTime', 'endTime', 'children'):
continue
args[x] = raw_inspector_event[x]
if len(args) == 0:
args = None
start_time = raw_inspector_event['startTime']
end_time = raw_inspector_event.get('endTime', start_time)
pending_slice = tracing_slice.Slice(
thread, 'inspector',
raw_inspector_event['type'],
start_time,
thread_timestamp=None,
args=args)
for child in raw_inspector_event.get('children', []):
InspectorTimelineImporter.AddRawEventToThreadRecursive(
thread, child)
if pending_slice:
pending_slice.duration = end_time - pending_slice.start
thread.PushSlice(pending_slice)
@staticmethod
def RawEventToTimelineEvent(raw_inspector_event):
"""Converts raw_inspector_event to TimelineEvent."""
thread = timeline_thread.Thread(None, 0)
InspectorTimelineImporter.AddRawEventToThreadRecursive(
thread, raw_inspector_event)
thread.FinalizeImport()
assert len(thread.toplevel_slices) <= 1
if len(thread.toplevel_slices) == 0:
return None
return thread.toplevel_slices[0]
| bsd-3-clause | -512,878,484,294,045,400 | 34.972603 | 79 | 0.677456 | false |
avedaee/DIRAC | WorkloadManagementSystem/Service/OptimizationMindHandler.py | 1 | 9803 | import types
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import DEncode, ThreadScheduler
from DIRAC.Core.Security import Properties
from DIRAC.Core.Base.ExecutorMindHandler import ExecutorMindHandler
from DIRAC.WorkloadManagementSystem.Client.JobState.JobState import JobState
from DIRAC.WorkloadManagementSystem.Client.JobState.CachedJobState import CachedJobState
from DIRAC.WorkloadManagementSystem.Client.JobState.OptimizationTask import OptimizationTask
class OptimizationMindHandler( ExecutorMindHandler ):
__jobDB = False
__optimizationStates = [ 'Received', 'Checking' ]
__loadTaskId = False
MSG_DEFINITIONS = { 'OptimizeJobs' : { 'jids' : ( types.ListType, types.TupleType ) } }
auth_msg_OptimizeJobs = [ 'all' ]
def msg_OptimizeJobs( self, msgObj ):
jids = msgObj.jids
for jid in jids:
try:
jid = int( jid )
except ValueError:
self.log.error( "Job ID %s has to be an integer" % jid )
#Forget and add task to ensure state is reset
self.forgetTask( jid )
result = self.executeTask( jid, OptimizationTask( jid ) )
if not result[ 'OK' ]:
self.log.error( "Could not add job %s to optimization: %s" % ( jid, result[ 'Value' ] ) )
else:
self.log.info( "Received new job %s" % jid )
return S_OK()
@classmethod
def __loadJobs( cls, eTypes = None ):
log = cls.log
if cls.__loadTaskId:
period = cls.srv_getCSOption( "LoadJobPeriod", 300 )
ThreadScheduler.gThreadScheduler.setTaskPeriod( cls.__loadTaskId, period )
if not eTypes:
eConn = cls.getExecutorsConnected()
eTypes = [ eType for eType in eConn if eConn[ eType ] > 0 ]
if not eTypes:
log.info( "No optimizer connected. Skipping load" )
return S_OK()
log.info( "Getting jobs for %s" % ",".join( eTypes ) )
checkingMinors = [ eType.split( "/" )[1] for eType in eTypes if eType != "WorkloadManagement/JobPath" ]
for opState in cls.__optimizationStates:
#For Received states
if opState == "Received":
if 'WorkloadManagement/JobPath' not in eTypes:
continue
jobCond = { 'Status' : opState }
#For checking states
if opState == "Checking":
if not checkingMinors:
continue
jobCond = { 'Status': opState, 'MinorStatus' : checkingMinors }
#Do the magic
jobTypeCondition = cls.srv_getCSOption( "JobTypeRestriction", [] )
if jobTypeCondition:
jobCond[ 'JobType' ] = jobTypeCondition
result = cls.__jobDB.selectJobs( jobCond, limit = cls.srv_getCSOption( "JobQueryLimit", 10000 ) )
if not result[ 'OK' ]:
return result
jidList = result[ 'Value' ]
knownJids = cls.getTaskIds()
added = 0
for jid in jidList:
jid = long( jid )
if jid not in knownJids:
#Same as before. Check that the state is ok.
cls.executeTask( jid, OptimizationTask( jid ) )
added += 1
log.info( "Added %s/%s jobs for %s state" % ( added, len( jidList ), opState ) )
return S_OK()
@classmethod
def initializeHandler( cls, serviceInfoDict ):
try:
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
cls.__jobDB = JobDB()
except Exception, excp:
return S_ERROR( "Could not connect to JobDB: %s" % str( excp ) )
cls.setFailedOnTooFrozen( False )
cls.setFreezeOnFailedDispatch( False )
cls.setFreezeOnUnknownExecutor( False )
cls.setAllowedClients( "JobManager" )
JobState.checkDBAccess()
JobState.cleanTaskQueues()
period = cls.srv_getCSOption( "LoadJobPeriod", 60 )
result = ThreadScheduler.gThreadScheduler.addPeriodicTask( period, cls.__loadJobs )
if not result[ 'OK' ]:
return result
cls.__loadTaskId = result[ 'Value' ]
return cls.__loadJobs()
@classmethod
def exec_executorConnected( cls, trid, eTypes ):
return cls.__loadJobs( eTypes )
@classmethod
def __failJob( cls, jid, minorStatus, appStatus = "" ):
cls.forgetTask( jid )
cls.__jobDB.setJobStatus( jid, "Failed", minorStatus, appStatus )
@classmethod
def __splitJob( cls, jid, manifests ):
cls.log.notice( "Splitting job %s" % jid )
try:
result = cls.__jobDB.insertSplittedManifests( jid, manifests )
if not result[ 'OK' ]:
cls.__failJob( jid, "Error while splitting", result[ 'Message' ] )
return S_ERROR( "Fail splitting" )
for jid in result[ 'Value' ]:
cls.forgetTask( jid )
cls.executeTask( jid, OptimizationTask( jid ) )
except Exception, excp:
cls.log.exception( "While splitting" )
cls.__failJob( jid, "Error while splitting", str( excp ) )
return S_OK()
@classmethod
def exec_taskProcessed( cls, jid, taskObj, eType ):
cjs = taskObj.jobState
cls.log.info( "Saving changes for job %s after %s" % ( jid, eType ) )
result = cjs.commitChanges()
if not result[ 'OK' ]:
cls.log.error( "Could not save changes for job", "%s: %s" % ( jid, result[ 'Message' ] ) )
return result
if taskObj.splitManifests:
return cls.__splitJob( jid, taskObj.splitManifests )
if taskObj.tqReady:
result = cjs.getManifest()
if not result[ 'OK' ]:
cls.log.error( "Could not get manifest before inserting into TQ", "%s: %s" % ( jid, result[ 'Message' ] ) )
return result
manifest = result[ 'Value' ]
result = cjs.jobState.insertIntoTQ( manifest )
if not result[ 'OK' ]:
cls.log.error( "Could not insert into TQ", "%s: %s" % ( jid, result[ 'Message' ] ) )
return result
return S_OK()
@classmethod
def exec_taskFreeze( cls, jid, taskObj, eType ):
jobState = taskObj.jobState
cls.log.info( "Saving changes for job %s before freezing from %s" % ( jid, eType ) )
result = jobState.commitChanges()
if not result[ 'OK' ]:
cls.log.error( "Could not save changes for job", "%s: %s" % ( jid, result[ 'Message' ] ) )
return result
@classmethod
def exec_dispatch( cls, jid, taskObj, pathExecuted ):
jobState = taskObj.jobState
result = jobState.getStatus()
if not result[ 'OK' ]:
cls.log.error( "Could not get status for job", "%s: %s" % ( jid, result[ 'Message' ] ) )
return S_ERROR( "Could not retrieve status: %s" % result[ 'Message' ] )
status, minorStatus = result[ 'Value' ]
#If not in proper state then end chain
if status not in cls.__optimizationStates:
cls.log.info( "Dispatching job %s out of optimization" % jid )
return S_OK()
#If received send to JobPath
if status == "Received":
cls.log.info( "Dispatching job %s to JobPath" % jid )
return S_OK( "WorkloadManagement/JobPath" )
result = jobState.getOptParameter( 'OptimizerChain' )
if not result[ 'OK' ]:
cls.log.error( "Could not get optimizer chain for job, auto resetting job", "%s: %s" % ( jid, result[ 'Message' ] ) )
result = jobState.resetJob()
if not result[ 'OK' ]:
cls.log.error( "Could not reset job", "%s: %s" % ( jid, result[ 'Message' ] ) )
return S_ERROR( "Cound not get OptimizationChain or reset job %s" % jid )
return S_OK( "WorkloadManagement/JobPath" )
optChain = result[ 'Value' ]
if minorStatus not in optChain:
cls.log.error( "Next optimizer is not in the chain for job", "%s: %s not in %s" % ( jid, minorStatus, optChain ) )
return S_ERROR( "Next optimizer %s not in chain %s" % ( minorStatus, optChain ) )
cls.log.info( "Dispatching job %s to %s" % ( jid, minorStatus ) )
return S_OK( "WorkloadManagement/%s" % minorStatus )
@classmethod
def exec_prepareToSend( cls, jid, taskObj, eId ):
return taskObj.jobState.recheckValidity()
@classmethod
def exec_serializeTask( cls, taskObj ):
return S_OK( taskObj.serialize() )
@classmethod
def exec_deserializeTask( cls, taskStub ):
return OptimizationTask.deserialize( taskStub )
@classmethod
def exec_taskError( cls, jid, taskObj, errorMsg ):
result = taskObj.jobState.commitChanges()
if not result[ 'OK' ]:
cls.log.error( "Cannot write changes to job %s: %s" % ( jid, result[ 'Message' ] ) )
jobState = JobState( jid )
result = jobState.getStatus()
if result[ 'OK' ]:
if result[ 'Value' ][0].lower() == "failed":
return S_OK()
else:
cls.log.error( "Could not get status of job %s: %s" % ( jid, result[ 'Message ' ] ) )
cls.log.notice( "Job %s: Setting to Failed|%s" % ( jid, errorMsg ) )
return jobState.setStatus( "Failed", errorMsg, source = 'OptimizationMindHandler' )
auth_stageCallback = [ Properties.OPERATOR ]
types_stageCallback = ( ( types.StringType, types.IntType, types.LongType ), types.StringType )
def export_stageCallback( self, jid, stageStatus ):
""" Simple call back method to be used by the stager. """
try:
jid = int( jid )
except ValueError:
return S_ERROR( "Job ID is not a number!" )
failed = False
if stageStatus == 'Done':
major = 'Checking'
minor = 'InputDataValidation'
elif stageStatus == 'Failed':
major = 'Failed'
minor = 'Staging input files failed'
failed = True
else:
return S_ERROR( "%s status not known." % stageStatus )
result = self.__jobDB.getJobAttributes( jid, ['Status'] )
if not result['OK']:
return result
data = result[ 'Value' ]
if not data:
return S_OK( 'No Matching Job' )
if data[ 'Status' ] != 'Staging':
return S_OK( 'Job %s is not in Staging' % jid )
jobState = JobState( jid )
result = jobState.setStatus( major, minor, source = "StagerSystem" )
if not result[ 'OK' ]:
return result
if failed:
return S_OK()
return self.executeTask( jid, OptimizationTask( jid ) )
| gpl-3.0 | -990,969,427,920,183,300 | 38.055777 | 123 | 0.636132 | false |
sunils34/buffer-django-nonrel | django/core/xheaders.py | 518 | 1157 | """
Pages in Django can be served up with custom HTTP headers containing useful
information about those pages -- namely, the content type and object ID.
This module contains utility functions for retrieving and doing interesting
things with these special "X-Headers" (so called because the HTTP spec demands
that custom headers are prefixed with "X-").
Next time you're at slashdot.org, watch out for X-Fry and X-Bender. :)
"""
def populate_xheaders(request, response, model, object_id):
"""
Adds the "X-Object-Type" and "X-Object-Id" headers to the given
HttpResponse according to the given model and object_id -- but only if the
given HttpRequest object has an IP address within the INTERNAL_IPS setting
or if the request is from a logged in staff member.
"""
from django.conf import settings
if (request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
or (hasattr(request, 'user') and request.user.is_active
and request.user.is_staff)):
response['X-Object-Type'] = "%s.%s" % (model._meta.app_label, model._meta.object_name.lower())
response['X-Object-Id'] = str(object_id)
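
# Hypothetical usage sketch (added; the view, template and model names are
# illustrative, not part of this module): a detail view could attach the
# headers just before returning its response, e.g.
#
#     response = render_to_response('detail.html', {'object': obj})
#     populate_xheaders(request, response, MyModel, obj.pk)
#     return response
#
# The headers are only added for requests from INTERNAL_IPS or active staff
# users, as enforced above.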
| bsd-3-clause | 3,068,264,548,416,674,300 | 47.208333 | 102 | 0.707001 | false |
jannekai/project-euler | 017.py | 1 | 1222 | import time
import math
import sys
start = time.time()
w = dict()
w[1] = "one"
w[2] = "two"
w[3] = "three"
w[4] = "four"
w[5] = "five"
w[6] = "six"
w[7] = "seven"
w[8] = "eight"
w[9] = "nine"
w[10] = "ten"
w[11] = "eleven"
w[12] = "twelve"
w[13] = "thirteen"
w[14] = "fourteen"
w[15] = "fifteen"
w[16] = "sixteen"
w[17] = "seventeen"
w[18] = "eighteen"
w[19] = "nineteen"
w[20] = "twenty"
w[30] = "thirty"
w[40] = "forty"
w[50] = "fifty"
w[60] = "sixty"
w[70] = "seventy"
w[80] = "eighty"
w[90] = "ninety"
def tens(x):
global w
if x%10 == 0:
return w[x//10*10]
else:
return w[x//10*10] + "-" + w[x%10]
def letters(s):
c = 0
for i in range(0, len(s)):
if s[i] != " " and s[i] != "-":
c += 1
return c
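
# Added note on the approach: the loop below spells out each number from 1 to
# 999 using w[] and tens(), letters() counts characters while skipping spaces
# and hyphens, and the letters of "one thousand" are added to the total first.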
n = dict()
c = letters("one thousand")
for i in range(1, 1000):
s = ""
if i < 20:
s += w[i]
elif i < 100:
s += tens(i)
elif i < 1000:
if i%100 == 0:
s += w[i//100] + " hundred "
else:
if i%100 < 20:
s += w[i//100] + " hundred and " + w[i%100]
else:
s += w[i//100] + " hundred and " + tens(i%100)
c += letters(s)
print c
end = time.time() - start
print "Total time was " + str(end)+ " seconds"
| mit | 971,198,617,764,273,700 | 15.211268 | 50 | 0.478723 | false |
dreucifer/chargenstart | product/views.py | 1 | 2302 | """ @todo """
from flask import render_template, request, flash, session, Blueprint, abort
from product.models import Product, Category
from cart.forms import AddToCartForm
from product.utils import get_or_404, first_or_abort, get_for_page, get_by_slug
from cart import ShoppingCart, SessionCart
Products = Blueprint('products', __name__, url_prefix='/parts',
template_folder='templates', static_folder='static')
import product.admin
PER_PAGE = 10
@Products.route('/', methods=['POST', 'GET'])
def index():
""" @todo """
cart = ShoppingCart.for_session_cart(request.cart)
categories = Category.query.filter_by(parent=None).all()
return render_template('products.html', cart=cart, categories=categories)
@Products.route('/<category_slug>')
def category(category_slug):
cat = get_by_slug('Category', category_slug)
return render_template('category.html', category=cat)
@Products.route('/catalog/', defaults={'page_number': 1})
@Products.route('/catalog/page/<int:page_number>')
def catalog(page_number):
cat_page = _catalog_page(page_number, per_page=10)
cart = ShoppingCart.for_session_cart(request.cart)
return render_template('catalog.html', cart=cart, catalog_page=cat_page)
@Products.route('/info/<product_id>', methods=['GET', 'POST'])
def details(product_id):
""" @todo """
part = get_or_404(Product, product_id)
cart = ShoppingCart.for_session_cart(request.cart)
form = AddToCartForm(request.form, product=part, cart=cart, product_id=part.id_)
return render_template('details.html', product=part, form=form, cart=cart)
def _catalog_page(page_number, per_page=20):
""" @todo """
pagination = get_for_page(Product.query, page_number, per_page=per_page)
product_listings = [_product_listing(part) for part in pagination.items]
return render_template('_catalog.html',
pagination=pagination,
parts=product_listings)
@Products.app_template_global()
def _product_listing(part):
cart = ShoppingCart.for_session_cart(request.cart)
form = AddToCartForm(request.form, product=part,
cart=cart, product_id=part.id_)
return render_template('_product_listing.html', form=form, product=part,
cart=cart)
| mit | -357,783,510,925,507,500 | 38.689655 | 84 | 0.682016 | false |
2hdddg/pyvidstream | test/vidanalyze.py | 1 | 3170 | from unittest import TestCase
from vidutil.vidstream import Frame
import vidutil.vidanalyze as a
def _num_f(num):
f = Frame(type='P', key_frame=False, width=1, height=1,
coded_picture_number=num)
return f
class TestSplitFramesMissing(TestCase):
def test_no_missing(self):
""" Verifies that a list of frames with no missing
        frames is not split.
"""
frames = [
_num_f(1),
_num_f(2),
_num_f(3)
]
splitted = a.split_frames_by_missing(frames)
self.assertEqual(len(splitted), 1)
self.assertListEqual(frames, splitted[0])
def test_one_missing(self):
""" Verifies that a list of frames with a missing
        frame in the middle is split into two parts.
"""
frames = [
_num_f(1),
_num_f(2),
_num_f(4),
_num_f(5)
]
splitted = a.split_frames_by_missing(frames)
self.assertEqual(len(splitted), 2)
self.assertListEqual(frames[0:2], splitted[0])
self.assertListEqual(frames[2:4], splitted[1])
def test_two_missing(self):
""" Verifies that a list of frames with two missing
        frames is split into three parts.
"""
frames = [
_num_f(1),
_num_f(4),
_num_f(5),
_num_f(9),
_num_f(10),
_num_f(11),
]
splitted = a.split_frames_by_missing(frames)
self.assertEqual(len(splitted), 3)
self.assertListEqual(frames[0:1], splitted[0])
self.assertListEqual(frames[1:3], splitted[1])
self.assertListEqual(frames[3:6], splitted[2])
def test_empty(self):
""" Verifies that an empty list is returned
as an empty list.
"""
splitted = a.split_frames_by_missing([])
self.assertEqual(len(splitted), 1)
self.assertListEqual([], splitted[0])
def test_number_out_of_order(self):
""" Test that an exception is thrown if the
numbers are out of order
"""
frames = [
_num_f(2),
_num_f(1)
]
with self.assertRaises(a.FrameSeqException):
a.split_frames_by_missing(frames)
def test_same_number(self):
""" Test that an exception is thrown if same
number occures twice in a row
"""
frames = [
_num_f(2),
_num_f(2)
]
with self.assertRaises(a.FrameSeqException):
a.split_frames_by_missing(frames)
class TestAreFramesMissing(TestCase):
def test_no_missing(self):
""" Tests that False is returned when no
frames are missing
"""
frames = [
_num_f(2),
_num_f(3),
_num_f(4),
]
self.assertFalse(a.are_frames_missing(frames))
def test_missing(self):
""" Tests that True is returned when
frames are missing
"""
frames = [
_num_f(2),
_num_f(4),
_num_f(5),
]
self.assertTrue(a.are_frames_missing(frames))
| mit | 8,810,837,594,561,955,000 | 24.983607 | 59 | 0.529022 | false |
chafique-delli/OpenUpgrade | addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/actions.py | 382 | 3763 | ##########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
##############################################################################
import uno
import unohelper
import os
#--------------------------------------------------
# An ActionListener adapter.
# This object implements com.sun.star.awt.XActionListener.
# When actionPerformed is called, this will call an arbitrary
# python procedure, passing it...
# 1. the oActionEvent
# 2. any other parameters you specified to this object's constructor (as a tuple).
if __name__<>"package":
os.system( "ooffice '-accept=socket,host=localhost,port=2002;urp;'" )
passwd=""
database=""
uid=""
loginstatus=False
from com.sun.star.awt import XActionListener
class ActionListenerProcAdapter( unohelper.Base, XActionListener ):
def __init__( self, oProcToCall, tParams=() ):
self.oProcToCall = oProcToCall # a python procedure
self.tParams = tParams # a tuple
# oActionEvent is a com.sun.star.awt.ActionEvent struct.
def actionPerformed( self, oActionEvent ):
if callable( self.oProcToCall ):
apply( self.oProcToCall, (oActionEvent,) + self.tParams )
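
# Illustrative usage (assumed, not part of the original file): a dialog button,
# e.g. oButton = oDialog.getControl("ConnectButton"), could be wired up with
#   oButton.addActionListener( ActionListenerProcAdapter( onConnect, (extra,) ) )
# so that onConnect(oActionEvent, extra) is invoked when the button is pressed.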
#--------------------------------------------------
# An ItemListener adapter.
# This object implements com.sun.star.awt.XItemListener.
# When itemStateChanged is called, this will call an arbitrary
# python procedure, passing it...
# 1. the oItemEvent
# 2. any other parameters you specified to this object's constructor (as a tuple).
from com.sun.star.awt import XItemListener
class ItemListenerProcAdapter( unohelper.Base, XItemListener ):
def __init__( self, oProcToCall, tParams=() ):
self.oProcToCall = oProcToCall # a python procedure
self.tParams = tParams # a tuple
# oItemEvent is a com.sun.star.awt.ItemEvent struct.
def itemStateChanged( self, oItemEvent ):
if callable( self.oProcToCall ):
apply( self.oProcToCall, (oItemEvent,) + self.tParams )
#--------------------------------------------------
# An TextListener adapter.
# This object implements com.sun.star.awt.XTextistener.
# When textChanged is called, this will call an arbitrary
# python procedure, passing it...
# 1. the oTextEvent
# 2. any other parameters you specified to this object's constructor (as a tuple).
from com.sun.star.awt import XTextListener
class TextListenerProcAdapter( unohelper.Base, XTextListener ):
def __init__( self, oProcToCall, tParams=() ):
self.oProcToCall = oProcToCall # a python procedure
self.tParams = tParams # a tuple
# oTextEvent is a com.sun.star.awt.TextEvent struct.
def textChanged( self, oTextEvent ):
if callable( self.oProcToCall ):
apply( self.oProcToCall, (oTextEvent,) + self.tParams )
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 621,330,875,642,178,600 | 41.280899 | 84 | 0.666224 | false |
SjorsVanGelderen/Graduation | python_3/data_structures/heap.py | 1 | 5731 | """Heap data structure example
Copyright 2017, Sjors van Gelderen
"""
import random
"""Heap data structure
The 'max' property determines whether this is a max or min heap
"""
class Heap:
def __init__(self, property):
self.property = property
self.keys = []
def __repr__(self):
return "{} heap containing {}".format(self.property, self.keys)
# Complexity: O(1)
@staticmethod
def get_parent_index(key_index):
return (key_index + 1) // 2 - 1
# Complexity: O(1)
@staticmethod
def get_left_child_index(key_index):
return 2 * (key_index + 1) - 1
# Complexity: O(1)
@staticmethod
def get_right_child_index(key_index):
return 2 * (key_index + 1)
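
    # Worked example of the index arithmetic above (added note): with keys laid
    # out as [a, b, c, d, e], index 0 has children at indices 1 and 2, index 1
    # has children at indices 3 and 4, and get_parent_index(4) == 5 // 2 - 1 == 1.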
"""Swap operation
Complexity: O(1)
"""
def swap(self, left_index, right_index):
print("{} <-> {}".format(self.keys[left_index], self.keys[right_index]))
temp = self.keys[left_index]
self.keys[left_index] = self.keys[right_index]
self.keys[right_index] = temp
"""Insert operation
Complexity: O(log n)
"""
def insert(self, value):
print("Inserting {}".format(value))
# Add the key
self.keys.append(value)
key_index = len(self.keys) - 1
# Swim through to restore property
while True:
if key_index == 0:
# This root cannot have parents
print("Root node, no swimming to be done")
break
# Query parent
parent_index = Heap.get_parent_index(key_index)
parent = self.keys[parent_index]
            # Check whether the new key violates the heap property w.r.t. its parent
            violates = value <= parent if self.property == "MIN" else value >= parent
            if violates:
print("Before swap: {}".format(self))
self.swap(key_index, parent_index)
print("After swap: {}".format(self))
key_index = parent_index # Continue swimming on the new position
else:
message = "{} >= {}" if self.property == "MIN" else "{} <= {}"
print("Property holds: " + message.format(value, parent))
# Done swimming, the property now holds
                break
print("Finished adding {}".format(value))
"""Extract operation
Complexity: O(log n)
"""
def extract(self):
if len(self.keys) == 1:
print("Extracting {}".format(self.keys[0]))
self.keys = []
elif len(self.keys) > 1:
# Replace root with last key
print("Extracting {}".format(self.keys[0]))
self.keys[0] = self.keys[len(self.keys) - 1]
self.keys.pop()
print("New root: {}".format(self.keys[0]))
# Restore heap property
self.heapify()
else:
print("Nothing to extract")
"""Heapify operation tailored to be used after extraction
Complexity: O(log n)
"""
def heapify(self):
print("Restoring heap property")
key_index = 0
# Loop until the heap property is restored
while True:
left_child_index = Heap.get_left_child_index(key_index)
right_child_index = Heap.get_right_child_index(key_index)
child_index = -1
if left_child_index < len(self.keys):
child_index = left_child_index
print("Child index: {}".format(child_index))
if right_child_index < len(self.keys):
left_child = self.keys[left_child_index]
right_child = self.keys[right_child_index]
if self.property == "MIN":
# Target child will be the smaller one
if left_child > right_child:
child_index = right_child_index
print("Child index updated: {}".format(child_index))
else:
# Target child will be the larger one
if left_child <= right_child:
child_index = right_child_index
print("Child index updated: {}".format(child_index))
key = self.keys[key_index]
child_key = self.keys[child_index]
swap = key > child_key if self.property == "MIN" else key < child_key
if swap:
# Swap elements to further restore the property
self.swap(key_index, child_index)
# Set key index for next iteration
key_index = child_index
else:
# Property holds
print("Property holds, no swap necessary")
break
else:
print("No further children")
break
print("Finished extraction")
# Main program logic
def program():
# Build a min heap
print("Constructing min heap:")
min_heap = Heap("MIN")
for i in range(8):
min_heap.insert(random.randrange(100))
print("Result: {}\n".format(min_heap))
print("Extracting from min heap:")
min_heap.extract()
print("Result: {}\n".format(min_heap))
# Build a max heap
print("Constructing max heap:")
max_heap = Heap("MAX")
for i in range(8):
max_heap.insert(random.randrange(100))
print("Result: {}\n".format(max_heap))
print("Extracting from max heap:")
max_heap.extract()
print("Result: {}\n".format(max_heap))
# Run the program
program()
| mit | -7,478,037,339,931,518,000 | 31.5625 | 85 | 0.517711 | false |
corburn/scikit-bio | skbio/alignment/__init__.py | 4 | 7301 | r"""
Alignments and Sequence collections (:mod:`skbio.alignment`)
============================================================
.. currentmodule:: skbio.alignment
This module provides functionality for working with biological sequence
collections and alignments. These can be composed of generic sequences,
nucleotide sequences, DNA sequences, and RNA sequences. By default, input is
not validated, except that sequence ids must be unique, but all
constructor methods take a validate option which checks different features of
the input based on ``SequenceCollection`` type.
Data Structures
---------------
.. autosummary::
:toctree: generated/
SequenceCollection
Alignment
TabularMSA
Optimized (i.e., production-ready) Alignment Algorithms
-------------------------------------------------------
.. autosummary::
:toctree: generated/
StripedSmithWaterman
AlignmentStructure
local_pairwise_align_ssw
Slow (i.e., educational-purposes only) Alignment Algorithms
-----------------------------------------------------------
.. autosummary::
:toctree: generated/
global_pairwise_align_nucleotide
global_pairwise_align_protein
global_pairwise_align
local_pairwise_align_nucleotide
local_pairwise_align_protein
local_pairwise_align
General functionality
---------------------
.. autosummary::
:toctree: generated/
make_identity_substitution_matrix
Exceptions
----------
.. autosummary::
:toctree: generated/
SequenceCollectionError
AlignmentError
Data Structure Examples
-----------------------
>>> from skbio import SequenceCollection, Alignment, DNA
>>> seqs = [DNA("ACC--G-GGTA..", metadata={'id':"seq1"}),
... DNA("TCC--G-GGCA..", metadata={'id':"seqs2"})]
>>> a1 = Alignment(seqs)
>>> a1
<Alignment: n=2; mean +/- std length=13.00 +/- 0.00>
>>> seqs = [DNA("ACCGGG", metadata={'id':"seq1"}),
... DNA("TCCGGGCA", metadata={'id':"seq2"})]
>>> s1 = SequenceCollection(seqs)
>>> s1
<SequenceCollection: n=2; mean +/- std length=7.00 +/- 1.00>
>>> fasta_lines = ['>seq1\n',
... 'CGATGTCGATCGATCGATCGATCAG\n',
... '>seq2\n',
... 'CATCGATCGATCGATGCATGCATGCATG\n']
>>> s1 = SequenceCollection.read(fasta_lines, constructor=DNA)
>>> s1
<SequenceCollection: n=2; mean +/- std length=26.50 +/- 1.50>
Alignment Algorithm Examples
----------------------------
Optimized Alignment Algorithm Examples
--------------------------------------
Using the convenient ``local_pairwise_align_ssw`` function:
>>> from skbio.alignment import local_pairwise_align_ssw
>>> alignment = local_pairwise_align_ssw(
... "ACTAAGGCTCTCTACCCCTCTCAGAGA",
... "ACTAAGGCTCCTAACCCCCTTTTCTCAGA"
... )
>>> print(alignment)
>query
ACTAAGGCTCTC-TACCC----CTCTCAGA
>target
ACTAAGGCTC-CTAACCCCCTTTTCTCAGA
<BLANKLINE>
Using the ``StripedSmithWaterman`` object:
>>> from skbio.alignment import StripedSmithWaterman
>>> query = StripedSmithWaterman("ACTAAGGCTCTCTACCCCTCTCAGAGA")
>>> alignment = query("AAAAAACTCTCTAAACTCACTAAGGCTCTCTACCCCTCTTCAGAGAAGTCGA")
>>> print(alignment)
ACTAAGGCTC...
ACTAAGGCTC...
Score: 49
Length: 28
Using the ``StripedSmithWaterman`` object for multiple targets in an efficient
way and finding the aligned sequence representations:
>>> from skbio.alignment import StripedSmithWaterman
>>> alignments = []
>>> target_sequences = [
... "GCTAACTAGGCTCCCTTCTACCCCTCTCAGAGA",
... "GCCCAGTAGCTTCCCAATATGAGAGCATCAATTGTAGATCGGGCC",
... "TCTATAAGATTCCGCATGCGTTACTTATAAGATGTCTCAACGG",
... "TAGAGATTAATTGCCACTGCCAAAATTCTG"
... ]
>>> query_sequence = "ACTAAGGCTCTCTACCCCTCTCAGAGA"
>>> query = StripedSmithWaterman(query_sequence)
>>> for target_sequence in target_sequences:
... alignment = query(target_sequence)
... alignments.append(alignment)
...
>>> print(alignments[0])
ACTAAGGCT-...
ACT-AGGCTC...
Score: 38
Length: 30
>>> print(alignments[0].aligned_query_sequence)
ACTAAGGCT---CTCTACCCCTCTCAGAGA
>>> print(alignments[0].aligned_target_sequence)
ACT-AGGCTCCCTTCTACCCCTCTCAGAGA
Slow Alignment Algorithm Examples
---------------------------------
scikit-bio also provides pure-Python implementations of Smith-Waterman and
Needleman-Wunsch alignment. These are much slower than the methods described
above, but serve as useful educational examples as they're simpler to
experiment with. Functions are provided for local and global alignment of
protein and nucleotide sequences. The ``global*`` and ``local*`` functions
differ in the underlying algorithm that is applied (``global*`` uses Needleman-
Wunsch while ``local*`` uses Smith-Waterman), and ``*protein`` and
``*nucleotide`` differ in their default scoring of matches, mismatches, and
gaps.
Here we locally align a pair of protein sequences using gap open penalty
of 11 and a gap extend penalty of 1 (in other words, it is much more
costly to open a new gap than extend an existing one).
>>> from skbio.alignment import local_pairwise_align_protein
>>> s1 = "HEAGAWGHEE"
>>> s2 = "PAWHEAE"
>>> r = local_pairwise_align_protein(s1, s2, 11, 1)
This returns an ``skbio.Alignment`` object. We can look at the aligned
sequences:
>>> print(str(r[0]))
AWGHE
>>> print(str(r[1]))
AW-HE
We can identify the start and end positions of each aligned sequence
as follows:
>>> r.start_end_positions()
[(4, 8), (1, 4)]
And we can view the score of the alignment using the ``score`` method:
>>> r.score()
25.0
Similarly, we can perform global alignment of nucleotide sequences, and print
the resulting alignment in FASTA format:
>>> from skbio.alignment import global_pairwise_align_nucleotide
>>> s1 = "GCGTGCCTAAGGTATGCAAG"
>>> s2 = "ACGTGCCTAGGTACGCAAG"
>>> r = global_pairwise_align_nucleotide(s1, s2)
>>> print(r)
>0
GCGTGCCTAAGGTATGCAAG
>1
ACGTGCCTA-GGTACGCAAG
<BLANKLINE>
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from skbio.util import TestRunner
from ._tabular_msa import TabularMSA
from ._alignment import Alignment, SequenceCollection
from ._pairwise import (
local_pairwise_align_nucleotide, local_pairwise_align_protein,
local_pairwise_align, global_pairwise_align_nucleotide,
global_pairwise_align_protein, global_pairwise_align,
make_identity_substitution_matrix, local_pairwise_align_ssw
)
from skbio.alignment._ssw_wrapper import (
StripedSmithWaterman, AlignmentStructure)
from ._exception import (SequenceCollectionError, AlignmentError)
__all__ = ['TabularMSA', 'Alignment', 'SequenceCollection',
'StripedSmithWaterman', 'AlignmentStructure',
'local_pairwise_align_ssw', 'SequenceCollectionError',
'AlignmentError', 'global_pairwise_align',
'global_pairwise_align_nucleotide', 'global_pairwise_align_protein',
'local_pairwise_align', 'local_pairwise_align_nucleotide',
'local_pairwise_align_protein', 'make_identity_substitution_matrix']
test = TestRunner(__file__).test
| bsd-3-clause | -3,175,329,109,081,393,700 | 30.334764 | 79 | 0.679085 | false |
gameduell/duell | bin/win/python2.7.9/Lib/quopri.py | 424 | 6969 | #! /usr/bin/env python
"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
# (Dec 1991 version).
__all__ = ["encode", "decode", "encodestring", "decodestring"]
ESCAPE = '='
MAXLINESIZE = 76
HEX = '0123456789ABCDEF'
EMPTYSTRING = ''
try:
from binascii import a2b_qp, b2a_qp
except ImportError:
a2b_qp = None
b2a_qp = None
def needsquoting(c, quotetabs, header):
"""Decide whether a particular character needs to be quoted.
The 'quotetabs' flag indicates whether embedded tabs and spaces should be
quoted. Note that line-ending tabs and spaces are always encoded, as per
RFC 1521.
"""
if c in ' \t':
return quotetabs
# if header, we have to escape _ because _ is used to escape space
if c == '_':
return header
return c == ESCAPE or not (' ' <= c <= '~')
def quote(c):
"""Quote a single character."""
i = ord(c)
return ESCAPE + HEX[i//16] + HEX[i%16]
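# For illustration (added comment): quote('=') returns '=3D', since ord('=') is 61 (0x3D).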
def encode(input, output, quotetabs, header = 0):
"""Read 'input', apply quoted-printable encoding, and write to 'output'.
'input' and 'output' are files with readline() and write() methods.
The 'quotetabs' flag indicates whether embedded tabs and spaces should be
quoted. Note that line-ending tabs and spaces are always encoded, as per
RFC 1521.
The 'header' flag indicates whether we are encoding spaces as _ as per
RFC 1522.
"""
if b2a_qp is not None:
data = input.read()
odata = b2a_qp(data, quotetabs = quotetabs, header = header)
output.write(odata)
return
def write(s, output=output, lineEnd='\n'):
# RFC 1521 requires that the line ending in a space or tab must have
# that trailing character encoded.
if s and s[-1:] in ' \t':
output.write(s[:-1] + quote(s[-1]) + lineEnd)
elif s == '.':
output.write(quote(s) + lineEnd)
else:
output.write(s + lineEnd)
prevline = None
while 1:
line = input.readline()
if not line:
break
outline = []
# Strip off any readline induced trailing newline
stripped = ''
if line[-1:] == '\n':
line = line[:-1]
stripped = '\n'
# Calculate the un-length-limited encoded line
for c in line:
if needsquoting(c, quotetabs, header):
c = quote(c)
if header and c == ' ':
outline.append('_')
else:
outline.append(c)
# First, write out the previous line
if prevline is not None:
write(prevline)
# Now see if we need any soft line breaks because of RFC-imposed
# length limitations. Then do the thisline->prevline dance.
thisline = EMPTYSTRING.join(outline)
while len(thisline) > MAXLINESIZE:
# Don't forget to include the soft line break `=' sign in the
# length calculation!
write(thisline[:MAXLINESIZE-1], lineEnd='=\n')
thisline = thisline[MAXLINESIZE-1:]
# Write out the current line
prevline = thisline
# Write out the last line, without a trailing newline
if prevline is not None:
write(prevline, lineEnd=stripped)
def encodestring(s, quotetabs = 0, header = 0):
if b2a_qp is not None:
return b2a_qp(s, quotetabs = quotetabs, header = header)
from cStringIO import StringIO
infp = StringIO(s)
outfp = StringIO()
encode(infp, outfp, quotetabs, header)
return outfp.getvalue()
def decode(input, output, header = 0):
"""Read 'input', apply quoted-printable decoding, and write to 'output'.
'input' and 'output' are files with readline() and write() methods.
If 'header' is true, decode underscore as space (per RFC 1522)."""
if a2b_qp is not None:
data = input.read()
odata = a2b_qp(data, header = header)
output.write(odata)
return
new = ''
while 1:
line = input.readline()
if not line: break
i, n = 0, len(line)
if n > 0 and line[n-1] == '\n':
partial = 0; n = n-1
# Strip trailing whitespace
while n > 0 and line[n-1] in " \t\r":
n = n-1
else:
partial = 1
while i < n:
c = line[i]
if c == '_' and header:
new = new + ' '; i = i+1
elif c != ESCAPE:
new = new + c; i = i+1
elif i+1 == n and not partial:
partial = 1; break
elif i+1 < n and line[i+1] == ESCAPE:
new = new + ESCAPE; i = i+2
elif i+2 < n and ishex(line[i+1]) and ishex(line[i+2]):
new = new + chr(unhex(line[i+1:i+3])); i = i+3
else: # Bad escape sequence -- leave it in
new = new + c; i = i+1
if not partial:
output.write(new + '\n')
new = ''
if new:
output.write(new)
def decodestring(s, header = 0):
if a2b_qp is not None:
return a2b_qp(s, header = header)
from cStringIO import StringIO
infp = StringIO(s)
outfp = StringIO()
decode(infp, outfp, header = header)
return outfp.getvalue()
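# Illustrative round trip (added comment, not in the upstream module):
#
#   encoded = encodestring('a=b\n')    # the '=' is escaped as '=3D'
#   decodestring(encoded) == 'a=b\n'   # True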
# Other helper functions
def ishex(c):
"""Return true if the character 'c' is a hexadecimal digit."""
return '0' <= c <= '9' or 'a' <= c <= 'f' or 'A' <= c <= 'F'
def unhex(s):
"""Get the integer value of a hexadecimal number."""
bits = 0
for c in s:
if '0' <= c <= '9':
i = ord('0')
elif 'a' <= c <= 'f':
i = ord('a')-10
elif 'A' <= c <= 'F':
i = ord('A')-10
else:
break
bits = bits*16 + (ord(c) - i)
return bits
def main():
import sys
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'td')
except getopt.error, msg:
sys.stdout = sys.stderr
print msg
print "usage: quopri [-t | -d] [file] ..."
print "-t: quote tabs"
print "-d: decode; default encode"
sys.exit(2)
deco = 0
tabs = 0
for o, a in opts:
if o == '-t': tabs = 1
if o == '-d': deco = 1
if tabs and deco:
sys.stdout = sys.stderr
print "-t and -d are mutually exclusive"
sys.exit(2)
if not args: args = ['-']
sts = 0
for file in args:
if file == '-':
fp = sys.stdin
else:
try:
fp = open(file)
except IOError, msg:
sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
sts = 1
continue
if deco:
decode(fp, sys.stdout)
else:
encode(fp, sys.stdout, tabs)
if fp is not sys.stdin:
fp.close()
if sts:
sys.exit(sts)
if __name__ == '__main__':
main()
| bsd-2-clause | -7,141,771,964,586,269,000 | 28.405063 | 78 | 0.531066 | false |
yamamatsu2/pimouse_ros | scripts/lightsensors.py | 1 | 1258 | #!/usr/bin/env python
#encoding: utf8
import sys, rospy
from pimouse_ros.msg import LightSensorValues
def get_freq():
f = rospy.get_param('lightsensors_freq',10)
try:
if f <= 0.0:
raise Exception()
except:
rospy.logerr("value error: ligtsensors_freq")
sys.exit(1)
return f
if __name__ == '__main__':
devfile = '/dev/rtlightsensor0'
rospy.init_node('lightsensors')
pub = rospy.Publisher('lightsensors', LightSensorValues, queue_size=1)
freq = get_freq()
rate = rospy.Rate(freq)
while not rospy.is_shutdown():
try:
with open(devfile,'r') as f:
data = f.readline().split()
data = [ int(e) for e in data]
d = LightSensorValues()
d.right_forward = data[0]
d.right_side = data[1]
d.left_side = data[2]
d.left_forward = data[3]
d.sum_all = sum(data)
d.sum_forward = data[0] + data[3]
pub.publish(d)
except:
rospy.logerr("cannot open " + devfile)
f = get_freq()
if f != freq:
freq = f
rate = rospy.Rate(freq)
rate.sleep()
| gpl-3.0 | -1,690,007,248,208,470,800 | 26.347826 | 74 | 0.503975 | false |
mchristopher/PokemonGo-DesktopMap | app/pylibs/win32/Cryptodome/Hash/SHA256.py | 2 | 6305 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""SHA-256 cryptographic hash algorithm.
SHA-256 belongs to the SHA-2_ family of cryptographic hashes.
It produces the 256 bit digest of a message.
>>> from Cryptodome.Hash import SHA256
>>>
>>> h = SHA256.new()
>>> h.update(b'Hello')
>>> print h.hexdigest()
*SHA* stands for Secure Hash Algorithm.
.. _SHA-2: http://csrc.nist.gov/publications/fips/fips180-2/fips180-4.pdf
"""
from Cryptodome.Util.py3compat import *
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
create_string_buffer,
get_raw_buffer, c_size_t,
expect_byte_string)
_raw_sha256_lib = load_pycryptodome_raw_lib("Cryptodome.Hash._SHA256",
"""
int SHA256_init(void **shaState);
int SHA256_destroy(void *shaState);
int SHA256_update(void *hs,
const uint8_t *buf,
size_t len);
int SHA256_digest(const void *shaState,
uint8_t digest[32]);
int SHA256_copy(const void *src, void *dst);
""")
class SHA256Hash(object):
"""Class that implements a SHA-256 hash
"""
#: The size of the resulting hash in bytes.
digest_size = 32
#: The internal block size of the hash algorithm in bytes.
block_size = 64
#: ASN.1 Object ID
oid = "2.16.840.1.101.3.4.2.1"
def __init__(self, data=None):
state = VoidPointer()
result = _raw_sha256_lib.SHA256_init(state.address_of())
if result:
raise ValueError("Error %d while instantiating SHA256"
% result)
self._state = SmartPointer(state.get(),
_raw_sha256_lib.SHA256_destroy)
if data:
self.update(data)
def update(self, data):
"""Continue hashing of a message by consuming the next chunk of data.
Repeated calls are equivalent to a single call with the concatenation
of all the arguments. In other words:
>>> m.update(a); m.update(b)
is equivalent to:
>>> m.update(a+b)
:Parameters:
data : byte string
The next chunk of the message being hashed.
"""
expect_byte_string(data)
result = _raw_sha256_lib.SHA256_update(self._state.get(),
data,
c_size_t(len(data)))
if result:
raise ValueError("Error %d while instantiating SHA256"
% result)
def digest(self):
"""Return the **binary** (non-printable) digest of the message that has been hashed so far.
This method does not change the state of the hash object.
You can continue updating the object after calling this function.
:Return: A byte string of `digest_size` bytes. It may contain non-ASCII
characters, including null bytes.
"""
bfr = create_string_buffer(self.digest_size)
result = _raw_sha256_lib.SHA256_digest(self._state.get(),
bfr)
if result:
raise ValueError("Error %d while instantiating SHA256"
% result)
return get_raw_buffer(bfr)
def hexdigest(self):
"""Return the **printable** digest of the message that has been hashed so far.
This method does not change the state of the hash object.
:Return: A string of 2* `digest_size` characters. It contains only
hexadecimal ASCII digits.
"""
return "".join(["%02x" % bord(x) for x in self.digest()])
def copy(self):
"""Return a copy ("clone") of the hash object.
The copy will have the same internal state as the original hash
object.
This can be used to efficiently compute the digests of strings that
share a common initial substring.
:Return: A hash object of the same type
"""
clone = SHA256Hash()
result = _raw_sha256_lib.SHA256_copy(self._state.get(),
clone._state.get())
if result:
raise ValueError("Error %d while copying SHA256" % result)
return clone
def new(self, data=None):
return SHA256Hash(data)
def new(data=None):
"""Return a fresh instance of the hash object.
:Parameters:
data : byte string
The very first chunk of the message to hash.
It is equivalent to an early call to `SHA256Hash.update()`.
Optional.
:Return: A `SHA256Hash` object
"""
return SHA256Hash().new(data)
#: The size of the resulting hash in bytes.
digest_size = SHA256Hash.digest_size
#: The internal block size of the hash algorithm in bytes.
block_size = SHA256Hash.block_size
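# Illustrative usage sketch (added comment; not part of the original module).
# Assumes the package is importable as Cryptodome.Hash.SHA256:
#
#   from Cryptodome.Hash import SHA256
#   h = SHA256.new(b'Hello')
#   h.update(b' world')
#   print(h.hexdigest())    # 64 hex characters for the 32-byte digest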
| mit | -5,691,074,405,908,422,000 | 34.445087 | 99 | 0.549881 | false |
authman/Python201609 | pairol_alex/Assignments/pylotNinja/controllers/Ninja.py | 1 | 1500 | from system.core.controller import*
import random
import datetime
from time import strftime
class Ninja(Controller):
def __init__(self, action):
super(Ninja, self).__init__(action)
def index(self):
try:
session['gold']
except:
session['gold'] = 0
try:
session['activities']
except:
session['activities'] = []
return self.load_view('ninja.html')
def clear(self):
session.clear()
return redirect ('/')
def process(self):
action = request.form["action"]
randomNumber = random.random()
print randomNumber
if action == "farm":
earn = int(randomNumber*10)+10
elif action == "cave":
earn = int(randomNumber*5)+5
elif action == "house":
earn = int(randomNumber*3)+3
elif action == "casino":
earn = int(randomNumber*100)-50
session['gold'] += earn
timeNow = datetime.datetime.now().strftime('%Y/%m/%d')
if earn >=0 :
newAction = {'status' : 'earn',
'action' : "Earned {} gold from {} ({})".format(earn, action, timeNow)}
else:
            newAction = {'status' : 'lost',
            'action' : "Entered Casino and lost {} gold ({})".format(-earn, timeNow)}
print newAction
session["activities"].append(newAction)
print session["activities"]
return redirect('/')
| mit | 7,202,244,967,128,972,000 | 22.809524 | 93 | 0.529333 | false |
dbrgn/fahrplan | fahrplan/tests/test.py | 1 | 9255 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import sys
from datetime import datetime
from subprocess import Popen, PIPE
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
import unittest2 as unittest
else:
import unittest
from .. import meta
from .. import parser
BASE_COMMAND = 'python -m fahrplan.main'
try:
ENCODING = sys.stdout.encoding or 'utf-8'
except AttributeError:
ENCODING = 'utf-8'
# Run command
class CommandOutput(object):
def __init__(self, stdout, stderr, code):
self.std_err = stderr
self.std_out = stdout
self.status_code = code
def run_command(command):
p = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
return CommandOutput(stdout.decode(ENCODING), stderr.decode(ENCODING), p.returncode)
class TestBasicArgumentHandling(unittest.TestCase):
def testRequiredArgumentsMissing(self):
r = run_command('{0} von bern'.format(BASE_COMMAND))
self.assertEqual('Error: "from" and "to" arguments must be present!\n', r.std_err)
def testVersionInfo(self):
args = ['-v', '--version', '-d -i -v']
for arg in args:
r = run_command('{0} {1}'.format(BASE_COMMAND, arg))
self.assertEqual('%s %s\n' % (meta.title, meta.version), r.std_out)
def testHelp(self):
args = ['-h', '--help', '-d -i --help']
for arg in args:
r = run_command('{0} {1}'.format(BASE_COMMAND, arg))
self.assertTrue(meta.description in r.std_out)
self.assertTrue('usage:' in r.std_out)
self.assertTrue('optional arguments:' in r.std_out)
self.assertTrue('positional arguments:' in r.std_out)
self.assertTrue('Examples:' in r.std_out)
class TestInputParsing(unittest.TestCase):
valid_expected_result = {
'arrival': '19:00',
'departure': '18:30',
'from': 'Zürich',
'to': 'Locarno',
'via': 'Genève',
}
def testEmptyArguments(self):
tokens = []
data, language = parser.parse_input(tokens)
self.assertEqual({}, data)
self.assertIsNone(language)
def testOneValidArgument(self):
tokens = 'from'.split()
data, language = parser.parse_input(tokens)
self.assertEqual({}, data)
self.assertIsNone(language)
def testOneInvalidArgument(self):
tokens = 'foobar'.split()
data, language = parser.parse_input(tokens)
self.assertEqual({}, data)
self.assertIsNone(language)
def testValidArgumentsEn(self):
tokens = 'from Zürich to Locarno via Genève departure 18:30 arrival 19:00'.split()
data, language = parser._process_tokens(tokens, sloppy_validation=True)
self.assertEqual(self.valid_expected_result, data)
self.assertEqual('en', language)
def testValidArgumentsDe(self):
tokens = 'von Zürich nach Locarno via Genève ab 18:30 an 19:00'.split()
data, language = parser._process_tokens(tokens, sloppy_validation=True)
self.assertEqual(self.valid_expected_result, data)
self.assertEqual('de', language)
def testValidArgumentsFr(self):
tokens = 'de Zürich à Locarno via Genève départ 18:30 arrivée 19:00'.split()
data, language = parser._process_tokens(tokens, sloppy_validation=True)
self.assertEqual(self.valid_expected_result, data)
self.assertEqual('fr', language)
def testTwoArguments(self):
tokens = 'Zürich Basel'.split()
data, language = parser.parse_input(tokens)
self.assertEqual({'from': 'Zürich', 'to': 'Basel'}, data)
self.assertEqual('en', language)
def testNotEnoughArgument(self):
tokens = 'from basel via bern'.split()
self.assertRaises(ValueError, parser.parse_input, tokens)
def testBasicDepartureTime(self):
tokens = 'von basel nach bern ab 1945'.split()
expected = {'from': 'basel', 'time': '19:45', 'to': 'bern'}
self.assertEqual(expected, parser.parse_input(tokens)[0])
def testBasicArrivalTime(self):
tokens = 'von basel nach bern an 18:00'.split()
expected = {'from': 'basel', 'isArrivalTime': 1, 'time': '18:00', 'to': 'bern'}
self.assertEqual(expected, parser.parse_input(tokens)[0])
def testImmediateTimes(self):
now = datetime.now().strftime('%H:%M')
queries = [
'von basel nach bern ab jetzt'.split(),
'von basel nach bern ab sofort'.split(),
'from basel to bern departure now'.split(),
'from basel to bern departure right now'.split(),
'from basel to bern departure immediately'.split(),
'de basel à bern départ maitenant'.split(),
]
for tokens in queries:
data, _ = parser.parse_input(tokens)
self.assertEqual(now, data['time'])
def testNoonTimes(self):
queries = [
'von basel nach bern ab mittag'.split(),
'from basel to bern departure noon'.split(),
'de basel à bern départ midi'.split(),
]
for tokens in queries:
data, _ = parser.parse_input(tokens)
self.assertEqual('12:00', data['time'])
def testMidnightTimes(self):
queries = [
'von basel nach bern ab mitternacht'.split(),
'from basel to bern departure midnight'.split(),
'de basel à bern départ minuit'.split(),
]
for tokens in queries:
data, _ = parser.parse_input(tokens)
self.assertEqual('23:59', data['time'])
def testAtTimes(self):
queries = [
'von basel nach bern ab am mittag'.split(),
'von basel nach bern ab um 12:00'.split(),
'from basel to bern departure at noon'.split(),
'from basel to bern departure at 12:00'.split(),
]
for tokens in queries:
data, _ = parser.parse_input(tokens)
self.assertEqual('12:00', data['time'])
def testDates(self):
year = datetime.now().year
queries = [
'von basel nach bern ab 22/10/{} 13:00'.format(year).split(),
'von basel nach bern ab um 22/10 13:00'.split(),
'from basel to bern departure 22/10 13:00'.split(),
'from basel to bern departure 22/10 13:00'.split(),
]
for tokens in queries:
data, _ = parser.parse_input(tokens)
self.assertEqual('13:00', data['time'])
self.assertEqual('{}/10/22'.format(year), data['date'])
class TestBasicQuery(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Setup method that is only run once."""
cmd = '{0} von basel nach zürich ab 07:00'.format(BASE_COMMAND)
cls.r = run_command(cmd)
cls.rows = cls.r.std_out.split('\n')
def returnStatus(self):
"""The command should return the status code 0."""
self.assertEqual(0, self.r.status_code)
def testRowCount(self):
"""A normal output table should have 16 rows."""
self.assertEqual(16, len(self.rows))
def testHeadline(self):
"""Test the headline items."""
headline_items = ['Station', 'Platform', 'Date', 'Time',
'Duration', 'Chg.', 'With', 'Occupancy']
for item in headline_items:
self.assertIn(item, self.rows[1])
def testStationNames(self):
"""Station names should be "Basel SBB" and "Zürich HB"."""
self.assertTrue("Basel SBB" in self.rows[3])
self.assertTrue("Zürich HB" in self.rows[4])
class TestLanguages(unittest.TestCase):
def testBasicQuery(self):
"""
Test a query in three languages and assert that the output of all
three queries is equal.
"""
args = ['von bern nach basel via zürich ab 15:00',
'from bern to basel via zürich departure 15:00',
'de bern à basel via zürich départ 15:00']
jobs = [run_command('{0} {1}'.format(BASE_COMMAND, arg)) for arg in args]
statuscodes = [job.status_code for job in jobs]
self.assertEqual([0, 0, 0], statuscodes)
stdout_values = [job.std_out for job in jobs]
self.assertTrue(stdout_values[1:] == stdout_values[:-1])
class RegressionTests(unittest.TestCase):
def testIss11(self):
"""
Github issue #11:
Don't allow both departure and arrival time.
"""
args = 'von bern nach basel ab 15:00 an 16:00'
query = run_command('{0} {1}'.format(BASE_COMMAND, args))
self.assertEqual('Error: You can\'t specify both departure *and* arrival time.\n',
query.std_err)
def testIss13(self):
"""
Github issue #13:
Station not found: ValueError: max() arg is an empty sequence.
"""
args = 'von zuerich manegg nach nach stadelhofen'
query = run_command('{0} {1}'.format(BASE_COMMAND, args))
self.assertEqual(0, query.status_code, 'Program terminated with statuscode != 0')
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -7,256,230,340,062,512,000 | 34.629344 | 90 | 0.60013 | false |
EducationForDevelopment/webapp | lib/flask/__init__.py | 345 | 1672 | # -*- coding: utf-8 -*-
"""
flask
~~~~~
A microframework based on Werkzeug. It's extensively documented
and follows best practice patterns.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.10'
# utilities we import from Werkzeug and Jinja2 that are unused
# in the module but are exported as public interface.
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from jinja2 import Markup, escape
from .app import Flask, Request, Response
from .config import Config
from .helpers import url_for, flash, send_file, send_from_directory, \
get_flashed_messages, get_template_attribute, make_response, safe_join, \
stream_with_context
from .globals import current_app, g, request, session, _request_ctx_stack, \
_app_ctx_stack
from .ctx import has_request_context, has_app_context, \
after_this_request, copy_current_request_context
from .module import Module
from .blueprints import Blueprint
from .templating import render_template, render_template_string
# the signals
from .signals import signals_available, template_rendered, request_started, \
request_finished, got_request_exception, request_tearing_down, \
appcontext_tearing_down, appcontext_pushed, \
appcontext_popped, message_flashed
# We're not exposing the actual json module but a convenient wrapper around
# it.
from . import json
# This was the only thing that flask used to export at one point and it had
# a more generic name.
jsonify = json.jsonify
# backwards compat, goes away in 1.0
from .sessions import SecureCookieSession as Session
json_available = True
| mit | 241,608,978,765,968,220 | 32.44 | 77 | 0.746411 | false |
sshleifer/object_detection_kitti | object_detection/core/matcher_test.py | 21 | 7101 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.matcher."""
import numpy as np
import tensorflow as tf
from object_detection.core import matcher
class AnchorMatcherTest(tf.test.TestCase):
def test_get_correct_matched_columnIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [0, 1, 3, 5]
matched_column_indices = match.matched_column_indices()
self.assertEquals(matched_column_indices.dtype, tf.int32)
with self.test_session() as sess:
matched_column_indices = sess.run(matched_column_indices)
self.assertAllEqual(matched_column_indices, expected_column_indices)
def test_get_correct_counts(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
exp_num_matched_columns = 4
exp_num_unmatched_columns = 2
exp_num_ignored_columns = 1
num_matched_columns = match.num_matched_columns()
num_unmatched_columns = match.num_unmatched_columns()
num_ignored_columns = match.num_ignored_columns()
self.assertEquals(num_matched_columns.dtype, tf.int32)
self.assertEquals(num_unmatched_columns.dtype, tf.int32)
self.assertEquals(num_ignored_columns.dtype, tf.int32)
with self.test_session() as sess:
(num_matched_columns_out, num_unmatched_columns_out,
num_ignored_columns_out) = sess.run(
[num_matched_columns, num_unmatched_columns, num_ignored_columns])
self.assertAllEqual(num_matched_columns_out, exp_num_matched_columns)
self.assertAllEqual(num_unmatched_columns_out, exp_num_unmatched_columns)
self.assertAllEqual(num_ignored_columns_out, exp_num_ignored_columns)
def testGetCorrectUnmatchedColumnIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [2, 4]
unmatched_column_indices = match.unmatched_column_indices()
self.assertEquals(unmatched_column_indices.dtype, tf.int32)
with self.test_session() as sess:
unmatched_column_indices = sess.run(unmatched_column_indices)
self.assertAllEqual(unmatched_column_indices, expected_column_indices)
def testGetCorrectMatchedRowIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_row_indices = [3, 1, 0, 5]
matched_row_indices = match.matched_row_indices()
self.assertEquals(matched_row_indices.dtype, tf.int32)
with self.test_session() as sess:
matched_row_inds = sess.run(matched_row_indices)
self.assertAllEqual(matched_row_inds, expected_row_indices)
def test_get_correct_ignored_column_indices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [6]
ignored_column_indices = match.ignored_column_indices()
self.assertEquals(ignored_column_indices.dtype, tf.int32)
with self.test_session() as sess:
ignored_column_indices = sess.run(ignored_column_indices)
self.assertAllEqual(ignored_column_indices, expected_column_indices)
def test_get_correct_matched_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [True, True, False, True, False, True, False]
matched_column_indicator = match.matched_column_indicator()
self.assertEquals(matched_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
matched_column_indicator = sess.run(matched_column_indicator)
self.assertAllEqual(matched_column_indicator, expected_column_indicator)
def test_get_correct_unmatched_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [False, False, True, False, True, False, False]
unmatched_column_indicator = match.unmatched_column_indicator()
self.assertEquals(unmatched_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
unmatched_column_indicator = sess.run(unmatched_column_indicator)
self.assertAllEqual(unmatched_column_indicator, expected_column_indicator)
def test_get_correct_ignored_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [False, False, False, False, False, False, True]
ignored_column_indicator = match.ignored_column_indicator()
self.assertEquals(ignored_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
ignored_column_indicator = sess.run(ignored_column_indicator)
self.assertAllEqual(ignored_column_indicator, expected_column_indicator)
def test_get_correct_unmatched_ignored_column_indices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [2, 4, 6]
unmatched_ignored_column_indices = (match.
unmatched_or_ignored_column_indices())
self.assertEquals(unmatched_ignored_column_indices.dtype, tf.int32)
with self.test_session() as sess:
unmatched_ignored_column_indices = sess.run(
unmatched_ignored_column_indices)
self.assertAllEqual(unmatched_ignored_column_indices,
expected_column_indices)
def test_all_columns_accounted_for(self):
# Note: deliberately setting to small number so not always
# all possibilities appear (matched, unmatched, ignored)
num_matches = 10
match_results = tf.random_uniform(
[num_matches], minval=-2, maxval=5, dtype=tf.int32)
match = matcher.Match(match_results)
matched_column_indices = match.matched_column_indices()
unmatched_column_indices = match.unmatched_column_indices()
ignored_column_indices = match.ignored_column_indices()
with self.test_session() as sess:
matched, unmatched, ignored = sess.run([
matched_column_indices, unmatched_column_indices,
ignored_column_indices
])
all_indices = np.hstack((matched, unmatched, ignored))
all_indices_sorted = np.sort(all_indices)
self.assertAllEqual(all_indices_sorted,
np.arange(num_matches, dtype=np.int32))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 5,129,903,649,712,732,000 | 46.34 | 80 | 0.698071 | false |
caidongyun/pylearn2 | pylearn2/datasets/avicenna.py | 37 | 1062 | """
.. todo::
WRITEME
"""
from pylearn2.datasets import utlc
import numpy as N
class Avicenna(object):
"""
.. todo::
WRITEME
Parameters
----------
which_set : WRITEME
standardize : WRITEME
"""
def __init__(self, which_set, standardize):
train, valid, test = utlc.load_ndarray_dataset('avicenna')
if which_set == 'train':
self.X = train
elif which_set == 'valid':
self.X = valid
elif which_set == 'test':
self.X = test
else:
assert False
if standardize:
union = N.concatenate([train, valid, test], axis=0)
# perform mean and std in float64 to avoid losing
# too much numerical precision
self.X -= union.mean(axis=0, dtype='float64')
std = union.std(axis=0, dtype='float64')
std[std < 1e-3] = 1e-3
self.X /= std
def get_design_matrix(self):
"""
.. todo::
WRITEME
"""
return self.X
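# Illustrative usage (added comment; assumes the UTLC 'avicenna' dataset files
# are available to pylearn2):
#
#   train = Avicenna(which_set='train', standardize=True)
#   X = train.get_design_matrix()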
| bsd-3-clause | -4,869,421,321,122,092,000 | 20.24 | 66 | 0.499058 | false |
iem-projects/WILMAmix | WILMA/gui/Translator.py | 1 | 1678 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2013, IOhannes m zmölnig, IEM
# This file is part of WILMix
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WILMix. If not, see <http://www.gnu.org/licenses/>.
import logging as logging_
logging = logging_.getLogger('WILMA.gui.Translator')
import locale, os
from PySide.QtCore import QTranslator, QLibraryInfo
class Translator:
def __init__(self, oApp):
try:
# Install the appropriate editor translation file
sLocale = locale.getdefaultlocale()[0]
oTranslator = QTranslator()
path=os.path.join('i18n', sLocale)
if oTranslator.load(path):
oApp.installTranslator(oTranslator)
logging.debug( "translator: OK")
## # Install the appropriate Qt translation file
## oTranslatorQt = QTranslator()
## print 'qt_' + sLocale, QLibraryInfo.location(QLibraryInfo.TranslationsPath)
## if oTranslatorQt.load('qt_' + sLocale, QLibraryInfo.location(QLibraryInfo.TranslationsPath)):
## oApp.installTranslator(oTranslatorQt)
except Exception, oEx:
logging.exception( "translator")
pass
| gpl-2.0 | -1,814,120,234,432,210,200 | 36.244444 | 101 | 0.711814 | false |
dstufft/sqlalchemy | test/dialect/mysql/test_query.py | 12 | 6313 | # coding: utf-8
from sqlalchemy.testing import eq_, is_
from sqlalchemy import *
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import testing
class IdiosyncrasyTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'mysql'
__backend__ = True
@testing.emits_warning()
def test_is_boolean_symbols_despite_no_native(self):
is_(
testing.db.scalar(select([cast(true().is_(true()), Boolean)])),
True
)
is_(
testing.db.scalar(select([cast(true().isnot(true()), Boolean)])),
False
)
is_(
testing.db.scalar(select([cast(false().is_(false()), Boolean)])),
True
)
class MatchTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'mysql'
__backend__ = True
@classmethod
def setup_class(cls):
global metadata, cattable, matchtable
metadata = MetaData(testing.db)
cattable = Table('cattable', metadata,
Column('id', Integer, primary_key=True),
Column('description', String(50)),
mysql_engine='MyISAM'
)
matchtable = Table('matchtable', metadata,
Column('id', Integer, primary_key=True),
Column('title', String(200)),
Column('category_id', Integer, ForeignKey('cattable.id')),
mysql_engine='MyISAM'
)
metadata.create_all()
cattable.insert().execute([
{'id': 1, 'description': 'Python'},
{'id': 2, 'description': 'Ruby'},
])
matchtable.insert().execute([
{'id': 1,
'title': 'Agile Web Development with Ruby On Rails',
'category_id': 2},
{'id': 2,
'title': 'Dive Into Python',
'category_id': 1},
{'id': 3,
'title': "Programming Matz's Ruby",
'category_id': 2},
{'id': 4,
'title': 'The Definitive Guide to Django',
'category_id': 1},
{'id': 5,
'title': 'Python in a Nutshell',
'category_id': 1}
])
@classmethod
def teardown_class(cls):
metadata.drop_all()
@testing.fails_on('mysql+mysqlconnector', 'uses pyformat')
def test_expression_format(self):
format = testing.db.dialect.paramstyle == 'format' and '%s' or '?'
self.assert_compile(
matchtable.c.title.match('somstr'),
"MATCH (matchtable.title) AGAINST (%s IN BOOLEAN MODE)" % format)
@testing.fails_on('mysql+mysqldb', 'uses format')
@testing.fails_on('mysql+pymysql', 'uses format')
@testing.fails_on('mysql+cymysql', 'uses format')
@testing.fails_on('mysql+oursql', 'uses format')
@testing.fails_on('mysql+pyodbc', 'uses format')
@testing.fails_on('mysql+zxjdbc', 'uses format')
def test_expression_pyformat(self):
format = '%(title_1)s'
self.assert_compile(
matchtable.c.title.match('somstr'),
"MATCH (matchtable.title) AGAINST (%s IN BOOLEAN MODE)" % format)
def test_simple_match(self):
results = (matchtable.select().
where(matchtable.c.title.match('python')).
order_by(matchtable.c.id).
execute().
fetchall())
eq_([2, 5], [r.id for r in results])
def test_not_match(self):
results = (matchtable.select().
where(~matchtable.c.title.match('python')).
order_by(matchtable.c.id).
execute().
fetchall())
eq_([1, 3, 4], [r.id for r in results])
def test_simple_match_with_apostrophe(self):
results = (matchtable.select().
where(matchtable.c.title.match("Matz's")).
execute().
fetchall())
eq_([3], [r.id for r in results])
def test_return_value(self):
# test [ticket:3263]
result = testing.db.execute(
select([
matchtable.c.title.match('Agile Ruby Programming').label('ruby'),
matchtable.c.title.match('Dive Python').label('python'),
matchtable.c.title
]).order_by(matchtable.c.id)
).fetchall()
eq_(
result,
[
(2.0, 0.0, 'Agile Web Development with Ruby On Rails'),
(0.0, 2.0, 'Dive Into Python'),
(2.0, 0.0, "Programming Matz's Ruby"),
(0.0, 0.0, 'The Definitive Guide to Django'),
(0.0, 1.0, 'Python in a Nutshell')
]
)
def test_or_match(self):
results1 = (matchtable.select().
where(or_(matchtable.c.title.match('nutshell'),
matchtable.c.title.match('ruby'))).
order_by(matchtable.c.id).
execute().
fetchall())
eq_([1, 3, 5], [r.id for r in results1])
results2 = (matchtable.select().
where(matchtable.c.title.match('nutshell ruby')).
order_by(matchtable.c.id).
execute().
fetchall())
eq_([1, 3, 5], [r.id for r in results2])
def test_and_match(self):
results1 = (matchtable.select().
where(and_(matchtable.c.title.match('python'),
matchtable.c.title.match('nutshell'))).
execute().
fetchall())
eq_([5], [r.id for r in results1])
results2 = (matchtable.select().
where(matchtable.c.title.match('+python +nutshell')).
execute().
fetchall())
eq_([5], [r.id for r in results2])
def test_match_across_joins(self):
results = (matchtable.select().
where(and_(cattable.c.id==matchtable.c.category_id,
or_(cattable.c.description.match('Ruby'),
matchtable.c.title.match('nutshell')))).
order_by(matchtable.c.id).
execute().
fetchall())
eq_([1, 3, 5], [r.id for r in results])
| mit | 9,029,266,385,025,060,000 | 34.268156 | 81 | 0.503564 | false |
jsilhan/dnf-plugins-core | plugins/copr.py | 1 | 16500 | # supplies the 'copr' command.
#
# Copyright (C) 2014-2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import print_function
from dnf.pycomp import PY3
from subprocess import call
from dnfpluginscore import _, logger
from dnf.i18n import ucd
import dnf
import glob
import json
import os
import platform
import shutil
import stat
import rpm
PLUGIN_CONF = 'copr'
YES = set([_('yes'), _('y')])
NO = set([_('no'), _('n'), ''])
# compatibility with Py2 and Py3 - rename raw_input() to input() on Py2
try:
input = raw_input
except NameError:
pass
if PY3:
from configparser import ConfigParser
else:
from ConfigParser import ConfigParser
@dnf.plugin.register_command
class CoprCommand(dnf.cli.Command):
""" Copr plugin for DNF """
chroot_config = None
copr_url = "https://copr.fedorainfracloud.org"
aliases = ("copr",)
summary = _("Interact with Copr repositories.")
usage = _("""
enable name/project [chroot]
disable name/project
remove name/project
list name
search project
Examples:
copr enable rhscl/perl516 epel-6-x86_64
copr enable ignatenkobrain/ocltoys
copr disable rhscl/perl516
copr remove rhscl/perl516
copr list ignatenkobrain
copr search tests
""")
@staticmethod
def set_argparser(parser):
parser.add_argument('subcommand', nargs=1,
choices=['help', 'enable', 'disable',
'remove', 'list', 'search'])
parser.add_argument('arg', nargs='*')
def configure(self):
raw_config = ConfigParser()
filepath = os.path.join(os.path.expanduser("~"), ".config", "copr")
if raw_config.read(filepath):
if PY3:
self.copr_url = raw_config["copr-cli"].get("copr_url", None)
else:
self.copr_url = raw_config.get("copr-cli", "copr_url", None)
if self.copr_url != "https://copr.fedorainfracloud.org":
print(_("Warning: we are using non-standard Copr URL '{}'.").format(self.copr_url))
# Useful for forcing a distribution
copr_plugin_config = ConfigParser()
config_file = None
for path in self.base.conf.pluginconfpath:
test_config_file = '{}/{}.conf'.format(path, PLUGIN_CONF)
if os.path.isfile(test_config_file):
config_file = test_config_file
if config_file is not None:
copr_plugin_config.read(config_file)
if copr_plugin_config.has_option('main', 'distribution') and copr_plugin_config.has_option('main', 'releasever'):
distribution = copr_plugin_config.get('main', 'distribution')
releasever = copr_plugin_config.get('main', 'releasever')
self.chroot_config = [distribution, releasever]
else:
self.chroot_config = [False, False]
def run(self):
subcommand = self.opts.subcommand[0]
if subcommand == "help":
self.cli.optparser.print_help(self)
return 0
try:
project_name = self.opts.arg[0]
except (ValueError, IndexError):
logger.critical(
_('Error: ') +
_('exactly two additional parameters to '
'copr command are required'))
self.cli.optparser.print_help(self)
raise dnf.cli.CliError(
_('exactly two additional parameters to '
'copr command are required'))
try:
chroot = self.opts.arg[1]
except IndexError:
chroot = self._guess_chroot(self.chroot_config)
# commands without defined copr_username/copr_projectname
if subcommand == "list":
self._list_user_projects(project_name)
return
if subcommand == "search":
self._search(project_name)
return
try:
copr_username, copr_projectname = project_name.split("/")
except ValueError:
logger.critical(
_('Error: ') +
_('use format `copr_username/copr_projectname` '
'to reference copr project'))
raise dnf.cli.CliError(_('bad copr project format'))
repo_filename = "{}/_copr_{}-{}.repo" \
.format(self.base.conf.get_reposdir, copr_username, copr_projectname)
if subcommand == "enable":
self._need_root()
self._ask_user("""
You are about to enable a Copr repository. Please note that this
repository is not part of the main distribution, and quality may vary.
The Fedora Project does not exercise any power over the contents of
this repository beyond the rules outlined in the Copr FAQ at
<https://fedorahosted.org/copr/wiki/UserDocs#WhatIcanbuildinCopr>, and
packages are not held to any quality or security level.
Please do not file bug reports about these packages in Fedora
Bugzilla. In case of problems, contact the owner of this repository.
Do you want to continue? [y/N]: """)
self._download_repo(project_name, repo_filename, chroot)
logger.info(_("Repository successfully enabled."))
elif subcommand == "disable":
self._need_root()
self._disable_repo(copr_username, copr_projectname)
logger.info(_("Repository successfully disabled."))
elif subcommand == "remove":
self._need_root()
self._remove_repo(repo_filename)
logger.info(_("Repository successfully removed."))
else:
raise dnf.exceptions.Error(
_('Unknown subcommand {}.').format(subcommand))
def _list_user_projects(self, user_name):
# http://copr.fedorainfracloud.org/api/coprs/ignatenkobrain/
api_path = "/api/coprs/{}/".format(user_name)
res = self.base.urlopen(self.copr_url + api_path, mode='w+')
try:
json_parse = json.loads(res.read())
except ValueError:
raise dnf.exceptions.Error(
_("Can't parse repositories for username '{}'.")
.format(user_name))
self._check_json_output(json_parse)
section_text = _("List of {} coprs").format(user_name)
self._print_match_section(section_text)
i = 0
while i < len(json_parse["repos"]):
msg = "{0}/{1} : ".format(user_name,
json_parse["repos"][i]["name"])
desc = json_parse["repos"][i]["description"]
if not desc:
desc = _("No description given")
msg = self.base.output.fmtKeyValFill(ucd(msg), desc)
print(msg)
i += 1
def _search(self, query):
# http://copr.fedorainfracloud.org/api/coprs/search/tests/
api_path = "/api/coprs/search/{}/".format(query)
res = self.base.urlopen(self.copr_url + api_path, mode='w+')
try:
json_parse = json.loads(res.read())
except ValueError:
raise dnf.exceptions.Error(_("Can't parse search for '{}'."
).format(query))
self._check_json_output(json_parse)
section_text = _("Matched: {}").format(query)
self._print_match_section(section_text)
i = 0
while i < len(json_parse["repos"]):
msg = "{0}/{1} : ".format(json_parse["repos"][i]["username"],
json_parse["repos"][i]["coprname"])
desc = json_parse["repos"][i]["description"]
if not desc:
desc = _("No description given.")
msg = self.base.output.fmtKeyValFill(ucd(msg), desc)
print(msg)
i += 1
def _print_match_section(self, text):
formatted = self.base.output.fmtSection(text)
print(formatted)
def _ask_user(self, question):
if self.base.conf.assumeyes and not self.base.conf.assumeno:
return
elif self.base.conf.assumeno and not self.base.conf.assumeyes:
raise dnf.exceptions.Error(_('Safe and good answer. Exiting.'))
answer = None
while not ((answer in YES) or (answer in NO)):
answer = ucd(input(question)).lower()
answer = _(answer)
if answer in YES:
return
else:
raise dnf.exceptions.Error(_('Safe and good answer. Exiting.'))
@classmethod
def _need_root(cls):
# FIXME this should do dnf itself (BZ#1062889)
if os.geteuid() != 0:
raise dnf.exceptions.Error(
_('This command has to be run under the root user.'))
@staticmethod
def _guess_chroot(chroot_config):
""" Guess which chroot is equivalent to this machine """
# FIXME Copr should generate non-specific arch repo
dist = chroot_config
if dist is None or (dist[0] is False) or (dist[1] is False):
dist = platform.linux_distribution()
if "Fedora" in dist:
# x86_64 because repo-file is same for all arch
# ($basearch is used)
if "Rawhide" in dist:
chroot = ("fedora-rawhide-x86_64")
else:
chroot = ("fedora-{}-x86_64".format(dist[1]))
elif "Mageia" in dist:
# Get distribution architecture (Mageia does not use $basearch)
distarch = rpm.expandMacro("%{distro_arch}")
# Set the chroot
if "Cauldron" in dist:
chroot = ("mageia-cauldron-{}".format(distarch))
else:
chroot = ("mageia-{0}-{1}".format(dist[1], distarch))
else:
chroot = ("epel-%s-x86_64" % dist[1].split(".", 1)[0])
return chroot
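    # Example values this helper can produce (illustrative): "fedora-24-x86_64",
    # "fedora-rawhide-x86_64", "epel-7-x86_64", "mageia-cauldron-x86_64".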
def _download_repo(self, project_name, repo_filename, chroot=None):
if chroot is None:
chroot = self._guess_chroot(self.chroot_config)
short_chroot = '-'.join(chroot.split('-')[:2])
#http://copr.fedorainfracloud.org/coprs/larsks/rcm/repo/epel-7-x86_64/
api_path = "/coprs/{0}/repo/{1}/".format(project_name, short_chroot)
try:
f = self.base.urlopen(self.copr_url + api_path, mode='w+')
except IOError as e:
if os.path.exists(repo_filename):
os.remove(repo_filename)
if '404' in str(e):
if PY3:
import urllib.request
try:
res = urllib.request.urlopen(self.copr_url + "/coprs/" + project_name)
status_code = res.getcode()
except urllib.error.HTTPError as e:
status_code = e.getcode()
else:
import urllib
res = urllib.urlopen(self.copr_url + "/coprs/" + project_name)
status_code = res.getcode()
if str(status_code) != '404':
raise dnf.exceptions.Error(_("This repository does not have"\
" any builds yet so you cannot enable it now."))
else:
raise dnf.exceptions.Error(_("Such repository does not exist."))
raise
shutil.copy2(f.name, repo_filename)
os.chmod(repo_filename, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
@classmethod
def _remove_repo(cls, repo_filename):
# FIXME is it Copr repo ?
try:
os.remove(repo_filename)
except OSError as e:
raise dnf.exceptions.Error(str(e))
@classmethod
def _disable_repo(cls, copr_username, copr_projectname):
exit_code = call(["dnf", "config-manager", "--set-disabled",
"{}-{}".format(
cls._sanitize_username(copr_username),
copr_projectname)])
if exit_code != 0:
raise dnf.exceptions.Error(
_("Failed to disable copr repo {}/{}"
.format(copr_username, copr_projectname)))
@classmethod
def _get_data(cls, f):
""" Wrapper around response from server
        Check the data and print a nice error in case of a problem (returning None);
        otherwise return the parsed JSON object.
"""
try:
output = json.loads(f.read())
except ValueError:
dnf.cli.CliError(_("Unknown response from server."))
return
return output
@classmethod
def _check_json_output(cls, json_obj):
if json_obj["output"] != "ok":
raise dnf.exceptions.Error("{}".format(json_obj["error"]))
@classmethod
def _sanitize_username(cls, copr_username):
if copr_username[0] == "@":
return "group_{}".format(copr_username[1:])
else:
return copr_username
@dnf.plugin.register_command
class PlaygroundCommand(CoprCommand):
""" Playground plugin for DNF """
aliases = ("playground",)
summary = _("Interact with Playground repository.")
usage = " [enable|disable|upgrade]"
def _cmd_enable(self, chroot):
self._need_root()
self._ask_user("""
You are about to enable a Playground repository.
Do you want to continue? [y/N]: """)
api_url = "{0}/api/playground/list/".format(
self.copr_url)
f = self.base.urlopen(api_url, mode="w+")
output = self._get_data(f)
f.close()
if output["output"] != "ok":
raise dnf.cli.CliError(_("Unknown response from server."))
for repo in output["repos"]:
project_name = "{0}/{1}".format(repo["username"],
repo["coprname"])
repo_filename = "{}/_playground_{}.repo".format(self.base.conf.get_reposdir, project_name.replace("/", "-"))
try:
if chroot not in repo["chroots"]:
continue
api_url = "{0}/api/coprs/{1}/detail/{2}/".format(
self.copr_url, project_name, chroot)
f = self.base.urlopen(api_url, mode='w+')
output2 = self._get_data(f)
f.close()
if (output2 and ("output" in output2)
and (output2["output"] == "ok")):
self._download_repo(project_name, repo_filename, chroot)
except dnf.exceptions.Error:
# likely 404 and that repo does not exist
pass
def _cmd_disable(self):
self._need_root()
for repo_filename in glob.glob("{}/_playground_*.repo".format(self.base.conf.get_reposdir)):
self._remove_repo(repo_filename)
@staticmethod
def set_argparser(parser):
parser.add_argument('subcommand', nargs=1,
choices=['enable', 'disable', 'upgrade'])
def run(self):
subcommand = self.opts.subcommand[0]
chroot = self._guess_chroot(self.chroot_config)
if subcommand == "enable":
self._cmd_enable(chroot)
logger.info(_("Playground repositories successfully enabled."))
elif subcommand == "disable":
self._cmd_disable()
logger.info(_("Playground repositories successfully disabled."))
elif subcommand == "upgrade":
self._cmd_disable()
self._cmd_enable(chroot)
logger.info(_("Playground repositories successfully updated."))
else:
raise dnf.exceptions.Error(
_('Unknown subcommand {}.').format(subcommand))
| gpl-2.0 | -5,778,732,182,900,510,000 | 38.007092 | 125 | 0.568424 | false |
jcsirot/kubespray | roles/kubernetes-apps/ansible/library/kube.py | 7 | 8019 | #!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = """
---
module: kube
short_description: Manage Kubernetes Cluster
description:
- Create, replace, remove, and stop resources within a Kubernetes Cluster
version_added: "2.0"
options:
name:
required: false
default: null
description:
- The name associated with resource
filename:
required: false
default: null
description:
- The path and filename of the resource(s) definition file.
kubectl:
required: false
default: null
description:
- The path to the kubectl bin
namespace:
required: false
default: null
description:
- The namespace associated with the resource(s)
resource:
required: false
default: null
description:
- The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
label:
required: false
default: null
description:
- The labels used to filter specific resources.
server:
required: false
default: null
description:
- The url for the API server that commands are executed against.
force:
required: false
default: false
description:
- A flag to indicate to force delete, replace, or stop.
all:
required: false
default: false
description:
- A flag to indicate delete all, stop all, or all namespaces when checking exists.
log_level:
required: false
default: 0
description:
- Indicates the level of verbosity of logging by kubectl.
state:
required: false
choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
default: present
description:
- present handles checking existence or creating if definition file provided,
absent handles deleting resource(s) based on other options,
        latest handles creating or updating based on existence,
reloaded handles updating resource(s) definition using definition file,
stopped handles stopping resource(s) based on other options.
requirements:
- kubectl
author: "Kenny Jones (@kenjones-cisco)"
"""
EXAMPLES = """
- name: test nginx is present
kube: name=nginx resource=rc state=present
- name: test nginx is stopped
kube: name=nginx resource=rc state=stopped
- name: test nginx is absent
kube: name=nginx resource=rc state=absent
- name: test nginx is present
kube: filename=/tmp/nginx.yml
"""
class KubeManager(object):
def __init__(self, module):
self.module = module
self.kubectl = module.params.get('kubectl')
if self.kubectl is None:
self.kubectl = module.get_bin_path('kubectl', True)
self.base_cmd = [self.kubectl]
if module.params.get('server'):
self.base_cmd.append('--server=' + module.params.get('server'))
if module.params.get('log_level'):
self.base_cmd.append('--v=' + str(module.params.get('log_level')))
if module.params.get('namespace'):
self.base_cmd.append('--namespace=' + module.params.get('namespace'))
self.all = module.params.get('all')
self.force = module.params.get('force')
self.name = module.params.get('name')
self.filename = module.params.get('filename')
self.resource = module.params.get('resource')
self.label = module.params.get('label')
def _execute(self, cmd):
args = self.base_cmd + cmd
try:
rc, out, err = self.module.run_command(args)
if rc != 0:
self.module.fail_json(
msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
except Exception as exc:
self.module.fail_json(
msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
return out.splitlines()
def _execute_nofail(self, cmd):
args = self.base_cmd + cmd
rc, out, err = self.module.run_command(args)
if rc != 0:
return None
return out.splitlines()
def create(self, check=True):
if check and self.exists():
return []
cmd = ['create']
if not self.filename:
self.module.fail_json(msg='filename required to create')
cmd.append('--filename=' + self.filename)
return self._execute(cmd)
def replace(self):
if not self.force and not self.exists():
return []
cmd = ['replace']
if self.force:
cmd.append('--force')
if not self.filename:
self.module.fail_json(msg='filename required to reload')
cmd.append('--filename=' + self.filename)
return self._execute(cmd)
def delete(self):
if not self.force and not self.exists():
return []
cmd = ['delete']
if self.filename:
cmd.append('--filename=' + self.filename)
else:
if not self.resource:
self.module.fail_json(msg='resource required to delete without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def exists(self):
cmd = ['get']
if not self.resource:
return False
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
cmd.append('--no-headers')
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all-namespaces')
result = self._execute_nofail(cmd)
if not result:
return False
return True
def stop(self):
if not self.force and not self.exists():
return []
cmd = ['stop']
if self.filename:
cmd.append('--filename=' + self.filename)
else:
if not self.resource:
self.module.fail_json(msg='resource required to stop without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(),
filename=dict(),
namespace=dict(),
resource=dict(),
label=dict(),
server=dict(),
kubectl=dict(),
force=dict(default=False, type='bool'),
all=dict(default=False, type='bool'),
log_level=dict(default=0, type='int'),
state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
)
)
changed = False
manager = KubeManager(module)
state = module.params.get('state')
if state == 'present':
result = manager.create()
elif state == 'absent':
result = manager.delete()
elif state == 'reloaded':
result = manager.replace()
elif state == 'stopped':
result = manager.stop()
elif state == 'latest':
if manager.exists():
manager.force = True
result = manager.replace()
else:
result = manager.create(check=False)
else:
module.fail_json(msg='Unrecognized state %s.' % state)
if result:
changed = True
module.exit_json(changed=changed,
msg='success: %s' % (' '.join(result))
)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()
| apache-2.0 | -4,585,314,485,349,802,500 | 25.291803 | 108 | 0.570894 | false |
Kamp9/scipy | scipy/io/tests/test_idl.py | 38 | 19231 | from __future__ import division, print_function, absolute_import
from os import path
from warnings import catch_warnings
DATA_PATH = path.join(path.dirname(__file__), 'data')
import numpy as np
from numpy.testing import (assert_equal, assert_array_equal, run_module_suite,
assert_)
from scipy.io.idl import readsav
def object_array(*args):
"""Constructs a numpy array of objects"""
array = np.empty(len(args), dtype=object)
for i in range(len(args)):
array[i] = args[i]
return array
def assert_identical(a, b):
"""Assert whether value AND type are the same"""
assert_equal(a, b)
if type(b) is str:
assert_equal(type(a), type(b))
else:
assert_equal(np.asarray(a).dtype.type, np.asarray(b).dtype.type)
def assert_array_identical(a, b):
"""Assert whether values AND type are the same"""
assert_array_equal(a, b)
assert_equal(a.dtype.type, b.dtype.type)
# Define vectorized ID function for pointer arrays
vect_id = np.vectorize(id)
class TestIdict:
def test_idict(self):
custom_dict = {'a': np.int16(999)}
original_id = id(custom_dict)
s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), idict=custom_dict, verbose=False)
assert_equal(original_id, id(s))
assert_('a' in s)
assert_identical(s['a'], np.int16(999))
assert_identical(s['i8u'], np.uint8(234))
class TestScalars:
# Test that scalar values are read in with the correct value and type
def test_byte(self):
s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
def test_int16(self):
s = readsav(path.join(DATA_PATH, 'scalar_int16.sav'), verbose=False)
assert_identical(s.i16s, np.int16(-23456))
def test_int32(self):
s = readsav(path.join(DATA_PATH, 'scalar_int32.sav'), verbose=False)
assert_identical(s.i32s, np.int32(-1234567890))
def test_float32(self):
s = readsav(path.join(DATA_PATH, 'scalar_float32.sav'), verbose=False)
assert_identical(s.f32, np.float32(-3.1234567e+37))
def test_float64(self):
s = readsav(path.join(DATA_PATH, 'scalar_float64.sav'), verbose=False)
assert_identical(s.f64, np.float64(-1.1976931348623157e+307))
def test_complex32(self):
s = readsav(path.join(DATA_PATH, 'scalar_complex32.sav'), verbose=False)
assert_identical(s.c32, np.complex64(3.124442e13-2.312442e31j))
def test_bytes(self):
s = readsav(path.join(DATA_PATH, 'scalar_string.sav'), verbose=False)
assert_identical(s.s, np.bytes_("The quick brown fox jumps over the lazy python"))
def test_structure(self):
pass
def test_complex64(self):
s = readsav(path.join(DATA_PATH, 'scalar_complex64.sav'), verbose=False)
assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
def test_heap_pointer(self):
pass
def test_object_reference(self):
pass
def test_uint16(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint16.sav'), verbose=False)
assert_identical(s.i16u, np.uint16(65511))
def test_uint32(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint32.sav'), verbose=False)
assert_identical(s.i32u, np.uint32(4294967233))
def test_int64(self):
s = readsav(path.join(DATA_PATH, 'scalar_int64.sav'), verbose=False)
assert_identical(s.i64s, np.int64(-9223372036854774567))
def test_uint64(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint64.sav'), verbose=False)
assert_identical(s.i64u, np.uint64(18446744073709529285))
class TestCompressed(TestScalars):
# Test that compressed .sav files can be read in
def test_compressed(self):
s = readsav(path.join(DATA_PATH, 'various_compressed.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
assert_identical(s.f32, np.float32(-3.1234567e+37))
assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
assert_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
assert_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
assert_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
class TestArrayDimensions:
# Test that multi-dimensional arrays are read in with the correct dimensions
def test_1d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_1d.sav'), verbose=False)
assert_equal(s.array1d.shape, (123, ))
def test_2d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_2d.sav'), verbose=False)
assert_equal(s.array2d.shape, (22, 12))
def test_3d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_3d.sav'), verbose=False)
assert_equal(s.array3d.shape, (11, 22, 12))
def test_4d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_4d.sav'), verbose=False)
assert_equal(s.array4d.shape, (4, 5, 8, 7))
def test_5d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_5d.sav'), verbose=False)
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
def test_6d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_6d.sav'), verbose=False)
assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
def test_7d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_7d.sav'), verbose=False)
assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
def test_8d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_8d.sav'), verbose=False)
assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
class TestStructures:
def test_scalars(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars.sav'), verbose=False)
assert_identical(s.scalars.a, np.array(np.int16(1)))
assert_identical(s.scalars.b, np.array(np.int32(2)))
assert_identical(s.scalars.c, np.array(np.float32(3.)))
assert_identical(s.scalars.d, np.array(np.float64(4.)))
assert_identical(s.scalars.e, np.array([b"spam"], dtype=object))
assert_identical(s.scalars.f, np.array(np.complex64(-1.+3j)))
def test_scalars_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated.sav'), verbose=False)
assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 5))
assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 5))
assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 5))
assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 5))
assert_identical(s.scalars_rep.e, np.repeat(b"spam", 5).astype(object))
assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 5))
def test_scalars_replicated_3d(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated_3d.sav'), verbose=False)
assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.e, np.repeat(b"spam", 24).reshape(4, 3, 2).astype(object))
assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 24).reshape(4, 3, 2))
def test_arrays(self):
s = readsav(path.join(DATA_PATH, 'struct_arrays.sav'), verbose=False)
assert_array_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
assert_array_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
assert_array_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
assert_array_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
def test_arrays_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated.sav'), verbose=False)
# Check column types
assert_(s.arrays_rep.a.dtype.type is np.object_)
assert_(s.arrays_rep.b.dtype.type is np.object_)
assert_(s.arrays_rep.c.dtype.type is np.object_)
assert_(s.arrays_rep.d.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.a.shape, (5, ))
assert_equal(s.arrays_rep.b.shape, (5, ))
assert_equal(s.arrays_rep.c.shape, (5, ))
assert_equal(s.arrays_rep.d.shape, (5, ))
# Check values
for i in range(5):
assert_array_identical(s.arrays_rep.a[i],
np.array([1, 2, 3], dtype=np.int16))
assert_array_identical(s.arrays_rep.b[i],
np.array([4., 5., 6., 7.], dtype=np.float32))
assert_array_identical(s.arrays_rep.c[i],
np.array([np.complex64(1+2j),
np.complex64(7+8j)]))
assert_array_identical(s.arrays_rep.d[i],
np.array([b"cheese", b"bacon", b"spam"],
dtype=object))
def test_arrays_replicated_3d(self):
s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated_3d.sav'), verbose=False)
# Check column types
assert_(s.arrays_rep.a.dtype.type is np.object_)
assert_(s.arrays_rep.b.dtype.type is np.object_)
assert_(s.arrays_rep.c.dtype.type is np.object_)
assert_(s.arrays_rep.d.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.a.shape, (4, 3, 2))
assert_equal(s.arrays_rep.b.shape, (4, 3, 2))
assert_equal(s.arrays_rep.c.shape, (4, 3, 2))
assert_equal(s.arrays_rep.d.shape, (4, 3, 2))
# Check values
for i in range(4):
for j in range(3):
for k in range(2):
assert_array_identical(s.arrays_rep.a[i, j, k],
np.array([1, 2, 3], dtype=np.int16))
assert_array_identical(s.arrays_rep.b[i, j, k],
np.array([4., 5., 6., 7.],
dtype=np.float32))
assert_array_identical(s.arrays_rep.c[i, j, k],
np.array([np.complex64(1+2j),
np.complex64(7+8j)]))
assert_array_identical(s.arrays_rep.d[i, j, k],
np.array([b"cheese", b"bacon", b"spam"],
dtype=object))
def test_inheritance(self):
s = readsav(path.join(DATA_PATH, 'struct_inherit.sav'), verbose=False)
assert_identical(s.fc.x, np.array([0], dtype=np.int16))
assert_identical(s.fc.y, np.array([0], dtype=np.int16))
assert_identical(s.fc.r, np.array([0], dtype=np.int16))
assert_identical(s.fc.c, np.array([4], dtype=np.int16))
class TestPointers:
# Check that pointers in .sav files produce references to the same object in Python
def test_pointers(self):
s = readsav(path.join(DATA_PATH, 'scalar_heap_pointer.sav'), verbose=False)
assert_identical(s.c64_pointer1, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
assert_identical(s.c64_pointer2, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
assert_(s.c64_pointer1 is s.c64_pointer2)
class TestPointerArray:
# Test that pointers in arrays are correctly read in
def test_1d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_1d.sav'), verbose=False)
assert_equal(s.array1d.shape, (123, ))
assert_(np.all(s.array1d == np.float32(4.)))
assert_(np.all(vect_id(s.array1d) == id(s.array1d[0])))
def test_2d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_2d.sav'), verbose=False)
assert_equal(s.array2d.shape, (22, 12))
assert_(np.all(s.array2d == np.float32(4.)))
assert_(np.all(vect_id(s.array2d) == id(s.array2d[0,0])))
def test_3d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_3d.sav'), verbose=False)
assert_equal(s.array3d.shape, (11, 22, 12))
assert_(np.all(s.array3d == np.float32(4.)))
assert_(np.all(vect_id(s.array3d) == id(s.array3d[0,0,0])))
def test_4d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_4d.sav'), verbose=False)
assert_equal(s.array4d.shape, (4, 5, 8, 7))
assert_(np.all(s.array4d == np.float32(4.)))
assert_(np.all(vect_id(s.array4d) == id(s.array4d[0,0,0,0])))
def test_5d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_5d.sav'), verbose=False)
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
assert_(np.all(s.array5d == np.float32(4.)))
assert_(np.all(vect_id(s.array5d) == id(s.array5d[0,0,0,0,0])))
def test_6d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_6d.sav'), verbose=False)
assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
assert_(np.all(s.array6d == np.float32(4.)))
assert_(np.all(vect_id(s.array6d) == id(s.array6d[0,0,0,0,0,0])))
def test_7d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_7d.sav'), verbose=False)
assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
assert_(np.all(s.array7d == np.float32(4.)))
assert_(np.all(vect_id(s.array7d) == id(s.array7d[0,0,0,0,0,0,0])))
def test_8d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_8d.sav'), verbose=False)
assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
assert_(np.all(s.array8d == np.float32(4.)))
assert_(np.all(vect_id(s.array8d) == id(s.array8d[0,0,0,0,0,0,0,0])))
class TestPointerStructures:
# Test that structures are correctly read in
def test_scalars(self):
s = readsav(path.join(DATA_PATH, 'struct_pointers.sav'), verbose=False)
assert_identical(s.pointers.g, np.array(np.float32(4.), dtype=np.object_))
assert_identical(s.pointers.h, np.array(np.float32(4.), dtype=np.object_))
assert_(id(s.pointers.g[0]) == id(s.pointers.h[0]))
def test_pointers_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated.sav'), verbose=False)
assert_identical(s.pointers_rep.g, np.repeat(np.float32(4.), 5).astype(np.object_))
assert_identical(s.pointers_rep.h, np.repeat(np.float32(4.), 5).astype(np.object_))
assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
def test_pointers_replicated_3d(self):
s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated_3d.sav'), verbose=False)
s_expect = np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_)
assert_identical(s.pointers_rep.g, s_expect)
assert_identical(s.pointers_rep.h, s_expect)
assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
def test_arrays(self):
s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays.sav'), verbose=False)
assert_array_identical(s.arrays.g[0], np.repeat(np.float32(4.), 2).astype(np.object_))
assert_array_identical(s.arrays.h[0], np.repeat(np.float32(4.), 3).astype(np.object_))
assert_(np.all(vect_id(s.arrays.g[0]) == id(s.arrays.g[0][0])))
assert_(np.all(vect_id(s.arrays.h[0]) == id(s.arrays.h[0][0])))
assert_(id(s.arrays.g[0][0]) == id(s.arrays.h[0][0]))
def test_arrays_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays_replicated.sav'), verbose=False)
# Check column types
assert_(s.arrays_rep.g.dtype.type is np.object_)
assert_(s.arrays_rep.h.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.g.shape, (5, ))
assert_equal(s.arrays_rep.h.shape, (5, ))
# Check values
for i in range(5):
assert_array_identical(s.arrays_rep.g[i], np.repeat(np.float32(4.), 2).astype(np.object_))
assert_array_identical(s.arrays_rep.h[i], np.repeat(np.float32(4.), 3).astype(np.object_))
assert_(np.all(vect_id(s.arrays_rep.g[i]) == id(s.arrays_rep.g[0][0])))
assert_(np.all(vect_id(s.arrays_rep.h[i]) == id(s.arrays_rep.h[0][0])))
def test_arrays_replicated_3d(self):
pth = path.join(DATA_PATH, 'struct_pointer_arrays_replicated_3d.sav')
s = readsav(pth, verbose=False)
# Check column types
assert_(s.arrays_rep.g.dtype.type is np.object_)
assert_(s.arrays_rep.h.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.g.shape, (4, 3, 2))
assert_equal(s.arrays_rep.h.shape, (4, 3, 2))
# Check values
for i in range(4):
for j in range(3):
for k in range(2):
assert_array_identical(s.arrays_rep.g[i, j, k],
np.repeat(np.float32(4.), 2).astype(np.object_))
assert_array_identical(s.arrays_rep.h[i, j, k],
np.repeat(np.float32(4.), 3).astype(np.object_))
assert_(np.all(vect_id(s.arrays_rep.g[i, j, k]) == id(s.arrays_rep.g[0, 0, 0][0])))
assert_(np.all(vect_id(s.arrays_rep.h[i, j, k]) == id(s.arrays_rep.h[0, 0, 0][0])))
class TestTags:
    '''Test that sav files with a description tag can be read at all'''
def test_description(self):
s = readsav(path.join(DATA_PATH, 'scalar_byte_descr.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
def test_null_pointer():
# Regression test for null pointers.
s = readsav(path.join(DATA_PATH, 'null_pointer.sav'), verbose=False)
assert_identical(s.point, None)
assert_identical(s.check, np.int16(5))
def test_invalid_pointer():
# Regression test for invalid pointers (gh-4613).
# In some files in the wild, pointers can sometimes refer to a heap
    # variable that does not exist. In that case, we now gracefully fail for
    # that variable, replace it with None, and emit a warning.
# Since it's difficult to artificially produce such files, the file used
# here has been edited to force the pointer reference to be invalid.
with catch_warnings(record=True) as w:
s = readsav(path.join(DATA_PATH, 'invalid_pointer.sav'), verbose=False)
assert_(len(w) == 1)
assert_(str(w[0].message) == ("Variable referenced by pointer not found in "
"heap: variable will be set to None"))
assert_identical(s['a'], np.array([None, None]))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | -8,347,595,664,585,654,000 | 43.31106 | 105 | 0.605533 | false |
jkonecki/autorest | ClientRuntimes/Python/msrest/doc/conf.py | 2 | 7620 | # -*- coding: utf-8 -*-
#
# azure-sdk-for-python documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 27 15:42:45 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import pip
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.doctest',
'sphinx.ext.viewcode', 'sphinx.ext.intersphinx']
intersphinx_mapping = {
'python': ('https://docs.python.org/3.5', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'msrest'
copyright = u'2016, Microsoft'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.0'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for extensions ----------------------------------------------------
autoclass_content = 'both'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
#html_theme_options = {'collapsiblesidebar': True}
# Activate the theme.
#pip.main(['install', 'sphinx_bootstrap_theme'])
#import sphinx_bootstrap_theme
#html_theme = 'bootstrap'
#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'msrest-doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'msrest.tex', u'msrest Documentation',
u'Microsoft', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
| mit | -8,192,403,536,649,779,000 | 31.016807 | 83 | 0.709974 | false |
mapr/hue | desktop/core/ext-py/Django-1.6.10/django/utils/encoding.py | 92 | 9512 | from __future__ import unicode_literals
import codecs
import datetime
from decimal import Decimal
import locale
import warnings
from django.utils.functional import Promise
from django.utils import six
from django.utils.six.moves.urllib.parse import quote
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
UnicodeDecodeError.__init__(self, *args)
def __str__(self):
original = UnicodeDecodeError.__str__(self)
return '%s. You passed in %r (%s)' % (original, self.obj,
type(self.obj))
class StrAndUnicode(object):
"""
A class that derives __str__ from __unicode__.
On Python 2, __str__ returns the output of __unicode__ encoded as a UTF-8
bytestring. On Python 3, __str__ returns the output of __unicode__.
Useful as a mix-in. If you support Python 2 and 3 with a single code base,
you can inherit this mix-in and just define __unicode__.
"""
def __init__(self, *args, **kwargs):
warnings.warn("StrAndUnicode is deprecated. Define a __str__ method "
"and apply the @python_2_unicode_compatible decorator "
"instead.", DeprecationWarning, stacklevel=2)
super(StrAndUnicode, self).__init__(*args, **kwargs)
if six.PY3:
def __str__(self):
return self.__unicode__()
else:
def __str__(self):
return self.__unicode__().encode('utf-8')
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if six.PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
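# Minimal usage sketch (illustrative only, not part of the original module):
# a class that defines a single text-returning __str__ and relies on the
# decorator to supply the Python 2 __unicode__/__str__ pair.
@python_2_unicode_compatible
class _ExamplePoint(object):
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __str__(self):
        return u'(%d, %d)' % (self.x, self.y)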
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a text object representing 's' -- unicode on Python 2 and str on
Python 3. Treats bytestrings using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_text(s, encoding, strings_only, errors)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
"""
return isinstance(obj, six.integer_types + (type(None), float, Decimal,
datetime.datetime, datetime.date, datetime.time))
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first, saves 30-40% when s is an instance of
# six.text_type. This function gets called often in that setting.
if isinstance(s, six.text_type):
return s
if strings_only and is_protected_type(s):
return s
try:
if not isinstance(s, six.string_types):
if hasattr(s, '__unicode__'):
s = s.__unicode__()
else:
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
# Note: We use .decode() here, instead of six.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise DjangoUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = ' '.join([force_text(arg, encoding, strings_only,
errors) for arg in s])
return s
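# Illustrative behaviour (assumed values, not an exhaustive spec): bytestrings
# are decoded with the given encoding while text passes through unchanged, e.g.
#   force_text(b'caf\xc3\xa9') -> u'caf\xe9'
#   force_text(u'caf\xe9')     -> u'caf\xe9'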
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, six.memoryview):
s = bytes(s)
if isinstance(s, bytes):
if encoding == 'utf-8':
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
if strings_only and (s is None or isinstance(s, int)):
return s
if isinstance(s, Promise):
return six.text_type(s).encode(encoding, errors)
if not isinstance(s, six.string_types):
try:
if six.PY3:
return six.text_type(s).encode(encoding)
else:
return bytes(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return b' '.join([force_bytes(arg, encoding, strings_only,
errors) for arg in s])
return six.text_type(s).encode(encoding, errors)
else:
return s.encode(encoding, errors)
if six.PY3:
smart_str = smart_text
force_str = force_text
else:
smart_str = smart_bytes
force_str = force_bytes
# backwards compatibility for Python 2
smart_unicode = smart_text
force_unicode = force_text
smart_str.__doc__ = """\
Apply smart_text in Python 3 and smart_bytes in Python 2.
This is suitable for writing to sys.stdout (for instance).
"""
force_str.__doc__ = """\
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987. However, since we are
assuming input is either UTF-8 or unicode already, we can simplify things a
little from the full method.
Returns an ASCII string containing the encoded result.
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.quote already considers all but
# the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
return quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
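# Example (assumed output): non-ASCII characters are percent-encoded as UTF-8
# while characters reserved by RFC 3986 are left untouched, e.g.
#   iri_to_uri(u'/I \u2665 Django/?x=1') -> '/I%20%E2%99%A5%20Django/?x=1'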
def filepath_to_uri(path):
"""Convert a file system path to a URI portion that is suitable for
inclusion in a URL.
We are assuming input is either UTF-8 or unicode already.
This method will encode certain chars that would normally be recognized as
special chars for URIs. Note that this method does not encode the '
character, as it is a valid character within URIs. See
encodeURIComponent() JavaScript function for more details.
Returns an ASCII string containing the encoded result.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return quote(force_bytes(path).replace(b"\\", b"/"), safe=b"/~!*()'")
def get_system_encoding():
"""
    The encoding of the default system locale, falling back to ASCII if the
    encoding is unsupported by Python or could not be determined. See
    tickets #10335 and #5846
"""
try:
encoding = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(encoding)
except Exception:
encoding = 'ascii'
return encoding
DEFAULT_LOCALE_ENCODING = get_system_encoding()
| apache-2.0 | -8,279,716,757,792,622,000 | 36.596838 | 80 | 0.612174 | false |
CiscoUcs/Ironic-UCS | build/lib.linux-x86_64-2.7/ironic/openstack/common/jsonutils.py | 7 | 6832 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import codecs
import datetime
import functools
import inspect
import itertools
import sys
if sys.version_info < (2, 7):
# On Python <= 2.6, json module is not C boosted, so try to use
# simplejson module if available
try:
import simplejson as json
except ImportError:
import json
else:
import json
import six
import six.moves.xmlrpc_client as xmlrpclib
from ironic.openstack.common import gettextutils
from ironic.openstack.common import importutils
from ironic.openstack.common import strutils
from ironic.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in six.iteritems(value))
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
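# Rough usage sketch (the exact datetime string is produced by
# timeutils.strtime and is only approximated here):
#   to_primitive({'when': datetime.datetime(2015, 1, 1), 'tags': set(['a'])})
#   -> {'when': '2015-01-01T00:00:00.000000', 'tags': ['a']}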
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def dump(obj, fp, *args, **kwargs):
return json.dump(obj, fp, *args, **kwargs)
def loads(s, encoding='utf-8', **kwargs):
return json.loads(strutils.safe_decode(s, encoding), **kwargs)
def load(fp, encoding='utf-8', **kwargs):
return json.load(codecs.getreader(encoding)(fp), **kwargs)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
| apache-2.0 | -8,270,880,727,049,315,000 | 34.957895 | 79 | 0.643443 | false |
sogis/Quantum-GIS | python/plugins/processing/core/ProcessingLog.py | 5 | 9837 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ProcessingLog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import re
import os
import codecs
import datetime
from processing.tools.system import userFolder
from processing.core.ProcessingConfig import ProcessingConfig
from qgis.core import *
class ProcessingLog:
LOG_ERROR = 'ERROR'
LOG_INFO = 'INFO'
LOG_WARNING = 'WARNING'
LOG_ALGORITHM = 'ALGORITHM'
DATE_FORMAT = u'%a %b %d %Y %H:%M:%S'.encode('utf-8')
recentAlgs = []
@staticmethod
def startLogging():
if os.path.isfile(ProcessingLog.logFilename()):
logfile = codecs.open(ProcessingLog.logFilename(), 'a',
encoding='utf-8')
else:
logfile = codecs.open(ProcessingLog.logFilename(), 'w',
encoding='utf-8')
logfile.write('Started logging at ' +
datetime.datetime.now().strftime(
ProcessingLog.DATE_FORMAT).decode('utf-8') + '\n')
logfile.close()
@staticmethod
def logFilename():
batchfile = userFolder() + os.sep + 'processing.log'
return batchfile
@staticmethod
def addToLog(msgtype, msg):
try:
# It seems that this fails sometimes depending on the msg
# added. To avoid it stopping the normal functioning of the
# algorithm, we catch all errors, assuming that is better
# to miss some log info that breaking the algorithm.
if msgtype == ProcessingLog.LOG_ALGORITHM:
line = msgtype + '|' + datetime.datetime.now().strftime(
ProcessingLog.DATE_FORMAT).decode('utf-8') + '|' \
+ msg + '\n'
logfile = codecs.open(ProcessingLog.logFilename(), 'a',
encoding='utf-8')
logfile.write(line)
logfile.close()
algname = msg[len('Processing.runalg("'):]
algname = algname[:algname.index('"')]
if algname not in ProcessingLog.recentAlgs:
ProcessingLog.recentAlgs.append(algname)
recentAlgsString = ';'.join(ProcessingLog.recentAlgs[-6:])
ProcessingConfig.setSettingValue(
ProcessingConfig.RECENT_ALGORITHMS,
recentAlgsString)
else:
if isinstance(msg, list):
msg = '\n'.join([m for m in msg])
msgtypes = {ProcessingLog.LOG_ERROR: QgsMessageLog.CRITICAL,
ProcessingLog.LOG_INFO: QgsMessageLog.INFO,
ProcessingLog.LOG_WARNING: QgsMessageLog.WARNING, }
QgsMessageLog.logMessage(msg, "Processing", msgtypes[msgtype])
except:
pass
@staticmethod
def getLogEntries():
entries = {}
errors = []
algorithms = []
warnings = []
info = []
lines = tail(ProcessingLog.logFilename())
for line in lines:
line = line.strip('\n').strip()
tokens = line.split('|')
text = ''
for i in range(2, len(tokens)):
text += tokens[i] + '|'
if line.startswith(ProcessingLog.LOG_ERROR):
errors.append(LogEntry(tokens[1], text))
elif line.startswith(ProcessingLog.LOG_ALGORITHM):
algorithms.append(LogEntry(tokens[1], tokens[2]))
elif line.startswith(ProcessingLog.LOG_WARNING):
warnings.append(LogEntry(tokens[1], text))
elif line.startswith(ProcessingLog.LOG_INFO):
info.append(LogEntry(tokens[1], text))
entries[ProcessingLog.LOG_ALGORITHM] = algorithms
return entries
@staticmethod
def getRecentAlgorithms():
recentAlgsSetting = ProcessingConfig.getSetting(
ProcessingConfig.RECENT_ALGORITHMS)
try:
ProcessingLog.recentAlgs = recentAlgsSetting.split(';')
except:
pass
return ProcessingLog.recentAlgs
@staticmethod
def clearLog():
os.unlink(ProcessingLog.logFilename())
ProcessingLog.startLogging()
@staticmethod
def saveLog(fileName):
entries = ProcessingLog.getLogEntries()
with codecs.open(fileName, 'w', encoding='utf-8') as f:
for k, v in entries.iteritems():
for entry in v:
f.write('%s|%s|%s\n' % (k, entry.date, entry.text))
class LogEntry:
def __init__(self, date, text):
self.date = date
self.text = text
"""
***************************************************************************
This code has been taken from pytailer
http://code.google.com/p/pytailer/
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
***************************************************************************
"""
class Tailer(object):
"""Implements tailing and heading functionality like GNU tail and
head commands.
"""
line_terminators = ('\r\n', '\n', '\r')
def __init__(self, filename, read_size=1024, end=False):
self.read_size = read_size
self.file = codecs.open(filename, encoding='utf-8')
self.start_pos = self.file.tell()
if end:
self.seek_end()
def splitlines(self, data):
return re.split('|'.join(self.line_terminators), data)
def seek_end(self):
self.seek(0, 2)
def seek(self, pos, whence=0):
self.file.seek(pos, whence)
def read(self, read_size=None):
if read_size:
read_str = self.file.read(read_size)
else:
read_str = self.file.read()
return (len(read_str), read_str)
def seek_line(self):
"""Searches backwards from the current file position for a
line terminator and seeks to the charachter after it.
"""
pos = end_pos = self.file.tell()
read_size = self.read_size
if pos > read_size:
pos -= read_size
else:
pos = 0
read_size = end_pos
self.seek(pos)
(bytes_read, read_str) = self.read(read_size)
if bytes_read and read_str[-1] in self.line_terminators:
            # The last character is a line terminator, don't count
# this one.
bytes_read -= 1
if read_str[-2:] == '\r\n' and '\r\n' in self.line_terminators:
# found CRLF
bytes_read -= 1
while bytes_read > 0:
            # Scan backward, counting the newlines in this bufferful
i = bytes_read - 1
while i >= 0:
if read_str[i] in self.line_terminators:
self.seek(pos + i + 1)
return self.file.tell()
i -= 1
if pos == 0 or pos - self.read_size < 0:
                # Not enough lines in the buffer, send the whole file
self.seek(0)
return None
pos -= self.read_size
self.seek(pos)
(bytes_read, read_str) = self.read(self.read_size)
return None
def tail(self, lines=10):
"""Return the last lines of the file.
"""
self.seek_end()
end_pos = self.file.tell()
for i in xrange(lines):
if not self.seek_line():
break
data = self.file.read(end_pos - self.file.tell() - 1)
if data:
return self.splitlines(data)
else:
return []
def __iter__(self):
return self.follow()
def close(self):
self.file.close()
def tail(file, lines=200):
return Tailer(file).tail(lines)
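# Example (hypothetical log path): fetch the last 50 log entries without
# reading the whole file into memory.
#   for line in tail('/path/to/processing.log', lines=50):
#       print(line)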
| gpl-2.0 | 6,255,954,867,690,181,000 | 33.759717 | 79 | 0.536952 | false |
moble/sympy | sympy/polys/domains/tests/test_polynomialring.py | 99 | 3314 | """Tests for the PolynomialRing classes. """
from sympy.polys.domains import QQ, ZZ
from sympy.polys.polyerrors import ExactQuotientFailed, CoercionFailed, NotReversible
from sympy.abc import x, y
from sympy.utilities.pytest import raises
def test_build_order():
R = QQ.old_poly_ring(x, y, order=(("lex", x), ("ilex", y)))
assert R.order((1, 5)) == ((1,), (-5,))
def test_globalring():
Qxy = QQ.old_frac_field(x, y)
R = QQ.old_poly_ring(x, y)
X = R.convert(x)
Y = R.convert(y)
assert x in R
assert 1/x not in R
assert 1/(1 + x) not in R
assert Y in R
assert X.ring == R
assert X * (Y**2 + 1) == R.convert(x * (y**2 + 1))
assert X * y == X * Y == R.convert(x * y) == x * Y
assert X + y == X + Y == R.convert(x + y) == x + Y
assert X - y == X - Y == R.convert(x - y) == x - Y
assert X + 1 == R.convert(x + 1)
raises(ExactQuotientFailed, lambda: X/Y)
raises(ExactQuotientFailed, lambda: x/Y)
raises(ExactQuotientFailed, lambda: X/y)
assert X**2 / X == X
assert R.from_GlobalPolynomialRing(ZZ.old_poly_ring(x, y).convert(x), ZZ.old_poly_ring(x, y)) == X
assert R.from_FractionField(Qxy.convert(x), Qxy) == X
assert R.from_FractionField(Qxy.convert(x)/y, Qxy) is None
assert R._sdm_to_vector(R._vector_to_sdm([X, Y], R.order), 2) == [X, Y]
def test_localring():
Qxy = QQ.old_frac_field(x, y)
R = QQ.old_poly_ring(x, y, order="ilex")
X = R.convert(x)
Y = R.convert(y)
assert x in R
assert 1/x not in R
assert 1/(1 + x) in R
assert Y in R
assert X.ring == R
assert X*(Y**2 + 1)/(1 + X) == R.convert(x*(y**2 + 1)/(1 + x))
assert X*y == X*Y
raises(ExactQuotientFailed, lambda: X/Y)
raises(ExactQuotientFailed, lambda: x/Y)
raises(ExactQuotientFailed, lambda: X/y)
assert X + y == X + Y == R.convert(x + y) == x + Y
assert X - y == X - Y == R.convert(x - y) == x - Y
assert X + 1 == R.convert(x + 1)
assert X**2 / X == X
assert R.from_GlobalPolynomialRing(ZZ.old_poly_ring(x, y).convert(x), ZZ.old_poly_ring(x, y)) == X
assert R.from_FractionField(Qxy.convert(x), Qxy) == X
raises(CoercionFailed, lambda: R.from_FractionField(Qxy.convert(x)/y, Qxy))
raises(ExactQuotientFailed, lambda: X/Y)
raises(NotReversible, lambda: X.invert())
assert R._sdm_to_vector(
R._vector_to_sdm([X/(X + 1), Y/(1 + X*Y)], R.order), 2) == \
[X*(1 + X*Y), Y*(1 + X)]
def test_conversion():
L = QQ.old_poly_ring(x, y, order="ilex")
G = QQ.old_poly_ring(x, y)
assert L.convert(x) == L.convert(G.convert(x), G)
assert G.convert(x) == G.convert(L.convert(x), L)
raises(CoercionFailed, lambda: G.convert(L.convert(1/(1 + x)), L))
def test_units():
R = QQ.old_poly_ring(x)
assert R.is_unit(R.convert(1))
assert R.is_unit(R.convert(2))
assert not R.is_unit(R.convert(x))
assert not R.is_unit(R.convert(1 + x))
R = QQ.old_poly_ring(x, order='ilex')
assert R.is_unit(R.convert(1))
assert R.is_unit(R.convert(2))
assert not R.is_unit(R.convert(x))
assert R.is_unit(R.convert(1 + x))
R = ZZ.old_poly_ring(x)
assert R.is_unit(R.convert(1))
assert not R.is_unit(R.convert(2))
assert not R.is_unit(R.convert(x))
assert not R.is_unit(R.convert(1 + x))
| bsd-3-clause | 4,203,609,760,696,980,500 | 31.490196 | 102 | 0.588715 | false |
nwiizo/workspace_2017 | ansible-modules-core/network/eos/eos_eapi.py | 13 | 13144 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = """
---
module: eos_eapi
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage and configure Arista EOS eAPI.
requirements:
- "EOS v4.12 or greater"
description:
- Use to enable or disable eAPI access, and set the port and state
of http, https, local_http and unix-socket servers.
- When enabling eAPI access the default is to enable HTTP on port
80, enable HTTPS on port 443, disable local HTTP, and disable
Unix socket server. Use the options listed below to override the
default configuration.
- Requires EOS v4.12 or greater.
extends_documentation_fragment: eos
options:
http:
description:
- The C(http) argument controls the operating state of the HTTP
transport protocol when eAPI is present in the running-config.
When the value is set to True, the HTTP protocol is enabled and
when the value is set to False, the HTTP protocol is disabled.
By default, when eAPI is first configured, the HTTP protocol is
disabled.
required: false
default: no
choices: ['yes', 'no']
aliases: ['enable_http']
http_port:
description:
- Configures the HTTP port that will listen for connections when
the HTTP transport protocol is enabled. This argument accepts
integer values in the valid range of 1 to 65535.
required: false
default: 80
https:
description:
- The C(https) argument controls the operating state of the HTTPS
transport protocol when eAPI is present in the running-config.
When the value is set to True, the HTTPS protocol is enabled and
when the value is set to False, the HTTPS protocol is disabled.
By default, when eAPI is first configured, the HTTPS protocol is
enabled.
required: false
default: yes
choices: ['yes', 'no']
    aliases: ['enable_https']
https_port:
description:
      - Configures the HTTPS port that will listen for connections when
        the HTTPS transport protocol is enabled. This argument accepts
integer values in the valid range of 1 to 65535.
required: false
default: 443
local_http:
description:
- The C(local_http) argument controls the operating state of the
local HTTP transport protocol when eAPI is present in the
running-config. When the value is set to True, the HTTP protocol
is enabled and restricted to connections from localhost only. When
the value is set to False, the HTTP local protocol is disabled.
      - Note this value is independent of the C(http) argument
required: false
default: false
choices: ['yes', 'no']
aliases: ['enable_local_http']
local_http_port:
description:
      - Configures the local HTTP port that will listen for connections when
        the local HTTP transport protocol is enabled. This argument accepts
integer values in the valid range of 1 to 65535.
required: false
default: 8080
socket:
description:
- The C(socket) argument controls the operating state of the UNIX
Domain Socket used to receive eAPI requests. When the value
of this argument is set to True, the UDS will listen for eAPI
requests. When the value is set to False, the UDS will not be
available to handle requests. By default when eAPI is first
configured, the UDS is disabled.
required: false
default: false
choices: ['yes', 'no']
aliases: ['enable_socket']
vrf:
description:
- The C(vrf) argument will configure eAPI to listen for connections
in the specified VRF. By default, eAPI transports will listen
for connections in the global table. This value requires the
VRF to already be created otherwise the task will fail.
required: false
default: default
version_added: "2.2"
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
    default: null
version_added: "2.2"
state:
description:
- The C(state) argument controls the operational state of eAPI
on the remote device. When this argument is set to C(started),
eAPI is enabled to receive requests and when this argument is
C(stopped), eAPI is disabled and will not receive requests.
required: false
default: started
choices: ['started', 'stopped']
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
- name: Enable eAPI access with default configuration
eos_eapi:
state: started
provider: "{{ cli }}"
- name: Enable eAPI with no HTTP, HTTPS at port 9443, local HTTP at port 80, and socket enabled
eos_eapi:
state: started
http: false
https_port: 9443
local_http: yes
local_http_port: 80
socket: yes
provider: "{{ cli }}"
- name: Shutdown eAPI access
eos_eapi:
state: stopped
provider: "{{ cli }}"
"""
RETURN = """
updates:
description:
- Set of commands to be executed on remote device
returned: always
type: list
sample: ['management api http-commands', 'shutdown']
urls:
description: Hash of URL endpoints eAPI is listening on per interface
returned: when eAPI is started
type: dict
sample: {'Management1': ['http://172.26.10.1:80']}
"""
import re
import time
import ansible.module_utils.eos
from ansible.module_utils.basic import get_exception
from ansible.module_utils.network import NetworkModule, NetworkError
from ansible.module_utils.netcfg import NetworkConfig, dumps
PRIVATE_KEYS_RE = re.compile('__.+__')
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_instance(module):
try:
resp = module.cli('show management api http-commands', 'json')
return dict(
http=resp[0]['httpServer']['configured'],
http_port=resp[0]['httpServer']['port'],
https=resp[0]['httpsServer']['configured'],
https_port=resp[0]['httpsServer']['port'],
local_http=resp[0]['localHttpServer']['configured'],
local_http_port=resp[0]['localHttpServer']['port'],
socket=resp[0]['unixSocketServer']['configured'],
vrf=resp[0]['vrf']
)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc), **exc.kwargs)
def started(module, instance, commands):
commands.append('no shutdown')
setters = set()
for key, value in module.argument_spec.iteritems():
if module.params[key] is not None:
setter = value.get('setter') or 'set_%s' % key
if setter not in setters:
setters.add(setter)
invoke(setter, module, instance, commands)
def stopped(module, instance, commands):
commands.append('shutdown')
def set_protocol_http(module, instance, commands):
port = module.params['http_port']
if not 1 <= port <= 65535:
module.fail_json(msg='http_port must be between 1 and 65535')
elif any((module.params['http'], instance['http'])):
commands.append('protocol http port %s' % port)
elif module.params['http'] is False:
commands.append('no protocol http')
def set_protocol_https(module, instance, commands):
port = module.params['https_port']
if not 1 <= port <= 65535:
module.fail_json(msg='https_port must be between 1 and 65535')
elif any((module.params['https'], instance['https'])):
commands.append('protocol https port %s' % port)
elif module.params['https'] is False:
commands.append('no protocol https')
def set_local_http(module, instance, commands):
port = module.params['local_http_port']
if not 1 <= port <= 65535:
module.fail_json(msg='local_http_port must be between 1 and 65535')
elif any((module.params['local_http'], instance['local_http'])):
commands.append('protocol http localhost port %s' % port)
elif module.params['local_http'] is False:
commands.append('no protocol http localhost port 8080')
def set_socket(module, instance, commands):
if any((module.params['socket'], instance['socket'])):
commands.append('protocol unix-socket')
elif module.params['socket'] is False:
commands.append('no protocol unix-socket')
def set_vrf(module, instance, commands):
vrf = module.params['vrf']
if vrf != 'default':
resp = module.cli(['show vrf'])
if vrf not in resp[0]:
module.fail_json(msg="vrf '%s' is not configured" % vrf)
commands.append('vrf %s' % vrf)
def get_config(module):
contents = module.params['config']
if not contents:
cmd = 'show running-config all section management api http-commands'
contents = module.cli([cmd])
config = NetworkConfig(indent=3, contents=contents[0])
return config
def load_config(module, instance, commands, result):
commit = not module.check_mode
diff = module.config.load_config(commands, commit=commit)
if diff:
result['diff'] = dict(prepared=diff)
result['changed'] = True
def load(module, instance, commands, result):
candidate = NetworkConfig(indent=3)
candidate.add(commands, parents=['management api http-commands'])
config = get_config(module)
configobjs = candidate.difference(config)
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
result['updates'] = commands
load_config(module, instance, commands, result)
def clean_result(result):
# strip out any keys that have two leading and two trailing
# underscore characters
for key in result.keys():
if PRIVATE_KEYS_RE.match(key):
del result[key]
def collect_facts(module, result):
resp = module.cli(['show management api http-commands'], output='json')
facts = dict(eos_eapi_urls=dict())
for each in resp[0]['urls']:
intf, url = each.split(' : ')
key = str(intf).strip()
if key not in facts['eos_eapi_urls']:
facts['eos_eapi_urls'][key] = list()
facts['eos_eapi_urls'][key].append(str(url).strip())
result['ansible_facts'] = facts
def main():
""" main entry point for module execution
"""
argument_spec = dict(
http=dict(aliases=['enable_http'], default=False, type='bool', setter='set_protocol_http'),
http_port=dict(default=80, type='int', setter='set_protocol_http'),
https=dict(aliases=['enable_https'], default=True, type='bool', setter='set_protocol_https'),
https_port=dict(default=443, type='int', setter='set_protocol_https'),
local_http=dict(aliases=['enable_local_http'], default=False, type='bool', setter='set_local_http'),
local_http_port=dict(default=8080, type='int', setter='set_local_http'),
socket=dict(aliases=['enable_socket'], default=False, type='bool'),
vrf=dict(default='default'),
config=dict(),
# Only allow use of transport cli when configuring eAPI
transport=dict(default='cli', choices=['cli']),
state=dict(default='started', choices=['stopped', 'started']),
)
module = NetworkModule(argument_spec=argument_spec,
connect_on_load=False,
supports_check_mode=True)
state = module.params['state']
result = dict(changed=False)
commands = list()
instance = get_instance(module)
invoke(state, module, instance, commands)
try:
load(module, instance, commands, result)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc), **exc.kwargs)
collect_facts(module, result)
clean_result(result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| mit | -6,866,054,699,081,414,000 | 34.333333 | 108 | 0.658932 | false |
hatwar/Das_erpnext | erpnext/accounts/doctype/journal_entry/test_journal_entry.py | 25 | 7835 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest, frappe
from frappe.utils import flt
from erpnext.accounts.utils import get_actual_expense, BudgetError, get_fiscal_year
class TestJournalEntry(unittest.TestCase):
def test_journal_entry_with_against_jv(self):
jv_invoice = frappe.copy_doc(test_records[2])
base_jv = frappe.copy_doc(test_records[0])
self.jv_against_voucher_testcase(base_jv, jv_invoice)
def test_jv_against_sales_order(self):
from erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order
sales_order = make_sales_order(do_not_save=True)
base_jv = frappe.copy_doc(test_records[0])
self.jv_against_voucher_testcase(base_jv, sales_order)
def test_jv_against_purchase_order(self):
from erpnext.buying.doctype.purchase_order.test_purchase_order import create_purchase_order
purchase_order = create_purchase_order(do_not_save=True)
base_jv = frappe.copy_doc(test_records[1])
self.jv_against_voucher_testcase(base_jv, purchase_order)
def jv_against_voucher_testcase(self, base_jv, test_voucher):
dr_or_cr = "credit" if test_voucher.doctype in ["Sales Order", "Journal Entry"] else "debit"
field_dict = {'Journal Entry': "against_jv",
'Sales Order': "against_sales_order",
'Purchase Order': "against_purchase_order"
}
test_voucher.insert()
test_voucher.submit()
if test_voucher.doctype == "Journal Entry":
self.assertTrue(frappe.db.sql("""select name from `tabJournal Entry Account`
where account = %s and docstatus = 1 and parent = %s""",
("_Test Receivable - _TC", test_voucher.name)))
self.assertTrue(not frappe.db.sql("""select name from `tabJournal Entry Account`
where %s=%s""" % (field_dict.get(test_voucher.doctype), '%s'), (test_voucher.name)))
base_jv.get("accounts")[0].is_advance = "Yes" if (test_voucher.doctype in ["Sales Order", "Purchase Order"]) else "No"
base_jv.get("accounts")[0].set(field_dict.get(test_voucher.doctype), test_voucher.name)
base_jv.insert()
base_jv.submit()
submitted_voucher = frappe.get_doc(test_voucher.doctype, test_voucher.name)
self.assertTrue(frappe.db.sql("""select name from `tabJournal Entry Account`
where %s=%s""" % (field_dict.get(test_voucher.doctype), '%s'), (submitted_voucher.name)))
self.assertTrue(frappe.db.sql("""select name from `tabJournal Entry Account`
where %s=%s and %s=400""" % (field_dict.get(submitted_voucher.doctype), '%s', dr_or_cr), (submitted_voucher.name)))
if base_jv.get("accounts")[0].is_advance == "Yes":
self.advance_paid_testcase(base_jv, submitted_voucher, dr_or_cr)
self.cancel_against_voucher_testcase(submitted_voucher)
def advance_paid_testcase(self, base_jv, test_voucher, dr_or_cr):
#Test advance paid field
advance_paid = frappe.db.sql("""select advance_paid from `tab%s`
where name=%s""" % (test_voucher.doctype, '%s'), (test_voucher.name))
payment_against_order = base_jv.get("accounts")[0].get(dr_or_cr)
self.assertTrue(flt(advance_paid[0][0]) == flt(payment_against_order))
def cancel_against_voucher_testcase(self, test_voucher):
if test_voucher.doctype == "Journal Entry":
# if test_voucher is a Journal Entry, test cancellation of test_voucher
test_voucher.cancel()
self.assertTrue(not frappe.db.sql("""select name from `tabJournal Entry Account`
where against_jv=%s""", test_voucher.name))
elif test_voucher.doctype in ["Sales Order", "Purchase Order"]:
# if test_voucher is a Sales Order/Purchase Order, test error on cancellation of test_voucher
submitted_voucher = frappe.get_doc(test_voucher.doctype, test_voucher.name)
self.assertRaises(frappe.LinkExistsError, submitted_voucher.cancel)
def test_jv_against_stock_account(self):
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
set_perpetual_inventory()
jv = frappe.copy_doc(test_records[0])
jv.get("accounts")[0].update({
"account": "_Test Warehouse - _TC",
"party_type": None,
"party": None
})
jv.insert()
from erpnext.accounts.general_ledger import StockAccountInvalidTransaction
self.assertRaises(StockAccountInvalidTransaction, jv.submit)
set_perpetual_inventory(0)
def test_monthly_budget_crossed_ignore(self):
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Ignore")
self.set_total_expense_zero("2013-02-28")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Account Bank Account - _TC", 40000, "_Test Cost Center - _TC", submit=True)
self.assertTrue(frappe.db.get_value("GL Entry",
{"voucher_type": "Journal Entry", "voucher_no": jv.name}))
def test_monthly_budget_crossed_stop(self):
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Stop")
self.set_total_expense_zero("2013-02-28")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Account Bank Account - _TC", 40000, "_Test Cost Center - _TC")
self.assertRaises(BudgetError, jv.submit)
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Ignore")
def test_yearly_budget_crossed_stop(self):
self.test_monthly_budget_crossed_ignore()
frappe.db.set_value("Company", "_Test Company", "yearly_bgt_flag", "Stop")
self.set_total_expense_zero("2013-02-28")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Account Bank Account - _TC", 150000, "_Test Cost Center - _TC")
self.assertRaises(BudgetError, jv.submit)
frappe.db.set_value("Company", "_Test Company", "yearly_bgt_flag", "Ignore")
def test_monthly_budget_on_cancellation(self):
self.set_total_expense_zero("2013-02-28")
jv1 = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Account Bank Account - _TC", 20000, "_Test Cost Center - _TC", submit=True)
self.assertTrue(frappe.db.get_value("GL Entry",
{"voucher_type": "Journal Entry", "voucher_no": jv1.name}))
jv2 = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Account Bank Account - _TC", 20000, "_Test Cost Center - _TC", submit=True)
self.assertTrue(frappe.db.get_value("GL Entry",
{"voucher_type": "Journal Entry", "voucher_no": jv2.name}))
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Stop")
self.assertRaises(BudgetError, jv1.cancel)
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Ignore")
def get_actual_expense(self, monthly_end_date):
return get_actual_expense({
"account": "_Test Account Cost for Goods Sold - _TC",
"cost_center": "_Test Cost Center - _TC",
"monthly_end_date": monthly_end_date,
"company": "_Test Company",
"fiscal_year": get_fiscal_year(monthly_end_date)[0]
})
def set_total_expense_zero(self, posting_date):
existing_expense = self.get_actual_expense(posting_date)
make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Account Bank Account - _TC", -existing_expense, "_Test Cost Center - _TC", submit=True)
def make_journal_entry(account1, account2, amount, cost_center=None, submit=False):
jv = frappe.new_doc("Journal Entry")
jv.posting_date = "2013-02-14"
jv.company = "_Test Company"
jv.fiscal_year = "_Test Fiscal Year 2013"
jv.user_remark = "test"
jv.set("accounts", [
{
"account": account1,
"cost_center": cost_center,
"debit": amount if amount > 0 else 0,
"credit": abs(amount) if amount < 0 else 0,
}, {
"account": account2,
"cost_center": cost_center,
"credit": amount if amount > 0 else 0,
"debit": abs(amount) if amount < 0 else 0,
}
])
jv.insert()
if submit:
jv.submit()
return jv
test_records = frappe.get_test_records('Journal Entry')
| agpl-3.0 | 1,435,088,348,423,113,500 | 37.596059 | 120 | 0.698149 | false |
iaklampanos/dj-vercereg | dj_vercereg/dj_vercereg/settings.py | 1 | 6595 | # Copyright 2014 The University of Edinburgh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Django settings for dj_vercereg project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vzb127q8k@vz5mqt5ct-(20ddyaklr4kuy^65!8+az0u)a!*^s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# application definition
# TEMPLATE_CONTEXT_PROCESSORS = (
# "django.contrib.auth.context_processors.auth",
# "django.core.context_processors.debug",
# "django.core.context_processors.i18n",
# "django.core.context_processors.media",
# "django.core.context_processors.static",
# "django.core.context_processors.tz",
# "django.contrib.messages.context_processors.messages",
# 'django.core.context_processors.request'
# )
# Pip package update 12/10/2018 (davve.ath)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
INSTALLED_APPS = (
# 'suit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'vercereg',
'rest_framework',
'rest_framework.authtoken',
'reversion',
# 'django_pdb',
'guardian',
'rest_framework_swagger',
'watson',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dj_vercereg.urls'
WSGI_APPLICATION = 'dj_vercereg.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT= os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# STATICFILES_DIRS = (
# os.path.join(BASE_DIR, 'static'),
# )
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
#'PAGINATE_BY': 10,
}
# For django guardian: ########################################
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend', # default
'guardian.backends.ObjectPermissionBackend',
)
ANONYMOUS_USER_ID = -1
###############################################################
# Pip package update 12/10/2018 (davve.ath)
# from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
#
# TEMPLATE_CONTEXT_PROCESSORS = TCP + (
# 'django.core.context_processors.request',
# )
# Django Suit configuration example
#SUIT_CONFIG = {
# header
# 'ADMIN_NAME': 'VERCE Registry Admin',
# 'HEADER_DATE_FORMAT': 'l, j. F Y',
# 'HEADER_TIME_FORMAT': 'H:i',
# forms
# 'SHOW_REQUIRED_ASTERISK': True, # Default True
# 'CONFIRM_UNSAVED_CHANGES': True, # Default True
# menu
# 'SEARCH_URL': '/admin/auth/user/',
# 'MENU_ICONS': {
# 'sites': 'icon-leaf',
# 'auth': 'icon-lock',
# },
# 'MENU_OPEN_FIRST_CHILD': True, # Default True
# 'MENU_EXCLUDE': ('auth.group',),
# 'MENU': (
# 'sites',
# {'app': 'auth', 'icon':'icon-lock', 'models': ('user', 'group')},
# {'label': 'Settings', 'icon':'icon-cog', 'models': ('auth.user', 'auth.group')},
# {'label': 'Support', 'icon':'icon-question-sign', 'url': '/support/'},
# ),
# misc
# 'LIST_PER_PAGE': 15
#}
SWAGGER_SETTINGS = {
"exclude_namespaces": [], # List URL namespaces to ignore
"api_version": '0.1', # Specify your API's version
"api_path": "/", # Specify the path to your API not a root level
"enabled_methods": [ # Specify which methods to enable in Swagger UI
'get',
'post',
'put'
],
# "api_key": '4737f71829a7eff1e077268b89696ab536c26a11', # An API key
"is_authenticated": True, # Set to True to enforce user authentication,
"is_superuser": False, # Set to True to enforce admin only access
"permission_denied_handler": None, # If user has no permisssion, raise 403 error
"info": {
# Configure some Swagger UI variables, for details see:
# https://github.com/swagger-api/swagger-spec/blob/master/versions/1.2.md#513-info-object
'contact': '[email protected]',
'description': '',
'license': 'Apache 2.0',
'licenseUrl': 'http://www.apache.org/licenses/LICENSE-2.0.html',
'termsOfServiceUrl': '',
'title': 'VERCE dispel4py Registry',
},
}
try:
from local_settings import *
except ImportError:
pass
| apache-2.0 | -7,229,218,075,049,728,000 | 29.532407 | 97 | 0.651554 | false |
jstammers/EDMSuite | NavPython/IronPython/Lib/pprint.py | 147 | 11932 | # Author: Fred L. Drake, Jr.
# [email protected]
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
"""Support to pretty-print lists, tuples, & dictionaries recursively.
Very simple, but useful, especially in debugging data structures.
Classes
-------
PrettyPrinter()
Handle pretty-printing operations onto a stream using a configured
set of formatting parameters.
Functions
---------
pformat()
Format a Python object into a pretty-printed representation.
pprint()
Pretty-print a Python object to a stream [default is sys.stdout].
saferepr()
Generate a 'standard' repr()-like value, but protect against recursive
data structures.
"""
import sys as _sys
import warnings
from cStringIO import StringIO as _StringIO
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
"PrettyPrinter"]
# cache these for faster access:
_commajoin = ", ".join
_id = id
_len = len
_type = type
def pprint(object, stream=None, indent=1, width=80, depth=None):
"""Pretty-print a Python object to a stream [default is sys.stdout]."""
printer = PrettyPrinter(
stream=stream, indent=indent, width=width, depth=depth)
printer.pprint(object)
def pformat(object, indent=1, width=80, depth=None):
"""Format a Python object into a pretty-printed representation."""
return PrettyPrinter(indent=indent, width=width, depth=depth).pformat(object)
def saferepr(object):
"""Version of repr() which can handle recursive data structures."""
return _safe_repr(object, {}, None, 0)[0]
def isreadable(object):
"""Determine if saferepr(object) is readable by eval()."""
return _safe_repr(object, {}, None, 0)[1]
def isrecursive(object):
"""Determine if object requires a recursive representation."""
return _safe_repr(object, {}, None, 0)[2]
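def _usage_example():  # pragma: no cover
    # Hedged illustration added for clarity; it is not part of the original
    # module. It simply exercises the module-level helpers defined above on
    # a nested value and on a recursive (self-referencing) list.
    nested = ['spam', 'eggs', {'a': [1, 2, 3], 'b': ('x', 'y')}]
    print pformat(nested, indent=4, width=20)
    recursive = []
    recursive.append(recursive)        # a list that contains itself
    print saferepr(recursive)          # protected against infinite recursion
    print isrecursive(recursive), isreadable(recursive)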
def _sorted(iterable):
with warnings.catch_warnings():
if _sys.py3kwarning:
warnings.filterwarnings("ignore", "comparing unequal types "
"not supported", DeprecationWarning)
return sorted(iterable)
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
"""
indent = int(indent)
width = int(width)
assert indent >= 0, "indent must be >= 0"
assert depth is None or depth > 0, "depth must be > 0"
assert width, "width must be != 0"
self._depth = depth
self._indent_per_level = indent
self._width = width
if stream is not None:
self._stream = stream
else:
self._stream = _sys.stdout
def pprint(self, object):
self._format(object, self._stream, 0, 0, {}, 0)
self._stream.write("\n")
def pformat(self, object):
sio = _StringIO()
self._format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
return self.format(object, {}, 0, 0)[2]
def isreadable(self, object):
s, readable, recursive = self.format(object, {}, 0, 0)
return readable and not recursive
def _format(self, object, stream, indent, allowance, context, level):
level = level + 1
objid = _id(object)
if objid in context:
stream.write(_recursion(object))
self._recursive = True
self._readable = False
return
rep = self._repr(object, context, level - 1)
typ = _type(object)
sepLines = _len(rep) > (self._width - 1 - indent - allowance)
write = stream.write
if self._depth and level > self._depth:
write(rep)
return
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
write('{')
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = _len(object)
if length:
context[objid] = 1
indent = indent + self._indent_per_level
items = _sorted(object.items())
key, ent = items[0]
rep = self._repr(key, context, level)
write(rep)
write(': ')
self._format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
if length > 1:
for key, ent in items[1:]:
rep = self._repr(key, context, level)
if sepLines:
write(',\n%s%s: ' % (' '*indent, rep))
else:
write(', %s: ' % rep)
self._format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
write('}')
return
if ((issubclass(typ, list) and r is list.__repr__) or
(issubclass(typ, tuple) and r is tuple.__repr__) or
(issubclass(typ, set) and r is set.__repr__) or
(issubclass(typ, frozenset) and r is frozenset.__repr__)
):
length = _len(object)
if issubclass(typ, list):
write('[')
endchar = ']'
elif issubclass(typ, set):
if not length:
write('set()')
return
write('set([')
endchar = '])'
object = _sorted(object)
indent += 4
elif issubclass(typ, frozenset):
if not length:
write('frozenset()')
return
write('frozenset([')
endchar = '])'
object = _sorted(object)
indent += 10
else:
write('(')
endchar = ')'
if self._indent_per_level > 1 and sepLines:
write((self._indent_per_level - 1) * ' ')
if length:
context[objid] = 1
indent = indent + self._indent_per_level
self._format(object[0], stream, indent, allowance + 1,
context, level)
if length > 1:
for ent in object[1:]:
if sepLines:
write(',\n' + ' '*indent)
else:
write(', ')
self._format(ent, stream, indent,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
if issubclass(typ, tuple) and length == 1:
write(',')
write(endchar)
return
write(rep)
def _repr(self, object, context, level):
repr, readable, recursive = self.format(object, context.copy(),
self._depth, level)
if not readable:
self._readable = False
if recursive:
self._recursive = True
return repr
def format(self, object, context, maxlevels, level):
"""Format object for a specific context, returning a string
and flags indicating whether the representation is 'readable'
and whether the object represents a recursive construct.
"""
return _safe_repr(object, context, maxlevels, level)
# Return triple (repr_string, isreadable, isrecursive).
def _safe_repr(object, context, maxlevels, level):
typ = _type(object)
if typ is str:
if 'locale' not in _sys.modules:
return repr(object), True, False
if "'" in object and '"' not in object:
closure = '"'
quotes = {'"': '\\"'}
else:
closure = "'"
quotes = {"'": "\\'"}
qget = quotes.get
sio = _StringIO()
write = sio.write
for char in object:
if char.isalpha():
write(char)
else:
write(qget(char, repr(char)[1:-1]))
return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = _id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
for k, v in _sorted(object.items()):
krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % _commajoin(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif _len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = _id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % _commajoin(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
def _recursion(object):
return ("<Recursion on %s with id=%s>"
% (_type(object).__name__, _id(object)))
def _perfcheck(object=None):
import time
if object is None:
object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
p = PrettyPrinter()
t1 = time.time()
_safe_repr(object, {}, None, 0)
t2 = time.time()
p.pformat(object)
t3 = time.time()
print "_safe_repr:", t2 - t1
print "pformat:", t3 - t2
if __name__ == "__main__":
_perfcheck()
| mit | -9,020,407,297,888,988,000 | 33.091429 | 81 | 0.526986 | false |
m4nolo/steering-all | src/entities/relativeentity.py | 2 | 1279 | from entity import Entity
class RelativeEntity(Entity):
def __init__(self, width, height):
Entity.__init__(self, width, height)
self.margin = [0, 0, 0, 0]
def below(self, entity):
self.y = entity.y + entity.height + self.margin[1]
def above(self, entity):
self.y = entity.y - self.height - self.margin[3]
def leftOf(self, entity):
self.x = entity.x - self.width - self.margin[2]
def rightOf(self, entity):
self.x = entity.x + entity.width + self.margin[0]
def margin(self, margin):
        self.margin = margin
def marginLeft(self, margin):
self.margin[0] = margin
def marginRight(self, margin):
self.margin[2] = margin
def marginTop(self, margin):
self.margin[1] = margin
def marginBottom(self, margin):
self.margin[3] = margin
def alignLeft(self):
self.x = 0 + self.margin[0]
def alignRight(self, width):
self.x = width - self.width - self.margin[2]
def alignTop(self):
self.y = 0 + self.margin[1]
def alignBottom(self, height):
self.y = height - self.height - self.margin[3]
def centerRelativeX(self, entity):
self.x = entity.x + (entity.width / 2) - (self.width / 2)
def centerRelativeY(self, entity):
self.y = entity.y + (entity.height / 2) - (self.height / 2)
| mit | -8,583,138,722,605,353,000 | 22.254545 | 63 | 0.635653 | false |
etkirsch/scikit-learn | examples/svm/plot_custom_kernel.py | 171 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause | -3,280,248,896,302,629,000 | 26.122807 | 76 | 0.596378 | false |
petercable/mi-instrument | mi/platform/rsn/simulator/oms_events.py | 5 | 10097 | #!/usr/bin/env python
"""
@package ion.agents.platform.rsn.simulator.oms_events
@file ion/agents/platform/rsn/simulator/oms_events.py
@author Carlos Rueda
@brief OMS simulator event definitions and supporting functions.
Demo program included that allows to run both a listener server and a
notifier. See demo program usage at the end of this file.
"""
__author__ = 'Carlos Rueda'
__license__ = 'Apache 2.0'
import sys
from time import sleep
import time
import ntplib
from urlparse import urlparse
import httplib
import yaml
import json
from ooi.logging import log
##########################################################################
# The "event type" concept was removed from the interface (~Apr/2013).
# To minimize changes in the code, simply introduce an 'ALL' event type here.
class EventInfo(object):
EVENT_TYPES = {
'ALL': {
'name': 'on battery',
'severity': 3,
'group': 'power',
}
}
class EventNotifier(object):
def __init__(self):
# _listeners: { event_type: {url: reg_time, ...}, ... }
# initialize with empty dict for each event type:
self._listeners = dict((et, {}) for et in EventInfo.EVENT_TYPES)
def add_listener(self, url, event_type):
assert event_type in EventInfo.EVENT_TYPES
url_dict = self._listeners[event_type]
if not url in url_dict:
url_dict[url] = ntplib.system_to_ntp_time(time.time())
log.trace("added listener=%s for event_type=%s", url, event_type)
return url_dict[url]
def remove_listener(self, url, event_type):
assert event_type in EventInfo.EVENT_TYPES
url_dict = self._listeners[event_type]
unreg_time = 0
if url in url_dict:
unreg_time = ntplib.system_to_ntp_time(time.time())
del url_dict[url]
log.trace("removed listener=%s for event_type=%s", url, event_type)
return unreg_time
def notify(self, event_instance):
"""
Notifies the event to all associated listeners.
"""
assert isinstance(event_instance, dict)
urls = self._listeners['ALL']
if not len(urls):
# no event listeners for event_type; just ignore notification:
return
# copy list to get a snapshot of the current dictionary and thus avoid
# concurrent modification kind of runtime errors like:
# RuntimeError: dictionary changed size during iteration
urls = list(urls)
for url in urls:
self._notify_listener(url, event_instance)
def _notify_listener(self, url, event_instance):
"""
Notifies event to given listener.
"""
if url == "http://NO_OMS_NOTIFICATIONS": # pragma: no cover
# developer convenience -see ion.agents.platform.rsn.oms_event_listener
return
log.debug("Notifying event_instance=%s to listener=%s", str(event_instance), url)
# include url in event instance for diagnostic/debugging purposes:
event_instance['listener_url'] = url
# prepare payload (JSON format):
payload = json.dumps(event_instance, indent=2)
log.trace("payload=\n%s", payload)
headers = {
"Content-type": "application/json",
"Accept": "text/plain"
}
conn = None
try:
o = urlparse(url)
url4conn = o.netloc
path = o.path
conn = httplib.HTTPConnection(url4conn)
conn.request("POST", path, body=payload, headers=headers)
response = conn.getresponse()
data = response.read()
log.trace("RESPONSE: %s, %s, %s", response.status, response.reason, data)
except Exception as e:
# the actual listener is no longer there; just log a message
log.warn("event notification HTTP request failed: %r: %s", url, e)
finally:
if conn:
conn.close()
class EventGenerator(object):
"""
Simple helper to generate and trigger event notifications.
"""
def __init__(self, notifier):
self._notifier = notifier
self._keep_running = True
self._index = 0 # in EventInfo.EVENT_TYPES
# self._runnable set depending on whether we're under pyon or not
if 'pyon' in sys.modules:
from gevent import Greenlet
self._runnable = Greenlet(self._run)
log.debug("!!!! EventGenerator: pyon detected: using Greenlet")
else:
from threading import Thread
self._runnable = Thread(target=self._run)
self._runnable.setDaemon(True)
log.debug("!!!! EventGenerator: pyon not detected: using Thread")
def generate_and_notify_event(self):
if self._index >= len(EventInfo.EVENT_TYPES):
self._index = 0
event_type = EventInfo.EVENT_TYPES.values()[self._index]
self._index += 1
platform_id = "TODO_some_platform_id"
message = "%s (synthetic event generated from simulator)" % event_type['name']
group = event_type['group']
timestamp = ntplib.system_to_ntp_time(time.time())
first_time_timestamp = timestamp
severity = event_type['severity']
event_instance = {
'message': message,
'platform_id': platform_id,
'timestamp': timestamp,
'first_time_timestamp': first_time_timestamp,
'severity': severity,
'group': group,
}
log.debug("notifying event_instance=%s", str(event_instance))
self._notifier.notify(event_instance)
def start(self):
self._runnable.start()
def _run(self):
sleep(3) # wait a bit before first event
while self._keep_running:
self.generate_and_notify_event()
# sleep for a few secs regularly checking we still are running
secs = 7
while self._keep_running and secs > 0:
sleep(0.3)
secs -= 0.3
log.trace("event generation stopped.")
def stop(self):
log.trace("stopping event generation...")
self._keep_running = False
if __name__ == "__main__": # pragma: no cover
#
# first, call this demo program with command line argument 'listener',
# then, on a second terminal, with argument 'notifier'
#
host, port = "localhost", 8000
import sys
if len(sys.argv) > 1 and sys.argv[1] == "listener":
# run listener
from gevent.pywsgi import WSGIServer
def application(environ, start_response):
#print('listener got environ=%s' % str(environ))
print(" ".join(('%s=%s' % (k, environ[k])) for k in [
'CONTENT_LENGTH','CONTENT_TYPE', 'HTTP_ACCEPT']))
input = environ['wsgi.input']
body = "".join(input.readlines())
print('body=\n%s' % body)
#
# note: the expected content format is JSON and we can in general
# parse with either json or yaml ("every JSON file is also a valid
# YAML file" -- http://yaml.org/spec/1.2/spec.html#id2759572):
#
event_instance = yaml.load(body)
print('event_instance=%s' % str(event_instance))
# respond OK:
headers = [('Content-Type', 'text/plain') ]
status = '200 OK'
start_response(status, headers)
return "MY-RESPONSE. BYE"
print("%s:%s: listening for event notifications..." % (host, port))
WSGIServer((host, port), application).serve_forever()
elif len(sys.argv) > 1 and sys.argv[1] == "notifier":
# run notifier
notifier = EventNotifier()
url = "http://%s:%s" % (host, port)
for event_type in EventInfo.EVENT_TYPES.keys():
notifier.add_listener(url, event_type)
print("registered listener to event_type=%r" % event_type)
generator = EventGenerator(notifier)
secs = 15
print("generating events for %s seconds ..." % secs)
generator.start()
sleep(secs)
generator.stop()
else:
print("usage: call me with arg 'listener' or 'notifier'")
"""
Test program
TERMINAL 1:
$ bin/python ion/agents/platform/rsn/simulator/oms_events.py listener
localhost:8000: listening for event notifications...
TERMINAL 2:
oms_simulator: setting log level to: logging.WARN
registered listener to event_type='ALL'
generating events for 15 seconds ...
TERMINAL 1:
CONTENT_LENGTH=270 CONTENT_TYPE=application/json HTTP_ACCEPT=text/plain
body=
{
"group": "power",
"severity": 3,
"url": "http://localhost:8000",
"timestamp": 3578265811.422655,
"platform_id": "TODO_some_platform_id",
"message": "on battery (synthetic event generated from simulator)",
"first_time_timestamp": 3578265811.422655
}
event_instance={'platform_id': 'TODO_some_platform_id', 'group': 'power', 'severity': 3, 'url': 'http://localhost:8000', 'timestamp': 3578265811.422655, 'message': 'on battery (synthetic event generated from simulator)', 'first_time_timestamp': 3578265811.422655}
127.0.0.1 - - [2013-05-22 19:43:31] "POST / HTTP/1.1" 200 118 0.002814
CONTENT_LENGTH=270 CONTENT_TYPE=application/json HTTP_ACCEPT=text/plain
body=
{
"group": "power",
"severity": 3,
"url": "http://localhost:8000",
"timestamp": 3578265818.647295,
"platform_id": "TODO_some_platform_id",
"message": "on battery (synthetic event generated from simulator)",
"first_time_timestamp": 3578265818.647295
}
event_instance={'platform_id': 'TODO_some_platform_id', 'group': 'power', 'severity': 3, 'url': 'http://localhost:8000', 'timestamp': 3578265818.647295, 'message': 'on battery (synthetic event generated from simulator)', 'first_time_timestamp': 3578265818.647295}
127.0.0.1 - - [2013-05-22 19:43:38] "POST / HTTP/1.1" 200 118 0.003455
"""
| bsd-2-clause | 2,641,930,980,059,355,000 | 34.059028 | 263 | 0.596811 | false |
neurokernel/retina | retina/screen/map/pointmap.py | 1 | 2303 | from abc import ABCMeta, abstractmethod
class PointMap(object):
""" Interface of mapping a point from one surface to another
(hence the 2 parameters)
"""
__metaclass__ = ABCMeta
@abstractmethod
def map(self, p1, p2):
""" map of point (p1, p2) from one surface to another """
return
@abstractmethod
def invmap(self, p1, p2):
""" inverse map of point (p1, p2) from one surface to another """
return
class PointMapWithDirection(object):
""" Interface of mapping a point from one surface to another
(hence the 2 parameters)
"""
__metaclass__ = ABCMeta
@abstractmethod
def map(self, p1, p2, dp1, dp2):
""" map of point (p1, p2) from one surface to another
in direction (dp1, dp2) """
return
@abstractmethod
def invmap(self, p1, p2, dp1, dp2):
""" inverse map of point (p1, p2) from one surface to another
in direction (dp1, dp2) """
return
class EyeToImagePointMap(PointMap):
""" Encapsulates 2 PointMap transformations to map a point
from the fly's eye to a screen and then to an image
"""
__metaclass__ = ABCMeta
@abstractmethod
def map_screen_to_image(self, p1, p2):
mapscreen = self._map_screen
r1, r2 = mapscreen.map(p1, p2)
return (r1, r2)
@abstractmethod
def invmap_screen_to_image(self, p1, p2):
mapscreen = self._map_screen
r1, r2 = mapscreen.invmap(p1, p2)
return (r1, r2)
@abstractmethod
def map_eye_to_screen(self, p1, p2):
mapeye = self._map_eye
r1, r2 = mapeye.map(p1, p2)
return (r1, r2)
@abstractmethod
def invmap_eye_to_screen(self, p1, p2):
mapeye = self._map_eye
r1, r2 = mapeye.invmap(p1, p2)
return (r1, r2)
# implementation of superclass abstract classes
def map(self, p1, p2):
mapeye = self._map_eye
mapscreen = self._map_screen
t1, t2 = mapeye.map(p1, p2)
r1, r2 = mapscreen.map(t1, t2)
return (r1, r2)
def invmap(self, p1, p2):
mapeye = self._map_eye
mapscreen = self._map_screen
t1, t2 = mapscreen.invmap(p1, p2)
r1, r2 = mapeye.invmap(t1, t2)
return (r1, r2)
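class IdentityPointMap(PointMap):
    """ Hedged illustrative subclass, not part of the original module:
    the simplest possible PointMap, returning every point unchanged.
    """
    def map(self, p1, p2):
        return (p1, p2)
    def invmap(self, p1, p2):
        return (p1, p2)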
| bsd-3-clause | 2,626,748,361,710,813,700 | 24.876404 | 73 | 0.582718 | false |
initialed85/mac_os_scripts | mac_os_scripts/set_firmware_password.py | 1 | 1788 | """
This script is responsible for setting the firmware password
Commands used:
- expect -d -f /usr/local/zetta/mac_os_scripts/external/set_firmware_password.expect
set password [lindex $argv 0];
spawn firmwarepasswd -setpasswd -setmode command
expect {
"Enter new password:" {
send "$password\r"
exp_continue
}
"Re-enter new password:" {
send "$password\r"
exp_continue
}
}
"""
from common import CLITieIn
class FirmwarePasswordSetter(CLITieIn):
def set_firmware_password(self, password):
command = '/usr/bin/expect -d -f /usr/local/zetta/mac_os_scripts/external/set_firmware_password.expect \'{0}\''.format(
password)
command_output = self.command(command)
if command_output.error_level != 0:
self._logger.error(
'{0} failed stating {1}'.format(
command, command_output
)
)
return False
return True
def run(self, password):
if not self.set_firmware_password(password):
self._logger.error('failed set_firmware_password; cannot continue')
return False
self._logger.debug('passed')
return True
if __name__ == '__main__':
from utils import get_argparser, get_args
parser = get_argparser()
parser.add_argument(
'-f',
'--firmware-password',
type=str,
required=True,
help='firmware password to set'
)
args = get_args(parser)
actor = FirmwarePasswordSetter(
sudo_password=args.sudo_password,
)
result = actor.run(
password=args.firmware_password
)
if not result:
exit(1)
exit(0)
| mit | 7,004,433,027,221,430,000 | 21.35 | 127 | 0.574944 | false |
KerkhoffTechnologies/django-connectwise | djconnectwise/migrations/0107_auto_20190729_1352.py | 1 | 1172 | # Generated by Django 2.1 on 2019-07-29 13:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0106_auto_20190722_1524'),
]
operations = [
migrations.AddField(
model_name='connectwiseboard',
name='bill_time',
field=models.CharField(blank=True, choices=[('Billable', 'Billable'), ('DoNotBill', 'Do Not Bill'), ('NoCharge', 'No Charge')], max_length=50, null=True),
),
migrations.AddField(
model_name='member',
name='work_role',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='djconnectwise.WorkRole'),
),
migrations.AddField(
model_name='member',
name='work_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='djconnectwise.WorkType'),
),
migrations.AddField(
model_name='worktype',
name='overall_default_flag',
field=models.BooleanField(default=False),
),
]
| mit | 2,023,568,476,454,551,800 | 33.470588 | 166 | 0.599829 | false |
a11r/grpc | src/python/grpcio_health_checking/grpc_health/v1/health.py | 15 | 2677 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Reference implementation for health checking in gRPC Python."""
import threading
import grpc
from grpc_health.v1 import health_pb2
from grpc_health.v1 import health_pb2_grpc
class HealthServicer(health_pb2_grpc.HealthServicer):
"""Servicer handling RPCs for service statuses."""
def __init__(self):
self._server_status_lock = threading.Lock()
self._server_status = {}
def Check(self, request, context):
with self._server_status_lock:
status = self._server_status.get(request.service)
if status is None:
context.set_code(grpc.StatusCode.NOT_FOUND)
return health_pb2.HealthCheckResponse()
else:
return health_pb2.HealthCheckResponse(status=status)
def set(self, service, status):
"""Sets the status of a service.
Args:
service: string, the name of the service.
NOTE, '' must be set.
status: HealthCheckResponse.status enum value indicating
the status of the service
"""
with self._server_status_lock:
self._server_status[service] = status
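def _example_usage():  # pragma: no cover
    """Hedged usage sketch, not part of the original module: wire the
    servicer into a gRPC server and mark the overall ('') service as
    SERVING. The port and worker count below are illustrative assumptions.
    """
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    servicer = HealthServicer()
    health_pb2_grpc.add_HealthServicer_to_server(servicer, server)
    servicer.set('', health_pb2.HealthCheckResponse.SERVING)
    server.add_insecure_port('[::]:50051')
    server.start()
    return server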
| bsd-3-clause | 1,560,193,642,846,013,200 | 40.184615 | 72 | 0.719089 | false |
MjAbuz/watchdog | vendor/rdflib-2.4.0/rdflib/sparql/bison/SolutionModifier.py | 4 | 1223 | ASCENDING_ORDER = 1
DESCENDING_ORDER = 2
UNSPECIFIED_ORDER = 3
ORDER_VALUE_MAPPING = {
ASCENDING_ORDER : 'Ascending',
DESCENDING_ORDER : 'Descending',
UNSPECIFIED_ORDER : 'Default',
}
class SolutionModifier(object):
def __init__(self,orderClause=None,limitClause=None,offsetClause=None):
self.orderClause = orderClause
self.limitClause = limitClause
self.offsetClause = offsetClause
def __repr__(self):
if not(self.orderClause or self.limitClause or self.offsetClause):
return ""
return "<SoutionModifier:%s%s%s>"%(
self.orderClause and ' ORDER BY %s'%self.orderClause or '',
self.limitClause and ' LIMIT %s'%self.limitClause or '',
self.offsetClause and ' OFFSET %s'%self.offsetClause or '')
class ParsedOrderConditionExpression(object):
"""
A list of OrderConditions
OrderCondition ::= (('ASC'|'DESC')BrackettedExpression )|(FunctionCall|Var|BrackettedExpression)
"""
def __init__(self,expression,order):
self.expression = expression
self.order = order
def __repr__(self):
return "%s(%s)"%(ORDER_VALUE_MAPPING[self.order],self.expression.reduce())
| agpl-3.0 | 3,920,645,265,022,297,600 | 33 | 100 | 0.647588 | false |