repo_name | path | copies | size | content | license
---|---|---|---|---|---|
kangye1014/python_practice | base_syntax/threads/threadwithmodual.py | 1 | 1997 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# added by kangye, depends on Python 2.7
import thread # needed for thread.exit() in print_time() below
import threading
import time
'''
Python provides thread support through two standard libraries: thread and threading. The thread module offers low-level, primitive threads and a simple lock.
Functions provided by the threading module:
threading.currentThread(): returns the current thread object.
threading.enumerate(): returns a list of the threads currently running. "Running" means after the thread has started and before it has finished; threads not yet started or already terminated are excluded.
threading.activeCount(): returns the number of running threads, equivalent to len(threading.enumerate()).
Besides these functions, the threading module provides the Thread class, which offers the following methods:
run(): the method representing the thread's activity.
start(): starts the thread's activity.
join([time]): waits until the thread terminates. This blocks the calling thread until the thread whose join() method is called terminates - either normally or through an unhandled exception - or until the optional timeout occurs.
isAlive(): returns whether the thread is alive.
getName(): returns the thread's name.
setName(): sets the thread's name.
'''
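# A minimal usage sketch of the API described above (illustrative, not part
# of the original script; assumes the Python 2 threading module):
#
# t = threading.Thread(target=time.sleep, args=(1,))
# t.start() # the new thread begins executing; run() is invoked
# print threading.activeCount() # includes the main thread, so >= 2
# t.join() # block until t terminates
# print t.isAlive() # False once join() has returned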
exitFlag = 0
class myThread(threading.Thread): # inherit from the parent class threading.Thread
def __init__(self, threadID, name, counter):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
def run(self): # put the code to be executed in run(); it is invoked once the thread is started
print "Starting " + self.name
print_time(self.name, self.counter, 5)
print "Exiting " + self.name
def print_time(threadName, delay, counter):
while counter:
if exitFlag:
thread.exit()
time.sleep(delay)
print "%s: %s" % (threadName, time.ctime(time.time()))
counter -= 1
# Create new threads
thread1 = myThread(1, "Thread-1", 1)
thread2 = myThread(2, "Thread-2", 2)
# Start the threads
thread1.start()
thread2.start()
print "Exiting Main Thread" | apache-2.0 |
ramadhane/odoo | addons/l10n_be_invoice_bba/invoice.py | 141 | 12671 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re, time, random
from openerp import api
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
"""
account.invoice object:
- Add support for Belgian structured communication
- Rename 'reference' field labels to 'Communication'
"""
class account_invoice(osv.osv):
_inherit = 'account.invoice'
@api.cr_uid_context
def _get_reference_type(self, cursor, user, context=None):
"""Add BBA Structured Communication Type and change labels from 'reference' into 'communication' """
res = super(account_invoice, self)._get_reference_type(cursor, user,
context=context)
res[[i for i,x in enumerate(res) if x[0] == 'none'][0]] = ('none', 'Free Communication')
res.append(('bba', 'BBA Structured Communication'))
# _logger.warning('reference_type = %s' % res)
return res
def check_bbacomm(self, val):
supported_chars = '0-9+*/ '
pattern = re.compile('[^' + supported_chars + ']')
if pattern.findall(val or ''):
return False
bbacomm = re.sub('\D', '', val or '')
if len(bbacomm) == 12:
base = int(bbacomm[:10])
mod = base % 97 or 97
if mod == int(bbacomm[-2:]):
return True
return False
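# Worked example (illustrative, not part of the original source): for the
# candidate '+++123/4567/89002+++' the digits are '123456789002'; the base is
# the first 10 digits, 1234567890, and 1234567890 % 97 == 2, which matches
# the trailing check digits '02', so check_bbacomm() returns True.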
def _check_communication(self, cr, uid, ids):
for inv in self.browse(cr, uid, ids):
if inv.reference_type == 'bba':
return self.check_bbacomm(inv.reference)
return True
def onchange_partner_id(self, cr, uid, ids, type, partner_id,
date_invoice=False, payment_term=False,
partner_bank_id=False, company_id=False,
context=None):
result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id,
date_invoice, payment_term, partner_bank_id, company_id, context)
# reference_type = self.default_get(cr, uid, ['reference_type'])['reference_type']
# _logger.warning('partner_id %s' % partner_id)
reference = False
reference_type = 'none'
if partner_id:
if (type == 'out_invoice'):
reference_type = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context).out_inv_comm_type
if reference_type:
reference = self.generate_bbacomm(cr, uid, ids, type, reference_type, partner_id, '', context=context)['value']['reference']
res_update = {
'reference_type': reference_type or 'none',
'reference': reference,
}
result['value'].update(res_update)
return result
def generate_bbacomm(self, cr, uid, ids, type, reference_type, partner_id, reference, context=None):
partner_obj = self.pool.get('res.partner')
reference = reference or ''
algorithm = False
if partner_id:
algorithm = partner_obj.browse(cr, uid, partner_id, context=context).out_inv_comm_algorithm
algorithm = algorithm or 'random'
if (type == 'out_invoice'):
if reference_type == 'bba':
if algorithm == 'date':
if not self.check_bbacomm(reference):
doy = time.strftime('%j')
year = time.strftime('%Y')
seq = '001'
seq_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', 'like', '+++%s/%s/%%' % (doy, year))], order='reference')
if seq_ids:
prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
if prev_seq < 999:
seq = '%03d' % (prev_seq + 1)
else:
raise osv.except_osv(_('Warning!'),
_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communication has been exceeded!' \
'\nPlease manually create a unique BBA Structured Communication.'))
bbacomm = doy + year + seq
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (doy, year, seq, mod)
elif algorithm == 'partner_ref':
if not self.check_bbacomm(reference):
partner_ref = self.pool.get('res.partner').browse(cr, uid, partner_id).ref
partner_ref_nr = re.sub('\D', '', partner_ref or '')
if (len(partner_ref_nr) < 3) or (len(partner_ref_nr) > 7):
raise osv.except_osv(_('Warning!'),
_('The Partner should have a 3-7 digit Reference Number for the generation of BBA Structured Communications!' \
'\nPlease correct the Partner record.'))
else:
partner_ref_nr = partner_ref_nr.ljust(7, '0')
seq = '001'
seq_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', 'like', '+++%s/%s/%%' % (partner_ref_nr[:3], partner_ref_nr[3:]))], order='reference')
if seq_ids:
prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
if prev_seq < 999:
seq = '%03d' % (prev_seq + 1)
else:
raise osv.except_osv(_('Warning!'),
_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communication has been exceeded!' \
'\nPlease manually create a unique BBA Structured Communication.'))
bbacomm = partner_ref_nr + seq
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (partner_ref_nr[:3], partner_ref_nr[3:], seq, mod)
elif algorithm == 'random':
if not self.check_bbacomm(reference):
base = random.randint(1, 9999999999)
bbacomm = str(base).rjust(10, '0')
base = int(bbacomm)
mod = base % 97 or 97
mod = str(mod).rjust(2, '0')
reference = '+++%s/%s/%s%s+++' % (bbacomm[:3], bbacomm[3:7], bbacomm[7:], mod)
else:
raise osv.except_osv(_('Error!'),
_("Unsupported Structured Communication Type Algorithm '%s' !" \
"\nPlease contact your Odoo support channel.") % algorithm)
return {'value': {'reference': reference}}
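# Format sketch (illustrative): the 'date' algorithm above yields references
# of the form '+++DDD/YYYY/SSSMM+++' (3-digit day of year, 4-digit year,
# 3-digit sequence, 2-digit mod-97 check), while 'random' builds the same
# 12-digit layout from a random 10-digit base.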
def create(self, cr, uid, vals, context=None):
reference = vals.get('reference', False)
reference_type = vals.get('reference_type', False)
if vals.get('type') == 'out_invoice' and not reference_type:
# fallback on default communication type for partner
reference_type = self.pool.get('res.partner').browse(cr, uid, vals['partner_id']).out_inv_comm_type
if reference_type == 'bba':
reference = self.generate_bbacomm(cr, uid, [], vals['type'], reference_type, vals['partner_id'], '', context={})['value']['reference']
vals.update({
'reference_type': reference_type or 'none',
'reference': reference,
})
if reference_type == 'bba':
if not reference:
raise osv.except_osv(_('Warning!'),
_('Empty BBA Structured Communication!' \
'\nPlease fill in a unique BBA Structured Communication.'))
if self.check_bbacomm(reference):
reference = re.sub('\D', '', reference)
vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
same_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', '=', vals['reference'])])
if same_ids:
raise osv.except_osv(_('Warning!'),
_('The BBA Structured Communication has already been used!' \
'\nPlease manually create a unique BBA Structured Communication.'))
return super(account_invoice, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
for inv in self.browse(cr, uid, ids, context):
if 'reference_type' in vals:
reference_type = vals['reference_type']
else:
reference_type = inv.reference_type or ''
if reference_type == 'bba' and 'reference' in vals:
if self.check_bbacomm(vals['reference']):
reference = re.sub('\D', '', vals['reference'])
vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
same_ids = self.search(cr, uid,
[('id', '!=', inv.id), ('type', '=', 'out_invoice'),
('reference_type', '=', 'bba'), ('reference', '=', vals['reference'])])
if same_ids:
raise osv.except_osv(_('Warning!'),
_('The BBA Structured Communication has already been used!' \
'\nPlease manually create a unique BBA Structured Communication.'))
return super(account_invoice, self).write(cr, uid, ids, vals, context)
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
invoice = self.browse(cr, uid, id, context=context)
if invoice.type in ['out_invoice']:
reference_type = invoice.reference_type or 'none'
default['reference_type'] = reference_type
if reference_type == 'bba':
partner = invoice.partner_id
default['reference'] = self.generate_bbacomm(cr, uid, id,
invoice.type, reference_type,
partner.id, '', context=context)['value']['reference']
return super(account_invoice, self).copy(cr, uid, id, default, context=context)
_columns = {
'reference': fields.char('Communication', help="The partner reference of this invoice."),
'reference_type': fields.selection(_get_reference_type, 'Communication Type',
required=True, readonly=True),
}
_constraints = [
(_check_communication, 'Invalid BBA Structured Communication !', ['Communication']),
]
account_invoice()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.py | 427 | 38314 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Implementation of the Metadata for Python packages PEPs.
Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental).
"""
from __future__ import unicode_literals
import codecs
from email import message_from_file
import json
import logging
import re
from . import DistlibException, __version__
from .compat import StringIO, string_types, text_type
from .markers import interpret
from .util import extract_by_key, get_extras
from .version import get_scheme, PEP440_VERSION_RE
logger = logging.getLogger(__name__)
class MetadataMissingError(DistlibException):
"""A required metadata is missing"""
class MetadataConflictError(DistlibException):
"""Attempt to read or write metadata fields that are conflictual."""
class MetadataUnrecognizedVersionError(DistlibException):
"""Unknown metadata version number."""
class MetadataInvalidError(DistlibException):
"""A metadata value is invalid"""
# public API of this module
__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']
# Encoding used for the PKG-INFO files
PKG_INFO_ENCODING = 'utf-8'
# preferred version. Hopefully will be changed
# to 1.2 once PEP 345 is supported everywhere
PKG_INFO_PREFERRED_VERSION = '1.1'
_LINE_PREFIX = re.compile('\n \|')
_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License')
_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License', 'Classifier', 'Download-URL', 'Obsoletes',
'Provides', 'Requires')
_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier',
'Download-URL')
_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External')
_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python',
'Obsoletes-Dist', 'Requires-External', 'Maintainer',
'Maintainer-email', 'Project-URL')
_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External', 'Private-Version',
'Obsoleted-By', 'Setup-Requires-Dist', 'Extension',
'Provides-Extra')
_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By',
'Setup-Requires-Dist', 'Extension')
_ALL_FIELDS = set()
_ALL_FIELDS.update(_241_FIELDS)
_ALL_FIELDS.update(_314_FIELDS)
_ALL_FIELDS.update(_345_FIELDS)
_ALL_FIELDS.update(_426_FIELDS)
EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
def _version2fieldlist(version):
if version == '1.0':
return _241_FIELDS
elif version == '1.1':
return _314_FIELDS
elif version == '1.2':
return _345_FIELDS
elif version == '2.0':
return _426_FIELDS
raise MetadataUnrecognizedVersionError(version)
def _best_version(fields):
"""Detect the best version depending on the fields used."""
def _has_marker(keys, markers):
for marker in markers:
if marker in keys:
return True
return False
keys = []
for key, value in fields.items():
if value in ([], 'UNKNOWN', None):
continue
keys.append(key)
possible_versions = ['1.0', '1.1', '1.2', '2.0']
# first let's try to see if a field is not part of one of the versions
for key in keys:
if key not in _241_FIELDS and '1.0' in possible_versions:
possible_versions.remove('1.0')
if key not in _314_FIELDS and '1.1' in possible_versions:
possible_versions.remove('1.1')
if key not in _345_FIELDS and '1.2' in possible_versions:
possible_versions.remove('1.2')
if key not in _426_FIELDS and '2.0' in possible_versions:
possible_versions.remove('2.0')
# possible_versions contains qualified versions
if len(possible_versions) == 1:
return possible_versions[0] # found !
elif len(possible_versions) == 0:
raise MetadataConflictError('Unknown metadata set')
# let's see if one unique marker is found
is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
if int(is_1_1) + int(is_1_2) + int(is_2_0) > 1:
raise MetadataConflictError('You used incompatible 1.1/1.2/2.0 fields')
# we have the choice, 1.0, or 1.2, or 2.0
# - 1.0 has a broken Summary field but works with all tools
# - 1.1 is to avoid
# - 1.2 fixes Summary but has little adoption
# - 2.0 adds more features and is very new
if not is_1_1 and not is_1_2 and not is_2_0:
# we couldn't find any specific marker
if PKG_INFO_PREFERRED_VERSION in possible_versions:
return PKG_INFO_PREFERRED_VERSION
if is_1_1:
return '1.1'
if is_1_2:
return '1.2'
return '2.0'
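# Worked example (illustrative, not from the original source): a field mapping
# containing only 'Name', 'Version' and 'Requires-Dist' rules out 1.0 and 1.1
# (neither lists 'Requires-Dist'), and the _345_MARKERS hit makes
# _best_version() return '1.2'.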
_ATTR2FIELD = {
'metadata_version': 'Metadata-Version',
'name': 'Name',
'version': 'Version',
'platform': 'Platform',
'supported_platform': 'Supported-Platform',
'summary': 'Summary',
'description': 'Description',
'keywords': 'Keywords',
'home_page': 'Home-page',
'author': 'Author',
'author_email': 'Author-email',
'maintainer': 'Maintainer',
'maintainer_email': 'Maintainer-email',
'license': 'License',
'classifier': 'Classifier',
'download_url': 'Download-URL',
'obsoletes_dist': 'Obsoletes-Dist',
'provides_dist': 'Provides-Dist',
'requires_dist': 'Requires-Dist',
'setup_requires_dist': 'Setup-Requires-Dist',
'requires_python': 'Requires-Python',
'requires_external': 'Requires-External',
'requires': 'Requires',
'provides': 'Provides',
'obsoletes': 'Obsoletes',
'project_url': 'Project-URL',
'private_version': 'Private-Version',
'obsoleted_by': 'Obsoleted-By',
'extension': 'Extension',
'provides_extra': 'Provides-Extra',
}
_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
_VERSIONS_FIELDS = ('Requires-Python',)
_VERSION_FIELDS = ('Version',)
_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes',
'Requires', 'Provides', 'Obsoletes-Dist',
'Provides-Dist', 'Requires-Dist', 'Requires-External',
'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
'Provides-Extra', 'Extension')
_LISTTUPLEFIELDS = ('Project-URL',)
_ELEMENTSFIELD = ('Keywords',)
_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')
_MISSING = object()
_FILESAFE = re.compile('[^A-Za-z0-9.]+')
def _get_name_and_version(name, version, for_filename=False):
"""Return the distribution name with version.
If for_filename is true, return a filename-escaped form."""
if for_filename:
# For both name and version any runs of non-alphanumeric or '.'
# characters are replaced with a single '-'. Additionally any
# spaces in the version string become '.'
name = _FILESAFE.sub('-', name)
version = _FILESAFE.sub('-', version.replace(' ', '.'))
return '%s-%s' % (name, version)
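# Illustrative call (not in the original source): with for_filename=True,
# _get_name_and_version('my pkg', '1.0 beta', True) returns 'my-pkg-1.0.beta';
# runs of unsafe characters collapse to '-' and version spaces become '.'.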
class LegacyMetadata(object):
"""The legacy metadata of a release.
Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can
instantiate the class with one of these arguments (or none):
- *path*, the path to a metadata file
- *fileobj* give a file-like object with metadata as content
- *mapping* is a dict-like object
- *scheme* is a version scheme name
"""
# TODO document the mapping API and UNKNOWN default key
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._fields = {}
self.requires_files = []
self._dependencies = None
self.scheme = scheme
if path is not None:
self.read(path)
elif fileobj is not None:
self.read_file(fileobj)
elif mapping is not None:
self.update(mapping)
self.set_metadata_version()
def set_metadata_version(self):
self._fields['Metadata-Version'] = _best_version(self._fields)
def _write_field(self, fileobj, name, value):
fileobj.write('%s: %s\n' % (name, value))
def __getitem__(self, name):
return self.get(name)
def __setitem__(self, name, value):
return self.set(name, value)
def __delitem__(self, name):
field_name = self._convert_name(name)
try:
del self._fields[field_name]
except KeyError:
raise KeyError(name)
def __contains__(self, name):
return (name in self._fields or
self._convert_name(name) in self._fields)
def _convert_name(self, name):
if name in _ALL_FIELDS:
return name
name = name.replace('-', '_').lower()
return _ATTR2FIELD.get(name, name)
def _default_value(self, name):
if name in _LISTFIELDS or name in _ELEMENTSFIELD:
return []
return 'UNKNOWN'
def _remove_line_prefix(self, value):
return _LINE_PREFIX.sub('\n', value)
def __getattr__(self, name):
if name in _ATTR2FIELD:
return self[name]
raise AttributeError(name)
#
# Public API
#
# dependencies = property(_get_dependencies, _set_dependencies)
def get_fullname(self, filesafe=False):
"""Return the distribution name with version.
If filesafe is true, return a filename-escaped form."""
return _get_name_and_version(self['Name'], self['Version'], filesafe)
def is_field(self, name):
"""return True if name is a valid metadata key"""
name = self._convert_name(name)
return name in _ALL_FIELDS
def is_multi_field(self, name):
name = self._convert_name(name)
return name in _LISTFIELDS
def read(self, filepath):
"""Read the metadata values from a file path."""
fp = codecs.open(filepath, 'r', encoding='utf-8')
try:
self.read_file(fp)
finally:
fp.close()
def read_file(self, fileob):
"""Read the metadata values from a file object."""
msg = message_from_file(fileob)
self._fields['Metadata-Version'] = msg['metadata-version']
# When reading, get all the fields we can
for field in _ALL_FIELDS:
if field not in msg:
continue
if field in _LISTFIELDS:
# we can have multiple lines
values = msg.get_all(field)
if field in _LISTTUPLEFIELDS and values is not None:
values = [tuple(value.split(',')) for value in values]
self.set(field, values)
else:
# single line
value = msg[field]
if value is not None and value != 'UNKNOWN':
self.set(field, value)
self.set_metadata_version()
def write(self, filepath, skip_unknown=False):
"""Write the metadata fields to filepath."""
fp = codecs.open(filepath, 'w', encoding='utf-8')
try:
self.write_file(fp, skip_unknown)
finally:
fp.close()
def write_file(self, fileobject, skip_unknown=False):
"""Write the PKG-INFO format data to a file object."""
self.set_metadata_version()
for field in _version2fieldlist(self['Metadata-Version']):
values = self.get(field)
if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
continue
if field in _ELEMENTSFIELD:
self._write_field(fileobject, field, ','.join(values))
continue
if field not in _LISTFIELDS:
if field == 'Description':
values = values.replace('\n', '\n |')
values = [values]
if field in _LISTTUPLEFIELDS:
values = [','.join(value) for value in values]
for value in values:
self._write_field(fileobject, field, value)
def update(self, other=None, **kwargs):
"""Set metadata values from the given iterable `other` and kwargs.
Behavior is like `dict.update`: If `other` has a ``keys`` method,
they are looped over and ``self[key]`` is assigned ``other[key]``.
Else, ``other`` is an iterable of ``(key, value)`` iterables.
Keys that don't match a metadata field or that have an empty value are
dropped.
"""
def _set(key, value):
if key in _ATTR2FIELD and value:
self.set(self._convert_name(key), value)
if not other:
# other is None or empty container
pass
elif hasattr(other, 'keys'):
for k in other.keys():
_set(k, other[k])
else:
for k, v in other:
_set(k, v)
if kwargs:
for k, v in kwargs.items():
_set(k, v)
def set(self, name, value):
"""Control then set a metadata field."""
name = self._convert_name(name)
if ((name in _ELEMENTSFIELD or name == 'Platform') and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [v.strip() for v in value.split(',')]
else:
value = []
elif (name in _LISTFIELDS and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [value]
else:
value = []
if logger.isEnabledFor(logging.WARNING):
project_name = self['Name']
scheme = get_scheme(self.scheme)
if name in _PREDICATE_FIELDS and value is not None:
for v in value:
# check that the values are valid
if not scheme.is_valid_matcher(v.split(';')[0]):
logger.warning(
'%r: %r is not valid (field %r)',
project_name, v, name)
# FIXME this rejects UNKNOWN, is that right?
elif name in _VERSIONS_FIELDS and value is not None:
if not scheme.is_valid_constraint_list(value):
logger.warning('%r: %r is not a valid version (field %r)',
project_name, value, name)
elif name in _VERSION_FIELDS and value is not None:
if not scheme.is_valid_version(value):
logger.warning('%r: %r is not a valid version (field %r)',
project_name, value, name)
if name in _UNICODEFIELDS:
if name == 'Description':
value = self._remove_line_prefix(value)
self._fields[name] = value
def get(self, name, default=_MISSING):
"""Get a metadata field."""
name = self._convert_name(name)
if name not in self._fields:
if default is _MISSING:
default = self._default_value(name)
return default
if name in _UNICODEFIELDS:
value = self._fields[name]
return value
elif name in _LISTFIELDS:
value = self._fields[name]
if value is None:
return []
res = []
for val in value:
if name not in _LISTTUPLEFIELDS:
res.append(val)
else:
# That's for Project-URL
res.append((val[0], val[1]))
return res
elif name in _ELEMENTSFIELD:
value = self._fields[name]
if isinstance(value, string_types):
return value.split(',')
return self._fields[name]
def check(self, strict=False):
"""Check if the metadata is compliant. If strict is True then raise if
no Name or Version are provided"""
self.set_metadata_version()
# XXX should check the versions (if the file was loaded)
missing, warnings = [], []
for attr in ('Name', 'Version'): # required by PEP 345
if attr not in self:
missing.append(attr)
if strict and missing != []:
msg = 'missing required metadata: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for attr in ('Home-page', 'Author'):
if attr not in self:
missing.append(attr)
# checking metadata 1.2 (XXX needs to check 1.1, 1.0)
if self['Metadata-Version'] != '1.2':
return missing, warnings
scheme = get_scheme(self.scheme)
def are_valid_constraints(value):
for v in value:
if not scheme.is_valid_matcher(v.split(';')[0]):
return False
return True
for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
(_VERSIONS_FIELDS,
scheme.is_valid_constraint_list),
(_VERSION_FIELDS,
scheme.is_valid_version)):
for field in fields:
value = self.get(field, None)
if value is not None and not controller(value):
warnings.append('Wrong value for %r: %s' % (field, value))
return missing, warnings
def todict(self, skip_missing=False):
"""Return fields as a dict.
Field names will be converted to use the underscore-lowercase style
instead of hyphen-mixed case (i.e. home_page instead of Home-page).
"""
self.set_metadata_version()
mapping_1_0 = (
('metadata_version', 'Metadata-Version'),
('name', 'Name'),
('version', 'Version'),
('summary', 'Summary'),
('home_page', 'Home-page'),
('author', 'Author'),
('author_email', 'Author-email'),
('license', 'License'),
('description', 'Description'),
('keywords', 'Keywords'),
('platform', 'Platform'),
('classifier', 'Classifier'),
('download_url', 'Download-URL'),
)
data = {}
for key, field_name in mapping_1_0:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
if self['Metadata-Version'] == '1.2':
mapping_1_2 = (
('requires_dist', 'Requires-Dist'),
('requires_python', 'Requires-Python'),
('requires_external', 'Requires-External'),
('provides_dist', 'Provides-Dist'),
('obsoletes_dist', 'Obsoletes-Dist'),
('project_url', 'Project-URL'),
('maintainer', 'Maintainer'),
('maintainer_email', 'Maintainer-email'),
)
for key, field_name in mapping_1_2:
if not skip_missing or field_name in self._fields:
if key != 'project_url':
data[key] = self[field_name]
else:
data[key] = [','.join(u) for u in self[field_name]]
elif self['Metadata-Version'] == '1.1':
mapping_1_1 = (
('provides', 'Provides'),
('requires', 'Requires'),
('obsoletes', 'Obsoletes'),
)
for key, field_name in mapping_1_1:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
return data
def add_requirements(self, requirements):
if self['Metadata-Version'] == '1.1':
# we can't have 1.1 metadata *and* Setuptools requires
for field in ('Obsoletes', 'Requires', 'Provides'):
if field in self:
del self[field]
self['Requires-Dist'] += requirements
# Mapping API
# TODO could add iter* variants
def keys(self):
return list(_version2fieldlist(self['Metadata-Version']))
def __iter__(self):
for key in self.keys():
yield key
def values(self):
return [self[key] for key in self.keys()]
def items(self):
return [(key, self[key]) for key in self.keys()]
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__, self.name,
self.version)
METADATA_FILENAME = 'pydist.json'
class Metadata(object):
"""
The metadata of a release. This implementation uses 2.0 (JSON)
metadata where possible. If not possible, it wraps a LegacyMetadata
instance which handles the key-value metadata format.
"""
METADATA_VERSION_MATCHER = re.compile('^\d+(\.\d+)*$')
NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I)
VERSION_MATCHER = PEP440_VERSION_RE
SUMMARY_MATCHER = re.compile('.{1,2047}')
METADATA_VERSION = '2.0'
GENERATOR = 'distlib (%s)' % __version__
MANDATORY_KEYS = {
'name': (),
'version': (),
'summary': ('legacy',),
}
INDEX_KEYS = ('name version license summary description author '
'author_email keywords platform home_page classifiers '
'download_url')
DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires '
'dev_requires provides meta_requires obsoleted_by '
'supports_environments')
SYNTAX_VALIDATORS = {
'metadata_version': (METADATA_VERSION_MATCHER, ()),
'name': (NAME_MATCHER, ('legacy',)),
'version': (VERSION_MATCHER, ('legacy',)),
'summary': (SUMMARY_MATCHER, ('legacy',)),
}
__slots__ = ('_legacy', '_data', 'scheme')
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._legacy = None
self._data = None
self.scheme = scheme
#import pdb; pdb.set_trace()
if mapping is not None:
try:
self._validate_mapping(mapping, scheme)
self._data = mapping
except MetadataUnrecognizedVersionError:
self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme)
self.validate()
else:
data = None
if path:
with open(path, 'rb') as f:
data = f.read()
elif fileobj:
data = fileobj.read()
if data is None:
# Initialised with no args - to be added
self._data = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
else:
if not isinstance(data, text_type):
data = data.decode('utf-8')
try:
self._data = json.loads(data)
self._validate_mapping(self._data, scheme)
except ValueError:
# Note: MetadataUnrecognizedVersionError does not
# inherit from ValueError (it's a DistlibException,
# which should not inherit from ValueError).
# The ValueError comes from the json.load - if that
# succeeds and we get a validation error, we want
# that to propagate
self._legacy = LegacyMetadata(fileobj=StringIO(data),
scheme=scheme)
self.validate()
common_keys = set(('name', 'version', 'license', 'keywords', 'summary'))
none_list = (None, list)
none_dict = (None, dict)
mapped_keys = {
'run_requires': ('Requires-Dist', list),
'build_requires': ('Setup-Requires-Dist', list),
'dev_requires': none_list,
'test_requires': none_list,
'meta_requires': none_list,
'extras': ('Provides-Extra', list),
'modules': none_list,
'namespaces': none_list,
'exports': none_dict,
'commands': none_dict,
'classifiers': ('Classifier', list),
'source_url': ('Download-URL', None),
'metadata_version': ('Metadata-Version', None),
}
del none_list, none_dict
def __getattribute__(self, key):
common = object.__getattribute__(self, 'common_keys')
mapped = object.__getattribute__(self, 'mapped_keys')
if key in mapped:
lk, maker = mapped[key]
if self._legacy:
if lk is None:
result = None if maker is None else maker()
else:
result = self._legacy.get(lk)
else:
value = None if maker is None else maker()
if key not in ('commands', 'exports', 'modules', 'namespaces',
'classifiers'):
result = self._data.get(key, value)
else:
# special cases for PEP 459
sentinel = object()
result = sentinel
d = self._data.get('extensions')
if d:
if key == 'commands':
result = d.get('python.commands', value)
elif key == 'classifiers':
d = d.get('python.details')
if d:
result = d.get(key, value)
else:
d = d.get('python.exports')
if d:
result = d.get(key, value)
if result is sentinel:
result = value
elif key not in common:
result = object.__getattribute__(self, key)
elif self._legacy:
result = self._legacy.get(key)
else:
result = self._data.get(key)
return result
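# Illustrative lookup (assumption, not from the original source): for 2.0-style
# data, md.commands reads self._data['extensions']['python.commands'] when
# present and otherwise falls back to the empty dict built by the mapped maker.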
def _validate_value(self, key, value, scheme=None):
if key in self.SYNTAX_VALIDATORS:
pattern, exclusions = self.SYNTAX_VALIDATORS[key]
if (scheme or self.scheme) not in exclusions:
m = pattern.match(value)
if not m:
raise MetadataInvalidError('%r is an invalid value for '
'the %r property' % (value,
key))
def __setattr__(self, key, value):
self._validate_value(key, value)
common = object.__getattribute__(self, 'common_keys')
mapped = object.__getattribute__(self, 'mapped_keys')
if key in mapped:
lk, _ = mapped[key]
if self._legacy:
if lk is None:
raise NotImplementedError
self._legacy[lk] = value
elif key not in ('commands', 'exports', 'modules', 'namespaces',
'classifiers'):
self._data[key] = value
else:
# special cases for PEP 459
d = self._data.setdefault('extensions', {})
if key == 'commands':
d['python.commands'] = value
elif key == 'classifiers':
d = d.setdefault('python.details', {})
d[key] = value
else:
d = d.setdefault('python.exports', {})
d[key] = value
elif key not in common:
object.__setattr__(self, key, value)
else:
if key == 'keywords':
if isinstance(value, string_types):
value = value.strip()
if value:
value = value.split()
else:
value = []
if self._legacy:
self._legacy[key] = value
else:
self._data[key] = value
@property
def name_and_version(self):
return _get_name_and_version(self.name, self.version, True)
@property
def provides(self):
if self._legacy:
result = self._legacy['Provides-Dist']
else:
result = self._data.setdefault('provides', [])
s = '%s (%s)' % (self.name, self.version)
if s not in result:
result.append(s)
return result
@provides.setter
def provides(self, value):
if self._legacy:
self._legacy['Provides-Dist'] = value
else:
self._data['provides'] = value
def get_requirements(self, reqts, extras=None, env=None):
"""
Base method to get dependencies, given a set of extras
to satisfy and an optional environment context.
:param reqts: A list of sometimes-wanted dependencies,
perhaps dependent on extras and environment.
:param extras: A list of optional components being requested.
:param env: An optional environment for marker evaluation.
"""
if self._legacy:
result = reqts
else:
result = []
extras = get_extras(extras or [], self.extras)
for d in reqts:
if 'extra' not in d and 'environment' not in d:
# unconditional
include = True
else:
if 'extra' not in d:
# Not extra-dependent - only environment-dependent
include = True
else:
include = d.get('extra') in extras
if include:
# Not excluded because of extras, check environment
marker = d.get('environment')
if marker:
include = interpret(marker, env)
if include:
result.extend(d['requires'])
for key in ('build', 'dev', 'test'):
e = ':%s:' % key
if e in extras:
extras.remove(e)
# A recursive call, but it should terminate since 'test'
# has been removed from the extras
reqts = self._data.get('%s_requires' % key, [])
result.extend(self.get_requirements(reqts, extras=extras,
env=env))
return result
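# Illustrative call (assumption, not from the original source): with
# extras=['test'], an entry {'requires': ['pytest'], 'extra': 'test'} in reqts
# contributes 'pytest', while an entry carrying an 'environment' marker is
# included only when interpret(marker, env) evaluates to true.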
@property
def dictionary(self):
if self._legacy:
return self._from_legacy()
return self._data
@property
def dependencies(self):
if self._legacy:
raise NotImplementedError
else:
return extract_by_key(self._data, self.DEPENDENCY_KEYS)
@dependencies.setter
def dependencies(self, value):
if self._legacy:
raise NotImplementedError
else:
self._data.update(value)
def _validate_mapping(self, mapping, scheme):
if mapping.get('metadata_version') != self.METADATA_VERSION:
raise MetadataUnrecognizedVersionError()
missing = []
for key, exclusions in self.MANDATORY_KEYS.items():
if key not in mapping:
if scheme not in exclusions:
missing.append(key)
if missing:
msg = 'Missing metadata items: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for k, v in mapping.items():
self._validate_value(k, v, scheme)
def validate(self):
if self._legacy:
missing, warnings = self._legacy.check(True)
if missing or warnings:
logger.warning('Metadata: missing: %s, warnings: %s',
missing, warnings)
else:
self._validate_mapping(self._data, self.scheme)
def todict(self):
if self._legacy:
return self._legacy.todict(True)
else:
result = extract_by_key(self._data, self.INDEX_KEYS)
return result
def _from_legacy(self):
assert self._legacy and not self._data
result = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
lmd = self._legacy.todict(True) # skip missing ones
for k in ('name', 'version', 'license', 'summary', 'description',
'classifier'):
if k in lmd:
if k == 'classifier':
nk = 'classifiers'
else:
nk = k
result[nk] = lmd[k]
kw = lmd.get('Keywords', [])
if kw == ['']:
kw = []
result['keywords'] = kw
keys = (('requires_dist', 'run_requires'),
('setup_requires_dist', 'build_requires'))
for ok, nk in keys:
if ok in lmd and lmd[ok]:
result[nk] = [{'requires': lmd[ok]}]
result['provides'] = self.provides
author = {}
maintainer = {}
return result
LEGACY_MAPPING = {
'name': 'Name',
'version': 'Version',
'license': 'License',
'summary': 'Summary',
'description': 'Description',
'classifiers': 'Classifier',
}
def _to_legacy(self):
def process_entries(entries):
reqts = set()
for e in entries:
extra = e.get('extra')
env = e.get('environment')
rlist = e['requires']
for r in rlist:
if not env and not extra:
reqts.add(r)
else:
marker = ''
if extra:
marker = 'extra == "%s"' % extra
if env:
if marker:
marker = '(%s) and %s' % (env, marker)
else:
marker = env
reqts.add(';'.join((r, marker)))
return reqts
assert self._data and not self._legacy
result = LegacyMetadata()
nmd = self._data
for nk, ok in self.LEGACY_MAPPING.items():
if nk in nmd:
result[ok] = nmd[nk]
r1 = process_entries(self.run_requires + self.meta_requires)
r2 = process_entries(self.build_requires + self.dev_requires)
if self.extras:
result['Provides-Extra'] = sorted(self.extras)
result['Requires-Dist'] = sorted(r1)
result['Setup-Requires-Dist'] = sorted(r2)
# TODO: other fields such as contacts
return result
def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True):
if [path, fileobj].count(None) != 1:
raise ValueError('Exactly one of path and fileobj is needed')
self.validate()
if legacy:
if self._legacy:
legacy_md = self._legacy
else:
legacy_md = self._to_legacy()
if path:
legacy_md.write(path, skip_unknown=skip_unknown)
else:
legacy_md.write_file(fileobj, skip_unknown=skip_unknown)
else:
if self._legacy:
d = self._from_legacy()
else:
d = self._data
if fileobj:
json.dump(d, fileobj, ensure_ascii=True, indent=2,
sort_keys=True)
else:
with codecs.open(path, 'w', 'utf-8') as f:
json.dump(d, f, ensure_ascii=True, indent=2,
sort_keys=True)
def add_requirements(self, requirements):
if self._legacy:
self._legacy.add_requirements(requirements)
else:
run_requires = self._data.setdefault('run_requires', [])
always = None
for entry in run_requires:
if 'environment' not in entry and 'extra' not in entry:
always = entry
break
if always is None:
always = { 'requires': requirements }
run_requires.insert(0, always)
else:
rset = set(always['requires']) | set(requirements)
always['requires'] = sorted(rset)
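# Usage sketch (illustrative): md.add_requirements(['requests (>= 2.0)'])
# merges into the first unconditional run_requires entry (one with no 'extra'
# or 'environment' key), keeping its 'requires' list sorted and de-duplicated.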
def __repr__(self):
name = self.name or '(no name)'
version = self.version or 'no version'
return '<%s %s %s (%s)>' % (self.__class__.__name__,
self.metadata_version, name, version)
| gpl-2.0 |
BorisJeremic/Real-ESSI-Examples | education_examples/_Chapter_Material_Behaviour_Examples/Interface_Models/Axial_Models/SoftContact/ForceBasedSoftContact/plot.py | 1 | 1421 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 24})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=20
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=20
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
plt.style.use('grayscale')
# Read the time and displacement
times = finput["time"][:]
normal_disp = finput["/Model/Elements/Element_Outputs"][6,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
# plt.figure()
fig = plt.figure(figsize=(10,10))
plt.plot(normal_disp*1e3, normal_stress/1000, linewidth=4)
plt.xlabel(r"Penetration $\Delta_n$ $[mm]$")
plt.ylabel(r"Normal Stress $\sigma_n$ $[kPa]$")
plt.hold(True)
# axes = plt.gca()
# axes.set_xlim([-7,7])
# axes.set_ylim([-1,1])
outfigname = "Axial_Response.pdf";
# Make space for and rotate the x-axis tick labels
fig.autofmt_xdate()
plt.grid(linestyle='--', linewidth='0.5', color='k')
plt.savefig(outfigname, bbox_inches='tight')
# plt.show() | cc0-1.0 |
crossbridge-community/avmplus | gtest/gtest-1.6.0/test/gtest_catch_exceptions_test.py | 414 | 9312 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = '[email protected] (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
TEST_LIST = gtest_test_utils.Subprocess([EXE_PATH, LIST_TESTS_FLAG]).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess([EX_EXE_PATH]).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
* Exceptions thrown affect the remainder of the test work flow in the
expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestCase()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest test body '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestCase()'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUp()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDown()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in the test body'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assert_('Unknown C++ exception thrown in the test body'
in EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
FILTER_OUT_SEH_TESTS_FLAG]).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
| mpl-2.0 |
thnuclub/elasticsearch | dev-tools/upgrade-tests.py | 74 | 15347 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import random
import os
import tempfile
import shutil
import subprocess
import time
import argparse
import logging
import sys
import re
from datetime import datetime
try:
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError
from elasticsearch.exceptions import TransportError
except ImportError as e:
print('Can\'t import elasticsearch; please install it with `sudo pip install elasticsearch`')
raise e
'''This file executes a basic upgrade test by running a full cluster restart.
The upgrade test starts 2 or more nodes of an old elasticsearch version, indexes
a random number of documents into the running nodes and executes a full cluster restart.
After the nodes are recovered, a small set of basic checks is executed to ensure all
documents are still searchable and field data can be loaded, etc.
NOTE: This script requires the elasticsearch python client `elasticsearch-py`; run the following command to install it:
`sudo pip install elasticsearch`
if you are running python3 you need to install the client using pip3. On OSX `pip3` will be included in the Python 3.4
release available on `https://www.python.org/download/`:
`sudo pip3 install elasticsearch`
See `https://github.com/elasticsearch/elasticsearch-py` for details
In order to run this test two different versions of elasticsearch are required. Both need to be unpacked into
the same directory:
```
$ cd /path/to/elasticsearch/clone
$ mkdir backwards && cd backwards
$ wget https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.3.1.tar.gz
$ wget https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-0.90.13.tar.gz
$ tar -zxvf elasticsearch-1.3.1.tar.gz && tar -zxvf elasticsearch-0.90.13.tar.gz
$ cd ..
$ python dev-tools/upgrade-tests.py --version.backwards 0.90.13 --version.current 1.3.1
```
'''
BLACK_LIST = {'1.2.0' : { 'reason': 'Contains a major bug where routing hashes are not consistent with previous version',
'issue': 'https://github.com/elasticsearch/elasticsearch/pull/6393'},
'1.3.0' : { 'reason': 'Lucene Related bug prevents upgrades from 0.90.7 and some earlier versions ',
'issue' : 'https://github.com/elasticsearch/elasticsearch/pull/7055'}}
# sometimes returns True
def rarely():
return random.randint(0, 10) == 0
# usually returns True
def frequently():
return not rarely()
# asserts the correctness of the given hits given they are sorted asc
def assert_sort(hits):
values = [hit['sort'] for hit in hits['hits']['hits']]
assert len(values) > 0, 'expected non empty result'
val = min(values)
for x in values:
assert x >= val, '%s >= %s' % (x, val)
val = x
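# Illustrative behaviour (not in the original source): assert_sort accepts
# hits whose 'sort' values are non-decreasing, e.g. [[1], [3], [3], [7]],
# and raises an AssertionError for a sequence such as [[3], [1]].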
# asserts that the cluster health didn't timeout etc.
def assert_health(cluster_health, num_shards, num_replicas):
assert cluster_health['timed_out'] == False, 'cluster health timed out %s' % cluster_health
# Starts a new elasticsearch node from a released & untared version.
# This node uses unicast discovery with the provided unicast host list and starts
# the nodes with the given data directory. This allows shutting down and starting up
# nodes on the same data dir simulating a full cluster restart.
def start_node(version, data_dir, node_dir, unicast_host_list, tcp_port, http_port):
es_run_path = os.path.join(node_dir, 'elasticsearch-%s' % (version), 'bin/elasticsearch')
if version.startswith('0.90.'):
foreground = '-f' # 0.90.x starts in background automatically
else:
foreground = ''
return subprocess.Popen([es_run_path,
'-Des.path.data=%s' % data_dir, '-Des.cluster.name=upgrade_test',
'-Des.discovery.zen.ping.unicast.hosts=%s' % unicast_host_list,
'-Des.discovery.zen.ping.multicast.enabled=false',
'-Des.transport.tcp.port=%s' % tcp_port,
'-Des.http.port=%s' % http_port,
foreground], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Indexes the given number of documents into the given index
# and randomly runs refresh, optimize and flush commands
def index_documents(es, index_name, type, num_docs):
logging.info('Indexing %s docs' % num_docs)
for id in range(0, num_docs):
es.index(index=index_name, doc_type=type, id=id, body={'string': str(random.randint(0, 100)),
'long_sort': random.randint(0, 100),
'double_sort' : float(random.randint(0, 100))})
if rarely():
es.indices.refresh(index=index_name)
if rarely():
es.indices.flush(index=index_name, force=frequently())
if rarely():
es.indices.optimize(index=index_name)
es.indices.refresh(index=index_name)
# Runs a basic number of assertions including:
# - document counts
# - match all search with sort on double / long
# - Realtime GET operations
# TODO(simonw): we should add stuff like:
# - dates including sorting
# - string sorting
# - docvalues if available
# - global ordinal if available
def run_basic_asserts(es, index_name, type, num_docs):
count = es.count(index=index_name)['count']
assert count == num_docs, 'Expected %r but got %r documents' % (num_docs, count)
for _ in range(0, num_docs):
random_doc_id = random.randint(0, num_docs-1)
doc = es.get(index=index_name, doc_type=type, id=random_doc_id)
assert doc, 'Expected document for id %s but got %s' % (random_doc_id, doc)
assert_sort(es.search(index=index_name,
body={
'sort': [
{'double_sort': {'order': 'asc'}}
]
}))
assert_sort(es.search(index=index_name,
body={
'sort': [
{'long_sort': {'order': 'asc'}}
]
}))
# picks a random version or an entire random version tuple from the directory
# to run the backwards tests against.
def pick_random_upgrade_version(directory, lower_version=None, upper_version=None):
if lower_version and upper_version:
return lower_version, upper_version
assert os.path.isdir(directory), 'No such directory %s' % directory
versions = []
for version in map(lambda x : x[len('elasticsearch-'):], filter(lambda x : re.match(r'^elasticsearch-\d+[.]\d+[.]\d+$', x), os.listdir(directory))):
if not version in BLACK_LIST:
versions.append(build_tuple(version))
versions.sort()
if lower_version: # lower version is set - picking a higher one
versions = filter(lambda x : x > build_tuple(lower_version), versions)
assert len(versions) >= 1, 'Expected at least 1 higher version than %s version in %s ' % (lower_version, directory)
random.shuffle(versions)
return lower_version, build_version(versions[0])
if upper_version:
versions = filter(lambda x : x < build_tuple(upper_version), versions)
assert len(versions) >= 1, 'Expected at least 1 lower version than %s version in %s ' % (upper_version, directory)
random.shuffle(versions)
return build_version(versions[0]), upper_version
assert len(versions) >= 2, 'Expected at least 2 different version in %s but found %s' % (directory, len(versions))
random.shuffle(versions)
versions = versions[0:2]
versions.sort()
return build_version(versions[0]), build_version(versions[1])
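# Illustrative behaviour (assumption): when only a lower version such as
# 0.90.13 is given, versions strictly greater than it are kept and
# ('0.90.13', <random higher version>) is returned from the directory listing.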
def build_version(version_tuple):
return '.'.join([str(x) for x in version_tuple])
def build_tuple(version_string):
return [int(x) for x in version_string.split('.')]
# returns a new elasticsearch client and ensures the all nodes have joined the cluster
# this method waits at most 30 seconds for all nodes to join
def new_es_instance(num_nodes, http_port, timeout = 30):
logging.info('Waiting for %s nodes to join the cluster' % num_nodes)
for _ in range(0, timeout):
# TODO(simonw): ask Honza if there is a better way to do this?
try:
es = Elasticsearch([
{'host': '127.0.0.1', 'port': http_port + x}
for x in range(0, num_nodes)])
es.cluster.health(wait_for_nodes=num_nodes)
es.count() # can we actually search or do we get a 503? -- anyway retry
return es
except (ConnectionError, TransportError):
pass
time.sleep(1)
assert False, 'Timed out waiting for %s nodes for %s seconds' % (num_nodes, timeout)
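# e.g. new_es_instance(3, 9200) waits for a 3 node cluster reachable via
# 127.0.0.1:9200, 9201 and 9202 (one HTTP port per node, starting at http_port).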
def assert_versions(bwc_version, current_version, node_dir):
  assert [int(x) for x in bwc_version.split('.')] < [int(x) for x in current_version.split('.')],\
         '[%s] must be less than [%s]' % (bwc_version, current_version)
for version in [bwc_version, current_version]:
assert not version in BLACK_LIST, 'Version %s is blacklisted - %s, see %s' \
% (version, BLACK_LIST[version]['reason'],
BLACK_LIST[version]['issue'])
  dir = os.path.join(node_dir, 'elasticsearch-%s' % current_version)
  assert os.path.isdir(dir), 'Expected elasticsearch-%s install directory does not exist: %s' % (current_version, dir)
def full_cluster_restart(node_dir, current_version, bwc_version, tcp_port, http_port):
assert_versions(bwc_version, current_version, node_dir)
num_nodes = random.randint(2, 3)
nodes = []
data_dir = tempfile.mkdtemp()
logging.info('Running upgrade test from [%s] to [%s] seed: [%s] es.path.data: [%s] es.http.port [%s] es.tcp.port [%s]'
% (bwc_version, current_version, seed, data_dir, http_port, tcp_port))
try:
logging.info('Starting %s BWC nodes of version %s' % (num_nodes, bwc_version))
unicast_addresses = ','.join(['127.0.0.1:%s' % (tcp_port+x) for x in range(0, num_nodes)])
for id in range(0, num_nodes):
nodes.append(start_node(bwc_version, data_dir, node_dir, unicast_addresses, tcp_port+id, http_port+id))
es = new_es_instance(num_nodes, http_port)
es.indices.delete(index='test_index', ignore=404)
num_shards = random.randint(1, 10)
num_replicas = random.randint(0, 1)
logging.info('Create index with [%s] shards and [%s] replicas' % (num_shards, num_replicas))
es.indices.create(index='test_index', body={
      # TODO(simonw): can we do more here in terms of randomization - seems hard due to all the different versions
'settings': {
'number_of_shards': num_shards,
'number_of_replicas': num_replicas
}
})
logging.info('Nodes joined, waiting for green status')
health = es.cluster.health(wait_for_status='green', wait_for_relocating_shards=0)
assert_health(health, num_shards, num_replicas)
num_docs = random.randint(10, 100)
index_documents(es, 'test_index', 'test_type', num_docs)
logging.info('Run basic asserts before full cluster restart')
run_basic_asserts(es, 'test_index', 'test_type', num_docs)
logging.info('kill bwc nodes -- prepare upgrade')
for node in nodes:
node.terminate()
# now upgrade the nodes and rerun the checks
tcp_port = tcp_port + len(nodes) # bump up port to make sure we can claim them
http_port = http_port + len(nodes)
logging.info('Full Cluster restart starts upgrading to version [elasticsearch-%s] es.http.port [%s] es.tcp.port [%s]'
% (current_version, http_port, tcp_port))
nodes = []
unicast_addresses = ','.join(['127.0.0.1:%s' % (tcp_port+x) for x in range(0, num_nodes)])
for id in range(0, num_nodes+1): # one more to trigger relocation
nodes.append(start_node(current_version, data_dir, node_dir, unicast_addresses, tcp_port+id, http_port+id))
es = new_es_instance(num_nodes+1, http_port)
logging.info('Nodes joined, waiting for green status')
health = es.cluster.health(wait_for_status='green', wait_for_relocating_shards=0)
assert_health(health, num_shards, num_replicas)
run_basic_asserts(es, 'test_index', 'test_type', num_docs)
# by running the indexing again we try to catch possible mapping problems after the upgrade
index_documents(es, 'test_index', 'test_type', num_docs)
run_basic_asserts(es, 'test_index', 'test_type', num_docs)
logging.info("[SUCCESS] - all test passed upgrading from version [%s] to version [%s]" % (bwc_version, current_version))
finally:
for node in nodes:
node.terminate()
time.sleep(1) # wait a second until removing the data dirs to give the nodes a chance to shutdown
shutil.rmtree(data_dir) # remove the temp data dir
if __name__ == '__main__':
logging.basicConfig(format='[%(levelname)s] [%(asctime)s] %(message)s', level=logging.INFO,
datefmt='%Y-%m-%d %I:%M:%S %p')
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.WARN)
  parser = argparse.ArgumentParser(description='Tests Full Cluster Restarts across major versions')
parser.add_argument('--version.backwards', '-b', dest='backwards_version', metavar='V',
help='The elasticsearch version to upgrade from')
parser.add_argument('--version.current', '-c', dest='current_version', metavar='V',
help='The elasticsearch version to upgrade to')
parser.add_argument('--seed', '-s', dest='seed', metavar='N', type=int,
help='The random seed to use')
parser.add_argument('--backwards.dir', '-d', dest='bwc_directory', default='backwards', metavar='dir',
help='The directory to the backwards compatibility sources')
parser.add_argument('--tcp.port', '-p', dest='tcp_port', default=9300, metavar='port', type=int,
help='The port to use as the minimum port for TCP communication')
parser.add_argument('--http.port', '-t', dest='http_port', default=9200, metavar='port', type=int,
help='The port to use as the minimum port for HTTP communication')
parser.set_defaults(bwc_directory='backwards')
parser.set_defaults(seed=int(time.time()))
args = parser.parse_args()
node_dir = args.bwc_directory
current_version = args.current_version
bwc_version = args.backwards_version
seed = args.seed
random.seed(seed)
bwc_version, current_version = pick_random_upgrade_version(node_dir, bwc_version, current_version)
tcp_port = args.tcp_port
http_port = args.http_port
try:
full_cluster_restart(node_dir, current_version, bwc_version, tcp_port, http_port)
except:
logging.warn('REPRODUCE WITH: \n\t`python %s --version.backwards %s --version.current %s --seed %s --tcp.port %s --http.port %s`'
% (sys.argv[0], bwc_version, current_version, seed, tcp_port, http_port))
raise
| apache-2.0 |
mandeepdhami/neutron | neutron/tests/unit/db/test_l3_hamode_db.py | 4 | 23255 | # Copyright (C) 2014 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron import context
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_hamode_db
from neutron.extensions import l3
from neutron.extensions import l3_ext_ha_mode
from neutron import manager
from neutron.scheduler import l3_agent_scheduler
from neutron.tests.common import helpers
from neutron.tests.unit import testlib_api
_uuid = uuidutils.generate_uuid
class FakeL3PluginWithAgents(common_db_mixin.CommonDbMixin,
l3_hamode_db.L3_HA_NAT_db_mixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agents_db.AgentDbMixin):
pass
class L3HATestFramework(testlib_api.SqlTestCase):
def setUp(self):
super(L3HATestFramework, self).setUp()
self.admin_ctx = context.get_admin_context()
self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
self.core_plugin = manager.NeutronManager.get_plugin()
notif_p = mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
'_notify_ha_interfaces_updated')
self.notif_m = notif_p.start()
cfg.CONF.set_override('allow_overlapping_ips', True)
self.plugin = FakeL3PluginWithAgents()
self.agent1 = helpers.register_l3_agent()
self.agent2 = helpers.register_l3_agent(
'host_2', constants.L3_AGENT_MODE_DVR_SNAT)
def _bring_down_agent(self, agent_id):
update = {
'agent': {
'heartbeat_timestamp':
timeutils.utcnow() - datetime.timedelta(hours=1)}}
self.plugin.update_agent(self.admin_ctx, agent_id, update)
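        # A heartbeat that is an hour old is well past the agent_down_time
        # threshold, so the plugin counts this agent as dead (see the
        # "not enough agents" test below).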
def _create_router(self, ha=True, tenant_id='tenant1', distributed=None,
ctx=None):
if ctx is None:
ctx = self.admin_ctx
ctx.tenant_id = tenant_id
router = {'name': 'router1', 'admin_state_up': True}
if ha is not None:
router['ha'] = ha
if distributed is not None:
router['distributed'] = distributed
return self.plugin.create_router(ctx, {'router': router})
def _update_router(self, router_id, ha=True, distributed=None, ctx=None):
if ctx is None:
ctx = self.admin_ctx
data = {'ha': ha} if ha is not None else {}
if distributed is not None:
data['distributed'] = distributed
return self.plugin._update_router_db(ctx, router_id,
data, None)
def _bind_router(self, router_id):
with self.admin_ctx.session.begin(subtransactions=True):
scheduler = l3_agent_scheduler.ChanceScheduler()
agents_db = self.plugin.get_agents_db(self.admin_ctx)
scheduler._bind_ha_router_to_agents(
self.plugin,
self.admin_ctx,
router_id,
agents_db)
class L3HATestCase(L3HATestFramework):
def test_verify_configuration_succeed(self):
# Default configuration should pass
self.plugin._verify_configuration()
def test_verify_configuration_l3_ha_net_cidr_is_not_a_cidr(self):
cfg.CONF.set_override('l3_ha_net_cidr', 'not a cidr')
self.assertRaises(
l3_ext_ha_mode.HANetworkCIDRNotValid,
self.plugin._verify_configuration)
def test_verify_configuration_l3_ha_net_cidr_is_not_a_subnet(self):
cfg.CONF.set_override('l3_ha_net_cidr', '10.0.0.1/8')
self.assertRaises(
l3_ext_ha_mode.HANetworkCIDRNotValid,
self.plugin._verify_configuration)
def test_verify_configuration_min_l3_agents_per_router_below_minimum(self):
cfg.CONF.set_override('min_l3_agents_per_router', 0)
self.assertRaises(
l3_ext_ha_mode.HAMinimumAgentsNumberNotValid,
self.plugin._check_num_agents_per_router)
def test_verify_configuration_max_l3_agents_below_min_l3_agents(self):
cfg.CONF.set_override('max_l3_agents_per_router', 3)
cfg.CONF.set_override('min_l3_agents_per_router', 4)
self.assertRaises(
l3_ext_ha_mode.HAMaximumAgentsNumberNotValid,
self.plugin._check_num_agents_per_router)
def test_verify_configuration_max_l3_agents_unlimited(self):
cfg.CONF.set_override('max_l3_agents_per_router',
l3_hamode_db.UNLIMITED_AGENTS_PER_ROUTER)
self.plugin._check_num_agents_per_router()
def test_get_ha_router_port_bindings(self):
router = self._create_router()
self._bind_router(router['id'])
bindings = self.plugin.get_ha_router_port_bindings(
self.admin_ctx, [router['id']])
binding_dicts = [{'router_id': binding['router_id'],
'l3_agent_id': binding['l3_agent_id']}
for binding in bindings]
self.assertIn({'router_id': router['id'],
'l3_agent_id': self.agent1['id']}, binding_dicts)
self.assertIn({'router_id': router['id'],
'l3_agent_id': self.agent2['id']}, binding_dicts)
def test_get_l3_bindings_hosting_router_with_ha_states_ha_router(self):
router = self._create_router()
self._bind_router(router['id'])
self.plugin.update_routers_states(
self.admin_ctx, {router['id']: 'active'}, self.agent1['host'])
bindings = self.plugin.get_l3_bindings_hosting_router_with_ha_states(
self.admin_ctx, router['id'])
agent_ids = [(agent[0]['id'], agent[1]) for agent in bindings]
self.assertIn((self.agent1['id'], 'active'), agent_ids)
self.assertIn((self.agent2['id'], 'standby'), agent_ids)
def test_get_l3_bindings_hosting_router_with_ha_states_not_scheduled(self):
router = self._create_router(ha=False)
bindings = self.plugin.get_l3_bindings_hosting_router_with_ha_states(
self.admin_ctx, router['id'])
self.assertEqual([], bindings)
def test_ha_router_create(self):
router = self._create_router()
self.assertTrue(router['ha'])
def test_ha_router_create_with_distributed(self):
self.assertRaises(l3_ext_ha_mode.DistributedHARouterNotSupported,
self._create_router,
distributed=True)
def test_no_ha_router_create(self):
router = self._create_router(ha=False)
self.assertFalse(router['ha'])
def test_router_create_with_ha_conf_enabled(self):
cfg.CONF.set_override('l3_ha', True)
router = self._create_router(ha=None)
self.assertTrue(router['ha'])
def test_migration_from_ha(self):
router = self._create_router()
self.assertTrue(router['ha'])
router = self._update_router(router['id'], ha=False)
self.assertFalse(router.extra_attributes['ha'])
self.assertIsNone(router.extra_attributes['ha_vr_id'])
def test_migration_to_ha(self):
router = self._create_router(ha=False)
self.assertFalse(router['ha'])
router = self._update_router(router['id'], ha=True)
self.assertTrue(router.extra_attributes['ha'])
self.assertIsNotNone(router.extra_attributes['ha_vr_id'])
def test_migrate_ha_router_to_distributed(self):
router = self._create_router()
self.assertTrue(router['ha'])
self.assertRaises(l3_ext_ha_mode.DistributedHARouterNotSupported,
self._update_router,
router['id'],
distributed=True)
def test_l3_agent_routers_query_interface(self):
router = self._create_router()
self._bind_router(router['id'])
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
self.agent1['host'])
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNotNone(router.get('ha'))
interface = router.get(constants.HA_INTERFACE_KEY)
self.assertIsNotNone(interface)
self.assertEqual(constants.DEVICE_OWNER_ROUTER_HA_INTF,
interface['device_owner'])
subnets = interface['subnets']
self.assertEqual(1, len(subnets))
self.assertEqual(cfg.CONF.l3_ha_net_cidr, subnets[0]['cidr'])
def test_unique_ha_network_per_tenant(self):
tenant1 = _uuid()
tenant2 = _uuid()
self._create_router(tenant_id=tenant1)
self._create_router(tenant_id=tenant2)
ha_network1 = self.plugin.get_ha_network(self.admin_ctx, tenant1)
ha_network2 = self.plugin.get_ha_network(self.admin_ctx, tenant2)
self.assertNotEqual(
ha_network1['network_id'], ha_network2['network_id'])
def _deployed_router_change_ha_flag(self, to_ha):
self._create_router(ha=not to_ha)
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)
router = routers[0]
interface = router.get(constants.HA_INTERFACE_KEY)
if to_ha:
self.assertIsNone(interface)
else:
self.assertIsNotNone(interface)
self._update_router(router['id'], to_ha)
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)
router = routers[0]
interface = router.get(constants.HA_INTERFACE_KEY)
if to_ha:
self.assertIsNotNone(interface)
else:
self.assertIsNone(interface)
def test_deployed_router_can_have_ha_enabled(self):
self._deployed_router_change_ha_flag(to_ha=True)
def test_deployed_router_can_have_ha_disabled(self):
self._deployed_router_change_ha_flag(to_ha=False)
def test_create_ha_router_notifies_agent(self):
self._create_router()
self.assertTrue(self.notif_m.called)
def test_update_router_to_ha_notifies_agent(self):
router = self._create_router(ha=False)
self.notif_m.reset_mock()
self._update_router(router['id'], ha=True)
self.assertTrue(self.notif_m.called)
def test_unique_vr_id_between_routers(self):
self._create_router()
self._create_router()
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)
self.assertEqual(2, len(routers))
self.assertNotEqual(routers[0]['ha_vr_id'], routers[1]['ha_vr_id'])
@mock.patch('neutron.db.l3_hamode_db.VR_ID_RANGE', new=set(range(1, 1)))
def test_vr_id_depleted(self):
self.assertRaises(l3_ext_ha_mode.NoVRIDAvailable, self._create_router)
@mock.patch('neutron.db.l3_hamode_db.VR_ID_RANGE', new=set(range(1, 2)))
def test_vr_id_unique_range_per_tenant(self):
self._create_router()
self._create_router(tenant_id=_uuid())
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)
self.assertEqual(2, len(routers))
self.assertEqual(routers[0]['ha_vr_id'], routers[1]['ha_vr_id'])
@mock.patch('neutron.db.l3_hamode_db.MAX_ALLOCATION_TRIES', new=2)
def test_vr_id_allocation_contraint_conflict(self):
router = self._create_router()
network = self.plugin.get_ha_network(self.admin_ctx,
router['tenant_id'])
with mock.patch.object(self.plugin, '_get_allocated_vr_id',
return_value=set()) as alloc:
self.assertRaises(l3_ext_ha_mode.MaxVRIDAllocationTriesReached,
self.plugin._allocate_vr_id, self.admin_ctx,
network.network_id, router['id'])
self.assertEqual(2, len(alloc.mock_calls))
def test_vr_id_allocation_delete_router(self):
router = self._create_router()
network = self.plugin.get_ha_network(self.admin_ctx,
router['tenant_id'])
allocs_before = self.plugin._get_allocated_vr_id(self.admin_ctx,
network.network_id)
router = self._create_router()
allocs_current = self.plugin._get_allocated_vr_id(self.admin_ctx,
network.network_id)
self.assertNotEqual(allocs_before, allocs_current)
self.plugin.delete_router(self.admin_ctx, router['id'])
allocs_after = self.plugin._get_allocated_vr_id(self.admin_ctx,
network.network_id)
self.assertEqual(allocs_before, allocs_after)
def test_vr_id_allocation_router_migration(self):
router = self._create_router()
network = self.plugin.get_ha_network(self.admin_ctx,
router['tenant_id'])
allocs_before = self.plugin._get_allocated_vr_id(self.admin_ctx,
network.network_id)
router = self._create_router()
self._update_router(router['id'], ha=False)
allocs_after = self.plugin._get_allocated_vr_id(self.admin_ctx,
network.network_id)
self.assertEqual(allocs_before, allocs_after)
def test_one_ha_router_one_not(self):
self._create_router(ha=False)
self._create_router()
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)
ha0 = routers[0]['ha']
ha1 = routers[1]['ha']
self.assertNotEqual(ha0, ha1)
def test_add_ha_port_binding_failure_rolls_back_port(self):
router = self._create_router()
device_filter = {'device_id': [router['id']]}
ports_before = self.core_plugin.get_ports(
self.admin_ctx, filters=device_filter)
network = self.plugin.get_ha_network(self.admin_ctx,
router['tenant_id'])
with mock.patch.object(self.plugin, '_create_ha_port_binding',
side_effect=ValueError):
self.assertRaises(ValueError, self.plugin.add_ha_port,
self.admin_ctx, router['id'], network.network_id,
router['tenant_id'])
ports_after = self.core_plugin.get_ports(
self.admin_ctx, filters=device_filter)
self.assertEqual(ports_before, ports_after)
def test_create_ha_network_binding_failure_rolls_back_network(self):
networks_before = self.core_plugin.get_networks(self.admin_ctx)
with mock.patch.object(self.plugin,
'_create_ha_network_tenant_binding',
side_effect=ValueError):
self.assertRaises(ValueError, self.plugin._create_ha_network,
self.admin_ctx, _uuid())
networks_after = self.core_plugin.get_networks(self.admin_ctx)
self.assertEqual(networks_before, networks_after)
def test_create_ha_network_subnet_failure_rolls_back_network(self):
networks_before = self.core_plugin.get_networks(self.admin_ctx)
with mock.patch.object(self.plugin, '_create_ha_subnet',
side_effect=ValueError):
self.assertRaises(ValueError, self.plugin._create_ha_network,
self.admin_ctx, _uuid())
networks_after = self.core_plugin.get_networks(self.admin_ctx)
self.assertEqual(networks_before, networks_after)
def test_create_ha_interfaces_binding_failure_rolls_back_ports(self):
router = self._create_router()
network = self.plugin.get_ha_network(self.admin_ctx,
router['tenant_id'])
device_filter = {'device_id': [router['id']]}
ports_before = self.core_plugin.get_ports(
self.admin_ctx, filters=device_filter)
router_db = self.plugin._get_router(self.admin_ctx, router['id'])
with mock.patch.object(self.plugin, '_create_ha_port_binding',
side_effect=ValueError):
self.assertRaises(ValueError, self.plugin._create_ha_interfaces,
self.admin_ctx, router_db, network)
ports_after = self.core_plugin.get_ports(
self.admin_ctx, filters=device_filter)
self.assertEqual(ports_before, ports_after)
def test_create_router_db_ha_attribute_failure_rolls_back_router(self):
routers_before = self.plugin.get_routers(self.admin_ctx)
for method in ('_set_vr_id',
'_create_ha_interfaces',
'_notify_ha_interfaces_updated'):
with mock.patch.object(self.plugin, method,
side_effect=ValueError):
self.assertRaises(ValueError, self._create_router)
routers_after = self.plugin.get_routers(self.admin_ctx)
self.assertEqual(routers_before, routers_after)
def test_update_routers_states(self):
router1 = self._create_router()
self._bind_router(router1['id'])
router2 = self._create_router()
self._bind_router(router2['id'])
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
self.agent1['host'])
for router in routers:
self.assertEqual('standby', router[constants.HA_ROUTER_STATE_KEY])
states = {router1['id']: 'active',
router2['id']: 'standby'}
self.plugin.update_routers_states(
self.admin_ctx, states, self.agent1['host'])
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
self.agent1['host'])
for router in routers:
self.assertEqual(states[router['id']],
router[constants.HA_ROUTER_STATE_KEY])
def test_set_router_states_handles_concurrently_deleted_router(self):
router1 = self._create_router()
self._bind_router(router1['id'])
router2 = self._create_router()
self._bind_router(router2['id'])
bindings = self.plugin.get_ha_router_port_bindings(
self.admin_ctx, [router1['id'], router2['id']])
self.plugin.delete_router(self.admin_ctx, router1['id'])
self.plugin._set_router_states(
self.admin_ctx, bindings, {router1['id']: 'active',
router2['id']: 'active'})
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
self.agent1['host'])
self.assertEqual('active', routers[0][constants.HA_ROUTER_STATE_KEY])
def test_exclude_dvr_agents_for_ha_candidates(self):
"""Test dvr agents are not counted in the ha candidates.
This test case tests that when get_number_of_agents_for_scheduling
is called, it doesn't count dvr agents.
"""
# Test setup registers two l3 agents.
        # Register another l3 agent with dvr mode and assert that
        # get_number_of_agents_for_scheduling returns 2.
helpers.register_l3_agent('host_3', constants.L3_AGENT_MODE_DVR)
num_ha_candidates = self.plugin.get_number_of_agents_for_scheduling(
self.admin_ctx)
self.assertEqual(2, num_ha_candidates)
def test_get_number_of_agents_for_scheduling_not_enough_agents(self):
cfg.CONF.set_override('min_l3_agents_per_router', 3)
agent_to_bring_down = helpers.register_l3_agent(host='l3host_3')
self._bring_down_agent(agent_to_bring_down['id'])
self.assertRaises(l3_ext_ha_mode.HANotEnoughAvailableAgents,
self.plugin.get_number_of_agents_for_scheduling,
self.admin_ctx)
class L3HAModeDbTestCase(L3HATestFramework):
def _create_network(self, plugin, ctx, name='net',
tenant_id='tenant1'):
network = {'network': {'name': name,
'shared': False,
'admin_state_up': True,
'tenant_id': tenant_id}}
return plugin.create_network(ctx, network)['id']
def _create_subnet(self, plugin, ctx, network_id, cidr='10.0.0.0/8',
name='subnet', tenant_id='tenant1'):
subnet = {'subnet': {'name': name,
'ip_version': 4,
'network_id': network_id,
'cidr': cidr,
'gateway_ip': attributes.ATTR_NOT_SPECIFIED,
'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
'host_routes': attributes.ATTR_NOT_SPECIFIED,
'tenant_id': tenant_id,
'enable_dhcp': True,
'ipv6_ra_mode': attributes.ATTR_NOT_SPECIFIED}}
created_subnet = plugin.create_subnet(ctx, subnet)
return created_subnet
def test_remove_ha_in_use(self):
router = self._create_router(ctx=self.admin_ctx)
network_id = self._create_network(self.core_plugin, self.admin_ctx)
subnet = self._create_subnet(self.core_plugin, self.admin_ctx,
network_id)
interface_info = {'subnet_id': subnet['id']}
self.plugin.add_router_interface(self.admin_ctx,
router['id'],
interface_info)
self.assertRaises(l3.RouterInUse, self.plugin.delete_router,
self.admin_ctx, router['id'])
bindings = self.plugin.get_ha_router_port_bindings(
self.admin_ctx, [router['id']])
self.assertEqual(2, len(bindings))
class L3HAUserTestCase(L3HATestFramework):
def setUp(self):
super(L3HAUserTestCase, self).setUp()
self.user_ctx = context.Context('', _uuid())
def test_create_ha_router(self):
self._create_router(ctx=self.user_ctx)
def test_update_router(self):
router = self._create_router(ctx=self.user_ctx)
self._update_router(router['id'], ha=False, ctx=self.user_ctx)
def test_delete_router(self):
router = self._create_router(ctx=self.user_ctx)
self.plugin.delete_router(self.user_ctx, router['id'])
| apache-2.0 |
alxgu/ansible | lib/ansible/modules/web_infrastructure/jenkins_job.py | 39 | 11412 | #!/usr/bin/python
#
# Copyright: (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: jenkins_job
short_description: Manage jenkins jobs
description:
    - Manage Jenkins jobs by using the Jenkins REST API.
requirements:
- "python-jenkins >= 0.4.12"
- "lxml >= 3.3.3"
version_added: "2.2"
author: "Sergio Millan Rodriguez (@sermilrod)"
options:
config:
description:
- config in XML format.
- Required if job does not yet exist.
- Mutually exclusive with C(enabled).
- Considered if C(state=present).
required: false
enabled:
description:
- Whether the job should be enabled or disabled.
- Mutually exclusive with C(config).
- Considered if C(state=present).
type: bool
required: false
name:
description:
- Name of the Jenkins job.
required: true
password:
description:
- Password to authenticate with the Jenkins server.
required: false
state:
description:
- Attribute that specifies if the job has to be created or deleted.
required: false
default: present
choices: ['present', 'absent']
token:
description:
- API token used to authenticate alternatively to password.
required: false
url:
description:
- URL where the Jenkins server is accessible.
required: false
default: http://localhost:8080
user:
description:
- User to authenticate with the Jenkins server.
required: false
'''
EXAMPLES = '''
# Create a jenkins job using basic authentication
- jenkins_job:
config: "{{ lookup('file', 'templates/test.xml') }}"
name: test
password: admin
url: http://localhost:8080
user: admin
# Create a jenkins job using the token
- jenkins_job:
config: "{{ lookup('template', 'templates/test.xml.j2') }}"
name: test
token: asdfasfasfasdfasdfadfasfasdfasdfc
url: http://localhost:8080
user: admin
# Delete a jenkins job using basic authentication
- jenkins_job:
name: test
password: admin
state: absent
url: http://localhost:8080
user: admin
# Delete a jenkins job using the token
- jenkins_job:
name: test
token: asdfasfasfasdfasdfadfasfasdfasdfc
state: absent
url: http://localhost:8080
user: admin
# Disable a jenkins job using basic authentication
- jenkins_job:
name: test
password: admin
enabled: False
url: http://localhost:8080
user: admin
# Disable a jenkins job using the token
- jenkins_job:
name: test
token: asdfasfasfasdfasdfadfasfasdfasdfc
enabled: False
url: http://localhost:8080
user: admin
'''
RETURN = '''
---
name:
description: Name of the jenkins job.
returned: success
type: str
sample: test-job
state:
description: State of the jenkins job.
returned: success
type: str
sample: present
enabled:
description: Whether the jenkins job is enabled or not.
returned: success
type: bool
sample: true
user:
description: User used for authentication.
returned: success
type: str
sample: admin
url:
description: Url to connect to the Jenkins server.
returned: success
type: str
sample: https://jenkins.mydomain.com
'''
import traceback
JENKINS_IMP_ERR = None
try:
import jenkins
python_jenkins_installed = True
except ImportError:
JENKINS_IMP_ERR = traceback.format_exc()
python_jenkins_installed = False
LXML_IMP_ERR = None
try:
from lxml import etree as ET
python_lxml_installed = True
except ImportError:
LXML_IMP_ERR = traceback.format_exc()
python_lxml_installed = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
class JenkinsJob:
def __init__(self, module):
self.module = module
self.config = module.params.get('config')
self.name = module.params.get('name')
self.password = module.params.get('password')
self.state = module.params.get('state')
self.enabled = module.params.get('enabled')
self.token = module.params.get('token')
self.user = module.params.get('user')
self.jenkins_url = module.params.get('url')
self.server = self.get_jenkins_connection()
self.result = {
'changed': False,
'url': self.jenkins_url,
'name': self.name,
'user': self.user,
'state': self.state,
'diff': {
'before': "",
'after': ""
}
}
self.EXCL_STATE = "excluded state"
def get_jenkins_connection(self):
try:
if (self.user and self.password):
return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
elif (self.user and self.token):
return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
elif (self.user and not (self.password or self.token)):
return jenkins.Jenkins(self.jenkins_url, self.user)
else:
return jenkins.Jenkins(self.jenkins_url)
except Exception as e:
self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e), exception=traceback.format_exc())
def get_job_status(self):
try:
response = self.server.get_job_info(self.name)
if "color" not in response:
return self.EXCL_STATE
else:
return response['color'].encode('utf-8')
except Exception as e:
self.module.fail_json(msg='Unable to fetch job information, %s' % to_native(e), exception=traceback.format_exc())
def job_exists(self):
try:
return bool(self.server.job_exists(self.name))
except Exception as e:
self.module.fail_json(msg='Unable to validate if job exists, %s for %s' % (to_native(e), self.jenkins_url),
exception=traceback.format_exc())
def get_config(self):
return job_config_to_string(self.config)
def get_current_config(self):
return job_config_to_string(self.server.get_job_config(self.name).encode('utf-8'))
def has_config_changed(self):
# config is optional, if not provided we keep the current config as is
if self.config is None:
return False
config_file = self.get_config()
machine_file = self.get_current_config()
self.result['diff']['after'] = config_file
self.result['diff']['before'] = machine_file
if machine_file != config_file:
return True
return False
def present_job(self):
if self.config is None and self.enabled is None:
self.module.fail_json(msg='one of the following params is required on state=present: config,enabled')
if not self.job_exists():
self.create_job()
else:
self.update_job()
def has_state_changed(self, status):
# Keep in current state if enabled arg_spec is not given
if self.enabled is None:
return False
if ((self.enabled is False and status != "disabled") or (self.enabled is True and status == "disabled")):
return True
return False
def switch_state(self):
if self.enabled is False:
self.server.disable_job(self.name)
else:
self.server.enable_job(self.name)
def update_job(self):
try:
status = self.get_job_status()
# Handle job config
if self.has_config_changed():
self.result['changed'] = True
if not self.module.check_mode:
self.server.reconfig_job(self.name, self.get_config())
# Handle job disable/enable
elif (status != self.EXCL_STATE and self.has_state_changed(status)):
self.result['changed'] = True
if not self.module.check_mode:
self.switch_state()
except Exception as e:
self.module.fail_json(msg='Unable to reconfigure job, %s for %s' % (to_native(e), self.jenkins_url),
exception=traceback.format_exc())
def create_job(self):
if self.config is None:
self.module.fail_json(msg='missing required param: config')
self.result['changed'] = True
try:
config_file = self.get_config()
self.result['diff']['after'] = config_file
if not self.module.check_mode:
self.server.create_job(self.name, config_file)
except Exception as e:
self.module.fail_json(msg='Unable to create job, %s for %s' % (to_native(e), self.jenkins_url),
exception=traceback.format_exc())
def absent_job(self):
if self.job_exists():
self.result['changed'] = True
self.result['diff']['before'] = self.get_current_config()
if not self.module.check_mode:
try:
self.server.delete_job(self.name)
except Exception as e:
self.module.fail_json(msg='Unable to delete job, %s for %s' % (to_native(e), self.jenkins_url),
exception=traceback.format_exc())
def get_result(self):
result = self.result
if self.job_exists():
result['enabled'] = self.get_job_status() != "disabled"
else:
result['enabled'] = None
return result
def test_dependencies(module):
if not python_jenkins_installed:
module.fail_json(
msg=missing_required_lib("python-jenkins",
url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
exception=JENKINS_IMP_ERR)
if not python_lxml_installed:
module.fail_json(
msg=missing_required_lib("lxml", url="https://lxml.de/installation.html"),
exception=LXML_IMP_ERR)
def job_config_to_string(xml_str):
return ET.tostring(ET.fromstring(xml_str))
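# job_config_to_string normalizes the XML so that semantically identical
# configs compare equal in has_config_changed(), e.g. (illustrative, assuming
# lxml's default serialization):
#   job_config_to_string('<project></project>')  # -> b'<project/>'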
def main():
module = AnsibleModule(
argument_spec=dict(
config=dict(required=False),
name=dict(required=True),
password=dict(required=False, no_log=True),
state=dict(required=False, choices=['present', 'absent'], default="present"),
enabled=dict(required=False, type='bool'),
token=dict(required=False, no_log=True),
url=dict(required=False, default="http://localhost:8080"),
user=dict(required=False)
),
mutually_exclusive=[
['password', 'token'],
['config', 'enabled'],
],
supports_check_mode=True,
)
test_dependencies(module)
jenkins_job = JenkinsJob(module)
if module.params.get('state') == "present":
jenkins_job.present_job()
else:
jenkins_job.absent_job()
result = jenkins_job.get_result()
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
GoogleChromeLabs/chromeos_smart_card_connector | third_party/googletest/src/googletest/test/googletest-setuptestsuite-test.py | 23 | 2244 | #!/usr/bin/env python
#
# Copyright 2019, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that SetUpTestSuite and TearDownTestSuite errors are noticed."""
import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath(
'googletest-setuptestsuite-test_')
class GTestSetUpTestSuiteTest(gtest_test_utils.TestCase):
def testSetupErrorAndTearDownError(self):
p = gtest_test_utils.Subprocess(COMMAND)
self.assertNotEqual(p.exit_code, 0, msg=p.output)
self.assertIn(
'[ FAILED ] SetupFailTest: SetUpTestSuite or TearDownTestSuite\n'
'[ FAILED ] TearDownFailTest: SetUpTestSuite or TearDownTestSuite\n'
'\n'
' 2 FAILED TEST SUITES\n',
p.output)
if __name__ == '__main__':
gtest_test_utils.Main()
| apache-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Scrapy-1.0.1/scrapy/utils/signal.py | 18 | 2931 | """Helper functions for working with signals"""
import logging
from twisted.internet.defer import maybeDeferred, DeferredList, Deferred
from twisted.python.failure import Failure
from scrapy.xlib.pydispatch.dispatcher import Any, Anonymous, liveReceivers, \
getAllReceivers, disconnect
from scrapy.xlib.pydispatch.robustapply import robustApply
from scrapy.utils.log import failure_to_exc_info
logger = logging.getLogger(__name__)
def send_catch_log(signal=Any, sender=Anonymous, *arguments, **named):
"""Like pydispatcher.robust.sendRobust but it also logs errors and returns
Failures instead of exceptions.
"""
dont_log = named.pop('dont_log', None)
spider = named.get('spider', None)
responses = []
for receiver in liveReceivers(getAllReceivers(sender, signal)):
try:
response = robustApply(receiver, signal=signal, sender=sender,
*arguments, **named)
if isinstance(response, Deferred):
logger.error("Cannot return deferreds from signal handler: %(receiver)s",
{'receiver': receiver}, extra={'spider': spider})
except dont_log:
result = Failure()
except Exception:
result = Failure()
logger.error("Error caught on signal handler: %(receiver)s",
{'receiver': receiver},
exc_info=True, extra={'spider': spider})
else:
result = response
responses.append((receiver, result))
return responses
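# Illustrative usage sketch (the handler and signal name are hypothetical):
#
#   from scrapy.xlib.pydispatch import dispatcher
#   def handler(item):
#       return item.upper()
#   dispatcher.connect(handler, signal='item_passed')
#   send_catch_log(signal='item_passed', item='hello')
#   # -> [(handler, 'HELLO')]; a handler that raises yields a Failure instead.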
def send_catch_log_deferred(signal=Any, sender=Anonymous, *arguments, **named):
"""Like send_catch_log but supports returning deferreds on signal handlers.
    Returns a deferred that gets fired once all signal handler deferreds have
    fired.
"""
def logerror(failure, recv):
if dont_log is None or not isinstance(failure.value, dont_log):
logger.error("Error caught on signal handler: %(receiver)s",
{'receiver': recv},
exc_info=failure_to_exc_info(failure),
extra={'spider': spider})
return failure
dont_log = named.pop('dont_log', None)
spider = named.get('spider', None)
dfds = []
for receiver in liveReceivers(getAllReceivers(sender, signal)):
d = maybeDeferred(robustApply, receiver, signal=signal, sender=sender,
*arguments, **named)
d.addErrback(logerror, receiver)
d.addBoth(lambda result: (receiver, result))
dfds.append(d)
d = DeferredList(dfds)
d.addCallback(lambda out: [x[1] for x in out])
return d
def disconnect_all(signal=Any, sender=Any):
"""Disconnect all signal handlers. Useful for cleaning up after running
tests
"""
for receiver in liveReceivers(getAllReceivers(sender, signal)):
disconnect(receiver, signal=signal, sender=sender)
| mit |
waylan/rheostatic | rheostatic/utils.py | 1 | 6618 | """
Rheostatic - A Static File Server with options.
MIT License
Copyright (c) 2016 Waylan Limberg
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import sys
# version_info should conform to PEP 386
# (major, minor, micro, alpha/beta/rc/final, #)
# (1, 1, 2, 'alpha', 0) => "1.1.2.dev"
# (1, 2, 0, 'beta', 2) => "1.2b2"
__version_info__ = (0, 0, 1, 'final', 0)
def _get_version(): # pragma: no cover
" Returns a PEP 386-compliant version number from version_info. "
assert len(__version_info__) == 5
assert __version_info__[3] in ('alpha', 'beta', 'rc', 'final')
parts = 2 if __version_info__[2] == 0 else 3
main = '.'.join(map(str, __version_info__[:parts]))
sub = ''
if __version_info__[3] == 'alpha' and __version_info__[4] == 0:
# TODO: maybe append some sort of git info here??
sub = '.dev'
elif __version_info__[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[__version_info__[3]] + str(__version_info__[4])
return str(main + sub)
__version__ = _get_version()
# Follow Django in treating URLs as UTF-8 encoded (which requires undoing the
# implicit ISO-8859-1 decoding applied in Python 3). Strictly speaking, URLs
# should only be ASCII anyway, but UTF-8 can be found in the wild.
if sys.version_info[0] >= 3: # pragma: no cover
def decode_path_info(path_info):
return path_info.encode('iso-8859-1').decode('utf-8')
else: # pragma: no cover
def decode_path_info(path_info):
return path_info.decode('utf-8')
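# Example (hypothetical path): a WSGI server on Python 3 hands over PATH_INFO
# decoded as ISO-8859-1, so a UTF-8 request for u'/caf\xe9' arrives as
# '/caf\xc3\xa9', and decode_path_info('/caf\xc3\xa9') recovers u'/caf\xe9'.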
# Define only the HTTP status codes we actually use
http_status = {
200: 'OK',
301: 'Moved Permanently',
304: 'Not Modified',
404: 'Not Found',
405: 'Method Not Allowed'
}
directory_template = """<!DOCTYPE html>
<html>
<head>
<title>Directory listing for {displaypath}</title>
</head>
<body>
<h2>Directory listing for {displaypath}</h2>
<hr>
<ul>
{items}
</ul>
<hr>
</body>
</html>
""".replace('\n', os.linesep)
# Define our own types for consistency cross platform.
# Use the types defined by nginx with a few additions.
types_map = {
'.3gp': 'video/3gpp',
'.3gpp': 'video/3gpp',
'.7z': 'application/x-7z-compressed',
'.ai': 'application/postscript',
'.asf': 'video/x-ms-asf',
'.asx': 'video/x-ms-asf',
'.atom': 'application/atom+xml',
'.avi': 'video/x-msvideo',
'.bmp': 'image/x-ms-bmp',
'.cco': 'application/x-cocoa',
'.crt': 'application/x-x509-ca-cert',
'.css': 'text/css',
'.der': 'application/x-x509-ca-cert',
'.doc': 'application/msword',
'.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'.ear': 'application/java-archive',
'.eot': 'application/vnd.ms-fontobject',
'.eps': 'application/postscript',
'.flv': 'video/x-flv',
'.gif': 'image/gif',
'.hqx': 'application/mac-binhex40',
'.htc': 'text/x-component',
'.htm': 'text/html',
'.html': 'text/html',
'.ico': 'image/x-icon',
'.jad': 'text/vnd.sun.j2me.app-descriptor',
'.jar': 'application/java-archive',
'.jardiff': 'application/x-java-archive-diff',
'.jng': 'image/x-jng',
'.jnlp': 'application/x-java-jnlp-file',
'.jpeg': 'image/jpeg',
'.jpg': 'image/jpeg',
'.js': 'application/javascript',
'.json': 'application/json',
'.kar': 'audio/midi',
'.kml': 'application/vnd.google-earth.kml+xml',
'.kmz': 'application/vnd.google-earth.kmz',
'.m3u8': 'application/vnd.apple.mpegurl',
'.m4a': 'audio/x-m4a',
'.m4v': 'video/x-m4v',
'.manifest': 'text/cache-manifest',
'.mid': 'audio/midi',
'.midi': 'audio/midi',
'.mml': 'text/mathml',
'.mng': 'video/x-mng',
'.mov': 'video/quicktime',
'.mp3': 'audio/mpeg',
'.mp4': 'video/mp4',
'.mpeg': 'video/mpeg',
'.mpg': 'video/mpeg',
'.ogg': 'audio/ogg',
'.pdb': 'application/x-pilot',
'.pdf': 'application/pdf',
'.pem': 'application/x-x509-ca-cert',
'.pl': 'application/x-perl',
'.pm': 'application/x-perl',
'.png': 'image/png',
'.ppt': 'application/vnd.ms-powerpoint',
'.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'.prc': 'application/x-pilot',
'.ps': 'application/postscript',
'.ra': 'audio/x-realaudio',
'.rar': 'application/x-rar-compressed',
'.rpm': 'application/x-redhat-package-manager',
'.rss': 'application/rss+xml',
'.rtf': 'application/rtf',
'.run': 'application/x-makeself',
'.sea': 'application/x-sea',
'.shtml': 'text/html',
'.sit': 'application/x-stuffit',
'.svg': 'image/svg+xml',
'.svgz': 'image/svg+xml',
'.swf': 'application/x-shockwave-flash',
'.tcl': 'application/x-tcl',
'.tif': 'image/tiff',
'.tiff': 'image/tiff',
'.tk': 'application/x-tcl',
'.ts': 'video/mp2t',
'.txt': 'text/plain',
'.war': 'application/java-archive',
'.wbmp': 'image/vnd.wap.wbmp',
'.webm': 'video/webm',
'.webp': 'image/webp',
'.wml': 'text/vnd.wap.wml',
'.wmlc': 'application/vnd.wap.wmlc',
'.wmv': 'video/x-ms-wmv',
'.woff': 'application/font-woff',
'.woff2': 'font/woff2',
'.xhtml': 'application/xhtml+xml',
'.xls': 'application/vnd.ms-excel',
'.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'.xml': 'text/xml',
'.xpi': 'application/x-xpinstall',
'.xspf': 'application/xspf+xml',
'.zip': 'application/zip'
}
| mit |
mancoast/CPythonPyc_test | cpython/232_test_richcmp.py | 9 | 11493 | # Tests for rich comparisons
import unittest
from test import test_support
import operator
class Number:
def __init__(self, x):
self.x = x
def __lt__(self, other):
return self.x < other
def __le__(self, other):
return self.x <= other
def __eq__(self, other):
return self.x == other
def __ne__(self, other):
return self.x != other
def __gt__(self, other):
return self.x > other
def __ge__(self, other):
return self.x >= other
def __cmp__(self, other):
raise test_support.TestFailed, "Number.__cmp__() should not be called"
def __repr__(self):
return "Number(%r)" % (self.x, )
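# Number delegates every rich comparison to its wrapped value, so e.g.
# Number(1) < 2 is True and Number(1) == Number(1) is True; any fallback
# to __cmp__ would fail the test instead.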
class Vector:
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, i):
return self.data[i]
def __setitem__(self, i, v):
self.data[i] = v
def __hash__(self):
raise TypeError, "Vectors cannot be hashed"
def __nonzero__(self):
raise TypeError, "Vectors cannot be used in Boolean contexts"
def __cmp__(self, other):
raise test_support.TestFailed, "Vector.__cmp__() should not be called"
def __repr__(self):
return "Vector(%r)" % (self.data, )
def __lt__(self, other):
return Vector([a < b for a, b in zip(self.data, self.__cast(other))])
def __le__(self, other):
return Vector([a <= b for a, b in zip(self.data, self.__cast(other))])
def __eq__(self, other):
return Vector([a == b for a, b in zip(self.data, self.__cast(other))])
def __ne__(self, other):
return Vector([a != b for a, b in zip(self.data, self.__cast(other))])
def __gt__(self, other):
return Vector([a > b for a, b in zip(self.data, self.__cast(other))])
def __ge__(self, other):
return Vector([a >= b for a, b in zip(self.data, self.__cast(other))])
def __cast(self, other):
if isinstance(other, Vector):
other = other.data
if len(self.data) != len(other):
raise ValueError, "Cannot compare vectors of different length"
return other
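# Comparisons are elementwise and return a Vector of bools, e.g.
# Vector([1, 2]) < Vector([2, 2]) yields Vector([True, False]).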
opmap = {
    "lt": (lambda a,b: a< b, operator.lt, operator.__lt__),
    "le": (lambda a,b: a<=b, operator.le, operator.__le__),
    "eq": (lambda a,b: a==b, operator.eq, operator.__eq__),
    "ne": (lambda a,b: a!=b, operator.ne, operator.__ne__),
    "gt": (lambda a,b: a> b, operator.gt, operator.__gt__),
    "ge": (lambda a,b: a>=b, operator.ge, operator.__ge__)
}
class VectorTest(unittest.TestCase):
def checkfail(self, error, opname, *args):
for op in opmap[opname]:
self.assertRaises(error, op, *args)
def checkequal(self, opname, a, b, expres):
for op in opmap[opname]:
realres = op(a, b)
# can't use assertEqual(realres, expres) here
self.assertEqual(len(realres), len(expres))
for i in xrange(len(realres)):
# results are bool, so we can use "is" here
self.assert_(realres[i] is expres[i])
def test_mixed(self):
# check that comparisons involving Vector objects
# which return rich results (i.e. Vectors with itemwise
# comparison results) work
a = Vector(range(2))
b = Vector(range(3))
# all comparisons should fail for different length
for opname in opmap:
self.checkfail(ValueError, opname, a, b)
a = range(5)
b = 5 * [2]
# try mixed arguments (but not (a, b) as that won't return a bool vector)
args = [(a, Vector(b)), (Vector(a), b), (Vector(a), Vector(b))]
for (a, b) in args:
self.checkequal("lt", a, b, [True, True, False, False, False])
self.checkequal("le", a, b, [True, True, True, False, False])
self.checkequal("eq", a, b, [False, False, True, False, False])
self.checkequal("ne", a, b, [True, True, False, True, True ])
self.checkequal("gt", a, b, [False, False, False, True, True ])
self.checkequal("ge", a, b, [False, False, True, True, True ])
for ops in opmap.itervalues():
for op in ops:
# calls __nonzero__, which should fail
self.assertRaises(TypeError, bool, op(a, b))
class NumberTest(unittest.TestCase):
def test_basic(self):
# Check that comparisons involving Number objects
# give the same results give as comparing the
# corresponding ints
for a in xrange(3):
for b in xrange(3):
for typea in (int, Number):
for typeb in (int, Number):
if typea==typeb==int:
continue # the combination int, int is useless
ta = typea(a)
tb = typeb(b)
for ops in opmap.itervalues():
for op in ops:
realoutcome = op(a, b)
testoutcome = op(ta, tb)
self.assertEqual(realoutcome, testoutcome)
def checkvalue(self, opname, a, b, expres):
for typea in (int, Number):
for typeb in (int, Number):
ta = typea(a)
tb = typeb(b)
for op in opmap[opname]:
realres = op(ta, tb)
realres = getattr(realres, "x", realres)
self.assert_(realres is expres)
def test_values(self):
# check all operators and all comparison results
self.checkvalue("lt", 0, 0, False)
self.checkvalue("le", 0, 0, True )
self.checkvalue("eq", 0, 0, True )
self.checkvalue("ne", 0, 0, False)
self.checkvalue("gt", 0, 0, False)
self.checkvalue("ge", 0, 0, True )
self.checkvalue("lt", 0, 1, True )
self.checkvalue("le", 0, 1, True )
self.checkvalue("eq", 0, 1, False)
self.checkvalue("ne", 0, 1, True )
self.checkvalue("gt", 0, 1, False)
self.checkvalue("ge", 0, 1, False)
self.checkvalue("lt", 1, 0, False)
self.checkvalue("le", 1, 0, False)
self.checkvalue("eq", 1, 0, False)
self.checkvalue("ne", 1, 0, True )
self.checkvalue("gt", 1, 0, True )
self.checkvalue("ge", 1, 0, True )
class MiscTest(unittest.TestCase):
def test_misbehavin(self):
class Misb:
def __lt__(self, other): return 0
def __gt__(self, other): return 0
def __eq__(self, other): return 0
            def __le__(self, other): raise test_support.TestFailed, "This shouldn't happen"
            def __ge__(self, other): raise test_support.TestFailed, "This shouldn't happen"
            def __ne__(self, other): raise test_support.TestFailed, "This shouldn't happen"
def __cmp__(self, other): raise RuntimeError, "expected"
a = Misb()
b = Misb()
self.assertEqual(a<b, 0)
self.assertEqual(a==b, 0)
self.assertEqual(a>b, 0)
self.assertRaises(RuntimeError, cmp, a, b)
def test_not(self):
# Check that exceptions in __nonzero__ are properly
# propagated by the not operator
import operator
class Exc:
pass
class Bad:
def __nonzero__(self):
raise Exc
def do(bad):
not bad
for func in (do, operator.not_):
self.assertRaises(Exc, func, Bad())
def test_recursion(self):
# Check comparison for recursive objects
from UserList import UserList
a = UserList(); a.append(a)
b = UserList(); b.append(b)
self.assert_(a == b)
self.assert_(not a != b)
a.append(1)
self.assert_(a == a[0])
self.assert_(not a != a[0])
self.assert_(a != b)
self.assert_(not a == b)
b.append(0)
self.assert_(a != b)
self.assert_(not a == b)
a[1] = -1
self.assert_(a != b)
self.assert_(not a == b)
a = UserList()
b = UserList()
a.append(b)
b.append(a)
self.assert_(a == b)
self.assert_(not a != b)
b.append(17)
self.assert_(a != b)
self.assert_(not a == b)
a.append(17)
self.assert_(a == b)
self.assert_(not a != b)
def test_recursion2(self):
# This test exercises the circular structure handling code
# in PyObject_RichCompare()
class Weird(object):
def __eq__(self, other):
return self != other
def __ne__(self, other):
return self == other
def __lt__(self, other):
return self > other
def __gt__(self, other):
return self < other
self.assert_(Weird() == Weird())
self.assert_(not (Weird() != Weird()))
for op in opmap["lt"]:
self.assertRaises(ValueError, op, Weird(), Weird())
class DictTest(unittest.TestCase):
def test_dicts(self):
# Verify that __eq__ and __ne__ work for dicts even if the keys and
# values don't support anything other than __eq__ and __ne__. Complex
# numbers are a fine example of that.
import random
imag1a = {}
for i in range(50):
imag1a[random.randrange(100)*1j] = random.randrange(100)*1j
items = imag1a.items()
random.shuffle(items)
imag1b = {}
for k, v in items:
imag1b[k] = v
imag2 = imag1b.copy()
imag2[k] = v + 1.0
self.assert_(imag1a == imag1a)
self.assert_(imag1a == imag1b)
self.assert_(imag2 == imag2)
self.assert_(imag1a != imag2)
for opname in ("lt", "le", "gt", "ge"):
for op in opmap[opname]:
self.assertRaises(TypeError, op, imag1a, imag2)
class ListTest(unittest.TestCase):
def assertIs(self, a, b):
self.assert_(a is b)
def test_coverage(self):
# exercise all comparisons for lists
x = [42]
self.assertIs(x<x, False)
self.assertIs(x<=x, True)
self.assertIs(x==x, True)
self.assertIs(x!=x, False)
self.assertIs(x>x, False)
self.assertIs(x>=x, True)
y = [42, 42]
self.assertIs(x<y, True)
self.assertIs(x<=y, True)
self.assertIs(x==y, False)
self.assertIs(x!=y, True)
self.assertIs(x>y, False)
self.assertIs(x>=y, False)
def test_badentry(self):
# make sure that exceptions for item comparison are properly
# propagated in list comparisons
class Exc:
pass
class Bad:
def __eq__(self, other):
raise Exc
x = [Bad()]
y = [Bad()]
for op in opmap["eq"]:
self.assertRaises(Exc, op, x, y)
def test_goodentry(self):
# This test exercises the final call to PyObject_RichCompare()
# in Objects/listobject.c::list_richcompare()
class Good:
def __lt__(self, other):
return True
x = [Good()]
y = [Good()]
for op in opmap["lt"]:
self.assertIs(op(x, y), True)
def test_main():
test_support.run_unittest(VectorTest, NumberTest, MiscTest, DictTest, ListTest)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
fafaman/django | tests/queryset_pickle/tests.py | 209 | 6081 | from __future__ import unicode_literals
import datetime
import pickle
import unittest
import warnings
from django.test import TestCase
from django.utils import six
from django.utils.encoding import force_text
from django.utils.version import get_version
from .models import Container, Event, Group, Happening, M2MModel
class PickleabilityTestCase(TestCase):
def setUp(self):
Happening.objects.create() # make sure the defaults are working (#20158)
def assert_pickles(self, qs):
self.assertEqual(list(pickle.loads(pickle.dumps(qs))), list(qs))
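    # assert_pickles round-trips a queryset through pickle and verifies the
    # unpickled copy evaluates to the same rows, e.g.
    # self.assert_pickles(Event.objects.all()).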
def test_related_field(self):
g = Group.objects.create(name="Ponies Who Own Maybachs")
self.assert_pickles(Event.objects.filter(group=g.id))
def test_datetime_callable_default_all(self):
self.assert_pickles(Happening.objects.all())
def test_datetime_callable_default_filter(self):
self.assert_pickles(Happening.objects.filter(when=datetime.datetime.now()))
def test_string_as_default(self):
self.assert_pickles(Happening.objects.filter(name="test"))
def test_standalone_method_as_default(self):
self.assert_pickles(Happening.objects.filter(number1=1))
@unittest.skipIf(six.PY2, "Field doesn't exist on Python 2.")
def test_staticmethod_as_default(self):
self.assert_pickles(Happening.objects.filter(number2=1))
def test_filter_reverse_fk(self):
self.assert_pickles(Group.objects.filter(event=1))
def test_doesnotexist_exception(self):
# Ticket #17776
original = Event.DoesNotExist("Doesn't exist")
unpickled = pickle.loads(pickle.dumps(original))
# Exceptions are not equal to equivalent instances of themselves, so
# can't just use assertEqual(original, unpickled)
self.assertEqual(original.__class__, unpickled.__class__)
self.assertEqual(original.args, unpickled.args)
def test_manager_pickle(self):
pickle.loads(pickle.dumps(Happening.objects))
def test_model_pickle(self):
"""
Test that a model not defined on module level is pickleable.
"""
original = Container.SomeModel(pk=1)
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
# Also, deferred dynamic model works
Container.SomeModel.objects.create(somefield=1)
original = Container.SomeModel.objects.defer('somefield')[0]
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
self.assertEqual(original.somefield, reloaded.somefield)
def test_model_pickle_m2m(self):
"""
        Intentionally test the automatically created through model.
"""
m1 = M2MModel.objects.create()
g1 = Group.objects.create(name='foof')
m1.groups.add(g1)
m2m_through = M2MModel._meta.get_field('groups').remote_field.through
original = m2m_through.objects.get()
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
def test_model_pickle_dynamic(self):
class Meta:
proxy = True
dynclass = type(str("DynamicEventSubclass"), (Event, ),
{'Meta': Meta, '__module__': Event.__module__})
original = dynclass(pk=1)
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
self.assertIs(reloaded.__class__, dynclass)
def test_specialized_queryset(self):
self.assert_pickles(Happening.objects.values('name'))
self.assert_pickles(Happening.objects.values('name').dates('when', 'year'))
# With related field (#14515)
self.assert_pickles(
Event.objects.select_related('group').order_by('title').values_list('title', 'group__name')
)
def test_pickle_prefetch_related_idempotence(self):
g = Group.objects.create(name='foo')
groups = Group.objects.prefetch_related('event_set')
# First pickling
groups = pickle.loads(pickle.dumps(groups))
self.assertQuerysetEqual(groups, [g], lambda x: x)
# Second pickling
groups = pickle.loads(pickle.dumps(groups))
self.assertQuerysetEqual(groups, [g], lambda x: x)
def test_pickle_prefetch_related_with_m2m_and_objects_deletion(self):
"""
#24831 -- Cached properties on ManyToOneRel created in QuerySet.delete()
caused subsequent QuerySet pickling to fail.
"""
g = Group.objects.create(name='foo')
m2m = M2MModel.objects.create()
m2m.groups.add(g)
Group.objects.all().delete()
m2ms = M2MModel.objects.prefetch_related('groups')
m2ms = pickle.loads(pickle.dumps(m2ms))
self.assertQuerysetEqual(m2ms, [m2m], lambda x: x)
def test_missing_django_version_unpickling(self):
"""
#21430 -- Verifies a warning is raised for querysets that are
unpickled without a Django version
"""
qs = Group.missing_django_version_objects.all()
with warnings.catch_warnings(record=True) as recorded:
pickle.loads(pickle.dumps(qs))
msg = force_text(recorded.pop().message)
self.assertEqual(msg,
"Pickled queryset instance's Django version is not specified.")
def test_unsupported_unpickle(self):
"""
#21430 -- Verifies a warning is raised for querysets that are
unpickled with a different Django version than the current one
"""
qs = Group.previous_django_version_objects.all()
with warnings.catch_warnings(record=True) as recorded:
pickle.loads(pickle.dumps(qs))
msg = force_text(recorded.pop().message)
self.assertEqual(
msg,
"Pickled queryset instance's Django version 1.0 does not "
"match the current version %s." % get_version()
)
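# --- Editor's addition: a minimal sketch (not part of the original test file)
# --- of the round-trip the tests above exercise, assuming the Group model
# --- imported from .models:
#
#   import pickle
#   qs = Group.objects.filter(name__startswith='foo')
#   restored = pickle.loads(pickle.dumps(qs))
#   assert list(restored) == list(qs)  # the unpickled queryset re-runs the query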
| bsd-3-clause |
SomethingExplosive/android_external_chromium_org | tools/perf/measurements/page_cycler.py | 23 | 5337 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The page cycler measurement.
This measurement registers a window load handler in which it forces a layout and
then records the value of performance.now(). This call to now() measures the
time from navigationStart (immediately after the previous page's beforeunload
event) until after the layout in the page's load event. In addition, two garbage
collections are performed in between the page loads (in the beforeunload event).
This extra garbage collection time is not included in the measurement times.
Finally, various memory and IO statistics are gathered at the very end of
cycling all pages.
"""
import os
import sys
from metrics import histogram
from metrics import memory
from telemetry.core import util
from telemetry.page import page_measurement
MEMORY_HISTOGRAMS = [
{'name': 'V8.MemoryExternalFragmentationTotal', 'units': 'percent'},
{'name': 'V8.MemoryHeapSampleTotalCommitted', 'units': 'kb'},
{'name': 'V8.MemoryHeapSampleTotalUsed', 'units': 'kb'}]
class PageCycler(page_measurement.PageMeasurement):
def __init__(self, *args, **kwargs):
super(PageCycler, self).__init__(*args, **kwargs)
with open(os.path.join(os.path.dirname(__file__),
'page_cycler.js'), 'r') as f:
self._page_cycler_js = f.read()
self._memory_metric = None
self._histograms = None
def AddCommandLineOptions(self, parser):
# The page cyclers should default to 10 iterations. In order to change the
# default of an option, we must remove and re-add it.
# TODO: Remove this after transition to run_benchmark.
pageset_repeat_option = parser.get_option('--pageset-repeat')
pageset_repeat_option.default = 10
parser.remove_option('--pageset-repeat')
parser.add_option(pageset_repeat_option)
def DidStartBrowser(self, browser):
"""Initialize metrics once right after the browser has been launched."""
self._memory_metric = memory.MemoryMetric(browser)
self._memory_metric.Start()
self._histograms = [histogram.HistogramMetric(
h, histogram.RENDERER_HISTOGRAM)
for h in MEMORY_HISTOGRAMS]
def DidStartHTTPServer(self, tab):
# Avoid paying for a cross-renderer navigation on the first page on legacy
# page cyclers which use the filesystem.
tab.Navigate(tab.browser.http_server.UrlOf('nonexistent.html'))
def WillNavigateToPage(self, page, tab):
page.script_to_evaluate_on_commit = self._page_cycler_js
def DidNavigateToPage(self, page, tab):
for h in self._histograms:
h.Start(page, tab)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArg('--enable-stats-collection-bindings')
options.AppendExtraBrowserArg('--js-flags=--expose_gc')
options.AppendExtraBrowserArg('--no-sandbox')
# Old commandline flags used for reference builds.
options.AppendExtraBrowserArg('--dom-automation')
# Temporarily disable typical_25 page set on mac.
if sys.platform == 'darwin' and sys.argv[-1].endswith('/typical_25.json'):
print 'typical_25 is currently disabled on mac. Skipping test.'
sys.exit(0)
def MeasureIO(self, tab, results):
io_stats = tab.browser.io_stats
if not io_stats['Browser']:
return
def AddSummariesForProcessType(process_type_io, process_type_trace):
if 'ReadOperationCount' in io_stats[process_type_io]:
results.AddSummary('read_operations_' + process_type_trace, '',
io_stats[process_type_io]
['ReadOperationCount'],
data_type='unimportant')
if 'WriteOperationCount' in io_stats[process_type_io]:
results.AddSummary('write_operations_' + process_type_trace, '',
io_stats[process_type_io]
['WriteOperationCount'],
data_type='unimportant')
if 'ReadTransferCount' in io_stats[process_type_io]:
results.AddSummary('read_bytes_' + process_type_trace, 'kb',
io_stats[process_type_io]
['ReadTransferCount'] / 1024,
data_type='unimportant')
if 'WriteTransferCount' in io_stats[process_type_io]:
results.AddSummary('write_bytes_' + process_type_trace, 'kb',
io_stats[process_type_io]
['WriteTransferCount'] / 1024,
data_type='unimportant')
AddSummariesForProcessType('Browser', 'browser')
AddSummariesForProcessType('Renderer', 'renderer')
AddSummariesForProcessType('Gpu', 'gpu')
def MeasurePage(self, page, tab, results):
def _IsDone():
return bool(tab.EvaluateJavaScript('__pc_load_time'))
util.WaitFor(_IsDone, 60)
for h in self._histograms:
h.GetValue(page, tab, results)
results.Add('page_load_time', 'ms',
int(float(tab.EvaluateJavaScript('__pc_load_time'))),
chart_name='times')
def DidRunTest(self, tab, results):
self._memory_metric.Stop()
self._memory_metric.AddResults(tab, results)
self.MeasureIO(tab, results)
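# --- Editor's addition: a hypothetical sketch (the real logic lives in the
# --- page_cycler.js file read above, which is not shown here) of the
# --- window-load measurement the module docstring describes:
#
#   window.addEventListener('load', function() {
#       document.body.offsetHeight;                 // force a layout
#       window.__pc_load_time = performance.now();  // ms since navigationStart
#   });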
| bsd-3-clause |
jalilm/ryu | ryu/controller/ofp_event.py | 33 | 2338 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenFlow event definitions.
"""
import inspect
from ryu.controller import handler
from ryu import ofproto
from ryu import utils
from . import event
class EventOFPMsgBase(event.EventBase):
def __init__(self, msg):
super(EventOFPMsgBase, self).__init__()
self.msg = msg
#
# Create ofp_event type corresponding to OFP Msg
#
_OFP_MSG_EVENTS = {}
def _ofp_msg_name_to_ev_name(msg_name):
return 'Event' + msg_name
def ofp_msg_to_ev(msg):
return ofp_msg_to_ev_cls(msg.__class__)(msg)
def ofp_msg_to_ev_cls(msg_cls):
name = _ofp_msg_name_to_ev_name(msg_cls.__name__)
return _OFP_MSG_EVENTS[name]
def _create_ofp_msg_ev_class(msg_cls):
name = _ofp_msg_name_to_ev_name(msg_cls.__name__)
# print 'creating ofp_event %s' % name
if name in _OFP_MSG_EVENTS:
return
cls = type(name, (EventOFPMsgBase,),
dict(__init__=lambda self, msg:
super(self.__class__, self).__init__(msg)))
globals()[name] = cls
_OFP_MSG_EVENTS[name] = cls
def _create_ofp_msg_ev_from_module(ofp_parser):
# print mod
for _k, cls in inspect.getmembers(ofp_parser, inspect.isclass):
if not hasattr(cls, 'cls_msg_type'):
continue
_create_ofp_msg_ev_class(cls)
for ofp_mods in ofproto.get_ofp_modules().values():
ofp_parser = ofp_mods[1]
# print 'loading module %s' % ofp_parser
_create_ofp_msg_ev_from_module(ofp_parser)
class EventOFPStateChange(event.EventBase):
def __init__(self, dp):
super(EventOFPStateChange, self).__init__()
self.datapath = dp
handler.register_service('ryu.controller.ofp_handler')
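# --- Editor's addition: an illustrative sketch (kept in comments to avoid a
# --- circular import with ryu.base.app_manager) of how the event classes
# --- generated above are consumed by a Ryu application:
#
#   from ryu.base import app_manager
#   from ryu.controller import ofp_event
#   from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
#
#   class ExampleApp(app_manager.RyuApp):
#       @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
#       def _packet_in_handler(self, ev):
#           # ev is an instance of a class built by _create_ofp_msg_ev_class()
#           self.logger.info('packet-in on datapath %s', ev.msg.datapath.id)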
| apache-2.0 |
ArcherSys/ArcherSys | Lib/base64.py | 1 | 60641 | #! /usr/bin/env python3
"""Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere
import re
import struct
import binascii
__all__ = [
# Legacy interface exports traditional RFC 1521 Base64 encodings
'encode', 'decode', 'encodebytes', 'decodebytes',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Base85 and Ascii85 encodings
'b85encode', 'b85decode', 'a85encode', 'a85decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
bytes_types = (bytes, bytearray) # Types acceptable as binary data
def _bytes_from_decode_data(s):
if isinstance(s, str):
try:
return s.encode('ascii')
except UnicodeEncodeError:
raise ValueError('string argument should contain only ASCII characters')
if isinstance(s, bytes_types):
return s
try:
return memoryview(s).tobytes()
except TypeError:
raise TypeError("argument should be a bytes-like object or ASCII "
"string, not %r" % s.__class__.__name__) from None
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
"""Encode a byte string using Base64.
s is the byte string to encode. Optional altchars must be a byte
string of length 2 which specifies an alternative alphabet for the
'+' and '/' characters. This allows an application to
e.g. generate url or filesystem safe Base64 strings.
The encoded byte string is returned.
"""
# Strip off the trailing newline
encoded = binascii.b2a_base64(s)[:-1]
if altchars is not None:
assert len(altchars) == 2, repr(altchars)
return encoded.translate(bytes.maketrans(b'+/', altchars))
return encoded
def b64decode(s, altchars=None, validate=False):
"""Decode a Base64 encoded byte string.
s is the byte string to decode. Optional altchars must be a
string of length 2 which specifies the alternative alphabet used
instead of the '+' and '/' characters.
The decoded string is returned. A binascii.Error is raised if s is
incorrectly padded.
If validate is False (the default), non-base64-alphabet characters are
discarded prior to the padding check. If validate is True,
non-base64-alphabet characters in the input result in a binascii.Error.
"""
s = _bytes_from_decode_data(s)
if altchars is not None:
altchars = _bytes_from_decode_data(altchars)
assert len(altchars) == 2, repr(altchars)
s = s.translate(bytes.maketrans(altchars, b'+/'))
if validate and not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s):
raise binascii.Error('Non-base64 digit found')
return binascii.a2b_base64(s)
def standard_b64encode(s):
"""Encode a byte string using the standard Base64 alphabet.
s is the byte string to encode. The encoded byte string is returned.
"""
return b64encode(s)
def standard_b64decode(s):
"""Decode a byte string encoded with the standard Base64 alphabet.
s is the byte string to decode. The decoded byte string is
returned. binascii.Error is raised if the input is incorrectly
padded or if there are non-alphabet characters present in the
input.
"""
return b64decode(s)
_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
def urlsafe_b64encode(s):
"""Encode a byte string using a url-safe Base64 alphabet.
s is the byte string to encode. The encoded byte string is
returned. The alphabet uses '-' instead of '+' and '_' instead of
'/'.
"""
return b64encode(s).translate(_urlsafe_encode_translation)
def urlsafe_b64decode(s):
"""Decode a byte string encoded with the standard Base64 alphabet.
s is the byte string to decode. The decoded byte string is
returned. binascii.Error is raised if the input is incorrectly
padded or if there are non-alphabet characters present in the
input.
The alphabet uses '-' instead of '+' and '_' instead of '/'.
"""
s = _bytes_from_decode_data(s)
s = s.translate(_urlsafe_decode_translation)
return b64decode(s)
# Base32 encoding/decoding must be done in Python
_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
_b32tab2 = None
_b32rev = None
def b32encode(s):
"""Encode a byte string using Base32.
s is the byte string to encode. The encoded byte string is returned.
"""
global _b32tab2
# Delay the initialization of the table to not waste memory
# if the function is never called
if _b32tab2 is None:
b32tab = [bytes((i,)) for i in _b32alphabet]
_b32tab2 = [a + b for a in b32tab for b in b32tab]
b32tab = None
if not isinstance(s, bytes_types):
s = memoryview(s).tobytes()
leftover = len(s) % 5
# Pad the last quantum with zero bits if necessary
if leftover:
s = s + bytes(5 - leftover) # Don't use += !
encoded = bytearray()
from_bytes = int.from_bytes
b32tab2 = _b32tab2
for i in range(0, len(s), 5):
c = from_bytes(s[i: i + 5], 'big')
encoded += (b32tab2[c >> 30] + # bits 1 - 10
b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20
b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30
b32tab2[c & 0x3ff] # bits 31 - 40
)
# Adjust for any leftover partial quanta
if leftover == 1:
encoded[-6:] = b'======'
elif leftover == 2:
encoded[-4:] = b'===='
elif leftover == 3:
encoded[-3:] = b'==='
elif leftover == 4:
encoded[-1:] = b'='
return bytes(encoded)
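# Editor's addition: a worked example of the 5-byte quantum mapping above,
# verified by hand for the input b'hello' (a sketch, not part of the module):
#
#   c = int.from_bytes(b'hello', 'big')   # 0x68656c6c6f, i.e. 40 bits
#   c >> 30           == 0x1A1  ->  b32tab2 ->  b'NB'
#   (c >> 20) & 0x3ff == 0x256  ->  b32tab2 ->  b'SW'
#   (c >> 10) & 0x3ff == 0x31B  ->  b32tab2 ->  b'Y3'
#   c & 0x3ff         == 0x06F  ->  b32tab2 ->  b'DP'
#
#   so b32encode(b'hello') == b'NBSWY3DP' (5 bytes is a full quantum: no '=')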
def b32decode(s, casefold=False, map01=None):
"""Decode a Base32 encoded byte string.
s is the byte string to decode. Optional casefold is a flag
specifying whether a lowercase alphabet is acceptable as input.
For security purposes, the default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the
letter O (oh), and for optional mapping of the digit 1 (one) to
either the letter I (eye) or letter L (el). The optional argument
map01 when not None, specifies which letter the digit 1 should be
mapped to (when map01 is not None, the digit 0 is always mapped to
the letter O). For security purposes the default is None, so that
0 and 1 are not allowed in the input.
The decoded byte string is returned. binascii.Error is raised if
the input is incorrectly padded or if there are non-alphabet
characters present in the input.
"""
global _b32rev
# Delay the initialization of the table to not waste memory
# if the function is never called
if _b32rev is None:
_b32rev = {v: k for k, v in enumerate(_b32alphabet)}
s = _bytes_from_decode_data(s)
if len(s) % 8:
raise binascii.Error('Incorrect padding')
# Handle section 2.4 zero and one mapping. The flag map01 will be either
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01 is not None:
map01 = _bytes_from_decode_data(map01)
assert len(map01) == 1, repr(map01)
s = s.translate(bytes.maketrans(b'01', b'O' + map01))
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
l = len(s)
s = s.rstrip(b'=')
padchars = l - len(s)
# Now decode the full quanta
decoded = bytearray()
b32rev = _b32rev
for i in range(0, len(s), 8):
quanta = s[i: i + 8]
acc = 0
try:
for c in quanta:
acc = (acc << 5) + b32rev[c]
except KeyError:
raise binascii.Error('Non-base32 digit found') from None
decoded += acc.to_bytes(5, 'big')
# Process the last, partial quanta
if padchars:
acc <<= 5 * padchars
last = acc.to_bytes(5, 'big')
if padchars == 1:
decoded[-5:] = last[:-1]
elif padchars == 3:
decoded[-5:] = last[:-2]
elif padchars == 4:
decoded[-5:] = last[:-3]
elif padchars == 6:
decoded[-5:] = last[:-4]
else:
raise binascii.Error('Incorrect padding')
return bytes(decoded)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
"""Encode a byte string using Base16.
s is the byte string to encode. The encoded byte string is returned.
"""
return binascii.hexlify(s).upper()
def b16decode(s, casefold=False):
"""Decode a Base16 encoded byte string.
s is the byte string to decode. Optional casefold is a flag
specifying whether a lowercase alphabet is acceptable as input.
For security purposes, the default is False.
The decoded byte string is returned. binascii.Error is raised if
s is incorrectly padded or if there are non-alphabet characters
present in the string.
"""
s = _bytes_from_decode_data(s)
if casefold:
s = s.upper()
if re.search(b'[^0-9A-F]', s):
raise binascii.Error('Non-base16 digit found')
return binascii.unhexlify(s)
#
# Ascii85 encoding/decoding
#
_a85chars = None
_a85chars2 = None
_A85START = b"<~"
_A85END = b"~>"
def _85encode(b, chars, chars2, pad=False, foldnuls=False, foldspaces=False):
# Helper function for a85encode and b85encode
if not isinstance(b, bytes_types):
b = memoryview(b).tobytes()
padding = (-len(b)) % 4
if padding:
b = b + b'\0' * padding
words = struct.Struct('!%dI' % (len(b) // 4)).unpack(b)
chunks = [b'z' if foldnuls and not word else
b'y' if foldspaces and word == 0x20202020 else
(chars2[word // 614125] +
chars2[word // 85 % 7225] +
chars[word % 85])
for word in words]
if padding and not pad:
if chunks[-1] == b'z':
chunks[-1] = chars[0] * 5
chunks[-1] = chunks[-1][:-padding]
return b''.join(chunks)
def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False):
"""Encode a byte string using Ascii85.
b is the byte string to encode. The encoded byte string is returned.
foldspaces is an optional flag that uses the special short sequence 'y'
instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This
feature is not supported by the "standard" Adobe encoding.
wrapcol controls whether the output should have newline ('\n') characters
added to it. If this is non-zero, each output line will be at most this
many characters long.
pad controls whether the input string is padded to a multiple of 4 before
encoding. Note that the btoa implementation always pads.
adobe controls whether the encoded byte sequence is framed with <~ and ~>,
which is used by the Adobe implementation.
"""
global _a85chars, _a85chars2
# Delay the initialization of tables to not waste memory
# if the function is never called
if _a85chars is None:
_a85chars = [bytes((i,)) for i in range(33, 118)]
_a85chars2 = [(a + b) for a in _a85chars for b in _a85chars]
result = _85encode(b, _a85chars, _a85chars2, pad, True, foldspaces)
if adobe:
result = _A85START + result
if wrapcol:
wrapcol = max(2 if adobe else 1, wrapcol)
chunks = [result[i: i + wrapcol]
for i in range(0, len(result), wrapcol)]
if adobe:
if len(chunks[-1]) + 2 > wrapcol:
chunks.append(b'')
result = b'\n'.join(chunks)
if adobe:
result += _A85END
return result
def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'):
"""Decode an Ascii85 encoded byte string.
b is the byte string to decode.
foldspaces is a flag that specifies whether the 'y' short sequence should be
accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is
not supported by the "standard" Adobe encoding.
adobe controls whether the input sequence is in Adobe Ascii85 format (i.e.
is framed with <~ and ~>).
ignorechars should be a byte string containing characters to ignore from the
input. This should only contain whitespace characters, and by default
contains all whitespace characters in ASCII.
"""
b = _bytes_from_decode_data(b)
if adobe:
if not (b.startswith(_A85START) and b.endswith(_A85END)):
raise ValueError("Ascii85 encoded byte sequences must be bracketed "
"by {!r} and {!r}".format(_A85START, _A85END))
b = b[2:-2] # Strip off start/end markers
#
# We have to go through this stepwise, so as to ignore spaces and handle
# special short sequences
#
packI = struct.Struct('!I').pack
decoded = []
decoded_append = decoded.append
curr = []
curr_append = curr.append
curr_clear = curr.clear
for x in b + b'u' * 4:
if b'!'[0] <= x <= b'u'[0]:
curr_append(x)
if len(curr) == 5:
acc = 0
for x in curr:
acc = 85 * acc + (x - 33)
try:
decoded_append(packI(acc))
except struct.error:
raise ValueError('Ascii85 overflow') from None
curr_clear()
elif x == b'z'[0]:
if curr:
raise ValueError('z inside Ascii85 5-tuple')
decoded_append(b'\0\0\0\0')
elif foldspaces and x == b'y'[0]:
if curr:
raise ValueError('y inside Ascii85 5-tuple')
decoded_append(b'\x20\x20\x20\x20')
elif x in ignorechars:
# Skip whitespace
continue
else:
raise ValueError('Non-Ascii85 digit found: %c' % x)
result = b''.join(decoded)
padding = 4 - len(curr)
if padding:
# Throw away the extra padding
result = result[:-padding]
return result
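# Editor's addition: the 'z' short form handled above folds an all-zero word
# in both directions (a sketch, traced from the code rather than quoted docs):
#
#   >>> a85encode(b'\x00\x00\x00\x00')
#   b'z'
#   >>> a85decode(b'z')
#   b'\x00\x00\x00\x00'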
# The following code is originally taken (with permission) from Mercurial
_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~")
_b85chars = None
_b85chars2 = None
_b85dec = None
def b85encode(b, pad=False):
"""Encode an ASCII-encoded byte array in base85 format.
If pad is true, the input is padded with "\0" so its length is a multiple of
4 characters before encoding.
"""
global _b85chars, _b85chars2
# Delay the initialization of tables to not waste memory
# if the function is never called
if _b85chars is None:
_b85chars = [bytes((i,)) for i in _b85alphabet]
_b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
return _85encode(b, _b85chars, _b85chars2, pad)
def b85decode(b):
"""Decode base85-encoded byte array"""
global _b85dec
# Delay the initialization of tables to not waste memory
# if the function is never called
if _b85dec is None:
_b85dec = [None] * 256
for i, c in enumerate(_b85alphabet):
_b85dec[c] = i
b = _bytes_from_decode_data(b)
padding = (-len(b)) % 5
b = b + b'~' * padding
out = []
packI = struct.Struct('!I').pack
for i in range(0, len(b), 5):
chunk = b[i:i + 5]
acc = 0
try:
for c in chunk:
acc = acc * 85 + _b85dec[c]
except TypeError:
for j, c in enumerate(chunk):
if _b85dec[c] is None:
raise ValueError('bad base85 character at position %d'
% (i + j)) from None
raise
try:
out.append(packI(acc))
except struct.error:
raise ValueError('base85 overflow in hunk starting at byte %d'
% i) from None
result = b''.join(out)
if padding:
result = result[:-padding]
return result
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though. The files should be opened in binary mode.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3
def encode(input, output):
"""Encode a file; input and output are binary files."""
while True:
s = input.read(MAXBINSIZE)
if not s:
break
while len(s) < MAXBINSIZE:
ns = input.read(MAXBINSIZE-len(s))
if not ns:
break
s += ns
line = binascii.b2a_base64(s)
output.write(line)
def decode(input, output):
"""Decode a file; input and output are binary files."""
while True:
line = input.readline()
if not line:
break
s = binascii.a2b_base64(line)
output.write(s)
def _input_type_check(s):
try:
m = memoryview(s)
except TypeError as err:
msg = "expected bytes-like object, not %s" % s.__class__.__name__
raise TypeError(msg) from err
if m.format not in ('c', 'b', 'B'):
msg = ("expected single byte elements, not %r from %s" %
(m.format, s.__class__.__name__))
raise TypeError(msg)
if m.ndim != 1:
msg = ("expected 1-D data, not %d-D data from %s" %
(m.ndim, s.__class__.__name__))
raise TypeError(msg)
def encodebytes(s):
"""Encode a bytestring into a bytestring containing multiple lines
of base-64 data."""
_input_type_check(s)
pieces = []
for i in range(0, len(s), MAXBINSIZE):
chunk = s[i : i + MAXBINSIZE]
pieces.append(binascii.b2a_base64(chunk))
return b"".join(pieces)
def encodestring(s):
"""Legacy alias of encodebytes()."""
import warnings
warnings.warn("encodestring() is a deprecated alias, use encodebytes()",
DeprecationWarning, 2)
return encodebytes(s)
def decodebytes(s):
"""Decode a bytestring of base-64 data into a bytestring."""
_input_type_check(s)
return binascii.a2b_base64(s)
def decodestring(s):
"""Legacy alias of decodebytes()."""
import warnings
warnings.warn("decodestring() is a deprecated alias, use decodebytes()",
DeprecationWarning, 2)
return decodebytes(s)
# Usable as a script...
def main():
"""Small main program"""
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'deut')
except getopt.error as msg:
sys.stdout = sys.stderr
print(msg)
print("""usage: %s [-d|-e|-u|-t] [file|-]
-d, -u: decode
-e: encode (default)
-t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0])
sys.exit(2)
func = encode
for o, a in opts:
if o == '-e': func = encode
if o == '-d': func = decode
if o == '-u': func = decode
if o == '-t': test(); return
if args and args[0] != '-':
with open(args[0], 'rb') as f:
func(f, sys.stdout.buffer)
else:
func(sys.stdin.buffer, sys.stdout.buffer)
def test():
s0 = b"Aladdin:open sesame"
print(repr(s0))
s1 = encodebytes(s0)
print(repr(s1))
s2 = decodebytes(s1)
print(repr(s2))
assert s0 == s2
if __name__ == '__main__':
main()
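# --- Editor's addition: illustrative round-trips (a sketch, not part of the
# --- original module; outputs checked by hand against the alphabets above):
#
#   >>> b64encode(b'Aladdin:open sesame')
#   b'QWxhZGRpbjpvcGVuIHNlc2FtZQ=='
#   >>> b64decode(b'QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
#   b'Aladdin:open sesame'
#   >>> urlsafe_b64encode(b'\xfb\xff')   # '+/' become '-_'
#   b'-_8='
#   >>> b32encode(b'hello')
#   b'NBSWY3DP'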
=======
#! /usr/bin/env python3
"""Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere
import re
import struct
import binascii
__all__ = [
# Legacy interface exports traditional RFC 1521 Base64 encodings
'encode', 'decode', 'encodebytes', 'decodebytes',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Base85 and Ascii85 encodings
'b85encode', 'b85decode', 'a85encode', 'a85decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
bytes_types = (bytes, bytearray) # Types acceptable as binary data
def _bytes_from_decode_data(s):
if isinstance(s, str):
try:
return s.encode('ascii')
except UnicodeEncodeError:
raise ValueError('string argument should contain only ASCII characters')
if isinstance(s, bytes_types):
return s
try:
return memoryview(s).tobytes()
except TypeError:
raise TypeError("argument should be a bytes-like object or ASCII "
"string, not %r" % s.__class__.__name__) from None
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
"""Encode a byte string using Base64.
s is the byte string to encode. Optional altchars must be a byte
string of length 2 which specifies an alternative alphabet for the
'+' and '/' characters. This allows an application to
e.g. generate url or filesystem safe Base64 strings.
The encoded byte string is returned.
"""
# Strip off the trailing newline
encoded = binascii.b2a_base64(s)[:-1]
if altchars is not None:
assert len(altchars) == 2, repr(altchars)
return encoded.translate(bytes.maketrans(b'+/', altchars))
return encoded
def b64decode(s, altchars=None, validate=False):
"""Decode a Base64 encoded byte string.
s is the byte string to decode. Optional altchars must be a
string of length 2 which specifies the alternative alphabet used
instead of the '+' and '/' characters.
The decoded string is returned. A binascii.Error is raised if s is
incorrectly padded.
If validate is False (the default), non-base64-alphabet characters are
discarded prior to the padding check. If validate is True,
non-base64-alphabet characters in the input result in a binascii.Error.
"""
s = _bytes_from_decode_data(s)
if altchars is not None:
altchars = _bytes_from_decode_data(altchars)
assert len(altchars) == 2, repr(altchars)
s = s.translate(bytes.maketrans(altchars, b'+/'))
if validate and not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s):
raise binascii.Error('Non-base64 digit found')
return binascii.a2b_base64(s)
def standard_b64encode(s):
"""Encode a byte string using the standard Base64 alphabet.
s is the byte string to encode. The encoded byte string is returned.
"""
return b64encode(s)
def standard_b64decode(s):
"""Decode a byte string encoded with the standard Base64 alphabet.
s is the byte string to decode. The decoded byte string is
returned. binascii.Error is raised if the input is incorrectly
padded or if there are non-alphabet characters present in the
input.
"""
return b64decode(s)
_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
def urlsafe_b64encode(s):
"""Encode a byte string using a url-safe Base64 alphabet.
s is the byte string to encode. The encoded byte string is
returned. The alphabet uses '-' instead of '+' and '_' instead of
'/'.
"""
return b64encode(s).translate(_urlsafe_encode_translation)
def urlsafe_b64decode(s):
"""Decode a byte string encoded with the standard Base64 alphabet.
s is the byte string to decode. The decoded byte string is
returned. binascii.Error is raised if the input is incorrectly
padded or if there are non-alphabet characters present in the
input.
The alphabet uses '-' instead of '+' and '_' instead of '/'.
"""
s = _bytes_from_decode_data(s)
s = s.translate(_urlsafe_decode_translation)
return b64decode(s)
# Base32 encoding/decoding must be done in Python
_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
_b32tab2 = None
_b32rev = None
def b32encode(s):
"""Encode a byte string using Base32.
s is the byte string to encode. The encoded byte string is returned.
"""
global _b32tab2
# Delay the initialization of the table to not waste memory
# if the function is never called
if _b32tab2 is None:
b32tab = [bytes((i,)) for i in _b32alphabet]
_b32tab2 = [a + b for a in b32tab for b in b32tab]
b32tab = None
if not isinstance(s, bytes_types):
s = memoryview(s).tobytes()
leftover = len(s) % 5
# Pad the last quantum with zero bits if necessary
if leftover:
s = s + bytes(5 - leftover) # Don't use += !
encoded = bytearray()
from_bytes = int.from_bytes
b32tab2 = _b32tab2
for i in range(0, len(s), 5):
c = from_bytes(s[i: i + 5], 'big')
encoded += (b32tab2[c >> 30] + # bits 1 - 10
b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20
b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30
b32tab2[c & 0x3ff] # bits 31 - 40
)
# Adjust for any leftover partial quanta
if leftover == 1:
encoded[-6:] = b'======'
elif leftover == 2:
encoded[-4:] = b'===='
elif leftover == 3:
encoded[-3:] = b'==='
elif leftover == 4:
encoded[-1:] = b'='
return bytes(encoded)
def b32decode(s, casefold=False, map01=None):
"""Decode a Base32 encoded byte string.
s is the byte string to decode. Optional casefold is a flag
specifying whether a lowercase alphabet is acceptable as input.
For security purposes, the default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the
letter O (oh), and for optional mapping of the digit 1 (one) to
either the letter I (eye) or letter L (el). The optional argument
map01 when not None, specifies which letter the digit 1 should be
mapped to (when map01 is not None, the digit 0 is always mapped to
the letter O). For security purposes the default is None, so that
0 and 1 are not allowed in the input.
The decoded byte string is returned. binascii.Error is raised if
the input is incorrectly padded or if there are non-alphabet
characters present in the input.
"""
global _b32rev
# Delay the initialization of the table to not waste memory
# if the function is never called
if _b32rev is None:
_b32rev = {v: k for k, v in enumerate(_b32alphabet)}
s = _bytes_from_decode_data(s)
if len(s) % 8:
raise binascii.Error('Incorrect padding')
# Handle section 2.4 zero and one mapping. The flag map01 will be either
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01 is not None:
map01 = _bytes_from_decode_data(map01)
assert len(map01) == 1, repr(map01)
s = s.translate(bytes.maketrans(b'01', b'O' + map01))
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
l = len(s)
s = s.rstrip(b'=')
padchars = l - len(s)
# Now decode the full quanta
decoded = bytearray()
b32rev = _b32rev
for i in range(0, len(s), 8):
quanta = s[i: i + 8]
acc = 0
try:
for c in quanta:
acc = (acc << 5) + b32rev[c]
except KeyError:
raise binascii.Error('Non-base32 digit found') from None
decoded += acc.to_bytes(5, 'big')
# Process the last, partial quanta
if padchars:
acc <<= 5 * padchars
last = acc.to_bytes(5, 'big')
if padchars == 1:
decoded[-5:] = last[:-1]
elif padchars == 3:
decoded[-5:] = last[:-2]
elif padchars == 4:
decoded[-5:] = last[:-3]
elif padchars == 6:
decoded[-5:] = last[:-4]
else:
raise binascii.Error('Incorrect padding')
return bytes(decoded)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
"""Encode a byte string using Base16.
s is the byte string to encode. The encoded byte string is returned.
"""
return binascii.hexlify(s).upper()
def b16decode(s, casefold=False):
"""Decode a Base16 encoded byte string.
s is the byte string to decode. Optional casefold is a flag
specifying whether a lowercase alphabet is acceptable as input.
For security purposes, the default is False.
The decoded byte string is returned. binascii.Error is raised if
s were incorrectly padded or if there are non-alphabet characters
present in the string.
"""
s = _bytes_from_decode_data(s)
if casefold:
s = s.upper()
if re.search(b'[^0-9A-F]', s):
raise binascii.Error('Non-base16 digit found')
return binascii.unhexlify(s)
#
# Ascii85 encoding/decoding
#
_a85chars = None
_a85chars2 = None
_A85START = b"<~"
_A85END = b"~>"
def _85encode(b, chars, chars2, pad=False, foldnuls=False, foldspaces=False):
# Helper function for a85encode and b85encode
if not isinstance(b, bytes_types):
b = memoryview(b).tobytes()
padding = (-len(b)) % 4
if padding:
b = b + b'\0' * padding
words = struct.Struct('!%dI' % (len(b) // 4)).unpack(b)
chunks = [b'z' if foldnuls and not word else
b'y' if foldspaces and word == 0x20202020 else
(chars2[word // 614125] +
chars2[word // 85 % 7225] +
chars[word % 85])
for word in words]
if padding and not pad:
if chunks[-1] == b'z':
chunks[-1] = chars[0] * 5
chunks[-1] = chunks[-1][:-padding]
return b''.join(chunks)
def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False):
"""Encode a byte string using Ascii85.
b is the byte string to encode. The encoded byte string is returned.
foldspaces is an optional flag that uses the special short sequence 'y'
instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This
feature is not supported by the "standard" Adobe encoding.
wrapcol controls whether the output should have newline ('\n') characters
added to it. If this is non-zero, each output line will be at most this
many characters long.
pad controls whether the input string is padded to a multiple of 4 before
encoding. Note that the btoa implementation always pads.
adobe controls whether the encoded byte sequence is framed with <~ and ~>,
which is used by the Adobe implementation.
"""
global _a85chars, _a85chars2
# Delay the initialization of tables to not waste memory
# if the function is never called
if _a85chars is None:
_a85chars = [bytes((i,)) for i in range(33, 118)]
_a85chars2 = [(a + b) for a in _a85chars for b in _a85chars]
result = _85encode(b, _a85chars, _a85chars2, pad, True, foldspaces)
if adobe:
result = _A85START + result
if wrapcol:
wrapcol = max(2 if adobe else 1, wrapcol)
chunks = [result[i: i + wrapcol]
for i in range(0, len(result), wrapcol)]
if adobe:
if len(chunks[-1]) + 2 > wrapcol:
chunks.append(b'')
result = b'\n'.join(chunks)
if adobe:
result += _A85END
return result
def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'):
"""Decode an Ascii85 encoded byte string.
s is the byte string to decode.
foldspaces is a flag that specifies whether the 'y' short sequence should be
accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is
not supported by the "standard" Adobe encoding.
adobe controls whether the input sequence is in Adobe Ascii85 format (i.e.
is framed with <~ and ~>).
ignorechars should be a byte string containing characters to ignore from the
input. This should only contain whitespace characters, and by default
contains all whitespace characters in ASCII.
"""
b = _bytes_from_decode_data(b)
if adobe:
if not (b.startswith(_A85START) and b.endswith(_A85END)):
raise ValueError("Ascii85 encoded byte sequences must be bracketed "
"by {!r} and {!r}".format(_A85START, _A85END))
b = b[2:-2] # Strip off start/end markers
#
# We have to go through this stepwise, so as to ignore spaces and handle
# special short sequences
#
packI = struct.Struct('!I').pack
decoded = []
decoded_append = decoded.append
curr = []
curr_append = curr.append
curr_clear = curr.clear
for x in b + b'u' * 4:
if b'!'[0] <= x <= b'u'[0]:
curr_append(x)
if len(curr) == 5:
acc = 0
for x in curr:
acc = 85 * acc + (x - 33)
try:
decoded_append(packI(acc))
except struct.error:
raise ValueError('Ascii85 overflow') from None
curr_clear()
elif x == b'z'[0]:
if curr:
raise ValueError('z inside Ascii85 5-tuple')
decoded_append(b'\0\0\0\0')
elif foldspaces and x == b'y'[0]:
if curr:
raise ValueError('y inside Ascii85 5-tuple')
decoded_append(b'\x20\x20\x20\x20')
elif x in ignorechars:
# Skip whitespace
continue
else:
raise ValueError('Non-Ascii85 digit found: %c' % x)
result = b''.join(decoded)
padding = 4 - len(curr)
if padding:
# Throw away the extra padding
result = result[:-padding]
return result
# The following code is originally taken (with permission) from Mercurial
_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~")
_b85chars = None
_b85chars2 = None
_b85dec = None
def b85encode(b, pad=False):
"""Encode an ASCII-encoded byte array in base85 format.
If pad is true, the input is padded with "\0" so its length is a multiple of
4 characters before encoding.
"""
global _b85chars, _b85chars2
# Delay the initialization of tables to not waste memory
# if the function is never called
if _b85chars is None:
_b85chars = [bytes((i,)) for i in _b85alphabet]
_b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
return _85encode(b, _b85chars, _b85chars2, pad)
def b85decode(b):
"""Decode base85-encoded byte array"""
global _b85dec
# Delay the initialization of tables to not waste memory
# if the function is never called
if _b85dec is None:
_b85dec = [None] * 256
for i, c in enumerate(_b85alphabet):
_b85dec[c] = i
b = _bytes_from_decode_data(b)
padding = (-len(b)) % 5
b = b + b'~' * padding
out = []
packI = struct.Struct('!I').pack
for i in range(0, len(b), 5):
chunk = b[i:i + 5]
acc = 0
try:
for c in chunk:
acc = acc * 85 + _b85dec[c]
except TypeError:
for j, c in enumerate(chunk):
if _b85dec[c] is None:
raise ValueError('bad base85 character at position %d'
% (i + j)) from None
raise
try:
out.append(packI(acc))
except struct.error:
raise ValueError('base85 overflow in hunk starting at byte %d'
% i) from None
result = b''.join(out)
if padding:
result = result[:-padding]
return result
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though. The files should be opened in binary mode.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3
def encode(input, output):
"""Encode a file; input and output are binary files."""
while True:
s = input.read(MAXBINSIZE)
if not s:
break
while len(s) < MAXBINSIZE:
ns = input.read(MAXBINSIZE-len(s))
if not ns:
break
s += ns
line = binascii.b2a_base64(s)
output.write(line)
def decode(input, output):
"""Decode a file; input and output are binary files."""
while True:
line = input.readline()
if not line:
break
s = binascii.a2b_base64(line)
output.write(s)
def _input_type_check(s):
try:
m = memoryview(s)
except TypeError as err:
msg = "expected bytes-like object, not %s" % s.__class__.__name__
raise TypeError(msg) from err
if m.format not in ('c', 'b', 'B'):
msg = ("expected single byte elements, not %r from %s" %
(m.format, s.__class__.__name__))
raise TypeError(msg)
if m.ndim != 1:
msg = ("expected 1-D data, not %d-D data from %s" %
(m.ndim, s.__class__.__name__))
raise TypeError(msg)
def encodebytes(s):
"""Encode a bytestring into a bytestring containing multiple lines
of base-64 data."""
_input_type_check(s)
pieces = []
for i in range(0, len(s), MAXBINSIZE):
chunk = s[i : i + MAXBINSIZE]
pieces.append(binascii.b2a_base64(chunk))
return b"".join(pieces)
def encodestring(s):
"""Legacy alias of encodebytes()."""
import warnings
warnings.warn("encodestring() is a deprecated alias, use encodebytes()",
DeprecationWarning, 2)
return encodebytes(s)
def decodebytes(s):
"""Decode a bytestring of base-64 data into a bytestring."""
_input_type_check(s)
return binascii.a2b_base64(s)
def decodestring(s):
"""Legacy alias of decodebytes()."""
import warnings
warnings.warn("decodestring() is a deprecated alias, use decodebytes()",
DeprecationWarning, 2)
return decodebytes(s)
# Usable as a script...
def main():
"""Small main program"""
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'deut')
except getopt.error as msg:
sys.stdout = sys.stderr
print(msg)
print("""usage: %s [-d|-e|-u|-t] [file|-]
-d, -u: decode
-e: encode (default)
-t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0])
sys.exit(2)
func = encode
for o, a in opts:
if o == '-e': func = encode
if o == '-d': func = decode
if o == '-u': func = decode
if o == '-t': test(); return
if args and args[0] != '-':
with open(args[0], 'rb') as f:
func(f, sys.stdout.buffer)
else:
func(sys.stdin.buffer, sys.stdout.buffer)
def test():
s0 = b"Aladdin:open sesame"
print(repr(s0))
s1 = encodebytes(s0)
print(repr(s1))
s2 = decodebytes(s1)
print(repr(s2))
assert s0 == s2
if __name__ == '__main__':
main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
#! /usr/bin/env python3
"""Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere
import re
import struct
import binascii
__all__ = [
# Legacy interface exports traditional RFC 1521 Base64 encodings
'encode', 'decode', 'encodebytes', 'decodebytes',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Base85 and Ascii85 encodings
'b85encode', 'b85decode', 'a85encode', 'a85decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
bytes_types = (bytes, bytearray) # Types acceptable as binary data
def _bytes_from_decode_data(s):
if isinstance(s, str):
try:
return s.encode('ascii')
except UnicodeEncodeError:
raise ValueError('string argument should contain only ASCII characters')
if isinstance(s, bytes_types):
return s
try:
return memoryview(s).tobytes()
except TypeError:
raise TypeError("argument should be a bytes-like object or ASCII "
"string, not %r" % s.__class__.__name__) from None
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
"""Encode a byte string using Base64.
s is the byte string to encode. Optional altchars must be a byte
string of length 2 which specifies an alternative alphabet for the
'+' and '/' characters. This allows an application to
e.g. generate url or filesystem safe Base64 strings.
The encoded byte string is returned.
"""
# Strip off the trailing newline
encoded = binascii.b2a_base64(s)[:-1]
if altchars is not None:
assert len(altchars) == 2, repr(altchars)
return encoded.translate(bytes.maketrans(b'+/', altchars))
return encoded
def b64decode(s, altchars=None, validate=False):
"""Decode a Base64 encoded byte string.
s is the byte string to decode. Optional altchars must be a
string of length 2 which specifies the alternative alphabet used
instead of the '+' and '/' characters.
The decoded string is returned. A binascii.Error is raised if s is
incorrectly padded.
If validate is False (the default), non-base64-alphabet characters are
discarded prior to the padding check. If validate is True,
non-base64-alphabet characters in the input result in a binascii.Error.
"""
s = _bytes_from_decode_data(s)
if altchars is not None:
altchars = _bytes_from_decode_data(altchars)
assert len(altchars) == 2, repr(altchars)
s = s.translate(bytes.maketrans(altchars, b'+/'))
if validate and not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s):
raise binascii.Error('Non-base64 digit found')
return binascii.a2b_base64(s)
def standard_b64encode(s):
"""Encode a byte string using the standard Base64 alphabet.
s is the byte string to encode. The encoded byte string is returned.
"""
return b64encode(s)
def standard_b64decode(s):
"""Decode a byte string encoded with the standard Base64 alphabet.
s is the byte string to decode. The decoded byte string is
returned. binascii.Error is raised if the input is incorrectly
padded or if there are non-alphabet characters present in the
input.
"""
return b64decode(s)
_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
def urlsafe_b64encode(s):
"""Encode a byte string using a url-safe Base64 alphabet.
s is the byte string to encode. The encoded byte string is
returned. The alphabet uses '-' instead of '+' and '_' instead of
'/'.
"""
return b64encode(s).translate(_urlsafe_encode_translation)
def urlsafe_b64decode(s):
"""Decode a byte string encoded with the standard Base64 alphabet.
s is the byte string to decode. The decoded byte string is
returned. binascii.Error is raised if the input is incorrectly
padded or if there are non-alphabet characters present in the
input.
The alphabet uses '-' instead of '+' and '_' instead of '/'.
"""
s = _bytes_from_decode_data(s)
s = s.translate(_urlsafe_decode_translation)
return b64decode(s)
# Base32 encoding/decoding must be done in Python
_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
_b32tab2 = None
_b32rev = None
def b32encode(s):
"""Encode a byte string using Base32.
s is the byte string to encode. The encoded byte string is returned.
"""
global _b32tab2
# Delay the initialization of the table to not waste memory
# if the function is never called
if _b32tab2 is None:
b32tab = [bytes((i,)) for i in _b32alphabet]
_b32tab2 = [a + b for a in b32tab for b in b32tab]
b32tab = None
if not isinstance(s, bytes_types):
s = memoryview(s).tobytes()
leftover = len(s) % 5
# Pad the last quantum with zero bits if necessary
if leftover:
s = s + bytes(5 - leftover) # Don't use += !
encoded = bytearray()
from_bytes = int.from_bytes
b32tab2 = _b32tab2
for i in range(0, len(s), 5):
c = from_bytes(s[i: i + 5], 'big')
encoded += (b32tab2[c >> 30] + # bits 1 - 10
b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20
b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30
b32tab2[c & 0x3ff] # bits 31 - 40
)
# Adjust for any leftover partial quanta
if leftover == 1:
encoded[-6:] = b'======'
elif leftover == 2:
encoded[-4:] = b'===='
elif leftover == 3:
encoded[-3:] = b'==='
elif leftover == 4:
encoded[-1:] = b'='
return bytes(encoded)
def b32decode(s, casefold=False, map01=None):
"""Decode a Base32 encoded byte string.
s is the byte string to decode. Optional casefold is a flag
specifying whether a lowercase alphabet is acceptable as input.
For security purposes, the default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the
letter O (oh), and for optional mapping of the digit 1 (one) to
either the letter I (eye) or letter L (el). The optional argument
map01 when not None, specifies which letter the digit 1 should be
mapped to (when map01 is not None, the digit 0 is always mapped to
the letter O). For security purposes the default is None, so that
0 and 1 are not allowed in the input.
The decoded byte string is returned. binascii.Error is raised if
the input is incorrectly padded or if there are non-alphabet
characters present in the input.
"""
global _b32rev
# Delay the initialization of the table to not waste memory
# if the function is never called
if _b32rev is None:
_b32rev = {v: k for k, v in enumerate(_b32alphabet)}
s = _bytes_from_decode_data(s)
if len(s) % 8:
raise binascii.Error('Incorrect padding')
# Handle section 2.4 zero and one mapping. The flag map01 will be either
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01 is not None:
map01 = _bytes_from_decode_data(map01)
assert len(map01) == 1, repr(map01)
s = s.translate(bytes.maketrans(b'01', b'O' + map01))
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
l = len(s)
s = s.rstrip(b'=')
padchars = l - len(s)
# Now decode the full quanta
decoded = bytearray()
b32rev = _b32rev
for i in range(0, len(s), 8):
quanta = s[i: i + 8]
acc = 0
try:
for c in quanta:
acc = (acc << 5) + b32rev[c]
except KeyError:
raise binascii.Error('Non-base32 digit found') from None
decoded += acc.to_bytes(5, 'big')
# Process the last, partial quanta
if padchars:
acc <<= 5 * padchars
last = acc.to_bytes(5, 'big')
if padchars == 1:
decoded[-5:] = last[:-1]
elif padchars == 3:
decoded[-5:] = last[:-2]
elif padchars == 4:
decoded[-5:] = last[:-3]
elif padchars == 6:
decoded[-5:] = last[:-4]
else:
raise binascii.Error('Incorrect padding')
return bytes(decoded)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
"""Encode a byte string using Base16.
s is the byte string to encode. The encoded byte string is returned.
"""
return binascii.hexlify(s).upper()
def b16decode(s, casefold=False):
"""Decode a Base16 encoded byte string.
s is the byte string to decode. Optional casefold is a flag
specifying whether a lowercase alphabet is acceptable as input.
For security purposes, the default is False.
The decoded byte string is returned. binascii.Error is raised if
s were incorrectly padded or if there are non-alphabet characters
present in the string.
"""
s = _bytes_from_decode_data(s)
if casefold:
s = s.upper()
if re.search(b'[^0-9A-F]', s):
raise binascii.Error('Non-base16 digit found')
return binascii.unhexlify(s)
#
# Ascii85 encoding/decoding
#
_a85chars = None
_a85chars2 = None
_A85START = b"<~"
_A85END = b"~>"
def _85encode(b, chars, chars2, pad=False, foldnuls=False, foldspaces=False):
# Helper function for a85encode and b85encode
if not isinstance(b, bytes_types):
b = memoryview(b).tobytes()
padding = (-len(b)) % 4
if padding:
b = b + b'\0' * padding
words = struct.Struct('!%dI' % (len(b) // 4)).unpack(b)
chunks = [b'z' if foldnuls and not word else
b'y' if foldspaces and word == 0x20202020 else
(chars2[word // 614125] +
chars2[word // 85 % 7225] +
chars[word % 85])
for word in words]
if padding and not pad:
if chunks[-1] == b'z':
chunks[-1] = chars[0] * 5
chunks[-1] = chunks[-1][:-padding]
return b''.join(chunks)
def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False):
"""Encode a byte string using Ascii85.
b is the byte string to encode. The encoded byte string is returned.
foldspaces is an optional flag that uses the special short sequence 'y'
instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This
feature is not supported by the "standard" Adobe encoding.
wrapcol controls whether the output should have newline ('\n') characters
added to it. If this is non-zero, each output line will be at most this
many characters long.
pad controls whether the input string is padded to a multiple of 4 before
encoding. Note that the btoa implementation always pads.
adobe controls whether the encoded byte sequence is framed with <~ and ~>,
which is used by the Adobe implementation.
"""
global _a85chars, _a85chars2
# Delay the initialization of tables to not waste memory
# if the function is never called
if _a85chars is None:
_a85chars = [bytes((i,)) for i in range(33, 118)]
_a85chars2 = [(a + b) for a in _a85chars for b in _a85chars]
result = _85encode(b, _a85chars, _a85chars2, pad, True, foldspaces)
if adobe:
result = _A85START + result
if wrapcol:
wrapcol = max(2 if adobe else 1, wrapcol)
chunks = [result[i: i + wrapcol]
for i in range(0, len(result), wrapcol)]
if adobe:
if len(chunks[-1]) + 2 > wrapcol:
chunks.append(b'')
result = b'\n'.join(chunks)
if adobe:
result += _A85END
return result
def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'):
"""Decode an Ascii85 encoded byte string.
s is the byte string to decode.
foldspaces is a flag that specifies whether the 'y' short sequence should be
accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is
not supported by the "standard" Adobe encoding.
adobe controls whether the input sequence is in Adobe Ascii85 format (i.e.
is framed with <~ and ~>).
ignorechars should be a byte string containing characters to ignore from the
input. This should only contain whitespace characters, and by default
contains all whitespace characters in ASCII.
"""
b = _bytes_from_decode_data(b)
if adobe:
if not (b.startswith(_A85START) and b.endswith(_A85END)):
raise ValueError("Ascii85 encoded byte sequences must be bracketed "
"by {!r} and {!r}".format(_A85START, _A85END))
b = b[2:-2] # Strip off start/end markers
#
# We have to go through this stepwise, so as to ignore spaces and handle
# special short sequences
#
packI = struct.Struct('!I').pack
decoded = []
decoded_append = decoded.append
curr = []
curr_append = curr.append
curr_clear = curr.clear
for x in b + b'u' * 4:
if b'!'[0] <= x <= b'u'[0]:
curr_append(x)
if len(curr) == 5:
acc = 0
for x in curr:
acc = 85 * acc + (x - 33)
try:
decoded_append(packI(acc))
except struct.error:
raise ValueError('Ascii85 overflow') from None
curr_clear()
elif x == b'z'[0]:
if curr:
raise ValueError('z inside Ascii85 5-tuple')
decoded_append(b'\0\0\0\0')
elif foldspaces and x == b'y'[0]:
if curr:
raise ValueError('y inside Ascii85 5-tuple')
decoded_append(b'\x20\x20\x20\x20')
elif x in ignorechars:
# Skip whitespace
continue
else:
raise ValueError('Non-Ascii85 digit found: %c' % x)
result = b''.join(decoded)
padding = 4 - len(curr)
if padding:
# Throw away the extra padding
result = result[:-padding]
return result
# The following code is originally taken (with permission) from Mercurial
_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~")
_b85chars = None
_b85chars2 = None
_b85dec = None
def b85encode(b, pad=False):
"""Encode an ASCII-encoded byte array in base85 format.
If pad is true, the input is padded with "\0" so its length is a multiple of
4 characters before encoding.
"""
global _b85chars, _b85chars2
# Delay the initialization of tables to not waste memory
# if the function is never called
if _b85chars is None:
_b85chars = [bytes((i,)) for i in _b85alphabet]
_b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
return _85encode(b, _b85chars, _b85chars2, pad)
def b85decode(b):
"""Decode base85-encoded byte array"""
global _b85dec
# Delay the initialization of tables to not waste memory
# if the function is never called
if _b85dec is None:
_b85dec = [None] * 256
for i, c in enumerate(_b85alphabet):
_b85dec[c] = i
b = _bytes_from_decode_data(b)
padding = (-len(b)) % 5
b = b + b'~' * padding
out = []
packI = struct.Struct('!I').pack
for i in range(0, len(b), 5):
chunk = b[i:i + 5]
acc = 0
try:
for c in chunk:
acc = acc * 85 + _b85dec[c]
except TypeError:
for j, c in enumerate(chunk):
if _b85dec[c] is None:
raise ValueError('bad base85 character at position %d'
% (i + j)) from None
raise
try:
out.append(packI(acc))
except struct.error:
raise ValueError('base85 overflow in hunk starting at byte %d'
% i) from None
result = b''.join(out)
if padding:
result = result[:-padding]
return result
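# Round-trip sketch for the base85 pair above (any bytes payload works, since
# b85decode strips the same padding that the encoder adds internally):
#   payload = b'arbitrary bytes'
#   b85decode(b85encode(payload)) == payload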
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though. The files should be opened in binary mode.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3
def encode(input, output):
"""Encode a file; input and output are binary files."""
while True:
s = input.read(MAXBINSIZE)
if not s:
break
while len(s) < MAXBINSIZE:
ns = input.read(MAXBINSIZE-len(s))
if not ns:
break
s += ns
line = binascii.b2a_base64(s)
output.write(line)
def decode(input, output):
"""Decode a file; input and output are binary files."""
while True:
line = input.readline()
if not line:
break
s = binascii.a2b_base64(line)
output.write(s)
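# File-based usage sketch for the legacy encode()/decode() pair (in-memory
# binary streams stand in for real files opened in binary mode):
#   import io
#   src, dst = io.BytesIO(b'raw data'), io.BytesIO()
#   encode(src, dst)   # dst now holds line-wrapped base64 output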
def _input_type_check(s):
try:
m = memoryview(s)
except TypeError as err:
msg = "expected bytes-like object, not %s" % s.__class__.__name__
raise TypeError(msg) from err
if m.format not in ('c', 'b', 'B'):
msg = ("expected single byte elements, not %r from %s" %
(m.format, s.__class__.__name__))
raise TypeError(msg)
if m.ndim != 1:
msg = ("expected 1-D data, not %d-D data from %s" %
(m.ndim, s.__class__.__name__))
raise TypeError(msg)
def encodebytes(s):
"""Encode a bytestring into a bytestring containing multiple lines
of base-64 data."""
_input_type_check(s)
pieces = []
for i in range(0, len(s), MAXBINSIZE):
chunk = s[i : i + MAXBINSIZE]
pieces.append(binascii.b2a_base64(chunk))
return b"".join(pieces)
def encodestring(s):
"""Legacy alias of encodebytes()."""
import warnings
warnings.warn("encodestring() is a deprecated alias, use encodebytes()",
DeprecationWarning, 2)
return encodebytes(s)
def decodebytes(s):
"""Decode a bytestring of base-64 data into a bytestring."""
_input_type_check(s)
return binascii.a2b_base64(s)
def decodestring(s):
"""Legacy alias of decodebytes()."""
import warnings
warnings.warn("decodestring() is a deprecated alias, use decodebytes()",
DeprecationWarning, 2)
return decodebytes(s)
# Usable as a script...
def main():
"""Small main program"""
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'deut')
except getopt.error as msg:
sys.stdout = sys.stderr
print(msg)
print("""usage: %s [-d|-e|-u|-t] [file|-]
-d, -u: decode
-e: encode (default)
-t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0])
sys.exit(2)
func = encode
for o, a in opts:
if o == '-e': func = encode
if o == '-d': func = decode
if o == '-u': func = decode
if o == '-t': test(); return
if args and args[0] != '-':
with open(args[0], 'rb') as f:
func(f, sys.stdout.buffer)
else:
func(sys.stdin.buffer, sys.stdout.buffer)
def test():
s0 = b"Aladdin:open sesame"
print(repr(s0))
s1 = encodebytes(s0)
print(repr(s1))
s2 = decodebytes(s1)
print(repr(s2))
assert s0 == s2
if __name__ == '__main__':
main()
| mit |
kawamon/hue | desktop/core/ext-py/billiard-3.5.0.5/billiard/dummy/connection.py | 12 | 2954 | #
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
#
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
from __future__ import absolute_import
from billiard.five import Queue
__all__ = ['Client', 'Listener', 'Pipe']
families = [None]
class Listener(object):
def __init__(self, address=None, family=None, backlog=1):
self._backlog_queue = Queue(backlog)
def accept(self):
return Connection(*self._backlog_queue.get())
def close(self):
self._backlog_queue = None
address = property(lambda self: self._backlog_queue)
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def Client(address):
_in, _out = Queue(), Queue()
address.put((_out, _in))
return Connection(_in, _out)
def Pipe(duplex=True):
a, b = Queue(), Queue()
return Connection(a, b), Connection(b, a)
class Connection(object):
def __init__(self, _in, _out):
self._out = _out
self._in = _in
self.send = self.send_bytes = _out.put
self.recv = self.recv_bytes = _in.get
def poll(self, timeout=0.0):
if self._in.qsize() > 0:
return True
if timeout <= 0.0:
return False
self._in.not_empty.acquire()
self._in.not_empty.wait(timeout)
self._in.not_empty.release()
return self._in.qsize() > 0
def close(self):
pass
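# Minimal in-process sketch: the queue-backed Pipe behaves like a socket pair,
# but both ends live in the same process, so no pickling or fds are involved:
#   a, b = Pipe()
#   a.send('ping')
#   assert b.recv() == 'ping'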
| apache-2.0 |
python-rope/rope | ropetest/refactor/usefunctiontest.py | 3 | 5142 | try:
import unittest2 as unittest
except ImportError:
import unittest
from rope.base import exceptions
from ropetest import testutils
from rope.refactor.usefunction import UseFunction
class UseFunctionTest(unittest.TestCase):
def setUp(self):
super(UseFunctionTest, self).setUp()
self.project = testutils.sample_project()
self.mod1 = testutils.create_module(self.project, 'mod1')
self.mod2 = testutils.create_module(self.project, 'mod2')
def tearDown(self):
testutils.remove_project(self.project)
super(UseFunctionTest, self).tearDown()
def test_simple_case(self):
code = 'def f():\n pass\n'
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
self.assertEqual(code, self.mod1.read())
def test_simple_function(self):
code = 'def f(p):\n print(p)\nprint(1)\n'
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
self.assertEqual('def f(p):\n print(p)\nf(1)\n',
self.mod1.read())
def test_simple_function2(self):
code = 'def f(p):\n print(p + 1)\nprint(1 + 1)\n'
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
self.assertEqual('def f(p):\n print(p + 1)\nf(1)\n',
self.mod1.read())
def test_functions_with_multiple_statements(self):
code = 'def f(p):\n r = p + 1\n print(r)\nr = 2 + 1\nprint(r)\n'
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
self.assertEqual('def f(p):\n r = p + 1\n print(r)\nf(2)\n',
self.mod1.read())
def test_returning(self):
code = 'def f(p):\n return p + 1\nr = 2 + 1\nprint(r)\n'
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
self.assertEqual(
'def f(p):\n return p + 1\nr = f(2)\nprint(r)\n',
self.mod1.read())
def test_returning_a_single_expression(self):
code = 'def f(p):\n return p + 1\nprint(2 + 1)\n'
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
self.assertEqual(
'def f(p):\n return p + 1\nprint(f(2))\n',
self.mod1.read())
def test_occurrences_in_other_modules(self):
code = 'def f(p):\n return p + 1\n'
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.mod2.write('print(2 + 1)\n')
self.project.do(user.get_changes())
self.assertEqual('import mod1\nprint(mod1.f(2))\n',
self.mod2.read())
def test_when_performing_on_non_functions(self):
code = 'var = 1\n'
self.mod1.write(code)
with self.assertRaises(exceptions.RefactoringError):
UseFunction(self.project, self.mod1, code.rindex('var'))
def test_differing_in_the_inner_temp_names(self):
code = 'def f(p):\n a = p + 1\n print(a)\nb = 2 + 1\nprint(b)\n'
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
self.assertEqual('def f(p):\n a = p + 1\n print(a)\nf(2)\n',
self.mod1.read())
# TODO: probably new options should be added to restructure
def xxx_test_being_a_bit_more_intelligent_when_returning_assigneds(self):
code = 'def f(p):\n a = p + 1\n return a\n'\
'var = 2 + 1\nprint(var)\n'
self.mod1.write(code)
user = UseFunction(self.project, self.mod1, code.rindex('f'))
self.project.do(user.get_changes())
self.assertEqual('def f(p):\n a = p + 1\n return a\n'
'var = f(p)\nprint(var)\n', self.mod1.read())
def test_exception_when_performing_a_function_with_yield(self):
code = 'def func():\n yield 1\n'
self.mod1.write(code)
with self.assertRaises(exceptions.RefactoringError):
UseFunction(self.project, self.mod1, code.index('func'))
def test_exception_when_performing_a_function_two_returns(self):
code = 'def func():\n return 1\n return 2\n'
self.mod1.write(code)
with self.assertRaises(exceptions.RefactoringError):
UseFunction(self.project, self.mod1, code.index('func'))
def test_exception_when_returns_is_not_the_last_statement(self):
code = 'def func():\n return 2\n a = 1\n'
self.mod1.write(code)
with self.assertRaises(exceptions.RefactoringError):
UseFunction(self.project, self.mod1, code.index('func'))
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 |
laszlocsomor/tensorflow | tensorflow/python/kernel_tests/random/random_gamma_test.py | 112 | 10741 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.random_ops.random_gamma."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class RandomGammaTest(test.TestCase):
"""This is a medium test due to the moments computation taking some time."""
def setUp(self):
np.random.seed(137)
random_seed.set_random_seed(137)
def _Sampler(self, num, alpha, beta, dtype, use_gpu, seed=None):
def func():
with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
rng = random_ops.random_gamma(
[num], alpha, beta=beta, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in xrange(10):
ret[i, :] = sess.run(rng)
return ret
return func
def testMomentsFloat32(self):
self._testMoments(dtypes.float32)
def testMomentsFloat64(self):
self._testMoments(dtypes.float64)
def _testMoments(self, dt):
try:
from scipy import stats # pylint: disable=g-import-not-at-top
except ImportError as e:
tf_logging.warn("Cannot test moments: %s" % e)
return
# Check the given array of samples matches the given theoretical moment
# function at different orders. The test is considered passing if the
# z-tests of all statistical moments are all below z_limit.
# Parameters:
# max_moments: the largest moments of the distribution to be tested
# stride: the distance between samples to check for statistical properties
# 0 means the n-th moment of each sample
# any other strides tests for spatial correlation between samples;
# z_limit: the maximum z-test we would consider the test to pass;
# The moments test is a z-value test. This is the largest z-value
# we want to tolerate. Since the z-test approximates a unit normal
# distribution, it should almost definitely never exceed 6.
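    # In formula form (matching the locals computed below):
    #   z_test = |moments[i] - moments_i_mean| / sqrt(total_variance)
    # and the test passes only while z_test < z_limit for every moment order i.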
z_limit = 6.0
for stride in 0, 1, 4, 17:
alphas = [0.2, 1.0, 3.0]
if dt == dtypes.float64:
alphas = [0.01] + alphas
for alpha in alphas:
for scale in 9, 17:
# Gamma moments only defined for values less than the scale param.
max_moment = min(6, scale // 2)
sampler = self._Sampler(
20000, alpha, 1 / scale, dt, use_gpu=False, seed=12345)
moments = [0] * (max_moment + 1)
moments_sample_count = [0] * (max_moment + 1)
x = np.array(sampler().flat) # sampler does 10x samples
for k in range(len(x)):
moment = 1.
for i in range(max_moment + 1):
index = k + i * stride
if index >= len(x):
break
moments[i] += moment
moments_sample_count[i] += 1
moment *= x[index]
for i in range(max_moment + 1):
moments[i] /= moments_sample_count[i]
for i in range(1, max_moment + 1):
g = stats.gamma(alpha, scale=scale)
if stride == 0:
moments_i_mean = g.moment(i)
moments_i_squared = g.moment(2 * i)
else:
moments_i_mean = pow(g.moment(1), i)
moments_i_squared = pow(g.moment(2), i)
# Calculate moment variance safely:
# This is just
# (moments_i_squared - moments_i_mean**2) / moments_sample_count[i]
normalized_moments_i_var = (
moments_i_mean / moments_sample_count[i] *
(moments_i_squared / moments_i_mean - moments_i_mean))
# Assume every operation has a small numerical error.
# It takes i multiplications to calculate one i-th moment.
error_per_moment = i * np.finfo(dt.as_numpy_dtype).eps
total_variance = (normalized_moments_i_var + error_per_moment)
tiny = np.finfo(dt.as_numpy_dtype).tiny
self.assertGreaterEqual(total_variance, 0)
if total_variance < tiny:
total_variance = tiny
# z_test is approximately a unit normal distribution.
z_test = abs(
(moments[i] - moments_i_mean) / math.sqrt(total_variance))
self.assertLess(z_test, z_limit)
def _testZeroDensity(self, alpha):
"""Zero isn't in the support of the gamma distribution.
But quantized floating point math has its limits.
TODO(bjp): Implement log-gamma sampler for small-shape distributions.
Args:
alpha: float shape value to test
"""
try:
from scipy import stats # pylint: disable=g-import-not-at-top
except ImportError as e:
tf_logging.warn("Cannot test zero density proportions: %s" % e)
return
allowable_zeros = {
dtypes.float16: stats.gamma(alpha).cdf(np.finfo(np.float16).tiny),
dtypes.float32: stats.gamma(alpha).cdf(np.finfo(np.float32).tiny),
dtypes.float64: stats.gamma(alpha).cdf(np.finfo(np.float64).tiny)
}
failures = []
for use_gpu in [False, True]:
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(
10000, alpha, 1.0, dt, use_gpu=use_gpu, seed=12345)
x = sampler()
allowable = allowable_zeros[dt] * x.size
allowable = allowable * 2 if allowable < 10 else allowable * 1.05
if np.sum(x <= 0) > allowable:
failures += [(use_gpu, dt)]
self.assertEqual([], failures)
def testNonZeroSmallShape(self):
self._testZeroDensity(0.01)
def testNonZeroSmallishShape(self):
self._testZeroDensity(0.35)
  # Asserts that different trials (1000 samples per trial) are unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which use the same random number seed.
def testDistinct(self):
for use_gpu in [False, True]:
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(1000, 2.0, 1.0, dt, use_gpu=use_gpu)
x = sampler()
y = sampler()
        # Number of positions where the two samples happen to coincide.
count = (x == y).sum()
count_limit = 20 if dt == dtypes.float16 else 10
if count >= count_limit:
print(use_gpu, dt)
print("x = ", x)
print("y = ", y)
print("count = ", count)
self.assertLess(count, count_limit)
# Checks that the CPU and GPU implementation returns the same results,
# given the same random seed
def testCPUGPUMatch(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
results = {}
for use_gpu in [False, True]:
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
if dt == dtypes.float16:
self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
def testSeed(self):
for use_gpu in [False, True]:
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
self.assertAllEqual(sx(), sy())
def testNoCSE(self):
"""CSE = constant subexpression eliminator.
SetIsStateful() should prevent two identical random ops from getting
merged.
"""
for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
rnd1 = random_ops.random_gamma([24], 2.0, dtype=dtype)
rnd2 = random_ops.random_gamma([24], 2.0, dtype=dtype)
diff = rnd2 - rnd1
self.assertGreater(np.linalg.norm(diff.eval()), 0.1)
def testShape(self):
# Fully known shape.
rnd = random_ops.random_gamma([150], 2.0)
self.assertEqual([150], rnd.get_shape().as_list())
rnd = random_ops.random_gamma([150], 2.0, beta=[3.0, 4.0])
self.assertEqual([150, 2], rnd.get_shape().as_list())
rnd = random_ops.random_gamma([150], array_ops.ones([1, 2, 3]))
self.assertEqual([150, 1, 2, 3], rnd.get_shape().as_list())
rnd = random_ops.random_gamma([20, 30], array_ops.ones([1, 2, 3]))
self.assertEqual([20, 30, 1, 2, 3], rnd.get_shape().as_list())
rnd = random_ops.random_gamma(
[123], array_ops.placeholder(
dtypes.float32, shape=(2,)))
self.assertEqual([123, 2], rnd.get_shape().as_list())
# Partially known shape.
rnd = random_ops.random_gamma(
array_ops.placeholder(
dtypes.int32, shape=(1,)), array_ops.ones([7, 3]))
self.assertEqual([None, 7, 3], rnd.get_shape().as_list())
rnd = random_ops.random_gamma(
array_ops.placeholder(
dtypes.int32, shape=(3,)), array_ops.ones([9, 6]))
self.assertEqual([None, None, None, 9, 6], rnd.get_shape().as_list())
# Unknown shape.
rnd = random_ops.random_gamma(
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.float32))
self.assertIs(None, rnd.get_shape().ndims)
rnd = random_ops.random_gamma([50], array_ops.placeholder(dtypes.float32))
self.assertIs(None, rnd.get_shape().ndims)
def testPositive(self):
n = int(10e3)
for dt in [dtypes.float16, dtypes.float32, dtypes.float64]:
with self.test_session():
x = random_ops.random_gamma(shape=[n], alpha=0.001, dtype=dt, seed=0)
self.assertEqual(0, math_ops.reduce_sum(math_ops.cast(
math_ops.less_equal(x, 0.), dtype=dtypes.int64)).eval())
if __name__ == "__main__":
test.main()
| apache-2.0 |
jayceyxc/hue | desktop/core/ext-py/simplejson/simplejson/tests/test_scanstring.py | 125 | 3835 | import sys
import decimal
from unittest import TestCase
import simplejson as json
import simplejson.decoder
class TestScanString(TestCase):
def test_py_scanstring(self):
self._test_scanstring(simplejson.decoder.py_scanstring)
def test_c_scanstring(self):
if not simplejson.decoder.c_scanstring:
return
self._test_scanstring(simplejson.decoder.c_scanstring)
def _test_scanstring(self, scanstring):
self.assertEquals(
scanstring('"z\\ud834\\udd20x"', 1, None, True),
(u'z\U0001d120x', 16))
if sys.maxunicode == 65535:
self.assertEquals(
scanstring(u'"z\U0001d120x"', 1, None, True),
(u'z\U0001d120x', 6))
else:
self.assertEquals(
scanstring(u'"z\U0001d120x"', 1, None, True),
(u'z\U0001d120x', 5))
self.assertEquals(
scanstring('"\\u007b"', 1, None, True),
(u'{', 8))
self.assertEquals(
scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True),
(u'A JSON payload should be an object or array, not a string.', 60))
self.assertEquals(
scanstring('["Unclosed array"', 2, None, True),
(u'Unclosed array', 17))
self.assertEquals(
scanstring('["extra comma",]', 2, None, True),
(u'extra comma', 14))
self.assertEquals(
scanstring('["double extra comma",,]', 2, None, True),
(u'double extra comma', 21))
self.assertEquals(
scanstring('["Comma after the close"],', 2, None, True),
(u'Comma after the close', 24))
self.assertEquals(
scanstring('["Extra close"]]', 2, None, True),
(u'Extra close', 14))
self.assertEquals(
scanstring('{"Extra comma": true,}', 2, None, True),
(u'Extra comma', 14))
self.assertEquals(
scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True),
(u'Extra value after close', 26))
self.assertEquals(
scanstring('{"Illegal expression": 1 + 2}', 2, None, True),
(u'Illegal expression', 21))
self.assertEquals(
scanstring('{"Illegal invocation": alert()}', 2, None, True),
(u'Illegal invocation', 21))
self.assertEquals(
scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True),
(u'Numbers cannot have leading zeroes', 37))
self.assertEquals(
scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True),
(u'Numbers cannot be hex', 24))
self.assertEquals(
scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True),
(u'Too deep', 30))
self.assertEquals(
scanstring('{"Missing colon" null}', 2, None, True),
(u'Missing colon', 16))
self.assertEquals(
scanstring('{"Double colon":: null}', 2, None, True),
(u'Double colon', 15))
self.assertEquals(
scanstring('{"Comma instead of colon", null}', 2, None, True),
(u'Comma instead of colon', 25))
self.assertEquals(
scanstring('["Colon instead of comma": false]', 2, None, True),
(u'Colon instead of comma', 25))
self.assertEquals(
scanstring('["Bad value", truth]', 2, None, True),
(u'Bad value', 12))
def test_issue3623(self):
self.assertRaises(ValueError, json.decoder.scanstring, "xxx", 1,
"xxx")
self.assertRaises(UnicodeDecodeError,
json.encoder.encode_basestring_ascii, "xx\xff")
| apache-2.0 |
michalkurka/h2o-3 | h2o-py/tests/testdir_algos/automl/pyunit_automl_reruns.py | 2 | 10484 | from __future__ import print_function
import os
import sys
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils as pu
from h2o.automl import H2OAutoML
max_models = 5
def import_dataset():
df = h2o.import_file(path=pu.locate("smalldata/prostate/prostate.csv"))
target = "CAPSULE"
target_alt = "RACE"
df[target] = df[target].asfactor()
df[target_alt] = df[target_alt].asfactor()
return pu.ns(train=df, target=target, target_alt=target_alt)
def model_names(lb):
return lb[:, 0].as_data_frame().values.flatten()
def assert_same_leaderboard(lb1, lb2, size=0):
print(lb1)
assert len(lb1) == size
print(lb2)
assert len(lb2) == size
assert all(m in lb2 for m in lb1)
def assert_distinct_leaderboard(lb1, lb2, size=0):
print(lb1)
assert len(lb1) == size
print(lb2)
assert len(lb2) == size
assert not any(m in lb2 for m in lb1)
def assert_extended_leaderboard(lb1, lb2, size=0):
print(lb1)
assert len(lb1) == size
print(lb2)
assert len(lb2) == size*2
assert all(m in lb2 for m in lb1)
def suite_reruns_with_same_instance_without_project_name():
def test_rerun_with_same_data_adds_models_to_leaderboard():
ds = import_dataset()
aml = H2OAutoML(max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml.train(y=ds.target, training_frame=ds.train)
project_name, lb1 = aml.project_name, model_names(aml.leaderboard)
aml.train(y=ds.target, training_frame=ds.train)
lb2 = model_names(aml.leaderboard)
assert project_name == aml.project_name
assert_extended_leaderboard(lb1, lb2, size=max_models+2)
def test_rerun_with_different_predictors_adds_models_to_leaderboard():
ds = import_dataset()
aml = H2OAutoML(max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml.train(y=ds.target, training_frame=ds.train)
project_name, lb1 = aml.project_name, model_names(aml.leaderboard)
aml.train(x=ds.train.columns[1:], y=ds.target, training_frame=ds.train)
lb2 = model_names(aml.leaderboard)
assert project_name == aml.project_name
assert_extended_leaderboard(lb1, lb2, size=max_models+2)
def test_rerun_with_different_training_frame_adds_models_to_leaderboard():
ds = import_dataset()
aml = H2OAutoML(max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml.train(y=ds.target, training_frame=ds.train)
project_name, lb1 = aml.project_name, model_names(aml.leaderboard)
aml.train(y=ds.target, training_frame=ds.train[1:])
lb2 = model_names(aml.leaderboard)
assert project_name == aml.project_name
assert_extended_leaderboard(lb1, lb2, size=max_models+2)
def test_rerun_with_different_target_resets_leaderboard():
ds = import_dataset()
aml = H2OAutoML(max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml.train(y=ds.target, training_frame=ds.train)
project_name, lb1 = aml.project_name, model_names(aml.leaderboard)
aml.train(y=ds.target_alt, training_frame=ds.train)
lb2 = model_names(aml.leaderboard)
assert project_name == aml.project_name
assert_distinct_leaderboard(lb1, lb2, size=max_models+2)
return [
test_rerun_with_same_data_adds_models_to_leaderboard,
test_rerun_with_different_predictors_adds_models_to_leaderboard,
test_rerun_with_different_training_frame_adds_models_to_leaderboard,
test_rerun_with_different_target_resets_leaderboard,
]
def suite_reruns_with_same_instance_with_project_name():
def test_rerun_with_same_data_adds_models_to_leaderboard():
ds = import_dataset()
aml = H2OAutoML(project_name="test_automl_rerun", max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml.train(y=ds.target, training_frame=ds.train)
project_name, lb1 = aml.project_name, model_names(aml.leaderboard)
aml.train(y=ds.target, training_frame=ds.train)
lb2 = model_names(aml.leaderboard)
assert project_name == aml.project_name
assert_extended_leaderboard(lb1, lb2, size=max_models+2)
def test_rerun_with_different_predictors_adds_models_to_leaderboard():
ds = import_dataset()
aml = H2OAutoML(project_name="test_automl_rerun", max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml.train(y=ds.target, training_frame=ds.train)
project_name, lb1 = aml.project_name, model_names(aml.leaderboard)
aml.train(x=ds.train.columns[1:], y=ds.target, training_frame=ds.train)
lb2 = model_names(aml.leaderboard)
assert project_name == aml.project_name
assert_extended_leaderboard(lb1, lb2, size=max_models+2)
def test_rerun_with_different_training_frame_adds_models_to_leaderboard():
ds = import_dataset()
aml = H2OAutoML(project_name="test_automl_rerun", max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml.train(y=ds.target, training_frame=ds.train)
project_name, lb1 = aml.project_name, model_names(aml.leaderboard)
aml.train(y=ds.target, training_frame=ds.train[1:])
lb2 = model_names(aml.leaderboard)
assert project_name == aml.project_name
assert_extended_leaderboard(lb1, lb2, size=max_models+2)
def test_rerun_with_different_target_resets_leaderboard():
ds = import_dataset()
aml = H2OAutoML(project_name="test_automl_rerun", max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml.train(y=ds.target, training_frame=ds.train)
project_name, lb1 = aml.project_name, model_names(aml.leaderboard)
aml.train(y=ds.target_alt, training_frame=ds.train)
lb2 = model_names(aml.leaderboard)
assert project_name == aml.project_name
assert_distinct_leaderboard(lb1, lb2, size=max_models+2)
return [
test_rerun_with_same_data_adds_models_to_leaderboard,
test_rerun_with_different_predictors_adds_models_to_leaderboard,
test_rerun_with_different_training_frame_adds_models_to_leaderboard,
test_rerun_with_different_target_resets_leaderboard,
]
def suite_reruns_with_different_instance_without_project_name():
def test_rerun_with_same_data_generates_distinct_leaderboard():
ds = import_dataset()
aml1 = H2OAutoML(max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml1.train(y=ds.target, training_frame=ds.train)
lb1 = model_names(aml1.leaderboard)
aml2 = H2OAutoML(max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml2.train(y=ds.target, training_frame=ds.train)
lb2 = model_names(aml2.leaderboard)
assert aml2.project_name != aml1.project_name
assert_distinct_leaderboard(lb1, lb2, size=max_models+2)
return [
test_rerun_with_same_data_generates_distinct_leaderboard,
]
def suite_reruns_with_different_instances_same_project_name():
def test_rerun_with_same_data_adds_models_to_leaderboard():
ds = import_dataset()
aml1 = H2OAutoML(project_name="test_automl_rerun", max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml1.train(y=ds.target, training_frame=ds.train)
lb1 = model_names(aml1.leaderboard)
aml2 = H2OAutoML(project_name="test_automl_rerun", max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml2.train(y=ds.target, training_frame=ds.train)
lb2 = model_names(aml2.leaderboard)
assert aml1.project_name == aml2.project_name
assert_extended_leaderboard(lb1, lb2, size=max_models+2)
def test_rerun_with_different_predictors_adds_models_to_leaderboard():
ds = import_dataset()
aml1 = H2OAutoML(project_name="test_automl_rerun", max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml1.train(y=ds.target, training_frame=ds.train)
lb1 = model_names(aml1.leaderboard)
aml2 = H2OAutoML(project_name="test_automl_rerun", max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml2.train(x=ds.train.columns[1:], y=ds.target, training_frame=ds.train)
lb2 = model_names(aml2.leaderboard)
assert aml1.project_name == aml2.project_name
assert_extended_leaderboard(lb1, lb2, size=max_models+2)
def test_rerun_with_different_training_frame_adds_models_to_leaderboard():
ds = import_dataset()
aml1 = H2OAutoML(project_name="test_automl_rerun", max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml1.train(y=ds.target, training_frame=ds.train)
lb1 = model_names(aml1.leaderboard)
aml2 = H2OAutoML(project_name="test_automl_rerun", max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml2.train(y=ds.target, training_frame=ds.train[1:])
lb2 = model_names(aml2.leaderboard)
assert aml1.project_name == aml2.project_name
assert_extended_leaderboard(lb1, lb2, size=max_models+2)
def test_rerun_with_different_target_resets_leaderboard():
ds = import_dataset()
aml1 = H2OAutoML(project_name="test_automl_rerun", max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml1.train(y=ds.target, training_frame=ds.train)
lb1 = model_names(aml1.leaderboard)
aml2 = H2OAutoML(project_name="test_automl_rerun", max_models=max_models, seed=1, keep_cross_validation_predictions=True)
aml2.train(y=ds.target_alt, training_frame=ds.train)
lb2 = model_names(aml2.leaderboard)
assert aml1.project_name == aml2.project_name
assert_distinct_leaderboard(lb1, lb2, size=max_models+2)
return [
test_rerun_with_same_data_adds_models_to_leaderboard,
test_rerun_with_different_predictors_adds_models_to_leaderboard,
test_rerun_with_different_training_frame_adds_models_to_leaderboard,
test_rerun_with_different_target_resets_leaderboard,
]
pu.run_tests([
suite_reruns_with_same_instance_without_project_name(),
suite_reruns_with_same_instance_with_project_name(),
suite_reruns_with_different_instance_without_project_name(),
suite_reruns_with_different_instances_same_project_name(),
])
| apache-2.0 |
knnniggett/weewx | bin/daemon.py | 13 | 2886 | # -*- coding: iso-8859-1 -*-
#
# Copyright (c) 2009-2015 Tom Keffer <[email protected]>
#
# See the file LICENSE.txt for your full rights.
#
'''
This module is used to fork the current process into a daemon.
Almost none of this is necessary (or advisable) if your daemon
is being started by inetd. In that case, stdin, stdout and stderr are
all set up for you to refer to the network connection, and the fork()s
and session manipulation should not be done (to avoid confusing inetd).
Only the chdir() and umask() steps remain as useful.
References:
UNIX Programming FAQ
1.7 How do I get my program to act like a daemon?
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
Advanced Programming in the Unix Environment
W. Richard Stevens, 1992, Addison-Wesley, ISBN 0-201-56317-7.
History:
2001/07/10 by Jürgen Hermann
2002/08/28 by Noah Spurrier
2003/02/24 by Clark Evans
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
'''
import sys, os
done = False
def daemonize(stdout='/dev/null', stderr=None, stdin='/dev/null',
pidfile=None, startmsg = 'started with pid %s' ):
'''
This forks the current process into a daemon.
The stdin, stdout, and stderr arguments are file names that
will be opened and be used to replace the standard file descriptors
in sys.stdin, sys.stdout, and sys.stderr.
These arguments are optional and default to /dev/null.
Note that stderr is opened unbuffered, so
if it shares a file with stdout then interleaved output
may not appear in the order that you expect.
'''
global done
# Don't proceed if we have already daemonized.
if done:
return
# Do first fork.
try:
pid = os.fork()
if pid > 0: sys.exit(0) # Exit first parent.
except OSError, e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment.
os.chdir("/")
os.umask(0022)
os.setsid()
# Do second fork.
try:
pid = os.fork()
if pid > 0: sys.exit(0) # Exit second parent.
except OSError, e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
# Open file descriptors and print start message
if not stderr: stderr = stdout
si = file(stdin, 'r')
so = file(stdout, 'a+')
se = file(stderr, 'a+', 0)
pid = str(os.getpid())
# sys.stderr.write("\n%s\n" % startmsg % pid)
# sys.stderr.flush()
if pidfile: file(pidfile,'w+').write("%s\n" % pid)
# Redirect standard file descriptors.
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
done = True
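# Usage sketch (hypothetical paths and main loop; any long-running entry point
# would call daemonize() once, early, before starting its work):
#   daemonize(stdout='/var/log/myapp.log', pidfile='/var/run/myapp.pid')
#   run_forever()  # from here on the process is detached from the terminal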
| gpl-3.0 |
agry/NGECore2 | scripts/mobiles/endor/blood_frenzied_boar_wolf.py | 2 | 1487 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('blood_frenzied_boar_wolf')
mobileTemplate.setLevel(68)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("boar wolf")
mobileTemplate.setAssistRange(12)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_boar_wolf.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_charge_4')
attacks.add('bm_dampen_pain_4')
attacks.add('bm_slash_4')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('blood_frenzied_boar_wolf', mobileTemplate)
return | lgpl-3.0 |
romain-dartigues/ansible | lib/ansible/modules/utilities/logic/set_fact.py | 6 | 3231 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
author:
- Dag Wieers (@dagwieers)
module: set_fact
short_description: Set host facts from a task
description:
- This module allows setting new variables. Variables are set on a host-by-host basis just like facts discovered by the setup module.
- These variables will be available to subsequent plays during an ansible-playbook run, but will not be saved across executions even if you use
a fact cache.
- Per the standard Ansible variable precedence rules, many other types of variables have a higher priority, so this value may be overridden.
See L(Variable Precedence Guide,../user_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable) for more information.
- This module is also supported for Windows targets.
options:
key_value:
description:
- The C(set_fact) module takes key=value pairs as variables to set
in the playbook scope. Or alternatively, accepts complex arguments
using the C(args:) statement.
required: true
cacheable:
description:
- This boolean converts the variable into an actual 'fact' which will also be added to the fact cache, if fact caching is enabled.
- Normally this module creates 'host level variables' and has much higher precedence, this option changes the nature and precedence
(by 7 steps) of the variable created.
https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable
type: bool
default: 'no'
version_added: "2.4"
version_added: "1.2"
notes:
- "The `var=value` notation can only create strings or booleans.
If you want to create lists/arrays or dictionary/hashes use `var: [val1, val2]`"
- This module is also supported for Windows targets.
- Since 'cacheable' is now a module param, 'cacheable' is no longer a valid fact name as of 2.4.
'''
EXAMPLES = '''
# Example setting host facts using key=value pairs, note that this always creates strings or booleans
- set_fact: one_fact="something" other_fact="{{ local_var }}"
# Example setting host facts using complex arguments
- set_fact:
one_fact: something
other_fact: "{{ local_var * 2 }}"
another_fact: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
# Example setting facts so that they will be persisted in the fact cache
- set_fact:
one_fact: something
other_fact: "{{ local_var * 2 }}"
cacheable: true
# As of 1.8, Ansible will convert boolean strings ('true', 'false', 'yes', 'no')
# to proper boolean values when using the key=value syntax, however it is still
# recommended that booleans be set using the complex argument style:
- set_fact:
one_fact: true
other_fact: false
'''
| gpl-3.0 |
akhilesh-k/Intelligent-Alarm | mat alarm/raspberrypi.py | 3 | 2014 | import RPi.GPIO as GPIO
import os
from gpiozero import Buzzer
from time import sleep
from flask import Flask, render_template, url_for, request
GPIO.setmode(GPIO.BCM)
but1=15
but2=16
but3=22
but4=19
led1=10
led2=11
led3=12
led4=13
buz=Buzzer(40)  # gpiozero's Buzzer configures its own pin; no manual GPIO.setup() needed
GPIO.setup(but1,GPIO.IN,pull_up_down=GPIO.PUD_UP)
GPIO.setup(but2,GPIO.IN,pull_up_down=GPIO.PUD_UP)
GPIO.setup(but3,GPIO.IN,pull_up_down=GPIO.PUD_UP)
GPIO.setup(but4,GPIO.IN,pull_up_down=GPIO.PUD_UP)
GPIO.setup(led1,GPIO.OUT)
GPIO.setup(led2,GPIO.OUT)
GPIO.setup(led3,GPIO.OUT)
GPIO.setup(led4,GPIO.OUT)
app = Flask(__name__)
@app.route('/')
def hello():
    return render_template('main.html')  # render the main.html template from the templates folder
@app.route('/link',methods=["POST"])
def da():
    hours=request.form['hours']
    #minutes=request.form['mins']
    #add to database
    ##compare against the clock (reverse watch) once storage is in place
    return hours
@app.route('/asd')
def asd():
    # Ring the alarm until all four buttons are pressed in sequence,
    # lighting one LED per button that is still waiting.
    GPIO.output(led1,GPIO.HIGH)
    buz.on()
    while True:
        a=GPIO.input(but1)
        print a
        if a==1:
            GPIO.output(led1,GPIO.LOW)
            break
        sleep(0.05)  # short delay so the polling loop does not peg the CPU
    #---------------
    GPIO.output(led2,GPIO.HIGH)
    while True:
        b=GPIO.input(but2)
        if b==1:
            GPIO.output(led2,GPIO.LOW)
            break
        sleep(0.05)
    GPIO.output(led3,GPIO.HIGH)
    while True:
        c=GPIO.input(but3)
        if c==1:
            GPIO.output(led3,GPIO.LOW)
            break
        sleep(0.05)
    GPIO.output(led4,GPIO.HIGH)
    while True:
        d=GPIO.input(but4)
        if d==1:
            GPIO.output(led4,GPIO.LOW)
            break
        sleep(0.05)
    buz.off()
    return 'alarm stopped'
@app.route('/index')
def index():
    return render_template('index.html')  # Flask looks for index.html in the templates folder
if __name__ == '__main__':
    app.run(host='0.0.0.0',port=5000, debug=True)
| mit |
trik/djangae | djangae/wsgi.py | 4 | 2260 | from djangae.utils import on_production
def fix_c_whitelist():
from google.appengine.tools.devappserver2.python import sandbox
if '_sqlite3' not in sandbox._WHITE_LIST_C_MODULES:
sandbox._WHITE_LIST_C_MODULES.extend([
'_sqlite3',
'_ssl', # Workaround for App Engine bug #9246
'_socket'
])
# We do this globally for the local environment outside of dev_appserver
if not on_production():
fix_c_whitelist()
def fix_sandbox():
"""
This is the nastiest thing in the world...
This WSGI middleware is the first chance we get to hook into anything. On the dev_appserver
    at this point the Python sandbox will have already been initialized. The sandbox replaces modules
    such as subprocess and select, and disallows _sqlite3 altogether. These modules
    are really REALLY useful for development.
    So here we dismantle parts of the sandbox. Firstly, we add _sqlite3 to the allowed C modules.
    This only happens on the dev_appserver; it would simply fail on live. Everything is checked so that
changes are only made if they haven't been made already.
"""
if on_production():
return
from google.appengine.tools.devappserver2.python import sandbox
if '_sqlite3' not in sandbox._WHITE_LIST_C_MODULES:
fix_c_whitelist()
# Reload the system socket.py, because of bug #9246
import imp
import os
import ast
psocket = os.path.join(os.path.dirname(ast.__file__), 'socket.py')
imp.load_source('socket', psocket)
class DjangaeApplication(object):
def __init__(self, application):
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django import VERSION
for app in settings.INSTALLED_APPS:
if app.startswith("django."):
raise ImproperlyConfigured("You must place 'djangae' before any 'django' apps in INSTALLED_APPS")
elif app == "djangae":
break
self.wrapped_app = application
def __call__(self, environ, start_response):
fix_sandbox()
return self.wrapped_app(environ, start_response)
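# Typical wiring sketch for a project's wsgi module (standard Django setup
# assumed; DjangaeApplication just wraps the usual WSGI handler):
#   from django.core.wsgi import get_wsgi_application
#   application = DjangaeApplication(get_wsgi_application())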
| bsd-3-clause |
mellanox-openstack/vsa | src/vsa/infra/infra.py | 1 | 7884 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path, popen
from vsa.infra.processcall import process_call
from vsa.infra.config import vsa_conf
_VERSION=None
def getVersion():
"""
The description of getVersion comes here.
@return
"""
global _VERSION
if _VERSION: return _VERSION
e,o=process_call(['rpm','-q','scsi-target-utils','vsa'],log=False)
v=o.splitlines()
e,inf=process_call(['uname','-r'],log=False)
version=[inf.strip()]+[i.strip() for i in v]
_VERSION=version
return version
def iif(test,tval,fval=''):
"""
The description of iif comes here.
@param test
@param tval
@param fval
@return
"""
if test : return tval
else : return fval
def b2YN(val):
"""
The description of b2YN comes here.
@param val
@return
"""
if val : return 'Y'
else : return 'N'
def val2str(val):
"""
The description of val2str comes here.
@param val
@return
"""
if isinstance(val,bool) : return b2YN(val)
if isinstance(val,list) : return ','.join(val)
return str(val)
def bulkupd(obj,dict):
"""
The description of bulkupd comes here.
@param obj
@param dict
@return
"""
for kk in dict.keys() :
obj.__dict__[kk]=dict[kk]
def printsz(val) :
"""
The description of printsz comes here.
@param val
@return
"""
if val==0 : return '0'
if val<2000 : return '%dMB' % val
else : return '%dGB' % (int(val/1024))
def txt2size(txt,size=0):
"""
The description of txt2size comes here.
@param txt
@param size
@return
"""
if not txt : return (0,0)
unit=txt[-1].lower()
if unit in ['m','g','%']: num=tstint(txt[:-1])
else : num=tstint(txt)
if num<0 : return (1,'not a valid integer')
if unit=='g' : num=num*1024
if unit=='%' :
if size : num=num*size/100
else : return (1,'cannot use %')
return (0,num)
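# Usage sketch (sizes are in MB; the '%' form needs the reference size argument):
#   txt2size('2g')        -> (0, 2048)
#   txt2size('50%', 1000) -> (0, 500)
#   txt2size('abc')       -> (1, 'not a valid integer')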
def getnextidx(ls,first=0):
"""
The description of getnextidx comes here.
@param ls
@param first
@return
"""
ls.sort()
m=first
for l in ls :
if m==l : m+=1
else : return m
return m
def getnextspace(d,first=1): # find the first/smallest unassigned idx number in list
"""
The description of getnextspace comes here.
@param d
@param first
@return
"""
j=first
for i in sorted(map(int,d)) :
if i==j or i<j: j=i+1
else : break
return j
def pdict(dic): # nice dictionary print
"""
The description of pdict comes here.
@param dic
@return
"""
for i in dic.keys() :
print " ",i," = ",dic[i]
def safekey(dic,key,default=''):
"""
The description of safekey comes here.
@param dic
@param key
@param default
@return
"""
if dic.has_key(key) : return dic[key]
else : return default
def safepeek(ls) :
"""
The description of safepeek comes here.
@param ls
@return
"""
if len(ls)>0 : return ls[0]
else : return ''
def tstint(i,default=-1) :
"""
The description of tstint comes here.
@param i
@param default
@return
"""
try:
t = int(i)
return t
except ValueError:
return default
def str2dict(val):
"""
The description of str2dict comes here.
@param val
@return
"""
if not val.strip():
return (0, {})
l = val.strip().split(',')
dic = {}
for o in l:
sp = o.split('=', 1)
if len(sp) < 2:
sp += ['1']
dic[sp[0]] = sp[1]
return (0, dic)
def dict2str(val):
"""
The description of dict2str comes here.
@param val
@return
"""
pr=[]
for p in val.keys() :
if ' ' in val[p] : val[p]='"'+val[p]+'"'
pr+=['%s=%s'% (p,str(val[p]))]
return ','.join(pr)
def getunique(lst,pfx,start=1):
"""
The description of getunique comes here.
@param lst
@param pfx
@param start
@return
"""
# return a unique string that starts with pfx and is not repeated in lst
i=start
while pfx+str(i) in lst : i+=1
return pfx+str(i)
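# Usage sketch: getunique(['vol1', 'vol2'], 'vol') -> 'vol3'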
def confirm(txt,defans='y'):
"""
The description of confirm comes here.
@param txt
@param defans
@return
"""
ans='x'
while ans.lower() not in ['y','n','','yes','no']:
try:
ans=raw_input('%s [%s] ?' % (txt,defans))
except (KeyboardInterrupt, EOFError):
print
return ''
if ans.lower() not in ['y','n','','yes','no']:
print 'Please type y/n/yes/no as answer, try again'
if ans=='' : ans=defans
if ans.lower() in ['','y','yes'] : return 'y'
return 'n'
def hex2chrlist(k):
"""
The description of hex2chrlist comes here.
@param k
@return
"""
return ''.join([chr(int(k[i*2:i*2+2],16)) for i in range(len(k)/2)])
def chrlist2hex(x):
"""
The description of chrlist2hex comes here.
@param x
@return
"""
return ''.join(['%02X' % i for i in [ord(c) for c in x]])
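# Round-trip sketch for the pair above:
#   chrlist2hex('AB') -> '4142'
#   hex2chrlist('4142') -> 'AB'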
def ha_rsc_status():
"""
The description of ha_rsc_status comes here.
@return
"""
if not path.isfile('/usr/bin/cl_status'):
return None
try:
c = popen('/usr/bin/cl_status hbstatus >/dev/null 2>&1 && /usr/bin/cl_status rscstatus 2>/dev/null').read().strip()
return c
except:
return None
# roles allowed in vsa config file
VSA_ROLES = ('standalone', 'standby', 'master', 'compute')
def get_vsa_role():
"""
The description of get_vsa_role comes here.
@return
"""
role = None
role = vsa_conf.safe_get('vsa', 'role', VSA_ROLES[0])
if role not in VSA_ROLES:
role = VSA_ROLES[0] #TODO: logit!
return role
def set_vsa_role(role):
"""
The description of set_vsa_role comes here.
@param role
@return
"""
if role not in VSA_ROLES:
return (1, 'invalid role')
vsa_conf.safe_set('vsa', 'role', role)
return (0,'')
def parse_cfg(arg,clist=[],optlist='',optdef={}):
"""
The description of parse_cfg comes here.
@param arg
@param clist
@param optlist
@param optdef
@return
"""
a=arg.strip().split()
opts={};
# test if the first arg matches the allowed categories (clist), if clist is not []
if clist and len(a)>0 and (not a[0].startswith('-')) and (a[0] not in clist) :
print '*** Unknown category: '+a[0]
print 'Category options are: '+','.join(clist)
return (1,'','','',{})
for o in optlist :
if optdef.has_key(o) : opts[o]=optdef[o]
else : opts[o]=''
argsl=[]; i=0;
while i<len(a):
if not a[i].startswith('-') :
argsl+=[a[i]]
i+=1
else : break
for o in a[i:] :
if not o.startswith('-') or len(o)<2 or (o[1] not in optlist):
print '*** Illegal Option: %s , valid options are %s, and must start with a "-"' % (o,optlist)
return (1,'','','',{})
if len(o)==2: opts[o[1]]='1'
else : opts[o[1]]=o[2:]
a0=''; a1=''; a2='';
if len(argsl)>0: a0=argsl[0]
if len(argsl)>1: a1=argsl[1]
if len(argsl)>2: a2=argsl[2]
return (0,a0,a1,a2,opts)
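# Worked sketch of parse_cfg (the category list and option letters are illustrative):
#   parse_cfg('disk d1 -f -tssd', clist=['disk'], optlist='ft')
#   -> (0, 'disk', 'd1', '', {'f': '1', 't': 'ssd'})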
| apache-2.0 |
bjesus/wagtail | wagtail/wagtailsearch/views/frontend.py | 12 | 2871 | import json
from django.conf import settings
from django.shortcuts import render
from django.http import HttpResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from wagtail.wagtailcore import models
from wagtail.wagtailsearch.models import Query
def search(
request,
template=None,
template_ajax=None,
results_per_page=10,
use_json=False,
json_attrs=['title', 'url'],
show_unpublished=False,
search_title_only=False,
extra_filters={},
path=None):
# Get default templates
if template is None:
if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE'):
template = settings.WAGTAILSEARCH_RESULTS_TEMPLATE
else:
template = 'wagtailsearch/search_results.html'
if template_ajax is None:
if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX'):
template_ajax = settings.WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX
else:
template_ajax = template
    # Get query string and page from GET parameters
query_string = request.GET.get('q', '')
page = request.GET.get('page', request.GET.get('p', 1))
# Search
if query_string != '':
search_results = models.Page.search(
query_string,
show_unpublished=show_unpublished,
search_title_only=search_title_only,
extra_filters=extra_filters,
path=path if path else request.site.root_page.path
)
# Get query object
query = Query.get(query_string)
# Add hit
query.add_hit()
# Pagination
paginator = Paginator(search_results, results_per_page)
try:
search_results = paginator.page(page)
except PageNotAnInteger:
search_results = paginator.page(1)
except EmptyPage:
search_results = paginator.page(paginator.num_pages)
else:
query = None
search_results = None
if use_json: # Return a json response
if search_results:
search_results_json = []
for result in search_results:
result_specific = result.specific
search_results_json.append(dict(
(attr, getattr(result_specific, attr))
for attr in json_attrs
if hasattr(result_specific, attr)
))
return HttpResponse(json.dumps(search_results_json))
else:
return HttpResponse('[]')
else: # Render a template
if request.is_ajax() and template_ajax:
template = template_ajax
return render(request, template, dict(
query_string=query_string,
search_results=search_results,
is_ajax=request.is_ajax(),
query=query
))
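# Hypothetical urls.py wiring for this view (module path and names illustrative):
#   from django.conf.urls import url
#   from wagtail.wagtailsearch.views import frontend
#   urlpatterns = [url(r'^search/$', frontend.search, name='wagtailsearch_search')]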
| bsd-3-clause |
benchisell/photostream-bc | flask/lib/python2.7/site-packages/jinja2/testsuite/core_tags.py | 412 | 11858 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.core_tags
~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the core tags like for and if.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, TemplateSyntaxError, UndefinedError, \
DictLoader
env = Environment()
class ForLoopTestCase(JinjaTestCase):
def test_simple(self):
tmpl = env.from_string('{% for item in seq %}{{ item }}{% endfor %}')
assert tmpl.render(seq=list(range(10))) == '0123456789'
def test_else(self):
tmpl = env.from_string('{% for item in seq %}XXX{% else %}...{% endfor %}')
assert tmpl.render() == '...'
def test_empty_blocks(self):
tmpl = env.from_string('<{% for item in seq %}{% else %}{% endfor %}>')
assert tmpl.render() == '<>'
def test_context_vars(self):
tmpl = env.from_string('''{% for item in seq -%}
{{ loop.index }}|{{ loop.index0 }}|{{ loop.revindex }}|{{
loop.revindex0 }}|{{ loop.first }}|{{ loop.last }}|{{
loop.length }}###{% endfor %}''')
one, two, _ = tmpl.render(seq=[0, 1]).split('###')
(one_index, one_index0, one_revindex, one_revindex0, one_first,
one_last, one_length) = one.split('|')
(two_index, two_index0, two_revindex, two_revindex0, two_first,
two_last, two_length) = two.split('|')
assert int(one_index) == 1 and int(two_index) == 2
assert int(one_index0) == 0 and int(two_index0) == 1
assert int(one_revindex) == 2 and int(two_revindex) == 1
assert int(one_revindex0) == 1 and int(two_revindex0) == 0
assert one_first == 'True' and two_first == 'False'
assert one_last == 'False' and two_last == 'True'
assert one_length == two_length == '2'
def test_cycling(self):
tmpl = env.from_string('''{% for item in seq %}{{
loop.cycle('<1>', '<2>') }}{% endfor %}{%
for item in seq %}{{ loop.cycle(*through) }}{% endfor %}''')
output = tmpl.render(seq=list(range(4)), through=('<1>', '<2>'))
assert output == '<1><2>' * 4
def test_scope(self):
tmpl = env.from_string('{% for item in seq %}{% endfor %}{{ item }}')
output = tmpl.render(seq=list(range(10)))
assert not output
def test_varlen(self):
def inner():
for item in range(5):
yield item
tmpl = env.from_string('{% for item in iter %}{{ item }}{% endfor %}')
output = tmpl.render(iter=inner())
assert output == '01234'
def test_noniter(self):
tmpl = env.from_string('{% for item in none %}...{% endfor %}')
self.assert_raises(TypeError, tmpl.render)
def test_recursive(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
assert tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]) == '[1<[1][2]>][2<[1][2]>][3<[a]>]'
def test_recursive_depth0(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ loop.depth0 }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
self.assertEqual(tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]), '[0:1<[1:1][1:2]>][0:2<[1:1][1:2]>][0:3<[1:a]>]')
def test_recursive_depth(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ loop.depth }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
self.assertEqual(tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]), '[1:1<[2:1][2:2]>][1:2<[2:1][2:2]>][1:3<[2:a]>]')
def test_looploop(self):
tmpl = env.from_string('''{% for row in table %}
{%- set rowloop = loop -%}
{% for cell in row -%}
[{{ rowloop.index }}|{{ loop.index }}]
{%- endfor %}
{%- endfor %}''')
assert tmpl.render(table=['ab', 'cd']) == '[1|1][1|2][2|1][2|2]'
def test_reversed_bug(self):
tmpl = env.from_string('{% for i in items %}{{ i }}'
'{% if not loop.last %}'
',{% endif %}{% endfor %}')
assert tmpl.render(items=reversed([3, 2, 1])) == '1,2,3'
def test_loop_errors(self):
tmpl = env.from_string('''{% for item in [1] if loop.index
== 0 %}...{% endfor %}''')
self.assert_raises(UndefinedError, tmpl.render)
tmpl = env.from_string('''{% for item in [] %}...{% else
%}{{ loop }}{% endfor %}''')
assert tmpl.render() == ''
def test_loop_filter(self):
tmpl = env.from_string('{% for item in range(10) if item '
'is even %}[{{ item }}]{% endfor %}')
assert tmpl.render() == '[0][2][4][6][8]'
tmpl = env.from_string('''
{%- for item in range(10) if item is even %}[{{
loop.index }}:{{ item }}]{% endfor %}''')
assert tmpl.render() == '[1:0][2:2][3:4][4:6][5:8]'
def test_loop_unassignable(self):
self.assert_raises(TemplateSyntaxError, env.from_string,
'{% for loop in seq %}...{% endfor %}')
def test_scoped_special_var(self):
t = env.from_string('{% for s in seq %}[{{ loop.first }}{% for c in s %}'
'|{{ loop.first }}{% endfor %}]{% endfor %}')
assert t.render(seq=('ab', 'cd')) == '[True|True|False][False|True|False]'
def test_scoped_loop_var(self):
t = env.from_string('{% for x in seq %}{{ loop.first }}'
'{% for y in seq %}{% endfor %}{% endfor %}')
assert t.render(seq='ab') == 'TrueFalse'
t = env.from_string('{% for x in seq %}{% for y in seq %}'
'{{ loop.first }}{% endfor %}{% endfor %}')
assert t.render(seq='ab') == 'TrueFalseTrueFalse'
def test_recursive_empty_loop_iter(self):
t = env.from_string('''
{%- for item in foo recursive -%}{%- endfor -%}
''')
assert t.render(dict(foo=[])) == ''
def test_call_in_loop(self):
t = env.from_string('''
{%- macro do_something() -%}
[{{ caller() }}]
{%- endmacro %}
{%- for i in [1, 2, 3] %}
{%- call do_something() -%}
{{ i }}
{%- endcall %}
{%- endfor -%}
''')
assert t.render() == '[1][2][3]'
def test_scoping_bug(self):
t = env.from_string('''
{%- for item in foo %}...{{ item }}...{% endfor %}
{%- macro item(a) %}...{{ a }}...{% endmacro %}
{{- item(2) -}}
''')
assert t.render(foo=(1,)) == '...1......2...'
def test_unpacking(self):
tmpl = env.from_string('{% for a, b, c in [[1, 2, 3]] %}'
'{{ a }}|{{ b }}|{{ c }}{% endfor %}')
assert tmpl.render() == '1|2|3'
class IfConditionTestCase(JinjaTestCase):
def test_simple(self):
tmpl = env.from_string('''{% if true %}...{% endif %}''')
assert tmpl.render() == '...'
def test_elif(self):
tmpl = env.from_string('''{% if false %}XXX{% elif true
%}...{% else %}XXX{% endif %}''')
assert tmpl.render() == '...'
def test_else(self):
tmpl = env.from_string('{% if false %}XXX{% else %}...{% endif %}')
assert tmpl.render() == '...'
def test_empty(self):
tmpl = env.from_string('[{% if true %}{% else %}{% endif %}]')
assert tmpl.render() == '[]'
def test_complete(self):
tmpl = env.from_string('{% if a %}A{% elif b %}B{% elif c == d %}'
'C{% else %}D{% endif %}')
assert tmpl.render(a=0, b=False, c=42, d=42.0) == 'C'
def test_no_scope(self):
tmpl = env.from_string('{% if a %}{% set foo = 1 %}{% endif %}{{ foo }}')
assert tmpl.render(a=True) == '1'
tmpl = env.from_string('{% if true %}{% set foo = 1 %}{% endif %}{{ foo }}')
assert tmpl.render() == '1'
class MacrosTestCase(JinjaTestCase):
env = Environment(trim_blocks=True)
def test_simple(self):
tmpl = self.env.from_string('''\
{% macro say_hello(name) %}Hello {{ name }}!{% endmacro %}
{{ say_hello('Peter') }}''')
assert tmpl.render() == 'Hello Peter!'
def test_scoping(self):
tmpl = self.env.from_string('''\
{% macro level1(data1) %}
{% macro level2(data2) %}{{ data1 }}|{{ data2 }}{% endmacro %}
{{ level2('bar') }}{% endmacro %}
{{ level1('foo') }}''')
assert tmpl.render() == 'foo|bar'
def test_arguments(self):
tmpl = self.env.from_string('''\
{% macro m(a, b, c='c', d='d') %}{{ a }}|{{ b }}|{{ c }}|{{ d }}{% endmacro %}
{{ m() }}|{{ m('a') }}|{{ m('a', 'b') }}|{{ m(1, 2, 3) }}''')
assert tmpl.render() == '||c|d|a||c|d|a|b|c|d|1|2|3|d'
def test_varargs(self):
tmpl = self.env.from_string('''\
{% macro test() %}{{ varargs|join('|') }}{% endmacro %}\
{{ test(1, 2, 3) }}''')
assert tmpl.render() == '1|2|3'
def test_simple_call(self):
tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller() }}]]{% endmacro %}\
{% call test() %}data{% endcall %}''')
assert tmpl.render() == '[[data]]'
def test_complex_call(self):
tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller('data') }}]]{% endmacro %}\
{% call(data) test() %}{{ data }}{% endcall %}''')
assert tmpl.render() == '[[data]]'
def test_caller_undefined(self):
tmpl = self.env.from_string('''\
{% set caller = 42 %}\
{% macro test() %}{{ caller is not defined }}{% endmacro %}\
{{ test() }}''')
assert tmpl.render() == 'True'
def test_include(self):
self.env = Environment(loader=DictLoader({'include':
'{% macro test(foo) %}[{{ foo }}]{% endmacro %}'}))
tmpl = self.env.from_string('{% from "include" import test %}{{ test("foo") }}')
assert tmpl.render() == '[foo]'
def test_macro_api(self):
tmpl = self.env.from_string('{% macro foo(a, b) %}{% endmacro %}'
'{% macro bar() %}{{ varargs }}{{ kwargs }}{% endmacro %}'
'{% macro baz() %}{{ caller() }}{% endmacro %}')
assert tmpl.module.foo.arguments == ('a', 'b')
assert tmpl.module.foo.defaults == ()
assert tmpl.module.foo.name == 'foo'
assert not tmpl.module.foo.caller
assert not tmpl.module.foo.catch_kwargs
assert not tmpl.module.foo.catch_varargs
assert tmpl.module.bar.arguments == ()
assert tmpl.module.bar.defaults == ()
assert not tmpl.module.bar.caller
assert tmpl.module.bar.catch_kwargs
assert tmpl.module.bar.catch_varargs
assert tmpl.module.baz.caller
def test_callself(self):
tmpl = self.env.from_string('{% macro foo(x) %}{{ x }}{% if x > 1 %}|'
'{{ foo(x - 1) }}{% endif %}{% endmacro %}'
'{{ foo(5) }}')
assert tmpl.render() == '5|4|3|2|1'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ForLoopTestCase))
suite.addTest(unittest.makeSuite(IfConditionTestCase))
suite.addTest(unittest.makeSuite(MacrosTestCase))
return suite
| bsd-3-clause |
ChristianKniep/QNIB | serverfiles/usr/local/lib/networkx-1.6/networkx/readwrite/p2g.py | 76 | 3279 | """
This module provides the following: read and write of p2g format
used in metabolic pathway studies.
See http://www.cs.purdue.edu/homes/koyuturk/pathway/ for a description.
The summary is included here:
A file that describes a uniquely labeled graph (with extension ".gr")
format looks like the following:
name
3 4
a
1 2
b
c
0 2
"name" is simply a description of what the graph corresponds to. The
second line displays the number of nodes and number of edges,
respectively. This sample graph contains three nodes labeled "a", "b",
and "c". The rest of the graph contains two lines for each node. The
first line for a node contains the node label. After the declaration
of the node label, the out-edges of that node in the graph are
provided. For instance, "a" is linked to nodes 1 and 2, which are
labeled "b" and "c", while the node labeled "b" has no outgoing
edges. Observe that node labeled "c" has an outgoing edge to
itself. Indeed, self-loops are allowed. Node index starts from 0.
"""
# Copyright (C) 2008-2012 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx
from networkx.utils import is_string_like,open_file
__author__ = '\n'.join(['Willem Ligtenberg ([email protected])',
'Aric Hagberg ([email protected])'])
@open_file(1,mode='w')
def write_p2g(G, path, encoding = 'utf-8'):
"""Write NetworkX graph in p2g format.
Notes
-----
This format is meant to be used with directed graphs with
possible self loops.
"""
path.write(("%s\n"%G.name).encode(encoding))
path.write(("%s %s\n"%(G.order(),G.size())).encode(encoding))
nodes = G.nodes()
# make dictionary mapping nodes to integers
nodenumber=dict(zip(nodes,range(len(nodes))))
for n in nodes:
path.write(("%s\n"%n).encode(encoding))
for nbr in G.neighbors(n):
path.write(("%s "%nodenumber[nbr]).encode(encoding))
path.write("\n".encode(encoding))
@open_file(0,mode='r')
def read_p2g(path, encoding='utf-8'):
"""Read graph in p2g format from path.
Returns
-------
MultiDiGraph
Notes
-----
If you want a DiGraph (with no self loops allowed and no edge data)
use D=networkx.DiGraph(read_p2g(path))
"""
lines = (line.decode(encoding) for line in path)
G=parse_p2g(lines)
return G
def parse_p2g(lines):
"""Parse p2g format graph from string or iterable.
Returns
-------
MultiDiGraph
"""
description = next(lines).strip()
    # parallel (multi)edges are allowed, hence the MultiDiGraph below
G=networkx.MultiDiGraph(name=description,selfloops=True)
nnodes,nedges=map(int,next(lines).split())
nodelabel={}
nbrs={}
# loop over the nodes keeping track of node labels and out neighbors
# defer adding edges until all node labels are known
for i in range(nnodes):
n=next(lines).strip()
nodelabel[i]=n
G.add_node(n)
nbrs[n]=map(int,next(lines).split())
# now we know all of the node labels so we can add the edges
# with the correct labels
for n in G:
for nbr in nbrs[n]:
G.add_edge(n,nodelabel[nbr])
return G
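if __name__ == '__main__':
    # A minimal round-trip sketch (not part of the original module): it feeds
    # the three-node example from the module docstring through write_p2g()
    # and read_p2g(). The file name "toy.gr" is hypothetical; this assumes a
    # Python 2 / networkx 1.6 environment, matching the code above.
    G = networkx.MultiDiGraph(name='name')
    G.add_edges_from([('a', 'b'), ('a', 'c'), ('c', 'a'), ('c', 'c')])
    write_p2g(G, 'toy.gr')
    H = read_p2g('toy.gr')
    assert sorted(H.nodes()) == ['a', 'b', 'c'] and H.size() == 4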
| gpl-2.0 |
krdlab/ansible-modules-core | system/hostname.py | 5 | 19910 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Hiroaki Nakamura <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: hostname
author:
- "Hiroaki Nakamura (@hnakamur)"
- "Hideki Saito (@saito-hideki)"
version_added: "1.4"
short_description: Manage hostname
requirements: [ hostname ]
description:
- Set system's hostname
- Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI.
    - Any distribution that uses systemd as its init system
options:
name:
required: true
description:
- Name of the host
'''
EXAMPLES = '''
- hostname: name=web01
'''
import socket
from distutils.version import LooseVersion
# import module snippets
from ansible.module_utils.basic import *
class UnimplementedStrategy(object):
def __init__(self, module):
self.module = module
def get_current_hostname(self):
self.unimplemented_error()
def set_current_hostname(self, name):
self.unimplemented_error()
def get_permanent_hostname(self):
self.unimplemented_error()
def set_permanent_hostname(self, name):
self.unimplemented_error()
def unimplemented_error(self):
platform = get_platform()
distribution = get_distribution()
if distribution is not None:
msg_platform = '%s (%s)' % (platform, distribution)
else:
msg_platform = platform
self.module.fail_json(
msg='hostname module cannot be used on platform %s' % msg_platform)
class Hostname(object):
"""
This is a generic Hostname manipulation class that is subclassed
based on platform.
    A subclass may wish to assign a different strategy instance to self.strategy.
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
strategy_class = UnimplementedStrategy
def __new__(cls, *args, **kwargs):
return load_platform_subclass(Hostname, args, kwargs)
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.strategy = self.strategy_class(module)
def get_current_hostname(self):
return self.strategy.get_current_hostname()
def set_current_hostname(self, name):
self.strategy.set_current_hostname(name)
def get_permanent_hostname(self):
return self.strategy.get_permanent_hostname()
def set_permanent_hostname(self, name):
self.strategy.set_permanent_hostname(name)
class GenericStrategy(object):
"""
This is a generic Hostname manipulation strategy class.
A subclass may wish to override some or all of these methods.
- get_current_hostname()
- get_permanent_hostname()
- set_current_hostname(name)
- set_permanent_hostname(name)
"""
def __init__(self, module):
self.module = module
self.hostname_cmd = self.module.get_bin_path('hostname', True)
def get_current_hostname(self):
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return out.strip()
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
def get_permanent_hostname(self):
return None
def set_permanent_hostname(self, name):
pass
# ===========================================
class DebianStrategy(GenericStrategy):
"""
This is a Debian family Hostname manipulation strategy class - it edits
the /etc/hostname file.
"""
HOSTNAME_FILE = '/etc/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError, err:
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception, err:
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception, err:
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class RedHatStrategy(GenericStrategy):
"""
This is a Redhat Hostname strategy class - it edits the
/etc/sysconfig/network file.
"""
NETWORK_FILE = '/etc/sysconfig/network'
def get_permanent_hostname(self):
try:
f = open(self.NETWORK_FILE, 'rb')
try:
for line in f.readlines():
if line.startswith('HOSTNAME'):
k, v = line.split('=')
return v.strip()
finally:
f.close()
except Exception, err:
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
lines = []
found = False
f = open(self.NETWORK_FILE, 'rb')
try:
for line in f.readlines():
if line.startswith('HOSTNAME'):
lines.append("HOSTNAME=%s\n" % name)
found = True
else:
lines.append(line)
finally:
f.close()
if not found:
lines.append("HOSTNAME=%s\n" % name)
f = open(self.NETWORK_FILE, 'w+')
try:
f.writelines(lines)
finally:
f.close()
except Exception, err:
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class SystemdStrategy(GenericStrategy):
"""
This is a Systemd hostname manipulation strategy class - it uses
the hostnamectl command.
"""
def get_current_hostname(self):
cmd = ['hostname']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return out.strip()
def set_current_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = ['hostnamectl', '--transient', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
def get_permanent_hostname(self):
cmd = 'hostnamectl --static status'
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return out.strip()
def set_permanent_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = ['hostnamectl', '--pretty', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
cmd = ['hostnamectl', '--static', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
# ===========================================
class OpenRCStrategy(GenericStrategy):
"""
This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits
the /etc/conf.d/hostname file.
"""
HOSTNAME_FILE = '/etc/conf.d/hostname'
def get_permanent_hostname(self):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
for line in f:
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception, err:
self.module.fail_json(msg="failed to read hostname: %s" % str(err))
finally:
f.close()
return None
def set_permanent_hostname(self, name):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
lines = [x.strip() for x in f]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
f.close()
f = open(self.HOSTNAME_FILE, 'w')
f.write('\n'.join(lines) + '\n')
except Exception, err:
self.module.fail_json(msg="failed to update hostname: %s" % str(err))
finally:
f.close()
# ===========================================
class OpenBSDStrategy(GenericStrategy):
"""
    This is an OpenBSD family Hostname manipulation strategy class - it edits
the /etc/myname file.
"""
HOSTNAME_FILE = '/etc/myname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError, err:
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception, err:
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception, err:
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class SolarisStrategy(GenericStrategy):
"""
    This is a Solaris 11 or later Hostname manipulation strategy class - it
    executes the hostname command.
"""
def set_current_hostname(self, name):
cmd_option = '-t'
cmd = [self.hostname_cmd, cmd_option, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
def get_permanent_hostname(self):
fmri = 'svc:/system/identity:node'
pattern = 'config/nodename'
cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern)
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return out.strip()
def set_permanent_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
# ===========================================
class FreeBSDStrategy(GenericStrategy):
"""
This is a FreeBSD hostname manipulation strategy class - it edits
the /etc/rc.conf.d/hostname file.
"""
HOSTNAME_FILE = '/etc/rc.conf.d/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("hostname=temporarystub\n")
except IOError, err:
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
for line in f:
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception, err:
self.module.fail_json(msg="failed to read hostname: %s" % str(err))
finally:
f.close()
return None
def set_permanent_hostname(self, name):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
lines = [x.strip() for x in f]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
f.close()
f = open(self.HOSTNAME_FILE, 'w')
f.write('\n'.join(lines) + '\n')
except Exception, err:
self.module.fail_json(msg="failed to update hostname: %s" % str(err))
finally:
f.close()
# ===========================================
class FedoraHostname(Hostname):
platform = 'Linux'
distribution = 'Fedora'
strategy_class = SystemdStrategy
class SLESHostname(Hostname):
platform = 'Linux'
distribution = 'Suse linux enterprise server '
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("12"):
strategy_class = SystemdStrategy
else:
strategy_class = UnimplementedStrategy
class OpenSUSEHostname(Hostname):
platform = 'Linux'
distribution = 'Opensuse '
strategy_class = SystemdStrategy
class ArchHostname(Hostname):
platform = 'Linux'
distribution = 'Arch'
strategy_class = SystemdStrategy
class RedHat5Hostname(Hostname):
platform = 'Linux'
distribution = 'Redhat'
strategy_class = RedHatStrategy
class RedHatServerHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux server'
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
strategy_class = SystemdStrategy
else:
strategy_class = RedHatStrategy
class RedHatWorkstationHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux workstation'
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
strategy_class = SystemdStrategy
else:
strategy_class = RedHatStrategy
class CentOSHostname(Hostname):
platform = 'Linux'
distribution = 'Centos'
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
strategy_class = SystemdStrategy
else:
strategy_class = RedHatStrategy
class CentOSLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Centos linux'
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
strategy_class = SystemdStrategy
else:
strategy_class = RedHatStrategy
class ScientificHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific'
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
strategy_class = SystemdStrategy
else:
strategy_class = RedHatStrategy
class ScientificLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific linux'
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
strategy_class = SystemdStrategy
else:
strategy_class = RedHatStrategy
class OracleLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Oracle linux server'
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
strategy_class = SystemdStrategy
else:
strategy_class = RedHatStrategy
class AmazonLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Amazon'
strategy_class = RedHatStrategy
class DebianHostname(Hostname):
platform = 'Linux'
distribution = 'Debian'
strategy_class = DebianStrategy
class KaliHostname(Hostname):
platform = 'Linux'
distribution = 'Kali'
strategy_class = DebianStrategy
class UbuntuHostname(Hostname):
platform = 'Linux'
distribution = 'Ubuntu'
strategy_class = DebianStrategy
class LinuxmintHostname(Hostname):
platform = 'Linux'
distribution = 'Linuxmint'
strategy_class = DebianStrategy
class LinaroHostname(Hostname):
platform = 'Linux'
distribution = 'Linaro'
strategy_class = DebianStrategy
class GentooHostname(Hostname):
platform = 'Linux'
distribution = 'Gentoo base system'
strategy_class = OpenRCStrategy
class ALTLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Altlinux'
strategy_class = RedHatStrategy
class OpenBSDHostname(Hostname):
platform = 'OpenBSD'
distribution = None
strategy_class = OpenBSDStrategy
class SolarisHostname(Hostname):
platform = 'SunOS'
distribution = None
strategy_class = SolarisStrategy
class FreeBSDHostname(Hostname):
platform = 'FreeBSD'
distribution = None
strategy_class = FreeBSDStrategy
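# A minimal sketch of extending this module (hypothetical distribution name):
# load_platform_subclass() picks the Hostname subclass whose platform and
# distribution attributes match the running system, so supporting a new
# distribution only needs a declaration like:
#
#     class FooLinuxHostname(Hostname):
#         platform = 'Linux'
#         distribution = 'Foo linux'
#         strategy_class = SystemdStrategy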
# ===========================================
def main():
module = AnsibleModule(
argument_spec = dict(
name=dict(required=True, type='str')
)
)
hostname = Hostname(module)
changed = False
name = module.params['name']
current_name = hostname.get_current_hostname()
if current_name != name:
hostname.set_current_hostname(name)
changed = True
permanent_name = hostname.get_permanent_hostname()
if permanent_name != name:
hostname.set_permanent_hostname(name)
changed = True
module.exit_json(changed=changed, name=name,
ansible_facts=dict(ansible_hostname=name.split('.')[0],
ansible_nodename=name,
ansible_fqdn=socket.getfqdn(),
ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
main()
| gpl-3.0 |
jaantollander/Legendre | src/plotting/animate_series.py | 4 | 2747 | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
import seaborn
from src.fourier_series import piecewise_analytic
def animate_series(series, max_degree):
series = series
x = series.x
x0 = series.x0
gen = series()
index = 0
step_function = piecewise_analytic.step_function(x, x0, 1, 0)
v_function = piecewise_analytic.v_function(x, x0, 1, -1, x0)
seaborn.set()
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(2 * 6, 2 * 8))
# Series
axes[0, 0].set(xlim=(np.min(x), np.max(x)),
ylim=(np.min(step_function)-0.5, np.max(step_function)+0.5))
axes[0, 1].set(xlim=(np.min(x), np.max(x)),
ylim=(np.min(v_function)-0.01, np.max(v_function)+0.01))
# Error
axes[1, 0].set(xlim=(np.min(x), np.max(x)), ylim=(10 ** -7, 1))
axes[1, 1].set(xlim=(np.min(x), np.max(x)), ylim=(10 ** -7, 1))
# Functions
axes[0, 0].plot(x, step_function)
axes[0, 1].plot(x, v_function)
l1, = axes[0, 0].plot([], [], marker='o', markersize=2.0, linestyle='-',
color='black', alpha=0.5)
l2, = axes[0, 1].plot([], [], marker='o', markersize=2.0, linestyle='-',
color='black', alpha=0.5)
l3, = axes[1, 0].semilogy([], [], marker='o', markersize=2.0, linestyle='-',
color='black', alpha=0.5)
l4, = axes[1, 1].semilogy([], [], marker='o', markersize=2.0, linestyle='-',
color='black', alpha=0.5)
def update(_):
# series expansion
deg, functions = next(gen)
step_function3 = functions[0]
step_function_series = step_function3[index]
step_function_error = np.abs(step_function - step_function_series)
l1.set_xdata(x)
l1.set_ydata(step_function_series)
l3.set_xdata(x)
l3.set_ydata(step_function_error)
if len(functions) == 2:
v_function3 = functions[1]
v_function_series = v_function3[index]
v_function_error = np.abs(v_function - v_function_series)
l2.set_xdata(x)
l2.set_ydata(v_function_series)
l4.set_xdata(x)
l4.set_ydata(v_function_error)
print(deg)
return l1, l2, l3, l4
anim = animation.FuncAnimation(fig, update,
frames=max_degree,
repeat=False,
interval=10,
blit=True)
plt.show()
| mit |
SYNHAK/spiff | spiff/donations/migrations/0001_initial.py | 1 | 7321 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Donation'
db.create_table(u'donations_donation', (
(u'lineitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['payment.LineItem'], unique=True, primary_key=True)),
))
db.send_create_signal(u'donations', ['Donation'])
# Adding model 'DonationSubscriptionPlan'
db.create_table(u'donations_donationsubscriptionplan', (
(u'subscriptionplan_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['payment.SubscriptionPlan'], unique=True, primary_key=True)),
('value', self.gf('django.db.models.fields.FloatField')()),
))
db.send_create_signal(u'donations', ['DonationSubscriptionPlan'])
def backwards(self, orm):
# Deleting model 'Donation'
db.delete_table(u'donations_donation')
# Deleting model 'DonationSubscriptionPlan'
db.delete_table(u'donations_donationsubscriptionplan')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'donations.donation': {
'Meta': {'object_name': 'Donation', '_ormbases': [u'payment.LineItem']},
u'lineitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['payment.LineItem']", 'unique': 'True', 'primary_key': 'True'})
},
u'donations.donationsubscriptionplan': {
'Meta': {'object_name': 'DonationSubscriptionPlan', '_ormbases': [u'payment.SubscriptionPlan']},
u'subscriptionplan_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['payment.SubscriptionPlan']", 'unique': 'True', 'primary_key': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {})
},
u'payment.invoice': {
'Meta': {'object_name': 'Invoice'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dueDate': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invoices'", 'to': u"orm['auth.User']"})
},
u'payment.lineitem': {
'Meta': {'object_name': 'LineItem'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': u"orm['payment.Invoice']"}),
'name': ('django.db.models.fields.TextField', [], {}),
'quantity': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'unitPrice': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
u'payment.subscriptionperiod': {
'Meta': {'object_name': 'SubscriptionPeriod'},
'dayOfMonth': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monthOfYear': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'payment.subscriptionplan': {
'Meta': {'object_name': 'SubscriptionPlan'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'period': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['payment.SubscriptionPeriod']"})
}
}
complete_apps = ['donations'] | agpl-3.0 |
wargo32/Polcoin | qa/rpc-tests/mempool_coinbase_spends.py | 125 | 3785 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
alert_filename = None # Set by setup_network
def setup_network(self):
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
        self.sync_all()
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
start_count = self.nodes[0].getblockcount()
        # Mine four blocks. After this, the coinbases of nodes[0] blocks
        # 102, 103, and 104 are mature and spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
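        # (A coinbase output only becomes spendable after 100 confirmations,
        # so disconnecting recent blocks pushes these coinbases back below
        # the maturity threshold.)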
b = [ self.nodes[0].getblockhash(n) for n in range(102, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = self.create_tx(coinbase_txids[0], node1_address, 50)
spend_102_raw = self.create_tx(coinbase_txids[1], node0_address, 50)
spend_103_raw = self.create_tx(coinbase_txids[2], node0_address, 50)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
# Create 102_1 and 103_1:
spend_102_1_raw = self.create_tx(spend_102_id, node1_address, 50)
spend_103_1_raw = self.create_tx(spend_103_id, node1_address, 50)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
self.nodes[0].generate(1)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), set([ spend_101_id, spend_102_1_id ]))
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| mit |
aiguofer/bokeh | tests/integration/plotting/test_plot_fill_properties.py | 5 | 2085 | from __future__ import absolute_import
import io
import os
from jinja2 import Template
from bokeh.util.string import decode_utf8
from bokeh.embed import file_html
from bokeh.models import Plot, Range1d, Circle, LinearAxis
from bokeh.resources import INLINE
from tests.integration.utils import has_no_console_errors
import pytest
pytestmark = pytest.mark.integration
HEIGHT = 600
WIDTH = 600
@pytest.mark.screenshot
def test_no_border_or_background_fill(output_file_url, selenium, screenshot):
# Have body background-color that should appear through the no-fill plot
template = Template("""
<!doctype html>
<html lang="en">
<head>
{{ bokeh_js }}
{{ bokeh_css}}
<style>
body { background-color: lightblue; }
</style>
</head>
<body>
{{ plot_script }}
{{ plot_div }}
</body>
</html>
""")
plot = Plot(plot_height=HEIGHT, plot_width=WIDTH,
x_range=Range1d(0, 10), y_range=Range1d(0, 10),
toolbar_location=None)
# This is the no-fill that we're testing
plot.background_fill_color = None
plot.border_fill_color = None
plot.add_glyph(Circle(x=3, y=3, size=50, fill_color='#ffffff'))
plot.add_glyph(Circle(x=6, y=6, size=50, fill_color='#ffffff'))
plot.add_layout(LinearAxis(major_label_text_color='#ffffff',
major_label_text_font_size="30pt"),
'left')
plot.add_layout(LinearAxis(major_label_text_color='#ffffff',
major_label_text_font_size="30pt"),
'below')
html = file_html(plot, INLINE, template=template)
    # filename has to match test function + '.html'
filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"test_no_border_or_background_fill.html")
with io.open(filepath, "w", encoding="utf-8") as f:
f.write(decode_utf8(html))
selenium.get(output_file_url)
assert has_no_console_errors(selenium)
assert screenshot.is_valid()
| bsd-3-clause |
gizeminci/espresso-1 | samples/python/hello_parallel_world.py | 13 | 1767 | #
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import ctypes
import sys
sys.setdlopenflags((sys.getdlopenflags() | ctypes.RTLD_GLOBAL ))
import espresso as es
import numpy
import code_info
print(code_info.electrostatics_defined())
es._espressoHandle.Tcl_Eval("thermostat lb 1.")
dev="cpu"
N=100
es.glob.time_step=0.01
es.glob.skin=1.
es.glob.box_l=[10., 10., 10.]
#print es.cu.device_list
#es.cu.device=0
es.lbfluid[dev].agrid=1
es.lbfluid[dev].dens=1
es.lbfluid[dev].visc=1
es.lbfluid[dev].friction=1
es.lbfluid[dev].tau=0.1
es.lbfluid[dev].ext_force=[1., 2., 3.,]
print(es.lbfluid[dev].ext_force)
print(es.lbfluid[dev].dens)
print(es.lbfluid[dev].visc)
print(es.lbfluid[dev].agrid)
es.lbfluid[dev].print_vtk_velocity="test.vtk"
#es.lb[dev].checkpoint_style=1
#es.lb[dev].checkpoint="cp.dat"
for i in range(N):
es.part[i].pos=numpy.random.random(3)*es.glob.box_l
es.inter[0,0].lennardJones = {"eps":1,"sigma":1,"shift":0.25}
es._espressoHandle.Tcl_Eval("integrate 100")
#for i in range(N):
# print es.part[i].pos
es._espressoHandle.die()
| gpl-3.0 |
Sing-Li/go-buildpack | builds/runtimes/python-2.7.6/lib/python2.7/ftplib.py | 57 | 36966 | """An FTP client class and some helper functions.
Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds
Example:
>>> from ftplib import FTP
>>> ftp = FTP('ftp.python.org') # connect to host, default port
>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@
'230 Guest login ok, access restrictions apply.'
>>> ftp.retrlines('LIST') # list directory contents
total 9
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
'226 Transfer complete.'
>>> ftp.quit()
'221 Goodbye.'
>>>
A nice test that reveals some of the network dialogue would be:
python ftplib.py -d localhost -l -p -l
"""
#
# Changes and improvements suggested by Steve Majewski.
# Modified by Jack to work on the mac.
# Modified by Siebren to support docstrings and PASV.
# Modified by Phil Schwartz to add storbinary and storlines callbacks.
# Modified by Giampaolo Rodola' to add TLS support.
#
import os
import sys
# Import SOCKS module if it exists, else standard socket module socket
try:
import SOCKS; socket = SOCKS; del SOCKS # import SOCKS as socket
from socket import getfqdn; socket.getfqdn = getfqdn; del getfqdn
except ImportError:
import socket
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["FTP","Netrc"]
# Magic number from <socket.h>
MSG_OOB = 0x1 # Process data out of band
# The standard FTP server control port
FTP_PORT = 21
# The sizehint parameter passed to readline() calls
MAXLINE = 8192
# Exception raised when an error or invalid response is received
class Error(Exception): pass
class error_reply(Error): pass # unexpected [123]xx reply
class error_temp(Error): pass # 4xx errors
class error_perm(Error): pass # 5xx errors
class error_proto(Error): pass # response does not begin with [1-5]
# All exceptions (hopefully) that may be raised here and that aren't
# (always) programming errors on our side
all_errors = (Error, IOError, EOFError)
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
class FTP:
'''An FTP client class.
To create a connection, call the class using these arguments:
host, user, passwd, acct, timeout
The first four arguments are all strings, and have default value ''.
timeout must be numeric and defaults to None if not passed,
meaning that no timeout will be set on any ftp socket(s)
If a timeout is passed, then this is now the default timeout for all ftp
socket operations for this instance.
Then use self.connect() with optional host and port argument.
To download a file, use ftp.retrlines('RETR ' + filename),
or ftp.retrbinary() with slightly different arguments.
To upload a file, use ftp.storlines() or ftp.storbinary(),
which have an open file as argument (see their definitions
below for details).
The download/upload functions first issue appropriate TYPE
and PORT or PASV commands.
'''
debugging = 0
host = ''
port = FTP_PORT
maxline = MAXLINE
sock = None
file = None
welcome = None
passiveserver = 1
# Initialization method (called by class instantiation).
# Initialize host to localhost, port to standard ftp port
# Optional arguments are host (for connect()),
# and user, passwd, acct (for login())
def __init__(self, host='', user='', passwd='', acct='',
timeout=_GLOBAL_DEFAULT_TIMEOUT):
self.timeout = timeout
if host:
self.connect(host)
if user:
self.login(user, passwd, acct)
def connect(self, host='', port=0, timeout=-999):
'''Connect to host. Arguments are:
- host: hostname to connect to (string, default previous host)
- port: port to connect to (integer, default previous port)
'''
if host != '':
self.host = host
if port > 0:
self.port = port
if timeout != -999:
self.timeout = timeout
self.sock = socket.create_connection((self.host, self.port), self.timeout)
self.af = self.sock.family
self.file = self.sock.makefile('rb')
self.welcome = self.getresp()
return self.welcome
def getwelcome(self):
'''Get the welcome message from the server.
(this is read and squirreled away by connect())'''
if self.debugging:
print '*welcome*', self.sanitize(self.welcome)
return self.welcome
def set_debuglevel(self, level):
'''Set the debugging level.
The required argument level means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF'''
self.debugging = level
debug = set_debuglevel
def set_pasv(self, val):
'''Use passive or active mode for data transfers.
With a false argument, use the normal PORT mode,
With a true argument, use the PASV command.'''
self.passiveserver = val
# Internal: "sanitize" a string for printing
def sanitize(self, s):
if s[:5] == 'pass ' or s[:5] == 'PASS ':
i = len(s)
while i > 5 and s[i-1] in '\r\n':
i = i-1
s = s[:5] + '*'*(i-5) + s[i:]
return repr(s)
# Internal: send one line to the server, appending CRLF
def putline(self, line):
line = line + CRLF
if self.debugging > 1: print '*put*', self.sanitize(line)
self.sock.sendall(line)
# Internal: send one command to the server (through putline())
def putcmd(self, line):
if self.debugging: print '*cmd*', self.sanitize(line)
self.putline(line)
# Internal: return one line from the server, stripping CRLF.
# Raise EOFError if the connection is closed
def getline(self):
line = self.file.readline(self.maxline + 1)
if len(line) > self.maxline:
raise Error("got more than %d bytes" % self.maxline)
if self.debugging > 1:
print '*get*', self.sanitize(line)
if not line: raise EOFError
if line[-2:] == CRLF: line = line[:-2]
elif line[-1:] in CRLF: line = line[:-1]
return line
# Internal: get a response from the server, which may possibly
# consist of multiple lines. Return a single string with no
# trailing CRLF. If the response consists of multiple lines,
# these are separated by '\n' characters in the string
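    # (Per RFC 959, a multi-line reply starts with '<code>-' and ends with a
    #  line starting with '<code> ', e.g. "230-Welcome" ... "230 Login ok";
    #  the loop below collects lines until that closing form is seen.)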
def getmultiline(self):
line = self.getline()
if line[3:4] == '-':
code = line[:3]
while 1:
nextline = self.getline()
line = line + ('\n' + nextline)
if nextline[:3] == code and \
nextline[3:4] != '-':
break
return line
# Internal: get a response from the server.
# Raise various errors if the response indicates an error
def getresp(self):
resp = self.getmultiline()
if self.debugging: print '*resp*', self.sanitize(resp)
self.lastresp = resp[:3]
c = resp[:1]
if c in ('1', '2', '3'):
return resp
if c == '4':
raise error_temp, resp
if c == '5':
raise error_perm, resp
raise error_proto, resp
def voidresp(self):
"""Expect a response beginning with '2'."""
resp = self.getresp()
if resp[:1] != '2':
raise error_reply, resp
return resp
def abort(self):
'''Abort a file transfer. Uses out-of-band data.
This does not follow the procedure from the RFC to send Telnet
IP and Synch; that doesn't seem to work with the servers I've
tried. Instead, just send the ABOR command as OOB data.'''
line = 'ABOR' + CRLF
if self.debugging > 1: print '*put urgent*', self.sanitize(line)
self.sock.sendall(line, MSG_OOB)
resp = self.getmultiline()
if resp[:3] not in ('426', '225', '226'):
raise error_proto, resp
def sendcmd(self, cmd):
'''Send a command and return the response.'''
self.putcmd(cmd)
return self.getresp()
def voidcmd(self, cmd):
"""Send a command and expect a response beginning with '2'."""
self.putcmd(cmd)
return self.voidresp()
def sendport(self, host, port):
'''Send a PORT command with the current host and the given
port number.
'''
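        # For example (illustrative values): host '10.0.0.1' with port 1234
        # yields 'PORT 10,0,0,1,4,210', since 1234 == 4*256 + 210.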
hbytes = host.split('.')
pbytes = [repr(port//256), repr(port%256)]
bytes = hbytes + pbytes
cmd = 'PORT ' + ','.join(bytes)
return self.voidcmd(cmd)
def sendeprt(self, host, port):
'''Send a EPRT command with the current host and the given port number.'''
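        # RFC 2428 address-family codes: 1 = IPv4, 2 = IPv6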
af = 0
if self.af == socket.AF_INET:
af = 1
if self.af == socket.AF_INET6:
af = 2
if af == 0:
raise error_proto, 'unsupported address family'
fields = ['', repr(af), host, repr(port), '']
cmd = 'EPRT ' + '|'.join(fields)
return self.voidcmd(cmd)
def makeport(self):
'''Create a new socket and send a PORT command for it.'''
err = None
sock = None
for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
sock.bind(sa)
except socket.error, err:
if sock:
sock.close()
sock = None
continue
break
if sock is None:
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
sock.listen(1)
port = sock.getsockname()[1] # Get proper port
host = self.sock.getsockname()[0] # Get proper host
if self.af == socket.AF_INET:
resp = self.sendport(host, port)
else:
resp = self.sendeprt(host, port)
if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(self.timeout)
return sock
def makepasv(self):
if self.af == socket.AF_INET:
host, port = parse227(self.sendcmd('PASV'))
else:
host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
return host, port
def ntransfercmd(self, cmd, rest=None):
"""Initiate a transfer over the data connection.
If the transfer is active, send a port command and the
transfer command, and accept the connection. If the server is
passive, send a pasv command, connect to it, and start the
transfer command. Either way, return the socket for the
connection and the expected size of the transfer. The
expected size may be None if it could not be determined.
Optional `rest' argument can be a string that is sent as the
argument to a REST command. This is essentially a server
marker used to tell the server to skip over any data up to the
given marker.
"""
size = None
if self.passiveserver:
host, port = self.makepasv()
conn = socket.create_connection((host, port), self.timeout)
try:
if rest is not None:
self.sendcmd("REST %s" % rest)
resp = self.sendcmd(cmd)
# Some servers apparently send a 200 reply to
# a LIST or STOR command, before the 150 reply
# (and way before the 226 reply). This seems to
# be in violation of the protocol (which only allows
# 1xx or error messages for LIST), so we just discard
# this response.
if resp[0] == '2':
resp = self.getresp()
if resp[0] != '1':
raise error_reply, resp
except:
conn.close()
raise
else:
sock = self.makeport()
try:
if rest is not None:
self.sendcmd("REST %s" % rest)
resp = self.sendcmd(cmd)
# See above.
if resp[0] == '2':
resp = self.getresp()
if resp[0] != '1':
raise error_reply, resp
conn, sockaddr = sock.accept()
if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
conn.settimeout(self.timeout)
finally:
sock.close()
if resp[:3] == '150':
# this is conditional in case we received a 125
size = parse150(resp)
return conn, size
def transfercmd(self, cmd, rest=None):
"""Like ntransfercmd() but returns only the socket."""
return self.ntransfercmd(cmd, rest)[0]
def login(self, user = '', passwd = '', acct = ''):
'''Login, default anonymous.'''
if not user: user = 'anonymous'
if not passwd: passwd = ''
if not acct: acct = ''
if user == 'anonymous' and passwd in ('', '-'):
# If there is no anonymous ftp password specified
# then we'll just use anonymous@
# We don't send any other thing because:
# - We want to remain anonymous
# - We want to stop SPAM
# - We don't want to let ftp sites to discriminate by the user,
# host or country.
passwd = passwd + 'anonymous@'
resp = self.sendcmd('USER ' + user)
if resp[0] == '3': resp = self.sendcmd('PASS ' + passwd)
if resp[0] == '3': resp = self.sendcmd('ACCT ' + acct)
if resp[0] != '2':
raise error_reply, resp
return resp
def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
"""Retrieve data in binary mode. A new port is created for you.
Args:
cmd: A RETR command.
callback: A single parameter callable to be called on each
block of data read.
blocksize: The maximum number of bytes to read from the
socket at one time. [default: 8192]
rest: Passed to transfercmd(). [default: None]
Returns:
The response code.
"""
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
while 1:
data = conn.recv(blocksize)
if not data:
break
callback(data)
conn.close()
return self.voidresp()
def retrlines(self, cmd, callback = None):
"""Retrieve data in line mode. A new port is created for you.
Args:
cmd: A RETR, LIST, NLST, or MLSD command.
callback: An optional single parameter callable that is called
for each line with the trailing CRLF stripped.
[default: print_line()]
Returns:
The response code.
"""
if callback is None: callback = print_line
resp = self.sendcmd('TYPE A')
conn = self.transfercmd(cmd)
fp = conn.makefile('rb')
while 1:
line = fp.readline(self.maxline + 1)
if len(line) > self.maxline:
raise Error("got more than %d bytes" % self.maxline)
if self.debugging > 2: print '*retr*', repr(line)
if not line:
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] == '\n':
line = line[:-1]
callback(line)
fp.close()
conn.close()
return self.voidresp()
def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
"""Store a file in binary mode. A new port is created for you.
Args:
cmd: A STOR command.
fp: A file-like object with a read(num_bytes) method.
blocksize: The maximum data size to read from fp and send over
the connection at once. [default: 8192]
callback: An optional single parameter callable that is called on
each block of data after it is sent. [default: None]
rest: Passed to transfercmd(). [default: None]
Returns:
The response code.
"""
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
while 1:
buf = fp.read(blocksize)
if not buf: break
conn.sendall(buf)
if callback: callback(buf)
conn.close()
return self.voidresp()
def storlines(self, cmd, fp, callback=None):
"""Store a file in line mode. A new port is created for you.
Args:
cmd: A STOR command.
fp: A file-like object with a readline() method.
callback: An optional single parameter callable that is called on
each line after it is sent. [default: None]
Returns:
The response code.
"""
self.voidcmd('TYPE A')
conn = self.transfercmd(cmd)
while 1:
buf = fp.readline(self.maxline + 1)
if len(buf) > self.maxline:
raise Error("got more than %d bytes" % self.maxline)
if not buf: break
if buf[-2:] != CRLF:
if buf[-1] in CRLF: buf = buf[:-1]
buf = buf + CRLF
conn.sendall(buf)
if callback: callback(buf)
conn.close()
return self.voidresp()
def acct(self, password):
'''Send new account name.'''
cmd = 'ACCT ' + password
return self.voidcmd(cmd)
def nlst(self, *args):
'''Return a list of files in a given directory (default the current).'''
cmd = 'NLST'
for arg in args:
cmd = cmd + (' ' + arg)
files = []
self.retrlines(cmd, files.append)
return files
def dir(self, *args):
'''List a directory in long form.
By default list current directory to stdout.
Optional last argument is callback function; all
non-empty arguments before it are concatenated to the
LIST command. (This *should* only be used for a pathname.)'''
cmd = 'LIST'
func = None
if args[-1:] and type(args[-1]) != type(''):
args, func = args[:-1], args[-1]
for arg in args:
if arg:
cmd = cmd + (' ' + arg)
self.retrlines(cmd, func)
def rename(self, fromname, toname):
'''Rename a file.'''
resp = self.sendcmd('RNFR ' + fromname)
if resp[0] != '3':
raise error_reply, resp
return self.voidcmd('RNTO ' + toname)
def delete(self, filename):
'''Delete a file.'''
resp = self.sendcmd('DELE ' + filename)
if resp[:3] in ('250', '200'):
return resp
else:
raise error_reply, resp
def cwd(self, dirname):
'''Change to a directory.'''
if dirname == '..':
try:
return self.voidcmd('CDUP')
except error_perm, msg:
if msg.args[0][:3] != '500':
raise
elif dirname == '':
dirname = '.' # does nothing, but could return error
cmd = 'CWD ' + dirname
return self.voidcmd(cmd)
def size(self, filename):
'''Retrieve the size of a file.'''
# The SIZE command is defined in RFC-3659
resp = self.sendcmd('SIZE ' + filename)
if resp[:3] == '213':
s = resp[3:].strip()
try:
return int(s)
except (OverflowError, ValueError):
return long(s)
def mkd(self, dirname):
'''Make a directory, return its full pathname.'''
resp = self.sendcmd('MKD ' + dirname)
return parse257(resp)
def rmd(self, dirname):
'''Remove a directory.'''
return self.voidcmd('RMD ' + dirname)
def pwd(self):
'''Return current working directory.'''
resp = self.sendcmd('PWD')
return parse257(resp)
def quit(self):
'''Quit, and close the connection.'''
resp = self.voidcmd('QUIT')
self.close()
return resp
def close(self):
'''Close the connection without assuming anything about it.'''
if self.file is not None:
self.file.close()
if self.sock is not None:
self.sock.close()
self.file = self.sock = None
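# A short download sketch (hypothetical host and file names), kept as a
# comment: it shows the retrbinary() callback pattern described in the
# docstrings above, not an officially supplied helper.
#
#     ftp = FTP('ftp.example.com')
#     ftp.login()                                   # anonymous login
#     with open('remote.bin', 'wb') as f:
#         ftp.retrbinary('RETR remote.bin', f.write)
#     ftp.quit()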
try:
import ssl
except ImportError:
pass
else:
class FTP_TLS(FTP):
'''A FTP subclass which adds TLS support to FTP as described
in RFC-4217.
Connect as usual to port 21 implicitly securing the FTP control
connection before authenticating.
Securing the data connection requires user to explicitly ask
for it by calling prot_p() method.
Usage example:
>>> from ftplib import FTP_TLS
>>> ftps = FTP_TLS('ftp.python.org')
>>> ftps.login() # login anonymously previously securing control channel
'230 Guest login ok, access restrictions apply.'
>>> ftps.prot_p() # switch to secure data connection
'200 Protection level set to P'
>>> ftps.retrlines('LIST') # list directory content securely
total 9
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
'226 Transfer complete.'
>>> ftps.quit()
'221 Goodbye.'
>>>
'''
ssl_version = ssl.PROTOCOL_TLSv1
def __init__(self, host='', user='', passwd='', acct='', keyfile=None,
certfile=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
self.keyfile = keyfile
self.certfile = certfile
self._prot_p = False
FTP.__init__(self, host, user, passwd, acct, timeout)
def login(self, user='', passwd='', acct='', secure=True):
if secure and not isinstance(self.sock, ssl.SSLSocket):
self.auth()
return FTP.login(self, user, passwd, acct)
def auth(self):
'''Set up secure control connection by using TLS/SSL.'''
if isinstance(self.sock, ssl.SSLSocket):
raise ValueError("Already using TLS")
if self.ssl_version == ssl.PROTOCOL_TLSv1:
resp = self.voidcmd('AUTH TLS')
else:
resp = self.voidcmd('AUTH SSL')
self.sock = ssl.wrap_socket(self.sock, self.keyfile, self.certfile,
ssl_version=self.ssl_version)
self.file = self.sock.makefile(mode='rb')
return resp
def prot_p(self):
'''Set up secure data connection.'''
# PROT defines whether or not the data channel is to be protected.
# Though RFC-2228 defines four possible protection levels,
# RFC-4217 only recommends two, Clear and Private.
# Clear (PROT C) means that no security is to be used on the
# data-channel, Private (PROT P) means that the data-channel
# should be protected by TLS.
# PBSZ command MUST still be issued, but must have a parameter of
# '0' to indicate that no buffering is taking place and the data
# connection should not be encapsulated.
self.voidcmd('PBSZ 0')
resp = self.voidcmd('PROT P')
self._prot_p = True
return resp
def prot_c(self):
'''Set up clear text data connection.'''
resp = self.voidcmd('PROT C')
self._prot_p = False
return resp
# --- Overridden FTP methods
def ntransfercmd(self, cmd, rest=None):
conn, size = FTP.ntransfercmd(self, cmd, rest)
if self._prot_p:
conn = ssl.wrap_socket(conn, self.keyfile, self.certfile,
ssl_version=self.ssl_version)
return conn, size
def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
try:
while 1:
data = conn.recv(blocksize)
if not data:
break
callback(data)
# shutdown ssl layer
if isinstance(conn, ssl.SSLSocket):
conn.unwrap()
finally:
conn.close()
return self.voidresp()
def retrlines(self, cmd, callback = None):
if callback is None: callback = print_line
resp = self.sendcmd('TYPE A')
conn = self.transfercmd(cmd)
fp = conn.makefile('rb')
try:
while 1:
line = fp.readline(self.maxline + 1)
if len(line) > self.maxline:
raise Error("got more than %d bytes" % self.maxline)
if self.debugging > 2: print '*retr*', repr(line)
if not line:
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] == '\n':
line = line[:-1]
callback(line)
# shutdown ssl layer
if isinstance(conn, ssl.SSLSocket):
conn.unwrap()
finally:
fp.close()
conn.close()
return self.voidresp()
def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
try:
while 1:
buf = fp.read(blocksize)
if not buf: break
conn.sendall(buf)
if callback: callback(buf)
# shutdown ssl layer
if isinstance(conn, ssl.SSLSocket):
conn.unwrap()
finally:
conn.close()
return self.voidresp()
def storlines(self, cmd, fp, callback=None):
self.voidcmd('TYPE A')
conn = self.transfercmd(cmd)
try:
while 1:
buf = fp.readline(self.maxline + 1)
if len(buf) > self.maxline:
raise Error("got more than %d bytes" % self.maxline)
if not buf: break
if buf[-2:] != CRLF:
if buf[-1] in CRLF: buf = buf[:-1]
buf = buf + CRLF
conn.sendall(buf)
if callback: callback(buf)
# shutdown ssl layer
if isinstance(conn, ssl.SSLSocket):
conn.unwrap()
finally:
conn.close()
return self.voidresp()
__all__.append('FTP_TLS')
all_errors = (Error, IOError, EOFError, ssl.SSLError)
_150_re = None
def parse150(resp):
'''Parse the '150' response for a RETR request.
Returns the expected transfer size or None; size is not guaranteed to
be present in the 150 message.
'''
if resp[:3] != '150':
raise error_reply, resp
global _150_re
if _150_re is None:
import re
_150_re = re.compile("150 .* \((\d+) bytes\)", re.IGNORECASE)
m = _150_re.match(resp)
if not m:
return None
s = m.group(1)
try:
return int(s)
except (OverflowError, ValueError):
return long(s)
_227_re = None
def parse227(resp):
'''Parse the '227' response for a PASV request.
Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'
Return ('host.addr.as.numbers', port#) tuple.'''
if resp[:3] != '227':
raise error_reply, resp
global _227_re
if _227_re is None:
import re
_227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)')
m = _227_re.search(resp)
if not m:
raise error_proto, resp
numbers = m.groups()
host = '.'.join(numbers[:4])
port = (int(numbers[4]) << 8) + int(numbers[5])
return host, port
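# A hedged usage sketch (the reply string below is illustrative, not from a
# live server); the port is rebuilt from the last two byte values:
#
#     >>> parse227('227 Entering Passive Mode (192,168,0,10,19,137).')
#     ('192.168.0.10', 5001)        # (19 << 8) + 137 == 5001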
def parse229(resp, peer):
'''Parse the '229' response for a EPSV request.
Raises error_proto if it does not contain '(|||port|)'
Return ('host.addr.as.numbers', port#) tuple.'''
if resp[:3] != '229':
raise error_reply, resp
left = resp.find('(')
if left < 0: raise error_proto, resp
right = resp.find(')', left + 1)
if right < 0:
raise error_proto, resp # should contain '(|||port|)'
if resp[left + 1] != resp[right - 1]:
raise error_proto, resp
parts = resp[left + 1:right].split(resp[left+1])
if len(parts) != 5:
raise error_proto, resp
host = peer[0]
port = int(parts[3])
return host, port
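# Similarly for extended passive mode, again with an assumed reply string and
# peer address:
#
#     >>> parse229('229 Entering Extended Passive Mode (|||6446|)', ('10.0.0.5', 21))
#     ('10.0.0.5', 6446)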
def parse257(resp):
'''Parse the '257' response for a MKD or PWD request.
    The response contains a (quoted) directory name.
    Returns the directory name in the 257 reply.'''
if resp[:3] != '257':
raise error_reply, resp
if resp[3:5] != ' "':
return '' # Not compliant to RFC 959, but UNIX ftpd does this
dirname = ''
i = 5
n = len(resp)
while i < n:
c = resp[i]
i = i+1
if c == '"':
if i >= n or resp[i] != '"':
break
i = i+1
dirname = dirname + c
return dirname
def print_line(line):
'''Default retrlines callback to print a line.'''
print line
def ftpcp(source, sourcename, target, targetname = '', type = 'I'):
'''Copy file from one FTP-instance to another.'''
if not targetname: targetname = sourcename
type = 'TYPE ' + type
source.voidcmd(type)
target.voidcmd(type)
sourcehost, sourceport = parse227(source.sendcmd('PASV'))
target.sendport(sourcehost, sourceport)
# RFC 959: the user must "listen" [...] BEFORE sending the
# transfer request.
# So: STOR before RETR, because here the target is a "user".
treply = target.sendcmd('STOR ' + targetname)
if treply[:3] not in ('125', '150'): raise error_proto # RFC 959
sreply = source.sendcmd('RETR ' + sourcename)
if sreply[:3] not in ('125', '150'): raise error_proto # RFC 959
source.voidresp()
target.voidresp()
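# Hedged example of a server-to-server copy with ftpcp(); the hostnames,
# credentials and file name are placeholders. The file travels directly
# between the two servers rather than through this machine:
#
#     src = FTP('ftp.src.example'); src.login()
#     dst = FTP('ftp.dst.example'); dst.login('user', 'secret')
#     ftpcp(src, 'data.bin', dst)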
class Netrc:
"""Class to parse & provide access to 'netrc' format files.
See the netrc(4) man page for information on the file format.
WARNING: This class is obsolete -- use module netrc instead.
"""
__defuser = None
__defpasswd = None
    __defacct = None
    maxline = 8192  # assumed to mirror FTP.maxline; Netrc does not inherit FTP, yet the readline() checks below use self.maxline
def __init__(self, filename=None):
if filename is None:
if "HOME" in os.environ:
filename = os.path.join(os.environ["HOME"],
".netrc")
else:
raise IOError, \
"specify file to load or set $HOME"
self.__hosts = {}
self.__macros = {}
fp = open(filename, "r")
in_macro = 0
while 1:
line = fp.readline(self.maxline + 1)
if len(line) > self.maxline:
raise Error("got more than %d bytes" % self.maxline)
if not line: break
if in_macro and line.strip():
macro_lines.append(line)
continue
elif in_macro:
self.__macros[macro_name] = tuple(macro_lines)
in_macro = 0
words = line.split()
host = user = passwd = acct = None
default = 0
i = 0
while i < len(words):
w1 = words[i]
if i+1 < len(words):
w2 = words[i + 1]
else:
w2 = None
if w1 == 'default':
default = 1
elif w1 == 'machine' and w2:
host = w2.lower()
i = i + 1
elif w1 == 'login' and w2:
user = w2
i = i + 1
elif w1 == 'password' and w2:
passwd = w2
i = i + 1
elif w1 == 'account' and w2:
acct = w2
i = i + 1
elif w1 == 'macdef' and w2:
macro_name = w2
macro_lines = []
in_macro = 1
break
i = i + 1
if default:
self.__defuser = user or self.__defuser
self.__defpasswd = passwd or self.__defpasswd
self.__defacct = acct or self.__defacct
if host:
if host in self.__hosts:
ouser, opasswd, oacct = \
self.__hosts[host]
user = user or ouser
passwd = passwd or opasswd
acct = acct or oacct
self.__hosts[host] = user, passwd, acct
fp.close()
def get_hosts(self):
"""Return a list of hosts mentioned in the .netrc file."""
return self.__hosts.keys()
def get_account(self, host):
"""Returns login information for the named host.
The return value is a triple containing userid,
password, and the accounting field.
"""
host = host.lower()
user = passwd = acct = None
if host in self.__hosts:
user, passwd, acct = self.__hosts[host]
user = user or self.__defuser
passwd = passwd or self.__defpasswd
acct = acct or self.__defacct
return user, passwd, acct
def get_macros(self):
"""Return a list of all defined macro names."""
return self.__macros.keys()
def get_macro(self, macro):
"""Return a sequence of lines which define a named macro."""
return self.__macros[macro]
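# Minimal usage sketch for the (obsolete) Netrc class above; the path and the
# returned credentials are hypothetical and depend on the file's contents:
#
#     >>> rc = Netrc('/home/user/.netrc')
#     >>> rc.get_hosts()
#     ['ftp.example.com']
#     >>> rc.get_account('ftp.example.com')
#     ('anonymous', 'guest@', None)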
def test():
'''Test program.
Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...
-d dir
-l list
-p password
'''
if len(sys.argv) < 2:
print test.__doc__
sys.exit(0)
debugging = 0
rcfile = None
while sys.argv[1] == '-d':
debugging = debugging+1
del sys.argv[1]
if sys.argv[1][:2] == '-r':
# get name of alternate ~/.netrc file:
rcfile = sys.argv[1][2:]
del sys.argv[1]
host = sys.argv[1]
ftp = FTP(host)
ftp.set_debuglevel(debugging)
userid = passwd = acct = ''
try:
netrc = Netrc(rcfile)
except IOError:
if rcfile is not None:
sys.stderr.write("Could not open account file"
" -- using anonymous login.")
else:
try:
userid, passwd, acct = netrc.get_account(host)
except KeyError:
# no account for host
sys.stderr.write(
"No account -- using anonymous login.")
ftp.login(userid, passwd, acct)
for file in sys.argv[2:]:
if file[:2] == '-l':
ftp.dir(file[2:])
elif file[:2] == '-d':
cmd = 'CWD'
if file[2:]: cmd = cmd + ' ' + file[2:]
resp = ftp.sendcmd(cmd)
elif file == '-p':
ftp.set_pasv(not ftp.passiveserver)
else:
ftp.retrbinary('RETR ' + file, \
sys.stdout.write, 1024)
ftp.quit()
if __name__ == '__main__':
test()
| mit |
albertomurillo/ansible | lib/ansible/modules/cloud/vmware/vmware_host_acceptance.py | 48 | 6958 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_acceptance
short_description: Manage the host acceptance level of an ESXi host
description:
- This module can be used to manage the host acceptance level of an ESXi host.
- The host acceptance level controls the acceptance level of each VIB on an ESXi host.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
    - Acceptance level of all ESXi host systems in the given cluster will be managed.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- Acceptance level of this ESXi host system will be managed.
- If C(cluster_name) is not given, this parameter is required.
state:
description:
- Set or list acceptance level of the given ESXi host.
    - 'If set to C(list), then the current acceptance level of the given host system(s) is returned.'
    - If set to C(present), then the given acceptance level is set.
choices: [ list, present ]
required: False
default: 'list'
acceptance_level:
description:
- Name of acceptance level.
- If set to C(partner), then accept only partner and VMware signed and certified VIBs.
- If set to C(vmware_certified), then accept only VIBs that are signed and certified by VMware.
- If set to C(vmware_accepted), then accept VIBs that have been accepted by VMware.
- If set to C(community), then accept all VIBs, even those that are not signed.
choices: [ community, partner, vmware_accepted, vmware_certified ]
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Set acceptance level to community for all ESXi Host in given Cluster
vmware_host_acceptance:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
acceptance_level: 'community'
state: present
delegate_to: localhost
register: cluster_acceptance_level
- name: Set acceptance level to vmware_accepted for the given ESXi Host
vmware_host_acceptance:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
acceptance_level: 'vmware_accepted'
state: present
delegate_to: localhost
register: host_acceptance_level
- name: Get acceptance level from the given ESXi Host
vmware_host_acceptance:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
state: list
delegate_to: localhost
register: host_acceptance_level
'''
RETURN = r'''
facts:
description:
    - dict with the ESXi hostname as key and a dict of acceptance level facts and error as value
returned: facts
type: dict
sample: { "facts": { "localhost.localdomain": { "error": "NA", "level": "vmware_certified" }}}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
class VMwareAccpetanceManager(PyVmomi):
def __init__(self, module):
super(VMwareAccpetanceManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
self.desired_state = self.params.get('state')
self.hosts_facts = {}
self.acceptance_level = self.params.get('acceptance_level')
def gather_acceptance_facts(self):
for host in self.hosts:
self.hosts_facts[host.name] = dict(level='', error='NA')
host_image_config_mgr = host.configManager.imageConfigManager
if host_image_config_mgr:
try:
self.hosts_facts[host.name]['level'] = host_image_config_mgr.HostImageConfigGetAcceptance()
except vim.fault.HostConfigFault as e:
self.hosts_facts[host.name]['error'] = to_native(e.msg)
def set_acceptance_level(self):
change = []
for host in self.hosts:
host_changed = False
if self.hosts_facts[host.name]['level'] != self.acceptance_level:
host_image_config_mgr = host.configManager.imageConfigManager
if host_image_config_mgr:
try:
if self.module.check_mode:
self.hosts_facts[host.name]['level'] = self.acceptance_level
else:
host_image_config_mgr.UpdateHostImageAcceptanceLevel(newAcceptanceLevel=self.acceptance_level)
self.hosts_facts[host.name]['level'] = host_image_config_mgr.HostImageConfigGetAcceptance()
host_changed = True
except vim.fault.HostConfigFault as e:
self.hosts_facts[host.name]['error'] = to_native(e.msg)
change.append(host_changed)
self.module.exit_json(changed=any(change), facts=self.hosts_facts)
def check_acceptance_state(self):
self.gather_acceptance_facts()
if self.desired_state == 'list':
self.module.exit_json(changed=False, facts=self.hosts_facts)
self.set_acceptance_level()
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
acceptance_level=dict(type='str',
choices=['community', 'partner', 'vmware_accepted', 'vmware_certified']
),
state=dict(type='str',
choices=['list', 'present'],
default='list'),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
],
required_if=[
['state', 'present', ['acceptance_level']],
],
supports_check_mode=True
)
vmware_host_accept_config = VMwareAccpetanceManager(module)
vmware_host_accept_config.check_acceptance_state()
if __name__ == "__main__":
main()
| gpl-3.0 |
Ravenm/2143-OOP-NASH | python3env/Lib/site-packages/PIL/_binary.py | 58 | 1408 | #
# The Python Imaging Library.
# $Id$
#
# Binary input/output support routines.
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1995-2003 by Fredrik Lundh
# Copyright (c) 2012 by Brian Crowell
#
# See the README file for information on usage and redistribution.
#
from struct import unpack, pack
if bytes is str:
def i8(c):
return ord(c)
def o8(i):
return chr(i & 255)
else:
def i8(c):
return c if c.__class__ is int else c[0]
def o8(i):
return bytes((i & 255,))
# Input, le = little endian, be = big endian
# TODO: replace with more readable struct.unpack equivalent
def i16le(c, o=0):
"""
    Converts a 2-byte (16-bit) string to an integer.
c: string containing bytes to convert
o: offset of bytes to convert in string
"""
return unpack("<H", c[o:o+2])[0]
def i32le(c, o=0):
"""
    Converts a 4-byte (32-bit) string to an integer.
c: string containing bytes to convert
o: offset of bytes to convert in string
"""
return unpack("<I", c[o:o+4])[0]
def i16be(c, o=0):
return unpack(">H", c[o:o+2])[0]
def i32be(c, o=0):
return unpack(">I", c[o:o+4])[0]
# Output, le = little endian, be = big endian
def o16le(i):
return pack("<H", i)
def o32le(i):
return pack("<I", i)
def o16be(i):
return pack(">H", i)
def o32be(i):
return pack(">I", i)
# End of file
| cc0-1.0 |
gorjuce/odoo | addons/hw_scanner/__openerp__.py | 220 | 1738 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Barcode Scanner Hardware Driver',
'version': '1.0',
'category': 'Hardware Drivers',
'sequence': 6,
'summary': 'Hardware Driver for Barcode Scanners',
'website': 'https://www.odoo.com/page/point-of-sale',
'description': """
Barcode Scanner Hardware Driver
================================
This module allows the web client to access a remotely installed barcode
scanner, and is used by the posbox to provide barcode scanner support to the
point of sale module.
""",
'author': 'OpenERP SA',
'depends': ['hw_proxy'],
'external_dependencies': {'python': ['evdev']},
'test': [
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
blakev/suds | suds/sax/date.py | 160 | 10456 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Nathan Van Gheem ([email protected])
"""
The I{xdate} module provides classes for conversion
between XML dates and Python objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
import time
import datetime as dt
import re
log = getLogger(__name__)
class Date:
"""
An XML date object.
Supported formats:
- YYYY-MM-DD
- YYYY-MM-DD(z|Z)
- YYYY-MM-DD+06:00
- YYYY-MM-DD-06:00
@ivar date: The object value.
@type date: B{datetime}.I{date}
"""
def __init__(self, date):
"""
@param date: The value of the object.
@type date: (date|str)
@raise ValueError: When I{date} is invalid.
"""
if isinstance(date, dt.date):
self.date = date
return
if isinstance(date, basestring):
self.date = self.__parse(date)
return
raise ValueError, type(date)
def year(self):
"""
Get the I{year} component.
@return: The year.
@rtype: int
"""
return self.date.year
def month(self):
"""
Get the I{month} component.
@return: The month.
@rtype: int
"""
return self.date.month
def day(self):
"""
Get the I{day} component.
@return: The day.
@rtype: int
"""
return self.date.day
def __parse(self, s):
"""
Parse the string date.
Supported formats:
- YYYY-MM-DD
- YYYY-MM-DD(z|Z)
- YYYY-MM-DD+06:00
- YYYY-MM-DD-06:00
        The TZ suffix, if any, is ignored because it's meaningless
        without a time component.
@param s: A date string.
@type s: str
@return: A date object.
@rtype: I{date}
"""
try:
year, month, day = s[:10].split('-', 2)
year = int(year)
month = int(month)
day = int(day)
return dt.date(year, month, day)
except:
            log.debug(s, exc_info=True)
raise ValueError, 'Invalid format "%s"' % s
def __str__(self):
return unicode(self)
def __unicode__(self):
return self.date.isoformat()
class Time:
"""
An XML time object.
Supported formats:
- HH:MI:SS
- HH:MI:SS(z|Z)
- HH:MI:SS.ms
- HH:MI:SS.ms(z|Z)
- HH:MI:SS(+|-)06:00
- HH:MI:SS.ms(+|-)06:00
@ivar tz: The timezone
@type tz: L{Timezone}
@ivar date: The object value.
@type date: B{datetime}.I{time}
"""
def __init__(self, time, adjusted=True):
"""
@param time: The value of the object.
@type time: (time|str)
@param adjusted: Adjust for I{local} Timezone.
@type adjusted: boolean
@raise ValueError: When I{time} is invalid.
"""
self.tz = Timezone()
if isinstance(time, dt.time):
self.time = time
return
if isinstance(time, basestring):
self.time = self.__parse(time)
if adjusted:
self.__adjust()
return
raise ValueError, type(time)
def hour(self):
"""
Get the I{hour} component.
@return: The hour.
@rtype: int
"""
return self.time.hour
def minute(self):
"""
Get the I{minute} component.
@return: The minute.
@rtype: int
"""
return self.time.minute
def second(self):
"""
Get the I{seconds} component.
@return: The seconds.
@rtype: int
"""
return self.time.second
def microsecond(self):
"""
Get the I{microsecond} component.
@return: The microsecond.
@rtype: int
"""
return self.time.microsecond
def __adjust(self):
"""
Adjust for TZ offset.
"""
if hasattr(self, 'offset'):
today = dt.date.today()
delta = self.tz.adjustment(self.offset)
d = dt.datetime.combine(today, self.time)
d = ( d + delta )
self.time = d.time()
def __parse(self, s):
"""
Parse the string date.
Patterns:
- HH:MI:SS
- HH:MI:SS(z|Z)
- HH:MI:SS.ms
- HH:MI:SS.ms(z|Z)
- HH:MI:SS(+|-)06:00
- HH:MI:SS.ms(+|-)06:00
@param s: A time string.
@type s: str
@return: A time object.
@rtype: B{datetime}.I{time}
"""
try:
offset = None
part = Timezone.split(s)
hour, minute, second = part[0].split(':', 2)
hour = int(hour)
minute = int(minute)
second, ms = self.__second(second)
if len(part) == 2:
self.offset = self.__offset(part[1])
if ms is None:
return dt.time(hour, minute, second)
else:
return dt.time(hour, minute, second, ms)
except:
            log.debug(s, exc_info=True)
raise ValueError, 'Invalid format "%s"' % s
def __second(self, s):
"""
Parse the seconds and microseconds.
The microseconds are truncated to 999999 due to a restriction in
the python datetime.datetime object.
@param s: A string representation of the seconds.
@type s: str
@return: Tuple of (sec,ms)
@rtype: tuple.
"""
part = s.split('.')
if len(part) > 1:
return (int(part[0]), int(part[1][:6]))
else:
return (int(part[0]), None)
def __offset(self, s):
"""
Parse the TZ offset.
@param s: A string representation of the TZ offset.
@type s: str
@return: The signed offset in hours.
@rtype: str
"""
if len(s) == len('-00:00'):
return int(s[:3])
if len(s) == 0:
return self.tz.local
if len(s) == 1:
return 0
raise Exception()
def __str__(self):
return unicode(self)
def __unicode__(self):
time = self.time.isoformat()
if self.tz.local:
return '%s%+.2d:00' % (time, self.tz.local)
else:
return '%sZ' % time
class DateTime(Date,Time):
"""
An XML time object.
Supported formats:
- YYYY-MM-DDB{T}HH:MI:SS
- YYYY-MM-DDB{T}HH:MI:SS(z|Z)
- YYYY-MM-DDB{T}HH:MI:SS.ms
- YYYY-MM-DDB{T}HH:MI:SS.ms(z|Z)
- YYYY-MM-DDB{T}HH:MI:SS(+|-)06:00
- YYYY-MM-DDB{T}HH:MI:SS.ms(+|-)06:00
@ivar datetime: The object value.
    @type datetime: B{datetime}.I{datetime}
"""
def __init__(self, date):
"""
@param date: The value of the object.
@type date: (datetime|str)
        @raise ValueError: When I{date} is invalid.
"""
if isinstance(date, dt.datetime):
Date.__init__(self, date.date())
Time.__init__(self, date.time())
self.datetime = \
dt.datetime.combine(self.date, self.time)
return
if isinstance(date, basestring):
part = date.split('T')
Date.__init__(self, part[0])
Time.__init__(self, part[1], 0)
self.datetime = \
dt.datetime.combine(self.date, self.time)
self.__adjust()
return
raise ValueError, type(date)
def __adjust(self):
"""
Adjust for TZ offset.
"""
if not hasattr(self, 'offset'):
return
delta = self.tz.adjustment(self.offset)
try:
d = ( self.datetime + delta )
self.datetime = d
self.date = d.date()
self.time = d.time()
except OverflowError:
log.warn('"%s" caused overflow, not-adjusted', self.datetime)
def __str__(self):
return unicode(self)
def __unicode__(self):
s = []
s.append(Date.__unicode__(self))
s.append(Time.__unicode__(self))
return 'T'.join(s)
class UTC(DateTime):
"""
Represents current UTC time.
"""
def __init__(self, date=None):
if date is None:
date = dt.datetime.utcnow()
DateTime.__init__(self, date)
self.tz.local = 0
class Timezone:
"""
Timezone object used to do TZ conversions
    @cvar local: The local TZ offset.
    @type local: int
    @cvar pattern: The regex pattern used to match a TZ suffix.
    @type pattern: re.Pattern
"""
pattern = re.compile('([zZ])|([\-\+][0-9]{2}:[0-9]{2})')
LOCAL = ( 0-time.timezone/60/60 )
def __init__(self, offset=None):
if offset is None:
offset = self.LOCAL
self.local = offset
@classmethod
def split(cls, s):
"""
Split the TZ from string.
@param s: A string containing a timezone
@type s: basestring
@return: The split parts.
@rtype: tuple
"""
m = cls.pattern.search(s)
if m is None:
return (s,)
x = m.start(0)
return (s[:x], s[x:])
def adjustment(self, offset):
"""
Get the adjustment to the I{local} TZ.
@return: The delta between I{offset} and local TZ.
@rtype: B{datetime}.I{timedelta}
"""
delta = ( self.local - offset )
return dt.timedelta(hours=delta)
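# Hedged usage sketch of the classes above (values illustrative; DateTime
# output depends on the local timezone, so it is not shown):
#
#     >>> Date('2011-06-01').year()
#     2011
#     >>> Timezone.split('10:30:22-04:00')
#     ('10:30:22', '-04:00')
#     >>> dt_obj = DateTime('2011-06-01T10:30:22Z')   # parsed, then adjusted to local TZ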
| lgpl-3.0 |
JakeBrand/CMPUT410-E6 | v1/lib/python2.7/site-packages/django/contrib/auth/urls.py | 113 | 1203 | # The views used below are normally mapped in django.contrib.admin.urls.py
# This URLs file is used to provide a reliable view deployment for test purposes.
# It is also provided as a convenience to those who want to deploy these URLs
# elsewhere.
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout', name='logout'),
url(r'^password_change/$', 'django.contrib.auth.views.password_change', name='password_change'),
url(r'^password_change/done/$', 'django.contrib.auth.views.password_change_done', name='password_change_done'),
url(r'^password_reset/$', 'django.contrib.auth.views.password_reset', name='password_reset'),
url(r'^password_reset/done/$', 'django.contrib.auth.views.password_reset_done', name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
'django.contrib.auth.views.password_reset_confirm',
name='password_reset_confirm'),
url(r'^reset/done/$', 'django.contrib.auth.views.password_reset_complete', name='password_reset_complete'),
)
| apache-2.0 |
watspidererik/testenv | flask/lib/python2.7/site-packages/werkzeug/testsuite/urls.py | 145 | 15382 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.urls
~~~~~~~~~~~~~~~~~~~~~~~
URL helper tests.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.datastructures import OrderedMultiDict
from werkzeug import urls
from werkzeug._compat import text_type, NativeStringIO, BytesIO
class URLsTestCase(WerkzeugTestCase):
def test_replace(self):
url = urls.url_parse('http://de.wikipedia.org/wiki/Troll')
self.assert_strict_equal(url.replace(query='foo=bar'),
urls.url_parse('http://de.wikipedia.org/wiki/Troll?foo=bar'))
self.assert_strict_equal(url.replace(scheme='https'),
urls.url_parse('https://de.wikipedia.org/wiki/Troll'))
def test_quoting(self):
self.assert_strict_equal(urls.url_quote(u'\xf6\xe4\xfc'), '%C3%B6%C3%A4%C3%BC')
self.assert_strict_equal(urls.url_unquote(urls.url_quote(u'#%="\xf6')), u'#%="\xf6')
self.assert_strict_equal(urls.url_quote_plus('foo bar'), 'foo+bar')
self.assert_strict_equal(urls.url_unquote_plus('foo+bar'), u'foo bar')
self.assert_strict_equal(urls.url_quote_plus('foo+bar'), 'foo%2Bbar')
self.assert_strict_equal(urls.url_unquote_plus('foo%2Bbar'), u'foo+bar')
self.assert_strict_equal(urls.url_encode({b'a': None, b'b': b'foo bar'}), 'b=foo+bar')
self.assert_strict_equal(urls.url_encode({u'a': None, u'b': u'foo bar'}), 'b=foo+bar')
self.assert_strict_equal(urls.url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)'),
'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)')
self.assert_strict_equal(urls.url_quote_plus(42), '42')
self.assert_strict_equal(urls.url_quote(b'\xff'), '%FF')
def test_bytes_unquoting(self):
self.assert_strict_equal(urls.url_unquote(urls.url_quote(
u'#%="\xf6', charset='latin1'), charset=None), b'#%="\xf6')
def test_url_decoding(self):
x = urls.url_decode(b'foo=42&bar=23&uni=H%C3%A4nsel')
self.assert_strict_equal(x['foo'], u'42')
self.assert_strict_equal(x['bar'], u'23')
self.assert_strict_equal(x['uni'], u'Hänsel')
x = urls.url_decode(b'foo=42;bar=23;uni=H%C3%A4nsel', separator=b';')
self.assert_strict_equal(x['foo'], u'42')
self.assert_strict_equal(x['bar'], u'23')
self.assert_strict_equal(x['uni'], u'Hänsel')
x = urls.url_decode(b'%C3%9Ch=H%C3%A4nsel', decode_keys=True)
self.assert_strict_equal(x[u'Üh'], u'Hänsel')
def test_url_bytes_decoding(self):
x = urls.url_decode(b'foo=42&bar=23&uni=H%C3%A4nsel', charset=None)
self.assert_strict_equal(x[b'foo'], b'42')
self.assert_strict_equal(x[b'bar'], b'23')
self.assert_strict_equal(x[b'uni'], u'Hänsel'.encode('utf-8'))
def test_streamed_url_decoding(self):
item1 = u'a' * 100000
item2 = u'b' * 400
string = ('a=%s&b=%s&c=%s' % (item1, item2, item2)).encode('ascii')
gen = urls.url_decode_stream(BytesIO(string), limit=len(string),
return_iterator=True)
self.assert_strict_equal(next(gen), ('a', item1))
self.assert_strict_equal(next(gen), ('b', item2))
self.assert_strict_equal(next(gen), ('c', item2))
self.assert_raises(StopIteration, lambda: next(gen))
def test_stream_decoding_string_fails(self):
self.assert_raises(TypeError, urls.url_decode_stream, 'testing')
def test_url_encoding(self):
self.assert_strict_equal(urls.url_encode({'foo': 'bar 45'}), 'foo=bar+45')
d = {'foo': 1, 'bar': 23, 'blah': u'Hänsel'}
self.assert_strict_equal(urls.url_encode(d, sort=True), 'bar=23&blah=H%C3%A4nsel&foo=1')
self.assert_strict_equal(urls.url_encode(d, sort=True, separator=u';'), 'bar=23;blah=H%C3%A4nsel;foo=1')
def test_sorted_url_encode(self):
self.assert_strict_equal(urls.url_encode({u"a": 42, u"b": 23, 1: 1, 2: 2},
sort=True, key=lambda i: text_type(i[0])), '1=1&2=2&a=42&b=23')
self.assert_strict_equal(urls.url_encode({u'A': 1, u'a': 2, u'B': 3, 'b': 4}, sort=True,
key=lambda x: x[0].lower() + x[0]), 'A=1&a=2&B=3&b=4')
def test_streamed_url_encoding(self):
out = NativeStringIO()
urls.url_encode_stream({'foo': 'bar 45'}, out)
self.assert_strict_equal(out.getvalue(), 'foo=bar+45')
d = {'foo': 1, 'bar': 23, 'blah': u'Hänsel'}
out = NativeStringIO()
urls.url_encode_stream(d, out, sort=True)
self.assert_strict_equal(out.getvalue(), 'bar=23&blah=H%C3%A4nsel&foo=1')
out = NativeStringIO()
urls.url_encode_stream(d, out, sort=True, separator=u';')
self.assert_strict_equal(out.getvalue(), 'bar=23;blah=H%C3%A4nsel;foo=1')
gen = urls.url_encode_stream(d, sort=True)
self.assert_strict_equal(next(gen), 'bar=23')
self.assert_strict_equal(next(gen), 'blah=H%C3%A4nsel')
self.assert_strict_equal(next(gen), 'foo=1')
self.assert_raises(StopIteration, lambda: next(gen))
def test_url_fixing(self):
x = urls.url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
self.assert_line_equal(x, 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)')
x = urls.url_fix("http://just.a.test/$-_.+!*'(),")
self.assert_equal(x, "http://just.a.test/$-_.+!*'(),")
def test_url_fixing_qs(self):
x = urls.url_fix(b'http://example.com/?foo=%2f%2f')
self.assert_line_equal(x, 'http://example.com/?foo=%2f%2f')
x = urls.url_fix('http://acronyms.thefreedictionary.com/Algebraic+Methods+of+Solving+the+Schr%C3%B6dinger+Equation')
self.assert_equal(x, 'http://acronyms.thefreedictionary.com/Algebraic+Methods+of+Solving+the+Schr%C3%B6dinger+Equation')
def test_iri_support(self):
self.assert_strict_equal(urls.uri_to_iri('http://xn--n3h.net/'),
u'http://\u2603.net/')
self.assert_strict_equal(
urls.uri_to_iri(b'http://%C3%BCser:p%C3%[email protected]/p%C3%A5th'),
u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th')
self.assert_strict_equal(urls.iri_to_uri(u'http://☃.net/'), 'http://xn--n3h.net/')
self.assert_strict_equal(
urls.iri_to_uri(u'http://üser:pässword@☃.net/påth'),
'http://%C3%BCser:p%C3%[email protected]/p%C3%A5th')
self.assert_strict_equal(urls.uri_to_iri('http://test.com/%3Fmeh?foo=%26%2F'),
u'http://test.com/%3Fmeh?foo=%26%2F')
# this should work as well, might break on 2.4 because of a broken
# idna codec
self.assert_strict_equal(urls.uri_to_iri(b'/foo'), u'/foo')
self.assert_strict_equal(urls.iri_to_uri(u'/foo'), '/foo')
self.assert_strict_equal(urls.iri_to_uri(u'http://föö.com:8080/bam/baz'),
'http://xn--f-1gaa.com:8080/bam/baz')
def test_iri_safe_conversion(self):
self.assert_strict_equal(urls.iri_to_uri(u'magnet:?foo=bar'),
'magnet:?foo=bar')
self.assert_strict_equal(urls.iri_to_uri(u'itms-service://?foo=bar'),
'itms-service:?foo=bar')
self.assert_strict_equal(urls.iri_to_uri(u'itms-service://?foo=bar',
safe_conversion=True),
'itms-service://?foo=bar')
def test_iri_safe_quoting(self):
uri = 'http://xn--f-1gaa.com/%2F%25?q=%C3%B6&x=%3D%25#%25'
iri = u'http://föö.com/%2F%25?q=ö&x=%3D%25#%25'
self.assert_strict_equal(urls.uri_to_iri(uri), iri)
self.assert_strict_equal(urls.iri_to_uri(urls.uri_to_iri(uri)), uri)
def test_ordered_multidict_encoding(self):
d = OrderedMultiDict()
d.add('foo', 1)
d.add('foo', 2)
d.add('foo', 3)
d.add('bar', 0)
d.add('foo', 4)
self.assert_equal(urls.url_encode(d), 'foo=1&foo=2&foo=3&bar=0&foo=4')
def test_multidict_encoding(self):
d = OrderedMultiDict()
d.add('2013-10-10T23:26:05.657975+0000', '2013-10-10T23:26:05.657975+0000')
self.assert_equal(urls.url_encode(d), '2013-10-10T23%3A26%3A05.657975%2B0000=2013-10-10T23%3A26%3A05.657975%2B0000')
def test_href(self):
x = urls.Href('http://www.example.com/')
self.assert_strict_equal(x(u'foo'), 'http://www.example.com/foo')
self.assert_strict_equal(x.foo(u'bar'), 'http://www.example.com/foo/bar')
self.assert_strict_equal(x.foo(u'bar', x=42), 'http://www.example.com/foo/bar?x=42')
self.assert_strict_equal(x.foo(u'bar', class_=42), 'http://www.example.com/foo/bar?class=42')
self.assert_strict_equal(x.foo(u'bar', {u'class': 42}), 'http://www.example.com/foo/bar?class=42')
self.assert_raises(AttributeError, lambda: x.__blah__)
x = urls.Href('blah')
self.assert_strict_equal(x.foo(u'bar'), 'blah/foo/bar')
self.assert_raises(TypeError, x.foo, {u"foo": 23}, x=42)
x = urls.Href('')
self.assert_strict_equal(x('foo'), 'foo')
def test_href_url_join(self):
x = urls.Href(u'test')
self.assert_line_equal(x(u'foo:bar'), u'test/foo:bar')
self.assert_line_equal(x(u'http://example.com/'), u'test/http://example.com/')
self.assert_line_equal(x.a(), u'test/a')
def test_href_past_root(self):
base_href = urls.Href('http://www.blagga.com/1/2/3')
self.assert_strict_equal(base_href('../foo'), 'http://www.blagga.com/1/2/foo')
self.assert_strict_equal(base_href('../../foo'), 'http://www.blagga.com/1/foo')
self.assert_strict_equal(base_href('../../../foo'), 'http://www.blagga.com/foo')
self.assert_strict_equal(base_href('../../../../foo'), 'http://www.blagga.com/foo')
self.assert_strict_equal(base_href('../../../../../foo'), 'http://www.blagga.com/foo')
self.assert_strict_equal(base_href('../../../../../../foo'), 'http://www.blagga.com/foo')
def test_url_unquote_plus_unicode(self):
# was broken in 0.6
self.assert_strict_equal(urls.url_unquote_plus(u'\x6d'), u'\x6d')
self.assert_is(type(urls.url_unquote_plus(u'\x6d')), text_type)
def test_quoting_of_local_urls(self):
rv = urls.iri_to_uri(u'/foo\x8f')
self.assert_strict_equal(rv, '/foo%C2%8F')
self.assert_is(type(rv), str)
def test_url_attributes(self):
rv = urls.url_parse('http://foo%3a:bar%3a@[::1]:80/123?x=y#frag')
self.assert_strict_equal(rv.scheme, 'http')
self.assert_strict_equal(rv.auth, 'foo%3a:bar%3a')
self.assert_strict_equal(rv.username, u'foo:')
self.assert_strict_equal(rv.password, u'bar:')
self.assert_strict_equal(rv.raw_username, 'foo%3a')
self.assert_strict_equal(rv.raw_password, 'bar%3a')
self.assert_strict_equal(rv.host, '::1')
self.assert_equal(rv.port, 80)
self.assert_strict_equal(rv.path, '/123')
self.assert_strict_equal(rv.query, 'x=y')
self.assert_strict_equal(rv.fragment, 'frag')
rv = urls.url_parse(u'http://\N{SNOWMAN}.com/')
self.assert_strict_equal(rv.host, u'\N{SNOWMAN}.com')
self.assert_strict_equal(rv.ascii_host, 'xn--n3h.com')
def test_url_attributes_bytes(self):
rv = urls.url_parse(b'http://foo%3a:bar%3a@[::1]:80/123?x=y#frag')
self.assert_strict_equal(rv.scheme, b'http')
self.assert_strict_equal(rv.auth, b'foo%3a:bar%3a')
self.assert_strict_equal(rv.username, u'foo:')
self.assert_strict_equal(rv.password, u'bar:')
self.assert_strict_equal(rv.raw_username, b'foo%3a')
self.assert_strict_equal(rv.raw_password, b'bar%3a')
self.assert_strict_equal(rv.host, b'::1')
self.assert_equal(rv.port, 80)
self.assert_strict_equal(rv.path, b'/123')
self.assert_strict_equal(rv.query, b'x=y')
self.assert_strict_equal(rv.fragment, b'frag')
def test_url_joining(self):
self.assert_strict_equal(urls.url_join('/foo', '/bar'), '/bar')
self.assert_strict_equal(urls.url_join('http://example.com/foo', '/bar'),
'http://example.com/bar')
self.assert_strict_equal(urls.url_join('file:///tmp/', 'test.html'),
'file:///tmp/test.html')
self.assert_strict_equal(urls.url_join('file:///tmp/x', 'test.html'),
'file:///tmp/test.html')
self.assert_strict_equal(urls.url_join('file:///tmp/x', '../../../x.html'),
'file:///x.html')
def test_partial_unencoded_decode(self):
ref = u'foo=정상처리'.encode('euc-kr')
x = urls.url_decode(ref, charset='euc-kr')
self.assert_strict_equal(x['foo'], u'정상처리')
def test_iri_to_uri_idempotence_ascii_only(self):
uri = u'http://www.idempoten.ce'
uri = urls.iri_to_uri(uri)
self.assert_equal(urls.iri_to_uri(uri), uri)
def test_iri_to_uri_idempotence_non_ascii(self):
uri = u'http://\N{SNOWMAN}/\N{SNOWMAN}'
uri = urls.iri_to_uri(uri)
self.assert_equal(urls.iri_to_uri(uri), uri)
def test_uri_to_iri_idempotence_ascii_only(self):
uri = 'http://www.idempoten.ce'
uri = urls.uri_to_iri(uri)
self.assert_equal(urls.uri_to_iri(uri), uri)
def test_uri_to_iri_idempotence_non_ascii(self):
uri = 'http://xn--n3h/%E2%98%83'
uri = urls.uri_to_iri(uri)
self.assert_equal(urls.uri_to_iri(uri), uri)
def test_iri_to_uri_to_iri(self):
iri = u'http://föö.com/'
uri = urls.iri_to_uri(iri)
self.assert_equal(urls.uri_to_iri(uri), iri)
def test_uri_to_iri_to_uri(self):
uri = 'http://xn--f-rgao.com/%C3%9E'
iri = urls.uri_to_iri(uri)
self.assert_equal(urls.iri_to_uri(iri), uri)
def test_uri_iri_normalization(self):
uri = 'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93'
iri = u'http://föñ.com/\N{BALLOT BOX}/fred?utf8=\u2713'
tests = [
u'http://föñ.com/\N{BALLOT BOX}/fred?utf8=\u2713',
u'http://xn--f-rgao.com/\u2610/fred?utf8=\N{CHECK MARK}',
b'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93',
u'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93',
u'http://föñ.com/\u2610/fred?utf8=%E2%9C%93',
b'http://xn--f-rgao.com/\xe2\x98\x90/fred?utf8=\xe2\x9c\x93',
]
for test in tests:
self.assert_equal(urls.uri_to_iri(test), iri)
self.assert_equal(urls.iri_to_uri(test), uri)
self.assert_equal(urls.uri_to_iri(urls.iri_to_uri(test)), iri)
self.assert_equal(urls.iri_to_uri(urls.uri_to_iri(test)), uri)
self.assert_equal(urls.uri_to_iri(urls.uri_to_iri(test)), iri)
self.assert_equal(urls.iri_to_uri(urls.iri_to_uri(test)), uri)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(URLsTestCase))
return suite
| mit |
spookylukey/django-paypal | paypal/pro/helpers.py | 3 | 12559 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import logging
import pprint
import time
import requests
from django.conf import settings
from django.forms.models import fields_for_model
from django.http import QueryDict
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.http import urlencode
from paypal.pro.exceptions import PayPalFailure
from paypal.pro.models import PayPalNVP
from paypal.utils import warn_untested
USER = settings.PAYPAL_WPP_USER
PASSWORD = settings.PAYPAL_WPP_PASSWORD
SIGNATURE = settings.PAYPAL_WPP_SIGNATURE
VERSION = 116.0
BASE_PARAMS = dict(USER=USER, PWD=PASSWORD, SIGNATURE=SIGNATURE, VERSION=VERSION)
ENDPOINT = "https://api-3t.paypal.com/nvp"
SANDBOX_ENDPOINT = "https://api-3t.sandbox.paypal.com/nvp"
EXPRESS_ENDPOINT = "https://www.paypal.com/webscr?cmd=_express-checkout&%s"
SANDBOX_EXPRESS_ENDPOINT = "https://www.sandbox.paypal.com/webscr?cmd=_express-checkout&%s"
log = logging.getLogger('paypal.pro')
def paypal_time(time_obj=None):
"""Returns a time suitable for PayPal time fields."""
warn_untested()
if time_obj is None:
time_obj = time.gmtime()
return time.strftime(PayPalNVP.TIMESTAMP_FORMAT, time_obj)
def paypaltime2datetime(s):
"""Convert a PayPal time string to a DateTime."""
naive = datetime.datetime.strptime(s, PayPalNVP.TIMESTAMP_FORMAT)
if not settings.USE_TZ:
return naive
else:
# TIMESTAMP_FORMAT is UTC
return timezone.make_aware(naive, timezone.utc)
class PayPalError(TypeError):
"""Error thrown when something is wrong."""
def express_endpoint():
if getattr(settings, 'PAYPAL_TEST', True):
return SANDBOX_EXPRESS_ENDPOINT
else:
return EXPRESS_ENDPOINT
def express_endpoint_for_token(token, commit=False):
"""
Returns the PayPal Express Checkout endpoint for a token.
Pass 'commit=True' if you will not prompt for confirmation when the user
returns to your site.
"""
pp_params = dict(token=token)
if commit:
pp_params['useraction'] = 'commit'
return express_endpoint() % urlencode(pp_params)
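# For example (token value assumed; query parameter order may vary, and the
# sandbox host is used while PAYPAL_TEST is on):
#
#   express_endpoint_for_token('EC-6WW17313293491329', commit=True)
#   # -> 'https://www.sandbox.paypal.com/webscr?cmd=_express-checkout&token=EC-6WW17313293491329&useraction=commit'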
def strip_ip_port(ip_address):
"""
Strips the port from an IPv4 or IPv6 address, returns a unicode object.
"""
# IPv4 with or without port
if '.' in ip_address:
cleaned_ip = ip_address.split(':')[0]
# IPv6 with port
elif ']:' in ip_address:
# Remove the port following last ':', and then strip first and last chars for [].
cleaned_ip = ip_address.rpartition(':')[0][1:-1]
# IPv6 without port
else:
cleaned_ip = ip_address
return cleaned_ip
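# For example (illustrative addresses):
#
#   strip_ip_port('203.0.113.7:52143')    # -> '203.0.113.7'
#   strip_ip_port('[2001:db8::1]:52143')  # -> '2001:db8::1'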
class PayPalWPP(object):
"""
Wrapper class for the PayPal Website Payments Pro.
Website Payments Pro Integration Guide:
https://cms.paypal.com/cms_content/US/en_US/files/developer/PP_WPP_IntegrationGuide.pdf
Name-Value Pair API Developer Guide and Reference:
https://cms.paypal.com/cms_content/US/en_US/files/developer/PP_NVPAPI_DeveloperGuide.pdf
"""
def __init__(self, request=None, params=BASE_PARAMS):
"""Required - USER / PWD / SIGNATURE / VERSION"""
self.request = request
if getattr(settings, 'PAYPAL_TEST', True):
self.endpoint = SANDBOX_ENDPOINT
else:
self.endpoint = ENDPOINT
self.signature_values = params
self.signature = urlencode(self.signature_values) + "&"
@cached_property
def NVP_FIELDS(self):
# Put this onto class and load lazily, because in some cases there is an
# import order problem if we put it at module level.
return list(fields_for_model(PayPalNVP).keys())
def doDirectPayment(self, params):
"""Call PayPal DoDirectPayment method."""
defaults = {"method": "DoDirectPayment", "paymentaction": "Sale"}
required = ["creditcardtype",
"acct",
"expdate",
"cvv2",
"ipaddress",
"firstname",
"lastname",
"street",
"city",
"state",
"countrycode",
"zip",
"amt",
]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
# @@@ Could check cvv2match / avscode are both 'X' or '0'
# qd = django.http.QueryDict(nvp_obj.response)
# if qd.get('cvv2match') not in ['X', '0']:
# nvp_obj.set_flag("Invalid cvv2match: %s" % qd.get('cvv2match')
# if qd.get('avscode') not in ['X', '0']:
# nvp_obj.set_flag("Invalid avscode: %s" % qd.get('avscode')
return nvp_obj
def setExpressCheckout(self, params):
"""
Initiates an Express Checkout transaction.
Optionally, the SetExpressCheckout API operation can set up billing agreements for
reference transactions and recurring payments.
Returns a NVP instance - check for token and payerid to continue!
"""
if self._is_recurring(params):
params = self._recurring_setExpressCheckout_adapter(params)
defaults = {"method": "SetExpressCheckout", "noshipping": 1}
required = ["returnurl", "cancelurl", "paymentrequest_0_amt"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def doExpressCheckoutPayment(self, params):
"""
        Complete the Express Checkout transaction ("check the dude out").
"""
defaults = {"method": "DoExpressCheckoutPayment", "paymentaction": "Sale"}
required = ["paymentrequest_0_amt", "token", "payerid"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def createRecurringPaymentsProfile(self, params, direct=False):
"""
Set direct to True to indicate that this is being called as a directPayment.
        Returns True if PayPal successfully creates the profile, otherwise False.
"""
defaults = {"method": "CreateRecurringPaymentsProfile"}
required = ["profilestartdate", "billingperiod", "billingfrequency", "amt"]
# Direct payments require CC data
if direct:
required + ["creditcardtype", "acct", "expdate", "firstname", "lastname"]
else:
required + ["token", "payerid"]
nvp_obj = self._fetch(params, required, defaults)
# Flag if profile_type != ActiveProfile
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def getExpressCheckoutDetails(self, params):
defaults = {"method": "GetExpressCheckoutDetails"}
required = ["token"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def createBillingAgreement(self, params):
"""
Create a billing agreement for future use, without any initial payment
"""
defaults = {"method": "CreateBillingAgreement"}
required = ["token"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def getTransactionDetails(self, params):
defaults = {"method": "GetTransactionDetails"}
required = ["transactionid"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def massPay(self, params):
raise NotImplementedError
def getRecurringPaymentsProfileDetails(self, params):
raise NotImplementedError
def updateRecurringPaymentsProfile(self, params):
defaults = {"method": "UpdateRecurringPaymentsProfile"}
required = ["profileid"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def billOutstandingAmount(self, params):
raise NotImplementedError
def manangeRecurringPaymentsProfileStatus(self, params, fail_silently=False):
"""
Requires `profileid` and `action` params.
Action must be either "Cancel", "Suspend", or "Reactivate".
"""
defaults = {"method": "ManageRecurringPaymentsProfileStatus"}
required = ["profileid", "action"]
nvp_obj = self._fetch(params, required, defaults)
        # TODO: This fail silently check should be using the error code, but it's not easy to access
flag_info_test_string = 'Invalid profile status for cancel action; profile should be active or suspended'
if nvp_obj.flag and not (fail_silently and nvp_obj.flag_info == flag_info_test_string):
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def refundTransaction(self, params):
raise NotImplementedError
def doReferenceTransaction(self, params):
"""
Process a payment from a buyer's account, identified by a previous
transaction.
The `paymentaction` param defaults to "Sale", but may also contain the
values "Authorization" or "Order".
"""
defaults = {"method": "DoReferenceTransaction",
"paymentaction": "Sale"}
required = ["referenceid", "amt"]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj
def _is_recurring(self, params):
"""Returns True if the item passed is a recurring transaction."""
return 'billingfrequency' in params
def _recurring_setExpressCheckout_adapter(self, params):
"""
        The recurring payment interface to SEC is different from the recurring payment
        interface to ECP. This adapts a normal call to look like a SEC call.
"""
params['l_billingtype0'] = "RecurringPayments"
params['l_billingagreementdescription0'] = params['desc']
REMOVE = ["billingfrequency", "billingperiod", "profilestartdate", "desc"]
for k in params.keys():
if k in REMOVE:
del params[k]
return params
def _fetch(self, params, required, defaults):
"""Make the NVP request and store the response."""
defaults.update(params)
pp_params = self._check_and_update_params(required, defaults)
pp_string = self.signature + urlencode(pp_params)
response = self._request(pp_string)
response_params = self._parse_response(response)
log.debug('PayPal Request:\n%s\n', pprint.pformat(defaults))
log.debug('PayPal Response:\n%s\n', pprint.pformat(response_params))
# Gather all NVP parameters to pass to a new instance.
nvp_params = {}
tmpd = defaults.copy()
tmpd.update(response_params)
for k, v in tmpd.items():
if k in self.NVP_FIELDS:
nvp_params[str(k)] = v
# PayPal timestamp has to be formatted.
if 'timestamp' in nvp_params:
nvp_params['timestamp'] = paypaltime2datetime(nvp_params['timestamp'])
nvp_obj = PayPalNVP(**nvp_params)
nvp_obj.init(self.request, params, response_params)
nvp_obj.save()
return nvp_obj
def _request(self, data):
"""Moved out to make testing easier."""
return requests.post(self.endpoint, data=data.encode("ascii")).content
def _check_and_update_params(self, required, params):
"""
Ensure all required parameters were passed to the API call and format
them correctly.
"""
for r in required:
if r not in params:
raise PayPalError("Missing required param: %s" % r)
# Upper case all the parameters for PayPal.
return (dict((k.upper(), v) for k, v in params.items()))
def _parse_response(self, response):
"""Turn the PayPal response into a dict"""
q = QueryDict(response, encoding='UTF-8').dict()
return {k.lower(): v for k, v in q.items()}
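# A minimal, hedged sketch of driving the wrapper above (field values are
# placeholders; a real call needs valid PAYPAL_WPP_* credentials, and it is
# assumed here that the stored PayPalNVP exposes the returned `token`):
#
#     wpp = PayPalWPP(request)
#     nvp = wpp.setExpressCheckout({
#         'paymentrequest_0_amt': '10.00',
#         'returnurl': 'https://example.com/paypal/return/',
#         'cancelurl': 'https://example.com/paypal/cancel/',
#     })
#     redirect_url = express_endpoint_for_token(nvp.token, commit=True)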
| mit |
prodromou87/gem5 | src/arch/x86/isa/insts/general_purpose/rotate_and_shift/__init__.py | 91 | 2283 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
categories = ["rotate",
"shift"]
microcode = ""
for category in categories:
exec "import %s as cat" % category
microcode += cat.microcode
| bsd-3-clause |
stuart-warren/kubernetes | cluster/juju/charms/trusty/kubernetes-master/unit_tests/test_install.py | 145 | 4106 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch, Mock, MagicMock
from path import Path
import pytest
import sys
# Munge the python path so we can find our hook code
d = Path('__file__').parent.abspath() / 'hooks'
sys.path.insert(0, d.abspath())
# Import the modules from the hook
import install
class TestInstallHook():
@patch('install.path')
def test_update_rc_files(self, pmock):
"""
Test happy path on updating env files. Assuming everything
exists and is in place.
"""
pmock.return_value.lines.return_value = ['line1', 'line2']
install.update_rc_files(['test1', 'test2'])
pmock.return_value.write_lines.assert_called_with(['line1', 'line2',
'test1', 'test2'])
def test_update_rc_files_with_nonexistent_path(self):
"""
Test an unhappy path if the bashrc/users do not exist.
"""
with pytest.raises(OSError) as exinfo:
            install.update_rc_files(['test1', 'test2'])
@patch('install.fetch')
@patch('install.hookenv')
def test_package_installation(self, hemock, ftmock):
"""
Verify we are calling the known essentials to build and syndicate
kubes.
"""
pkgs = ['build-essential', 'git',
'make', 'nginx', 'python-pip']
install.install_packages()
hemock.log.assert_called_with('Installing Debian packages')
ftmock.filter_installed_packages.assert_called_with(pkgs)
@patch('install.archiveurl.ArchiveUrlFetchHandler')
def test_go_download(self, aumock):
"""
Test that we are actually handing off to charm-helpers to
download a specific archive of Go. This is non-configurable so
its reasonably safe to assume we're going to always do this,
and when it changes we shall curse the brittleness of this test.
"""
ins_mock = aumock.return_value.install
install.download_go()
url = 'https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz'
sha1='5020af94b52b65cc9b6f11d50a67e4bae07b0aff'
ins_mock.assert_called_with(url, '/usr/local', sha1, 'sha1')
@patch('install.subprocess')
def test_clone_repository(self, spmock):
"""
We're not using a unit-tested git library - so ensure our subprocess
call is consistent. If we change this, we want to know we've broken it.
"""
install.clone_repository()
repo = 'https://github.com/kubernetes/kubernetes.git'
direct = '/opt/kubernetes'
spmock.check_output.assert_called_with(['git', 'clone', repo, direct])
@patch('install.install_packages')
@patch('install.download_go')
@patch('install.clone_repository')
@patch('install.update_rc_files')
@patch('install.hookenv')
def test_install_main(self, hemock, urmock, crmock, dgmock, ipmock):
"""
Ensure the driver/main method is calling all the supporting methods.
"""
strings = [
'export GOROOT=/usr/local/go\n',
'export PATH=$PATH:$GOROOT/bin\n',
'export KUBE_MASTER_IP=0.0.0.0\n',
'export KUBERNETES_MASTER=http://$KUBE_MASTER_IP\n',
]
install.install()
        ipmock.assert_called_once()
        dgmock.assert_called_once()
        crmock.assert_called_once()
urmock.assert_called_with(strings)
hemock.open_port.assert_called_with(8080)
| apache-2.0 |
2uller/LotF | App/Lib/site-packages/PIL/PdfImagePlugin.py | 40 | 5558 | #
# The Python Imaging Library.
# $Id$
#
# PDF (Acrobat) file handling
#
# History:
# 1996-07-16 fl Created
# 1997-01-18 fl Fixed header
# 2004-02-21 fl Fixes for 1/L/CMYK images, etc.
# 2004-02-24 fl Fixes for 1 and P images.
#
# Copyright (c) 1997-2004 by Secret Labs AB. All rights reserved.
# Copyright (c) 1996-1997 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
##
# Image plugin for PDF images (output only).
##
__version__ = "0.4"
import Image, ImageFile
import StringIO
#
# --------------------------------------------------------------------
# object ids:
# 1. catalogue
# 2. pages
# 3. image
# 4. page
# 5. page contents
def _obj(fp, obj, **dict):
fp.write("%d 0 obj\n" % obj)
if dict:
fp.write("<<\n")
for k, v in dict.items():
if v is not None:
fp.write("/%s %s\n" % (k, v))
fp.write(">>\n")
def _endobj(fp):
fp.write("endobj\n")
##
# (Internal) Image save plugin for the PDF format.
def _save(im, fp, filename):
resolution = im.encoderinfo.get("resolution", 72.0)
#
# make sure image data is available
im.load()
xref = [0]*(5+1) # placeholders
fp.write("%PDF-1.2\n")
fp.write("% created by PIL PDF driver " + __version__ + "\n")
#
# Get image characteristics
width, height = im.size
# FIXME: Should replace ASCIIHexDecode with RunLengthDecode (packbits)
# or LZWDecode (tiff/lzw compression). Note that PDF 1.2 also supports
    # FlateDecode (zip compression).
bits = 8
params = None
if im.mode == "1":
filter = "/ASCIIHexDecode"
colorspace = "/DeviceGray"
procset = "/ImageB" # grayscale
bits = 1
elif im.mode == "L":
filter = "/DCTDecode"
# params = "<< /Predictor 15 /Columns %d >>" % (width-2)
colorspace = "/DeviceGray"
procset = "/ImageB" # grayscale
elif im.mode == "P":
filter = "/ASCIIHexDecode"
colorspace = "[ /Indexed /DeviceRGB 255 <"
palette = im.im.getpalette("RGB")
for i in range(256):
r = ord(palette[i*3])
g = ord(palette[i*3+1])
b = ord(palette[i*3+2])
colorspace = colorspace + "%02x%02x%02x " % (r, g, b)
colorspace = colorspace + "> ]"
procset = "/ImageI" # indexed color
elif im.mode == "RGB":
filter = "/DCTDecode"
colorspace = "/DeviceRGB"
procset = "/ImageC" # color images
elif im.mode == "CMYK":
filter = "/DCTDecode"
colorspace = "/DeviceCMYK"
procset = "/ImageC" # color images
else:
raise ValueError("cannot save mode %s" % im.mode)
#
# catalogue
xref[1] = fp.tell()
_obj(fp, 1, Type = "/Catalog",
Pages = "2 0 R")
_endobj(fp)
#
# pages
xref[2] = fp.tell()
_obj(fp, 2, Type = "/Pages",
Count = 1,
Kids = "[4 0 R]")
_endobj(fp)
#
# image
op = StringIO.StringIO()
if filter == "/ASCIIHexDecode":
if bits == 1:
# FIXME: the hex encoder doesn't support packed 1-bit
# images; do things the hard way...
data = im.tostring("raw", "1")
im = Image.new("L", (len(data), 1), None)
im.putdata(data)
ImageFile._save(im, op, [("hex", (0,0)+im.size, 0, im.mode)])
elif filter == "/DCTDecode":
ImageFile._save(im, op, [("jpeg", (0,0)+im.size, 0, im.mode)])
elif filter == "/FlateDecode":
ImageFile._save(im, op, [("zip", (0,0)+im.size, 0, im.mode)])
elif filter == "/RunLengthDecode":
ImageFile._save(im, op, [("packbits", (0,0)+im.size, 0, im.mode)])
else:
raise ValueError("unsupported PDF filter (%s)" % filter)
xref[3] = fp.tell()
_obj(fp, 3, Type = "/XObject",
Subtype = "/Image",
Width = width, # * 72.0 / resolution,
Height = height, # * 72.0 / resolution,
Length = len(op.getvalue()),
Filter = filter,
BitsPerComponent = bits,
         DecodeParms = params, # PDF spec key is /DecodeParms
ColorSpace = colorspace)
fp.write("stream\n")
fp.write(op.getvalue())
fp.write("\nendstream\n")
_endobj(fp)
#
# page
xref[4] = fp.tell()
_obj(fp, 4)
fp.write("<<\n/Type /Page\n/Parent 2 0 R\n"\
"/Resources <<\n/ProcSet [ /PDF %s ]\n"\
"/XObject << /image 3 0 R >>\n>>\n"\
"/MediaBox [ 0 0 %d %d ]\n/Contents 5 0 R\n>>\n" %\
             (procset, int(width * 72.0 / resolution), int(height * 72.0 / resolution)))
_endobj(fp)
#
# page contents
op = StringIO.StringIO()
op.write("q %d 0 0 %d 0 0 cm /image Do Q\n" % (int(width * 72.0 / resolution), int(height * 72.0 / resolution)))
xref[5] = fp.tell()
_obj(fp, 5, Length = len(op.getvalue()))
fp.write("stream\n")
fp.write(op.getvalue())
fp.write("\nendstream\n")
_endobj(fp)
#
# trailer
startxref = fp.tell()
fp.write("xref\n0 %d\n0000000000 65535 f \n" % len(xref))
for x in xref[1:]:
fp.write("%010d 00000 n \n" % x)
fp.write("trailer\n<<\n/Size %d\n/Root 1 0 R\n>>\n" % len(xref))
fp.write("startxref\n%d\n%%%%EOF\n" % startxref)
fp.flush()
#
# --------------------------------------------------------------------
Image.register_save("PDF", _save)
Image.register_extension("PDF", ".pdf")
Image.register_mime("PDF", "application/pdf")
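if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original plugin; the file
    # names are placeholders): with the registrations above in place,
    # Image.save() dispatches any ".pdf" target to _save(), and the
    # "resolution" keyword is picked up via im.encoderinfo.
    im = Image.open("input.jpg")
    im.save("output.pdf", resolution=100.0)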
| gpl-2.0 |
Erve1879/feedjack | feedjack/admin.py | 6 | 1387 | # -*- coding: utf-8 -*-
"""
feedjack
Gustavo Picón
admin.py
"""
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from feedjack import models
class LinkAdmin(admin.ModelAdmin):
pass
class SiteAdmin(admin.ModelAdmin):
list_display = ('url', 'name')
filter_vertical = ('links',)
class FeedAdmin(admin.ModelAdmin):
list_display = ('name', 'feed_url', 'title', 'last_modified', \
'is_active')
fieldsets = (
(None,
{'fields':('feed_url', 'name', 'shortname', 'is_active')}),
(_('Fields updated automatically by Feedjack'),
{'classes':('collapse',),
'fields':('title', 'tagline', 'link', 'etag', 'last_modified',
'last_checked'),
})
)
search_fields = ['feed_url', 'name', 'title']
class PostAdmin(admin.ModelAdmin):
list_display = ('title', 'link', 'author', 'date_modified')
search_fields = ['link', 'title']
date_hierarchy = 'date_modified'
filter_vertical = ('tags',)
class SubscriberAdmin(admin.ModelAdmin):
list_display = ('name', 'site', 'feed')
list_filter = ('site',)
admin.site.register(models.Link, LinkAdmin)
admin.site.register(models.Site, SiteAdmin)
admin.site.register(models.Feed, FeedAdmin)
admin.site.register(models.Post, PostAdmin)
admin.site.register(models.Subscriber, SubscriberAdmin)
| bsd-3-clause |
Bitl/RBXLegacy-src | Cut/RBXLegacyDiscordBot/lib/cffi/api.py | 19 | 39647 | import sys, types
from .lock import allocate_lock
from .error import CDefError
from . import model
try:
callable
except NameError:
# Python 3.1
from collections import Callable
callable = lambda x: isinstance(x, Callable)
try:
basestring
except NameError:
# Python 3.x
basestring = str
class FFI(object):
r'''
The main top-level class that you instantiate once, or once per module.
Example usage:
ffi = FFI()
ffi.cdef("""
int printf(const char *, ...);
""")
C = ffi.dlopen(None) # standard library
-or-
C = ffi.verify() # use a C compiler: verify the decl above is right
C.printf("hello, %s!\n", ffi.new("char[]", "world"))
'''
def __init__(self, backend=None):
"""Create an FFI instance. The 'backend' argument is used to
select a non-default backend, mostly for tests.
"""
if backend is None:
# You need PyPy (>= 2.0 beta), or a CPython (>= 2.6) with
# _cffi_backend.so compiled.
import _cffi_backend as backend
from . import __version__
if backend.__version__ != __version__:
# bad version! Try to be as explicit as possible.
if hasattr(backend, '__file__'):
# CPython
raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. When we import the top-level '_cffi_backend' extension module, we get version %s, located in %r. The two versions should be equal; check your installation." % (
__version__, __file__,
backend.__version__, backend.__file__))
else:
# PyPy
raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. This interpreter comes with a built-in '_cffi_backend' module, which is version %s. The two versions should be equal; check your installation." % (
__version__, __file__, backend.__version__))
# (If you insist you can also try to pass the option
# 'backend=backend_ctypes.CTypesBackend()', but don't
# rely on it! It's probably not going to work well.)
from . import cparser
self._backend = backend
self._lock = allocate_lock()
self._parser = cparser.Parser()
self._cached_btypes = {}
self._parsed_types = types.ModuleType('parsed_types').__dict__
self._new_types = types.ModuleType('new_types').__dict__
self._function_caches = []
self._libraries = []
self._cdefsources = []
self._included_ffis = []
self._windows_unicode = None
self._init_once_cache = {}
self._cdef_version = None
self._embedding = None
if hasattr(backend, 'set_ffi'):
backend.set_ffi(self)
for name in backend.__dict__:
if name.startswith('RTLD_'):
setattr(self, name, getattr(backend, name))
#
with self._lock:
self.BVoidP = self._get_cached_btype(model.voidp_type)
self.BCharA = self._get_cached_btype(model.char_array_type)
if isinstance(backend, types.ModuleType):
# _cffi_backend: attach these constants to the class
if not hasattr(FFI, 'NULL'):
FFI.NULL = self.cast(self.BVoidP, 0)
FFI.CData, FFI.CType = backend._get_types()
else:
# ctypes backend: attach these constants to the instance
self.NULL = self.cast(self.BVoidP, 0)
self.CData, self.CType = backend._get_types()
self.buffer = backend.buffer
def cdef(self, csource, override=False, packed=False):
"""Parse the given C source. This registers all declared functions,
types, and global variables. The functions and global variables can
then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'.
The types can be used in 'ffi.new()' and other functions.
If 'packed' is specified as True, all structs declared inside this
cdef are packed, i.e. laid out without any field alignment at all.
"""
self._cdef(csource, override=override, packed=packed)
def embedding_api(self, csource, packed=False):
self._cdef(csource, packed=packed, dllexport=True)
if self._embedding is None:
self._embedding = ''
def _cdef(self, csource, override=False, **options):
if not isinstance(csource, str): # unicode, on Python 2
if not isinstance(csource, basestring):
raise TypeError("cdef() argument must be a string")
csource = csource.encode('ascii')
with self._lock:
self._cdef_version = object()
self._parser.parse(csource, override=override, **options)
self._cdefsources.append(csource)
if override:
for cache in self._function_caches:
cache.clear()
finishlist = self._parser._recomplete
if finishlist:
self._parser._recomplete = []
for tp in finishlist:
tp.finish_backend_type(self, finishlist)
def dlopen(self, name, flags=0):
"""Load and return a dynamic library identified by 'name'.
The standard C library can be loaded by passing None.
Note that functions and types declared by 'ffi.cdef()' are not
linked to a particular library, just like C headers; in the
library we only look for the actual (untyped) symbols.
"""
assert isinstance(name, basestring) or name is None
with self._lock:
lib, function_cache = _make_ffi_library(self, name, flags)
self._function_caches.append(function_cache)
self._libraries.append(lib)
return lib
def _typeof_locked(self, cdecl):
# call me with the lock!
key = cdecl
if key in self._parsed_types:
return self._parsed_types[key]
#
if not isinstance(cdecl, str): # unicode, on Python 2
cdecl = cdecl.encode('ascii')
#
type = self._parser.parse_type(cdecl)
really_a_function_type = type.is_raw_function
if really_a_function_type:
type = type.as_function_pointer()
btype = self._get_cached_btype(type)
result = btype, really_a_function_type
self._parsed_types[key] = result
return result
def _typeof(self, cdecl, consider_function_as_funcptr=False):
# string -> ctype object
try:
result = self._parsed_types[cdecl]
except KeyError:
with self._lock:
result = self._typeof_locked(cdecl)
#
btype, really_a_function_type = result
if really_a_function_type and not consider_function_as_funcptr:
raise CDefError("the type %r is a function type, not a "
"pointer-to-function type" % (cdecl,))
return btype
def typeof(self, cdecl):
"""Parse the C type given as a string and return the
corresponding <ctype> object.
It can also be used on 'cdata' instance to get its C type.
"""
if isinstance(cdecl, basestring):
return self._typeof(cdecl)
if isinstance(cdecl, self.CData):
return self._backend.typeof(cdecl)
if isinstance(cdecl, types.BuiltinFunctionType):
res = _builtin_function_type(cdecl)
if res is not None:
return res
if (isinstance(cdecl, types.FunctionType)
and hasattr(cdecl, '_cffi_base_type')):
with self._lock:
return self._get_cached_btype(cdecl._cffi_base_type)
raise TypeError(type(cdecl))
def sizeof(self, cdecl):
"""Return the size in bytes of the argument. It can be a
string naming a C type, or a 'cdata' instance.
"""
if isinstance(cdecl, basestring):
BType = self._typeof(cdecl)
return self._backend.sizeof(BType)
else:
return self._backend.sizeof(cdecl)
def alignof(self, cdecl):
"""Return the natural alignment size in bytes of the C type
given as a string.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.alignof(cdecl)
def offsetof(self, cdecl, *fields_or_indexes):
"""Return the offset of the named field inside the given
structure or array, which must be given as a C type name.
You can give several field names in case of nested structures.
You can also give numeric values which correspond to array
items, in case of an array type.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._typeoffsetof(cdecl, *fields_or_indexes)[1]
def new(self, cdecl, init=None):
"""Allocate an instance according to the specified C type and
return a pointer to it. The specified C type must be either a
pointer or an array: ``new('X *')`` allocates an X and returns
a pointer to it, whereas ``new('X[n]')`` allocates an array of
n X'es and returns an array referencing it (which works
mostly like a pointer, like in C). You can also use
``new('X[]', n)`` to allocate an array of a non-constant
length n.
The memory is initialized following the rules of declaring a
global variable in C: by default it is zero-initialized, but
an explicit initializer can be given which can be used to
fill all or part of the memory.
When the returned <cdata> object goes out of scope, the memory
is freed. In other words the returned <cdata> object has
ownership of the value of type 'cdecl' that it points to. This
means that the raw data can be used as long as this object is
kept alive, but must not be used for a longer time. Be careful
about that when copying the pointer to the memory somewhere
else, e.g. into another structure.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.newp(cdecl, init)
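    # Illustrative examples for new(), drawn from the docstring above (not
    # part of the original source):
    #   p = ffi.new("int *")           # one zero-initialized int
    #   a = ffi.new("int[10]")         # array of ten ints
    #   s = ffi.new("char[]", b"hi")   # array sized from its initializer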
def new_allocator(self, alloc=None, free=None,
should_clear_after_alloc=True):
"""Return a new allocator, i.e. a function that behaves like ffi.new()
but uses the provided low-level 'alloc' and 'free' functions.
'alloc' is called with the size as argument. If it returns NULL, a
MemoryError is raised. 'free' is called with the result of 'alloc'
as argument. Both can be either Python function or directly C
functions. If 'free' is None, then no free function is called.
If both 'alloc' and 'free' are None, the default is used.
If 'should_clear_after_alloc' is set to False, then the memory
returned by 'alloc' is assumed to be already cleared (or you are
fine with garbage); otherwise CFFI will clear it.
"""
compiled_ffi = self._backend.FFI()
allocator = compiled_ffi.new_allocator(alloc, free,
should_clear_after_alloc)
def allocate(cdecl, init=None):
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return allocator(cdecl, init)
return allocate
def cast(self, cdecl, source):
"""Similar to a C cast: returns an instance of the named C
type initialized with the given 'source'. The source is
        cast between integers or pointers of any type.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.cast(cdecl, source)
def string(self, cdata, maxlen=-1):
"""Return a Python string (or unicode string) from the 'cdata'.
If 'cdata' is a pointer or array of characters or bytes, returns
the null-terminated string. The returned string extends until
the first null character, or at most 'maxlen' characters. If
'cdata' is an array then 'maxlen' defaults to its length.
If 'cdata' is a pointer or array of wchar_t, returns a unicode
string following the same rules.
If 'cdata' is a single character or byte or a wchar_t, returns
it as a string or unicode string.
If 'cdata' is an enum, returns the value of the enumerator as a
string, or 'NUMBER' if the value is out of range.
"""
return self._backend.string(cdata, maxlen)
def unpack(self, cdata, length):
"""Unpack an array of C data of the given length,
returning a Python string/unicode/list.
If 'cdata' is a pointer to 'char', returns a byte string.
It does not stop at the first null. This is equivalent to:
ffi.buffer(cdata, length)[:]
If 'cdata' is a pointer to 'wchar_t', returns a unicode string.
'length' is measured in wchar_t's; it is not the size in bytes.
If 'cdata' is a pointer to anything else, returns a list of
'length' items. This is a faster equivalent to:
[cdata[i] for i in range(length)]
"""
return self._backend.unpack(cdata, length)
#def buffer(self, cdata, size=-1):
# """Return a read-write buffer object that references the raw C data
# pointed to by the given 'cdata'. The 'cdata' must be a pointer or
# an array. Can be passed to functions expecting a buffer, or directly
# manipulated with:
#
# buf[:] get a copy of it in a regular string, or
# buf[idx] as a single character
# buf[:] = ...
# buf[idx] = ... change the content
# """
# note that 'buffer' is a type, set on this instance by __init__
def from_buffer(self, python_buffer):
"""Return a <cdata 'char[]'> that points to the data of the
given Python object, which must support the buffer interface.
Note that this is not meant to be used on the built-in types
str or unicode (you can build 'char[]' arrays explicitly)
but only on objects containing large quantities of raw data
in some other format, like 'array.array' or numpy arrays.
"""
return self._backend.from_buffer(self.BCharA, python_buffer)
def memmove(self, dest, src, n):
"""ffi.memmove(dest, src, n) copies n bytes of memory from src to dest.
Like the C function memmove(), the memory areas may overlap;
apart from that it behaves like the C function memcpy().
'src' can be any cdata ptr or array, or any Python buffer object.
'dest' can be any cdata ptr or array, or a writable Python buffer
object. The size to copy, 'n', is always measured in bytes.
        Unlike other methods, this one supports all Python buffers, including
byte strings and bytearrays---but it still does not support
non-contiguous buffers.
"""
return self._backend.memmove(dest, src, n)
def callback(self, cdecl, python_callable=None, error=None, onerror=None):
"""Return a callback object or a decorator making such a
callback object. 'cdecl' must name a C function pointer type.
The callback invokes the specified 'python_callable' (which may
be provided either directly or via a decorator). Important: the
callback object must be manually kept alive for as long as the
callback may be invoked from the C level.
"""
def callback_decorator_wrap(python_callable):
if not callable(python_callable):
raise TypeError("the 'python_callable' argument "
"is not callable")
return self._backend.callback(cdecl, python_callable,
error, onerror)
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl, consider_function_as_funcptr=True)
if python_callable is None:
return callback_decorator_wrap # decorator mode
else:
return callback_decorator_wrap(python_callable) # direct mode
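    # Decorator-mode sketch (illustrative signature, per the docstring
    # above; remember to keep a reference alive while C may invoke it):
    #   @ffi.callback("int(int, int)")
    #   def my_add(x, y):
    #       return x + y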
def getctype(self, cdecl, replace_with=''):
"""Return a string giving the C type 'cdecl', which may be itself
a string or a <ctype> object. If 'replace_with' is given, it gives
extra text to append (or insert for more complicated C types), like
a variable name, or '*' to get actually the C type 'pointer-to-cdecl'.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
replace_with = replace_with.strip()
if (replace_with.startswith('*')
and '&[' in self._backend.getcname(cdecl, '&')):
replace_with = '(%s)' % replace_with
elif replace_with and not replace_with[0] in '[(':
replace_with = ' ' + replace_with
return self._backend.getcname(cdecl, replace_with)
def gc(self, cdata, destructor):
"""Return a new cdata object that points to the same
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
"""
return self._backend.gcp(cdata, destructor)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
# call me with the lock!
try:
BType = self._cached_btypes[type]
except KeyError:
finishlist = []
BType = type.get_cached_btype(self, finishlist)
for type in finishlist:
type.finish_backend_type(self, finishlist)
return BType
def verify(self, source='', tmpdir=None, **kwargs):
"""Verify that the current ffi signatures compile on this
machine, and return a dynamic library object. The dynamic
library can be used to call functions and access global
variables declared in this 'ffi'. The library is compiled
by the C compiler: it gives you C-level API compatibility
(including calling macros). This is unlike 'ffi.dlopen()',
which requires binary compatibility in the signatures.
"""
from .verifier import Verifier, _caller_dir_pycache
#
# If set_unicode(True) was called, insert the UNICODE and
# _UNICODE macro declarations
if self._windows_unicode:
self._apply_windows_unicode(kwargs)
#
# Set the tmpdir here, and not in Verifier.__init__: it picks
# up the caller's directory, which we want to be the caller of
        # ffi.verify(), as opposed to the caller of Verifier().
tmpdir = tmpdir or _caller_dir_pycache()
#
# Make a Verifier() and use it to load the library.
self.verifier = Verifier(self, source, tmpdir, **kwargs)
lib = self.verifier.load_library()
#
# Save the loaded library for keep-alive purposes, even
# if the caller doesn't keep it alive itself (it should).
self._libraries.append(lib)
return lib
def _get_errno(self):
return self._backend.get_errno()
def _set_errno(self, errno):
self._backend.set_errno(errno)
errno = property(_get_errno, _set_errno, None,
"the value of 'errno' from/to the C calls")
def getwinerror(self, code=-1):
return self._backend.getwinerror(code)
def _pointer_to(self, ctype):
with self._lock:
return model.pointer_cache(self, ctype)
def addressof(self, cdata, *fields_or_indexes):
"""Return the address of a <cdata 'struct-or-union'>.
If 'fields_or_indexes' are given, returns the address of that
field or array item in the structure or array, recursively in
case of nested structures.
"""
try:
ctype = self._backend.typeof(cdata)
except TypeError:
if '__addressof__' in type(cdata).__dict__:
return type(cdata).__addressof__(cdata, *fields_or_indexes)
raise
if fields_or_indexes:
ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes)
else:
if ctype.kind == "pointer":
raise TypeError("addressof(pointer)")
offset = 0
ctypeptr = self._pointer_to(ctype)
return self._backend.rawaddressof(ctypeptr, cdata, offset)
def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes):
ctype, offset = self._backend.typeoffsetof(ctype, field_or_index)
for field1 in fields_or_indexes:
ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1)
offset += offset1
return ctype, offset
def include(self, ffi_to_include):
"""Includes the typedefs, structs, unions and enums defined
in another FFI instance. Usage is similar to a #include in C,
where a part of the program might include types defined in
another part for its own usage. Note that the include()
method has no effect on functions, constants and global
variables, which must anyway be accessed directly from the
lib object returned by the original FFI instance.
"""
if not isinstance(ffi_to_include, FFI):
raise TypeError("ffi.include() expects an argument that is also of"
" type cffi.FFI, not %r" % (
type(ffi_to_include).__name__,))
if ffi_to_include is self:
raise ValueError("self.include(self)")
with ffi_to_include._lock:
with self._lock:
self._parser.include(ffi_to_include._parser)
self._cdefsources.append('[')
self._cdefsources.extend(ffi_to_include._cdefsources)
self._cdefsources.append(']')
self._included_ffis.append(ffi_to_include)
def new_handle(self, x):
return self._backend.newp_handle(self.BVoidP, x)
def from_handle(self, x):
return self._backend.from_handle(x)
def set_unicode(self, enabled_flag):
"""Windows: if 'enabled_flag' is True, enable the UNICODE and
_UNICODE defines in C, and declare the types like TCHAR and LPTCSTR
to be (pointers to) wchar_t. If 'enabled_flag' is False,
declare these types to be (pointers to) plain 8-bit characters.
This is mostly for backward compatibility; you usually want True.
"""
if self._windows_unicode is not None:
raise ValueError("set_unicode() can only be called once")
enabled_flag = bool(enabled_flag)
if enabled_flag:
self.cdef("typedef wchar_t TBYTE;"
"typedef wchar_t TCHAR;"
"typedef const wchar_t *LPCTSTR;"
"typedef const wchar_t *PCTSTR;"
"typedef wchar_t *LPTSTR;"
"typedef wchar_t *PTSTR;"
"typedef TBYTE *PTBYTE;"
"typedef TCHAR *PTCHAR;")
else:
self.cdef("typedef char TBYTE;"
"typedef char TCHAR;"
"typedef const char *LPCTSTR;"
"typedef const char *PCTSTR;"
"typedef char *LPTSTR;"
"typedef char *PTSTR;"
"typedef TBYTE *PTBYTE;"
"typedef TCHAR *PTCHAR;")
self._windows_unicode = enabled_flag
def _apply_windows_unicode(self, kwds):
defmacros = kwds.get('define_macros', ())
if not isinstance(defmacros, (list, tuple)):
raise TypeError("'define_macros' must be a list or tuple")
defmacros = list(defmacros) + [('UNICODE', '1'),
('_UNICODE', '1')]
kwds['define_macros'] = defmacros
def _apply_embedding_fix(self, kwds):
# must include an argument like "-lpython2.7" for the compiler
def ensure(key, value):
lst = kwds.setdefault(key, [])
if value not in lst:
lst.append(value)
#
if '__pypy__' in sys.builtin_module_names:
import os
if sys.platform == "win32":
# we need 'libpypy-c.lib'. Current distributions of
# pypy (>= 4.1) contain it as 'libs/python27.lib'.
pythonlib = "python27"
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'libs'))
else:
# we need 'libpypy-c.{so,dylib}', which should be by
# default located in 'sys.prefix/bin' for installed
# systems.
if sys.version_info < (3,):
pythonlib = "pypy-c"
else:
pythonlib = "pypy3-c"
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'bin'))
# On uninstalled pypy's, the libpypy-c is typically found in
# .../pypy/goal/.
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'pypy', 'goal'))
else:
if sys.platform == "win32":
template = "python%d%d"
if hasattr(sys, 'gettotalrefcount'):
template += '_d'
else:
try:
import sysconfig
except ImportError: # 2.6
from distutils import sysconfig
template = "python%d.%d"
if sysconfig.get_config_var('DEBUG_EXT'):
template += sysconfig.get_config_var('DEBUG_EXT')
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
if hasattr(sys, 'abiflags'):
pythonlib += sys.abiflags
ensure('libraries', pythonlib)
if sys.platform == "win32":
ensure('extra_link_args', '/MANIFEST')
def set_source(self, module_name, source, source_extension='.c', **kwds):
import os
if hasattr(self, '_assigned_source'):
raise ValueError("set_source() cannot be called several times "
"per ffi object")
if not isinstance(module_name, basestring):
raise TypeError("'module_name' must be a string")
if os.sep in module_name or (os.altsep and os.altsep in module_name):
raise ValueError("'module_name' must not contain '/': use a dotted "
"name to make a 'package.module' location")
self._assigned_source = (str(module_name), source,
source_extension, kwds)
def distutils_extension(self, tmpdir='build', verbose=True):
from distutils.dir_util import mkpath
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
if hasattr(self, 'verifier'): # fallback, 'tmpdir' ignored
return self.verifier.get_extension()
raise ValueError("set_source() must be called before"
" distutils_extension()")
module_name, source, source_extension, kwds = self._assigned_source
if source is None:
raise TypeError("distutils_extension() is only for C extension "
"modules, not for dlopen()-style pure Python "
"modules")
mkpath(tmpdir)
ext, updated = recompile(self, module_name,
source, tmpdir=tmpdir, extradir=tmpdir,
source_extension=source_extension,
call_c_compiler=False, **kwds)
if verbose:
if updated:
sys.stderr.write("regenerated: %r\n" % (ext.sources[0],))
else:
sys.stderr.write("not modified: %r\n" % (ext.sources[0],))
return ext
def emit_c_code(self, filename):
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before emit_c_code()")
module_name, source, source_extension, kwds = self._assigned_source
if source is None:
raise TypeError("emit_c_code() is only for C extension modules, "
"not for dlopen()-style pure Python modules")
recompile(self, module_name, source,
c_file=filename, call_c_compiler=False, **kwds)
def emit_python_code(self, filename):
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
            raise ValueError("set_source() must be called before"
                             " emit_python_code()")
module_name, source, source_extension, kwds = self._assigned_source
if source is not None:
raise TypeError("emit_python_code() is only for dlopen()-style "
"pure Python modules, not for C extension modules")
recompile(self, module_name, source,
c_file=filename, call_c_compiler=False, **kwds)
def compile(self, tmpdir='.', verbose=0, target=None, debug=None):
"""The 'target' argument gives the final file name of the
compiled DLL. Use '*' to force distutils' choice, suitable for
regular CPython C API modules. Use a file name ending in '.*'
to ask for the system's default extension for dynamic libraries
(.so/.dll/.dylib).
The default is '*' when building a non-embedded C API extension,
and (module_name + '.*') when building an embedded library.
"""
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before compile()")
module_name, source, source_extension, kwds = self._assigned_source
return recompile(self, module_name, source, tmpdir=tmpdir,
target=target, source_extension=source_extension,
compiler_verbose=verbose, debug=debug, **kwds)
def init_once(self, func, tag):
# Read _init_once_cache[tag], which is either (False, lock) if
# we're calling the function now in some thread, or (True, result).
# Don't call setdefault() in most cases, to avoid allocating and
# immediately freeing a lock; but still use setdefaut() to avoid
# races.
try:
x = self._init_once_cache[tag]
except KeyError:
x = self._init_once_cache.setdefault(tag, (False, allocate_lock()))
# Common case: we got (True, result), so we return the result.
if x[0]:
return x[1]
# Else, it's a lock. Acquire it to serialize the following tests.
with x[1]:
# Read again from _init_once_cache the current status.
x = self._init_once_cache[tag]
if x[0]:
return x[1]
# Call the function and store the result back.
result = func()
self._init_once_cache[tag] = (True, result)
return result
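    # Usage sketch for init_once() (illustrative; the function and tag
    # names are hypothetical):
    #   def _setup():
    #       return do_expensive_global_initialization()
    #   result = ffi.init_once(_setup, "module-setup")  # runs at most once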
def embedding_init_code(self, pysource):
if self._embedding:
raise ValueError("embedding_init_code() can only be called once")
# fix 'pysource' before it gets dumped into the C file:
# - remove empty lines at the beginning, so it starts at "line 1"
# - dedent, if all non-empty lines are indented
# - check for SyntaxErrors
import re
match = re.match(r'\s*\n', pysource)
if match:
pysource = pysource[match.end():]
lines = pysource.splitlines() or ['']
prefix = re.match(r'\s*', lines[0]).group()
for i in range(1, len(lines)):
line = lines[i]
if line.rstrip():
while not line.startswith(prefix):
prefix = prefix[:-1]
i = len(prefix)
lines = [line[i:]+'\n' for line in lines]
pysource = ''.join(lines)
#
compile(pysource, "cffi_init", "exec")
#
self._embedding = pysource
def def_extern(self, *args, **kwds):
raise ValueError("ffi.def_extern() is only available on API-mode FFI "
"objects")
def list_types(self):
"""Returns the user type names known to this FFI instance.
This returns a tuple containing three lists of names:
(typedef_names, names_of_structs, names_of_unions)
"""
typedefs = []
structs = []
unions = []
for key in self._parser._declarations:
if key.startswith('typedef '):
typedefs.append(key[8:])
elif key.startswith('struct '):
structs.append(key[7:])
elif key.startswith('union '):
unions.append(key[6:])
typedefs.sort()
structs.sort()
unions.sort()
return (typedefs, structs, unions)
def _load_backend_lib(backend, name, flags):
import os
if name is None:
if sys.platform != "win32":
return backend.load_library(None, flags)
name = "c" # Windows: load_library(None) fails, but this works
# (backward compatibility hack only)
first_error = None
if '.' in name or '/' in name or os.sep in name:
try:
return backend.load_library(name, flags)
except OSError as e:
first_error = e
import ctypes.util
path = ctypes.util.find_library(name)
if path is None:
msg = ("ctypes.util.find_library() did not manage "
"to locate a library called %r" % (name,))
if first_error is not None:
msg = "%s. Additionally, %s" % (first_error, msg)
raise OSError(msg)
return backend.load_library(path, flags)
def _make_ffi_library(ffi, libname, flags):
backend = ffi._backend
backendlib = _load_backend_lib(backend, libname, flags)
#
def accessor_function(name):
key = 'function ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
value = backendlib.load_function(BType, name)
library.__dict__[name] = value
#
def accessor_variable(name):
key = 'variable ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
read_variable = backendlib.read_variable
write_variable = backendlib.write_variable
setattr(FFILibrary, name, property(
lambda self: read_variable(BType, name),
lambda self, value: write_variable(BType, name, value)))
#
def addressof_var(name):
try:
return addr_variables[name]
except KeyError:
with ffi._lock:
if name not in addr_variables:
key = 'variable ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
if BType.kind != 'array':
BType = model.pointer_cache(ffi, BType)
p = backendlib.load_function(BType, name)
addr_variables[name] = p
return addr_variables[name]
#
def accessor_constant(name):
raise NotImplementedError("non-integer constant '%s' cannot be "
"accessed from a dlopen() library" % (name,))
#
def accessor_int_constant(name):
library.__dict__[name] = ffi._parser._int_constants[name]
#
accessors = {}
accessors_version = [False]
addr_variables = {}
#
def update_accessors():
if accessors_version[0] is ffi._cdef_version:
return
#
for key, (tp, _) in ffi._parser._declarations.items():
if not isinstance(tp, model.EnumType):
tag, name = key.split(' ', 1)
if tag == 'function':
accessors[name] = accessor_function
elif tag == 'variable':
accessors[name] = accessor_variable
elif tag == 'constant':
accessors[name] = accessor_constant
else:
for i, enumname in enumerate(tp.enumerators):
def accessor_enum(name, tp=tp, i=i):
tp.check_not_partial()
library.__dict__[name] = tp.enumvalues[i]
accessors[enumname] = accessor_enum
for name in ffi._parser._int_constants:
accessors.setdefault(name, accessor_int_constant)
accessors_version[0] = ffi._cdef_version
#
def make_accessor(name):
with ffi._lock:
if name in library.__dict__ or name in FFILibrary.__dict__:
return # added by another thread while waiting for the lock
if name not in accessors:
update_accessors()
if name not in accessors:
raise AttributeError(name)
accessors[name](name)
#
class FFILibrary(object):
def __getattr__(self, name):
make_accessor(name)
return getattr(self, name)
def __setattr__(self, name, value):
try:
property = getattr(self.__class__, name)
except AttributeError:
make_accessor(name)
setattr(self, name, value)
else:
property.__set__(self, value)
def __dir__(self):
with ffi._lock:
update_accessors()
return accessors.keys()
def __addressof__(self, name):
if name in library.__dict__:
return library.__dict__[name]
if name in FFILibrary.__dict__:
return addressof_var(name)
make_accessor(name)
if name in library.__dict__:
return library.__dict__[name]
if name in FFILibrary.__dict__:
return addressof_var(name)
raise AttributeError("cffi library has no function or "
"global variable named '%s'" % (name,))
#
if libname is not None:
try:
if not isinstance(libname, str): # unicode, on Python 2
libname = libname.encode('utf-8')
FFILibrary.__name__ = 'FFILibrary_%s' % libname
except UnicodeError:
pass
library = FFILibrary()
return library, library.__dict__
def _builtin_function_type(func):
# a hack to make at least ffi.typeof(builtin_function) work,
# if the builtin function was obtained by 'vengine_cpy'.
import sys
try:
module = sys.modules[func.__module__]
ffi = module._cffi_original_ffi
types_of_builtin_funcs = module._cffi_types_of_builtin_funcs
tp = types_of_builtin_funcs[func]
except (KeyError, AttributeError, TypeError):
return None
else:
with ffi._lock:
return ffi._get_cached_btype(tp)
| gpl-3.0 |
onelife/rt-thread | bsp/stm32/stm32f407-realtouch/rtconfig.py | 1 | 1862 | import os
BUILD = 'debug'
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'/home/onelife/.platformio/packages/toolchain-gccarmnoneeabi/bin/'
else:
print('Unsupported compiler %s' % CROSS_TOOL)
exit(-1)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=realtouch.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET realtouch.bin\n' + SIZE + ' $TARGET \n'
else:
print('Unsupported toolchain %s' % PLATFORM)
exit(-1)
def dist_handle(BSP_ROOT):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT)
| gpl-2.0 |
furgerf/kaggle-projects | cancer/tf_network.py | 1 | 1220 | import tensorflow as tf
class TfNetwork(object):
def __init__(self, dimensionality):
"""
Creates a new TfNetwork. Note that it is apparently required to define
variables W and b even if they'll be later restored from a previous session.
dimensionality: (int) Number of dimensions (features) for the network
"""
self.x = tf.placeholder(tf.float32, [None, dimensionality])
self.W = tf.Variable(tf.random_normal([dimensionality, 1]), name='W')
self.b = tf.Variable(tf.random_normal([1]), name='b')
self.y = tf.matmul(self.x, self.W) + self.b
def load_variables(self, session):
"""
Loads variables W and b from the provided session.
"""
self.b = None
self.W = None
variables = tf.trainable_variables()
for variable in variables:
if variable.name == 'b:0':
self.b = variable
print('Found b', self.b, self.b.eval(session))
if variable.name == 'W:0':
self.W = variable
w = self.W.eval(session)
print('Found W', self.W, w.min(), '-', w.max(), '~', w.mean())
    if self.b is None or self.W is None:
raise ValueError('Variable b or W was not found')
self.y = tf.matmul(self.x, self.W) + self.b
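if __name__ == '__main__':
  # Minimal usage sketch (illustrative only, not part of the original
  # module): builds a network over 3 features and runs one forward pass on
  # random inputs. Assumes TensorFlow 1.x, matching the placeholder-based
  # graph above.
  import numpy as np
  network = TfNetwork(3)
  with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    predictions = session.run(
        network.y, feed_dict={network.x: np.random.rand(5, 3)})
    print('Predictions:', predictions.flatten())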
| apache-2.0 |
turbulenz/gyp | pylib/gyp/easy_xml.py | 12 | 5182 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
import locale
import sys
try:
# reduce moved to functools in python3.
reduce
except NameError:
from functools import reduce
def XmlToString(content, encoding='utf-8', pretty=False):
  """ Converts the given structured content into a string of XML.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
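# Illustrative call (mirrors Example 2 in the docstring above; not part of
# the original module):
#   spec = ['myelement', {'a': 'value1', 'b': 'value2'},
#           ['childtype', 'This is'],
#           ['childtype', 'it!'],
#          ]
#   print(XmlToString(spec, pretty=True))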
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].items()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
default_encoding = locale.getdefaultlocale()[1]
if default_encoding and default_encoding.upper() != encoding.upper():
try:
xml_string = xml_string.decode(default_encoding).encode(encoding)
except AttributeError:
pass
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
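# Example invocation (illustrative; the output path is hypothetical):
#   WriteXmlIfChanged(spec, 'out/project.vcxproj', pretty=True, win32=True)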
_xml_escape_map = {
    '"': '&quot;',
    "'": '&apos;',
    '<': '&lt;',
    '>': '&gt;',
    '&': '&amp;',
    '\n': '&#xA;',
    '\r': '&#xD;',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
| bsd-3-clause |
hgl888/chromium-crosswalk-efl | tools/perf/page_sets/tough_scrolling_cases.py | 33 | 1471 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class ToughScrollingCasesPage(page_module.Page):
def __init__(self, url, page_set):
super(ToughScrollingCasesPage, self).__init__(url=url, page_set=page_set)
def RunSmoothness(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage()
interaction.End()
class ToughScrollingCasesPageSet(page_set_module.PageSet):
"""
Description: A collection of difficult scrolling tests
"""
def __init__(self):
super(ToughScrollingCasesPageSet, self).__init__()
urls_list = [
'file://tough_scrolling_cases/background_fixed.html',
'file://tough_scrolling_cases/cust_scrollbar.html',
'file://tough_scrolling_cases/div_scrolls.html',
'file://tough_scrolling_cases/fixed_nonstacking.html',
'file://tough_scrolling_cases/fixed_stacking.html',
'file://tough_scrolling_cases/iframe_scrolls.html',
'file://tough_scrolling_cases/simple.html',
'file://tough_scrolling_cases/wheel_body_prevdefault.html',
'file://tough_scrolling_cases/wheel_div_prevdefault.html'
]
for url in urls_list:
self.AddPage(ToughScrollingCasesPage(url, self))
| bsd-3-clause |
roderickm/textfsm | texttable.py | 1 | 32239 | #!/usr/bin/python2.6
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""A module to represent and manipulate tabular text data.
A table of rows, indexed on row number. Each row is an ordered dictionary of row
elements that maintains knowledge of the parent table and column headings.
Tables can be created from CSV input and in turn support a number of display
formats such as CSV and variable sized and justified rows.
"""
import copy
import textwrap
import terminal
class Error(Exception):
"""Base class for errors."""
class TableError(Error):
"""Error in TextTable."""
class Row(dict):
"""Represents a table row. We implement this as an ordered dictionary.
The order is the chronological order of data insertion. Methods are supplied
to make it behave like a regular dict() and list().
Attributes:
row: int, the row number in the container table. 0 is the header row.
table: A TextTable(), the associated container table.
"""
def __init__(self, *args, **kwargs):
super(Row, self).__init__(*args, **kwargs)
self._keys = list()
self._values = list()
self.row = None
self.table = None
self._color = None
def __getitem__(self, column):
"""Support for [] notation.
Args:
column: Tuple of column names, or a (str) column name, or positional
column number, 0-indexed.
Returns:
A list or string with column value(s).
Raises:
IndexError: The given column(s) were not found.
"""
if isinstance(column, (list, tuple)):
ret = []
for col in column:
ret.append(self[col])
return ret
# Perhaps we have a range like '1', ':-1' or '1:'.
try:
return self._values[column]
except (IndexError, TypeError):
pass
for i in xrange(len(self._keys)):
if self._keys[i] == column:
return self._values[i]
raise IndexError('No such column "%s" in row.' % column)
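  # Illustrative lookups (the column names are hypothetical), per the
  # docstring above:
  #   row[0]                 -> first value, positional
  #   row['name']            -> value of the 'name' column
  #   row[('name', 'addr')]  -> list with both values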
def __contains__(self, value):
return value in self._values
def __setitem__(self, column, value):
for i in xrange(len(self)):
if self._keys[i] == column:
self._values[i] = value
return
# No column found, add a new one.
self._keys.append(column)
self._values.append(value)
def __iter__(self):
return iter(self._values)
def __len__(self):
return len(self._keys)
def __str__(self):
ret = ''
for v in self._values:
ret += '%12s ' % v
ret += '\n'
return ret
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, str(self))
def get(self, column, default_value=None):
"""Get an item from the Row by column name.
Args:
column: Tuple of column names, or a (str) column name, or positional
column number, 0-indexed.
default_value: The value to use if the key is not found.
Returns:
A list or string with column value(s) or default_value if not found.
"""
if isinstance(column, (list, tuple)):
ret = []
for col in column:
ret.append(self.get(col, default_value))
return ret
# Perhaps we have a range like '1', ':-1' or '1:'.
try:
return self._values[column]
except (IndexError, TypeError):
pass
try:
return self[column]
except IndexError:
return default_value
def index(self, column): # pylint: disable=C6409
"""Fetches the column number (0 indexed).
Args:
column: A string, column to fetch the index of.
Returns:
An int, the row index number.
Raises:
ValueError: The specified column was not found.
"""
for i, key in enumerate(self._keys):
if key == column:
return i
raise ValueError('Column "%s" not found.' % column)
def iterkeys(self):
return iter(self._keys)
def items(self):
# TODO(harro): self.get(k) should work here but didn't ?
return [(k, self.__getitem__(k)) for k in self._keys]
def _GetValues(self):
"""Return the row's values."""
return self._values
def _GetHeader(self):
"""Return the row's header."""
return self._keys
def _SetHeader(self, values):
"""Set the row's header from a list."""
if self._values and len(values) != len(self._values):
raise ValueError('Header values not equal to existing data width.')
if not self._values:
for _ in xrange(len(values)):
self._values.append(None)
self._keys = list(values)
def _SetColour(self, value_list):
"""Sets row's colour attributes to a list of values in terminal.SGR."""
if value_list is None:
self._color = None
return
colors = []
for color in value_list:
if color in terminal.SGR:
colors.append(color)
elif color in terminal.FG_COLOR_WORDS:
colors += terminal.FG_COLOR_WORDS[color]
elif color in terminal.BG_COLOR_WORDS:
colors += terminal.BG_COLOR_WORDS[color]
else:
raise ValueError('Invalid colour specification.')
self._color = list(set(colors))
def _GetColour(self):
if self._color is None:
return None
return list(self._color)
def _SetValues(self, values):
"""Set values from supplied dictionary or list.
Args:
values: A Row, dict indexed by column name, or list.
Raises:
TypeError: Argument is not a list or dict, or list is not equal row
length or dictionary keys don't match.
"""
def _ToStr(value):
"""Convert individul list entries to string."""
if isinstance(value, (list, tuple)):
result = []
for val in value:
result.append(str(val))
return result
else:
return str(value)
# Row with identical header can be copied directly.
if isinstance(values, Row):
if self._keys != values.header:
raise TypeError('Attempt to append row with mismatched header.')
self._values = copy.deepcopy(values.values)
elif isinstance(values, dict):
for key in self._keys:
if key not in values:
raise TypeError('Dictionary key mismatch with row.')
for key in self._keys:
self[key] = _ToStr(values[key])
elif isinstance(values, list) or isinstance(values, tuple):
if len(values) != len(self._values):
raise TypeError('Supplied list length != row length')
for (index, value) in enumerate(values):
self._values[index] = _ToStr(value)
else:
      raise TypeError('Supplied argument must be Row, dict or list, not %s' %
                      type(values))
def Insert(self, key, value, row_index):
"""Inserts new values at a specified offset.
Args:
key: string for header value.
value: string for a data value.
row_index: Offset into row for data.
Raises:
IndexError: If the offset is out of bands.
"""
if row_index < 0:
row_index += len(self)
if not 0 <= row_index < len(self):
raise IndexError('Index "%s" is out of bounds.' % row_index)
new_row = Row()
for idx in self.header:
if self.index(idx) == row_index:
new_row[key] = value
new_row[idx] = self[idx]
self._keys = new_row.header
self._values = new_row.values
del new_row
color = property(_GetColour, _SetColour, doc='Colour spec of this row')
header = property(_GetHeader, _SetHeader, doc="List of row's headers.")
values = property(_GetValues, _SetValues, doc="List of row's values.")
class TextTable(object):
"""Class that provides data methods on a tabular format.
Data is stored as a list of Row() objects. The first row is always present as
the header row.
Attributes:
row_class: class, A class to use for the Row object.
separator: str, field separator when printing table.
"""
def __init__(self, row_class=Row):
"""Initialises a new table.
Args:
row_class: A class to use as the row object. This should be a
subclass of this module's Row() class.
"""
self.row_class = row_class
self.separator = ', '
self.Reset()
def Reset(self):
self._row_index = 1
self._table = [[]]
self._iterator = 0 # While loop row index
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, str(self))
def __str__(self):
"""Displays table with pretty formatting."""
return self.table
def __incr__(self, incr=1):
    self._SetRowIndex(self._row_index + incr)
def __contains__(self, name):
"""Whether the given column header name exists."""
return name in self.header
def __getitem__(self, row):
"""Fetches the given row number."""
return self._table[row]
def __iter__(self):
"""Iterator that excludes the header row."""
return self.next()
def next(self):
# Maintain a counter so a row can know what index it is.
    # Save the old value to support nested iterations.
old_iter = self._iterator
try:
for r in self._table[1:]:
self._iterator = r.row
yield r
finally:
# Recover the original index after loop termination or exit with break.
self._iterator = old_iter
def __add__(self, other):
"""Merges two with identical columns."""
new_table = copy.copy(self)
for row in other:
new_table.Append(row)
return new_table
def __copy__(self):
"""Copy table instance."""
new_table = self.__class__()
new_table._table = [self.header]
for row in self[1:]:
new_table.Append(row)
return new_table
def Filter(self, function=None):
"""Construct Textable from the rows of which the function returns true.
Args:
function: A function applied to each row which returns a bool. If
function is None, all rows with empty column values are
removed.
Returns:
A new TextTable()
Raises:
TableError: When an invalid row entry is Append()'d
"""
flat = lambda x: x if isinstance(x, str) else ''.join([flat(y) for y in x])
if function is None:
function = lambda row: bool(flat(row.values))
new_table = self.__class__()
new_table._table = [self.header]
for row in self:
if function(row) is True:
new_table.Append(row)
return new_table
def Map(self, function):
"""Applies the function to every row in the table.
Args:
function: A function applied to each row.
Returns:
A new TextTable()
Raises:
TableError: When transform is not invalid row entry. The transform
must be compatible with Append().
"""
new_table = self.__class__()
new_table._table = [self.header]
for row in self:
filtered_row = function(row)
if filtered_row:
new_table.Append(filtered_row)
return new_table
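  # Example (illustrative): upper-case the (hypothetical) 'name' column in
  # every row --
  #   def _upper_name(row):
  #     row['name'] = row['name'].upper()
  #     return row
  #   table = table.Map(_upper_name)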
# pylint: disable=C6409
# pylint: disable=W0622
def sort(self, cmp=None, key=None, reverse=False):
"""Sorts rows in the texttable.
Args:
cmp: func, non default sort algorithm to use.
key: func, applied to each element before sorting.
reverse: bool, reverse order of sort.
"""
def _DefaultKey(value):
"""Default key func is to create a list of all fields."""
result = []
for key in self.header:
# Try sorting as numerical value if possible.
try:
result.append(float(value[key]))
except ValueError:
result.append(value[key])
return result
key = key or _DefaultKey
# Exclude header by copying table.
new_table = self._table[1:]
new_table.sort(cmp, key, reverse)
# Regenerate the table with original header
self._table = [self.header]
self._table.extend(new_table)
# Re-write the 'row' attribute of each row
for index, row in enumerate(self._table):
row.row = index
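  # Example (illustrative): sort by a single column, largest value first --
  #   table.sort(key=lambda row: row['count'], reverse=True)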
# pylint: enable=W0622
def extend(self, table, keys=None):
"""Extends all rows in the texttable.
The rows are extended with the new columns from the table.
Args:
table: A texttable, the table to extend this table by.
keys: A set, the set of columns to use as the key. If None, the
row index is used.
Raises:
IndexError: If key is not a valid column name.
"""
if keys:
for k in keys:
if k not in self._Header():
raise IndexError("Unknown key: '%s'", k)
extend_with = []
for column in table.header:
if column not in self.header:
extend_with.append(column)
if not extend_with:
return
for column in extend_with:
self.AddColumn(column)
if not keys:
for row1, row2 in zip(self, table):
for column in extend_with:
row1[column] = row2[column]
return
for row1 in self:
for row2 in table:
for k in keys:
if row1[k] != row2[k]:
break
else:
for column in extend_with:
row1[column] = row2[column]
break
# pylint: enable=C6409
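  # Example (illustrative): pull extra columns from another table, matching
  # rows on a shared 'id' column --
  #   table.extend(other_table, keys={'id'})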
def Remove(self, row):
"""Removes a row from the table.
Args:
row: int, the row number to delete. Must be >= 1, as the header
cannot be removed.
Raises:
TableError: Attempt to remove nonexistent or header row.
"""
if row == 0 or row > self.size:
      raise TableError('Attempt to remove header or nonexistent row.')
new_table = []
# pylint: disable=E1103
for t_row in self._table:
if t_row.row != row:
new_table.append(t_row)
if t_row.row > row:
t_row.row -= 1
self._table = new_table
def _Header(self):
"""Returns the header row."""
return self._table[0]
def _GetRow(self, columns=None):
"""Returns the current row as a tuple."""
row = self._table[self._row_index]
if columns:
result = []
for col in columns:
        if col not in self.header:
raise TableError('Column header %s not known in table.' % col)
result.append(row[self.header.index(col)])
row = result
return row
def _SetRow(self, new_values, row=0):
"""Sets the current row to new list.
Args:
new_values: List|dict of new values to insert into row.
row: int, Row to insert values into.
Raises:
TableError: If number of new values is not equal to row size.
"""
if not row:
row = self._row_index
if row > self.size:
raise TableError('Entry %s beyond table size %s.' % (row, self.size))
self._table[row].values = new_values
def _SetHeader(self, new_values):
"""Sets header of table to the given tuple.
Args:
new_values: Tuple of new header values.
"""
row = self.row_class()
row.row = 0
for v in new_values:
row[v] = v
self._table[0] = row
def _SetRowIndex(self, row):
if not row or row > self.size:
raise TableError('Entry %s beyond table size %s.' % (row, self.size))
self._row_index = row
def _GetRowIndex(self):
return self._row_index
def _GetSize(self):
"""Returns number of rows in table."""
if not self._table:
return 0
return len(self._table) - 1
def _GetTable(self):
"""Returns table, with column headers and separators.
Returns:
The whole table including headers as a string. Each row is
joined by a newline and each entry by self.separator.
"""
result = []
# Avoid the global lookup cost on each iteration.
lstr = str
for row in self._table:
result.append(
'%s\n' %
self.separator.join(lstr(v) for v in row))
return ''.join(result)
def _SetTable(self, table):
"""Sets table, with column headers and separators."""
if not isinstance(table, TextTable):
raise TypeError('Not an instance of TextTable.')
self.Reset()
self._table = copy.deepcopy(table._table) # pylint: disable=W0212
# Point parent table of each row back ourselves.
for row in self:
row.table = self
def _SmallestColSize(self, text):
"""Finds the largest indivisible word of a string.
...and thus the smallest possible column width that can contain that
word unsplit over rows.
Args:
text: A string of text potentially consisting of words.
Returns:
Integer size of the largest single word in the text.
"""
if not text:
return 0
stripped = terminal.StripAnsiText(text)
return max(len(word) for word in stripped.split())
def _TextJustify(self, text, col_size):
"""Formats text within column with white space padding.
A single space is prefixed, and a number of spaces are added as a
suffix such that the length of the resultant string equals the col_size.
If the length of the text exceeds the column width available then it
    is split into words and returned as a list of strings, each of which
contains one or more words padded to the column size.
Args:
text: String of text to format.
col_size: integer size of column to pad out the text to.
Returns:
List of strings col_size in length.
Raises:
TableError: If col_size is too small to fit the words in the text.
"""
result = []
if '\n' in text:
for paragraph in text.split('\n'):
result.extend(self._TextJustify(paragraph, col_size))
return result
wrapper = textwrap.TextWrapper(width=col_size-2, break_long_words=False,
expand_tabs=False)
try:
text_list = wrapper.wrap(text)
except ValueError:
raise TableError('Field too small (minimum width: 3)')
if not text_list:
return [' '*col_size]
for current_line in text_list:
stripped_len = len(terminal.StripAnsiText(current_line))
ansi_color_adds = len(current_line) - stripped_len
# +2 for white space on either side.
if stripped_len + 2 > col_size:
raise TableError('String contains words that do not fit in column.')
result.append(' %-*s' % (col_size - 1 + ansi_color_adds, current_line))
return result
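  # Example (illustrative): justify text into a 12-character column; each
  # returned string is exactly 12 characters wide --
  #   lines = table._TextJustify('first second third', 12)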
def FormattedTable(self, width=80, force_display=False, ml_delimiter=True,
color=True, display_header=True, columns=None):
"""Returns whole table, with whitespace padding and row delimiters.
Args:
width: An int, the max width we want the table to fit in.
force_display: A bool, if set to True will display table when the table
can't be made to fit to the width.
ml_delimiter: A bool, if set to False will not display the multi-line
delimiter.
color: A bool. If true, display any colours in row.colour.
display_header: A bool. If true, display header.
columns: A list of str, show only columns with these names.
Returns:
A string. The tabled output.
Raises:
TableError: Width too narrow to display table.
"""
def _FilteredCols():
"""Returns list of column names to display."""
if not columns:
return self._Header().values
return [col for col in self._Header().values if col in columns]
# Largest is the biggest data entry in a column.
largest = {}
# Smallest is the same as above but with linewrap i.e. largest unbroken
# word in the data stream.
smallest = {}
# largest == smallest for a column with a single word of data.
# Initialise largest and smallest for all columns.
for key in _FilteredCols():
largest[key] = 0
smallest[key] = 0
# Find the largest and smallest values.
# Include Title line in equation.
# pylint: disable=E1103
for row in self._table:
for key, value in row.items():
if key not in _FilteredCols():
continue
# Convert lists into a string.
if isinstance(value, list):
value = ', '.join(value)
value = terminal.StripAnsiText(value)
largest[key] = max(len(value), largest[key])
smallest[key] = max(self._SmallestColSize(value), smallest[key])
    # pylint: enable=E1103
min_total_width = 0
multi_word = []
# Bump up the size of each column to include minimum pad.
# Find all columns that can be wrapped (multi-line).
# And the minimum width needed to display all columns (even if wrapped).
for key in _FilteredCols():
# Each column is bracketed by a space on both sides.
# So increase size required accordingly.
largest[key] += 2
smallest[key] += 2
min_total_width += smallest[key]
# If column contains data that 'could' be split over multiple lines.
if largest[key] != smallest[key]:
multi_word.append(key)
# Check if we have enough space to display the table.
if min_total_width > width and not force_display:
raise TableError('Width too narrow to display table.')
# We have some columns that may need wrapping over several lines.
if multi_word:
# Find how much space is left over for the wrapped columns to use.
# Also find how much space we would need if they were not wrapped.
# These are 'spare_width' and 'desired_width' respectively.
desired_width = 0
spare_width = width - min_total_width
for key in multi_word:
spare_width += smallest[key]
desired_width += largest[key]
# Scale up the space we give each wrapped column.
# Proportional to its size relative to 'desired_width' for all columns.
# Rinse and repeat if we changed the wrap list in this iteration.
# Once done we will have a list of columns that definitely need wrapping.
done = False
while not done:
done = True
for key in multi_word:
# If we scale past the desired width for this particular column,
# then give it its desired width and remove it from the wrapped list.
if (largest[key] <=
round((largest[key] / float(desired_width)) * spare_width)):
smallest[key] = largest[key]
multi_word.remove(key)
spare_width -= smallest[key]
desired_width -= largest[key]
done = False
# If we scale below the minimum width for this particular column,
# then leave it at its minimum and remove it from the wrapped list.
elif (smallest[key] >=
round((largest[key] / float(desired_width)) * spare_width)):
multi_word.remove(key)
spare_width -= smallest[key]
desired_width -= largest[key]
done = False
# Repeat the scaling algorithm with the final wrap list.
# This time we assign the extra column space by increasing 'smallest'.
for key in multi_word:
smallest[key] = int(round((largest[key] / float(desired_width))
* spare_width))
total_width = 0
row_count = 0
result_dict = {}
# Format the header lines and add to result_dict.
# Find what the total width will be and use this for the ruled lines.
# Find how many rows are needed for the most wrapped line (row_count).
for key in _FilteredCols():
result_dict[key] = self._TextJustify(key, smallest[key])
if len(result_dict[key]) > row_count:
row_count = len(result_dict[key])
total_width += smallest[key]
# Store header in header_list, working down the wrapped rows.
header_list = []
for row_idx in xrange(row_count):
for key in _FilteredCols():
try:
header_list.append(result_dict[key][row_idx])
except IndexError:
          # If no value then use whitespace of equal size.
header_list.append(' '*smallest[key])
header_list.append('\n')
# Format and store the body lines
result_dict = {}
body_list = []
# We separate multi line rows with a single line delimiter.
    prev_multi_line = False
# Unless it is the first line in which there is already the header line.
first_line = True
for row in self:
row_count = 0
for key, value in row.items():
if key not in _FilteredCols():
continue
# Convert field contents to a string.
if isinstance(value, list):
value = ', '.join(value)
# Store results in result_dict and take note of wrapped line count.
result_dict[key] = self._TextJustify(value, smallest[key])
if len(result_dict[key]) > row_count:
row_count = len(result_dict[key])
if row_count > 1:
        prev_multi_line = True
# If current or prior line was multi-line then include delimiter.
      if not first_line and prev_multi_line and ml_delimiter:
body_list.append('-'*total_width + '\n')
if row_count == 1:
# Our current line was not wrapped, so clear flag.
        prev_multi_line = False
row_list = []
for row_idx in xrange(row_count):
for key in _FilteredCols():
try:
row_list.append(result_dict[key][row_idx])
except IndexError:
            # If no value then use whitespace of equal size.
row_list.append(' '*smallest[key])
row_list.append('\n')
if color and row.color is not None:
body_list.append(
terminal.AnsiText(''.join(row_list)[:-1],
command_list=row.color))
body_list.append('\n')
else:
body_list.append(''.join(row_list))
first_line = False
header = ''.join(header_list) + '='*total_width
if color and self._Header().color is not None:
header = terminal.AnsiText(header, command_list=self._Header().color)
# Add double line delimiter between header and main body.
if display_header:
return '%s\n%s' % (header, ''.join(body_list))
return '%s' % ''.join(body_list)
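  # Example (illustrative): render at most 60 characters wide, without
  # colour codes --
  #   print(table.FormattedTable(width=60, color=False))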
def LabelValueTable(self, label_list=None):
"""Returns whole table as rows of name/value pairs.
One (or more) column entries are used for the row prefix label.
The remaining columns are each displayed as a row entry with the
prefix labels appended.
Use the first column as the label if label_list is None.
Args:
label_list: A list of prefix labels to use.
Returns:
Label/Value formatted table.
Raises:
TableError: If specified label is not a column header of the table.
"""
label_list = label_list or self._Header()[0]
# Ensure all labels are valid.
for label in label_list:
if label not in self._Header():
raise TableError('Invalid label prefix: %s.' % label)
sorted_list = []
for header in self._Header():
if header in label_list:
sorted_list.append(header)
label_str = '# LABEL %s\n' % '.'.join(sorted_list)
body = []
for row in self:
# Some of the row values are pulled into the label, stored in label_prefix.
label_prefix = []
value_list = []
for key, value in row.items():
if key in sorted_list:
# Set prefix.
label_prefix.append(value)
else:
value_list.append('%s %s' % (key, value))
body.append(''.join(
['%s.%s\n' % ('.'.join(label_prefix), v) for v in value_list]))
return '%s%s' % (label_str, ''.join(body))
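  # Example (illustrative): prefix each value row with the 'name' column --
  #   print(table.LabelValueTable(['name']))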
table = property(_GetTable, _SetTable, doc='Whole table')
row = property(_GetRow, _SetRow, doc='Current row')
header = property(_Header, _SetHeader, doc='List of header entries.')
row_index = property(_GetRowIndex, _SetRowIndex, doc='Current row.')
size = property(_GetSize, doc='Number of rows in table.')
def RowWith(self, column, value):
"""Retrieves the first non header row with the column of the given value.
Args:
column: str, the name of the column to check.
value: str, The value of the column to check.
Returns:
A Row() of the first row found, None otherwise.
Raises:
IndexError: The specified column does not exist.
"""
for row in self._table[1:]:
if row[column] == value:
return row
return None
def AddColumn(self, column, default='', col_index=-1):
"""Appends a new column to the table.
Args:
column: A string, name of the column to add.
default: Default value for entries. Defaults to ''.
col_index: Integer index for where to insert new column.
Raises:
TableError: Column name already exists.
"""
    if column in self.header:
raise TableError('Column %r already in table.' % column)
if col_index == -1:
self._table[0][column] = column
for i in xrange(1, len(self._table)):
self._table[i][column] = default
else:
self._table[0].Insert(column, column, col_index)
for i in xrange(1, len(self._table)):
self._table[i].Insert(column, default, col_index)
def Append(self, new_values):
"""Adds a new row (list) to the table.
Args:
new_values: Tuple, dict, or Row() of new values to append as a row.
Raises:
TableError: Supplied tuple not equal to table width.
"""
newrow = self.NewRow()
newrow.values = new_values
self._table.append(newrow)
def NewRow(self, value=''):
"""Fetches a new, empty row, with headers populated.
Args:
value: Initial value to set each row entry to.
Returns:
A Row() object.
"""
newrow = self.row_class()
newrow.row = self.size + 1
newrow.table = self
headers = self._Header()
for header in headers:
newrow[header] = value
return newrow
def CsvToTable(self, buf, header=True, separator=','):
"""Parses buffer into tabular format.
Strips off comments (preceded by '#').
Optionally parses and indexes by first line (header).
Args:
buf: String file buffer containing CSV data.
header: Is the first line of buffer a header.
separator: String that CSV is separated by.
Returns:
int, the size of the table created.
Raises:
TableError: A parsing error occurred.
"""
self.Reset()
header_row = self.row_class()
if header:
line = buf.readline()
header_str = ''
while not header_str:
# Remove comments.
header_str = line.split('#')[0].strip()
if not header_str:
line = buf.readline()
header_list = header_str.split(separator)
header_length = len(header_list)
for entry in header_list:
entry = entry.strip()
if entry in header_row:
raise TableError('Duplicate header entry %r.' % entry)
header_row[entry] = entry
header_row.row = 0
self._table[0] = header_row
# xreadlines would be better but not supported by StringIO for testing.
for line in buf:
      # Support commented lines, provided '#' is the first character of the line.
if line.startswith('#'):
continue
lst = line.split(separator)
lst = [l.strip() for l in lst]
if header and len(lst) != header_length:
# Silently drop illegal line entries
continue
if not header:
header_row = self.row_class()
header_length = len(lst)
header_row.values = dict(zip(xrange(header_length),
xrange(header_length)))
self._table[0] = header_row
header = True
continue
new_row = self.NewRow()
new_row.values = lst
      new_row.row = self.size + 1
self._table.append(new_row)
return self.size
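  # Example (illustrative, Python 2 era like the module itself): parse CSV
  # from a file-like object --
  #   import StringIO
  #   table.CsvToTable(StringIO.StringIO('name,role\nada,engineer\n'))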
def index(self, name=None): # pylint: disable=C6409
"""Returns index number of supplied column name.
Args:
name: string of column name.
Raises:
TableError: If name not found.
Returns:
Index of the specified header entry.
"""
try:
return self.header.index(name)
except ValueError:
raise TableError('Unknown index name %s.' % name)
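# A minimal end-to-end sketch appended for illustration (not part of the
# original module); all names and data below are made up.
if __name__ == '__main__':
  demo = TextTable()
  demo.header = ['Name', 'Role']
  demo.Append(['ada', 'engineer'])
  demo.Append(['grace', 'admiral'])
  demo.sort(key=lambda row: row['Name'])
  print(demo.FormattedTable(width=40))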
| apache-2.0 |
noroutine/ansible | lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py | 2 | 27566 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: azure_rm_securitygroup
version_added: "2.1"
short_description: Manage Azure network security groups.
description:
- Create, update or delete a network security group. A security group contains Access Control List (ACL) rules
that allow or deny network traffic to subnets or individual network interfaces. A security group is created
with a set of default security rules and an empty set of security rules. Shape traffic flow by adding
rules to the empty set of security rules.
options:
default_rules:
description:
- The set of default rules automatically added to a security group at creation. In general default
rules will not be modified. Modify rules to shape the flow of traffic to or from a subnet or NIC. See
rules below for the makeup of a rule dict.
required: false
default: null
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
required: false
name:
description:
- Name of the security group to operate on.
required: false
default: null
purge_default_rules:
description:
- Remove any existing rules not matching those defined in the default_rules parameter.
default: false
required: false
purge_rules:
description:
- Remove any existing rules not matching those defined in the rules parameters.
default: false
required: false
resource_group:
description:
- Name of the resource group the security group belongs to.
required: true
rules:
description:
- Set of rules shaping traffic flow to or from a subnet or NIC. Each rule is a dictionary.
required: false
default: null
suboptions:
name:
description:
- Unique name for the rule.
required: true
description:
description:
- Short description of the rule's purpose.
protocol:
description: Accepted traffic protocol.
choices:
- Udp
- Tcp
- "*"
default: "*"
source_port_range:
description:
- Port or range of ports from which traffic originates.
default: "*"
destination_port_range:
description:
- Port or range of ports to which traffic is headed.
default: "*"
source_address_prefix:
description:
- The CIDR or source IP range.
                    - Asterisk C(*) can also be used to match all source IPs.
- Default tags such as C(VirtualNetwork), C(AzureLoadBalancer) and C(Internet) can also be used.
- If this is an ingress rule, specifies where network traffic originates from.
default: "*"
destination_address_prefix:
description:
- The destination address prefix.
- CIDR or destination IP range.
                    - Asterisk C(*) can also be used to match all destination IPs.
- Default tags such as C(VirtualNetwork), C(AzureLoadBalancer) and C(Internet) can also be used.
default: "*"
access:
description:
- Whether or not to allow the traffic flow.
choices:
- Allow
- Deny
default: Allow
priority:
description:
                    - Order in which to apply the rule. Must be a unique integer between 100 and 4096, inclusive.
required: true
direction:
description:
- Indicates the direction of the traffic flow.
choices:
- Inbound
- Outbound
default: Inbound
state:
description:
- Assert the state of the security group. Set to 'present' to create or update a security group. Set to
'absent' to remove a security group.
default: present
required: false
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
# Create a security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
purge_rules: yes
rules:
- name: DenySSH
protocol: TCP
destination_port_range: 22
access: Deny
priority: 100
direction: Inbound
- name: 'AllowSSH'
protocol: TCP
source_address_prefix: '174.109.158.0/24'
destination_port_range: 22
access: Allow
priority: 101
direction: Inbound
# Update rules on existing security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
rules:
- name: DenySSH
protocol: TCP
destination_port_range: 22-23
access: Deny
priority: 100
direction: Inbound
- name: AllowSSHFromHome
protocol: TCP
source_address_prefix: '174.109.158.0/24'
destination_port_range: 22-23
access: Allow
priority: 102
direction: Inbound
tags:
testing: testing
delete: on-exit
# Delete security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
state: absent
'''
RETURN = '''
state:
description: Current state of the security group.
returned: always
type: dict
sample: {
"default_rules": [
{
"access": "Allow",
"description": "Allow inbound traffic from all VMs in VNET",
"destination_address_prefix": "VirtualNetwork",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetInBound",
"name": "AllowVnetInBound",
"priority": 65000,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "VirtualNetwork",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow inbound traffic from azure load balancer",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowAzureLoadBalancerInBound",
"name": "AllowAzureLoadBalancerInBound",
"priority": 65001,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "AzureLoadBalancer",
"source_port_range": "*"
},
{
"access": "Deny",
"description": "Deny all inbound traffic",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllInBound",
"name": "DenyAllInBound",
"priority": 65500,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow outbound traffic from all VMs to all VMs in VNET",
"destination_address_prefix": "VirtualNetwork",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetOutBound",
"name": "AllowVnetOutBound",
"priority": 65000,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "VirtualNetwork",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow outbound traffic from all VMs to Internet",
"destination_address_prefix": "Internet",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowInternetOutBound",
"name": "AllowInternetOutBound",
"priority": 65001,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Deny",
"description": "Deny all outbound traffic",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllOutBound",
"name": "DenyAllOutBound",
"priority": 65500,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
}
],
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup",
"location": "westus",
"name": "mysecgroup",
"network_interfaces": [],
"rules": [
{
"access": "Deny",
"description": null,
"destination_address_prefix": "*",
"destination_port_range": "22",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/DenySSH",
"name": "DenySSH",
"priority": 100,
"protocol": "Tcp",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Allow",
"description": null,
"destination_address_prefix": "*",
"destination_port_range": "22",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/AllowSSH",
"name": "AllowSSH",
"priority": 101,
"protocol": "Tcp",
"provisioning_state": "Succeeded",
"source_address_prefix": "174.109.158.0/24",
"source_port_range": "*"
}
],
"subnets": [],
"tags": {
"delete": "on-exit",
"foo": "bar",
"testing": "testing"
},
"type": "Microsoft.Network/networkSecurityGroups"
}
''' # NOQA
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.network.models import NetworkSecurityGroup, SecurityRule
from azure.mgmt.network.models import (
SecurityRuleAccess,
SecurityRuleDirection,
SecurityRuleProtocol
)
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.six import integer_types
def validate_rule(rule, rule_type=None):
'''
Apply defaults to a rule dictionary and check that all values are valid.
:param rule: rule dict
:param rule_type: Set to 'default' if the rule is part of the default set of rules.
:return: None
'''
if not rule.get('name'):
raise Exception("Rule name value is required.")
priority = rule.get('priority', None)
if not priority:
raise Exception("Rule priority is required.")
if not isinstance(priority, integer_types):
try:
priority = int(priority)
rule['priority'] = priority
        except (TypeError, ValueError):
raise Exception("Rule priority attribute must be an integer.")
if rule_type != 'default' and (priority < 100 or priority > 4096):
raise Exception("Rule priority must be between 100 and 4096")
if not rule.get('access'):
rule['access'] = 'Allow'
access_names = [member.value for member in SecurityRuleAccess]
if rule['access'] not in access_names:
raise Exception("Rule access must be one of [{0}]".format(', '.join(access_names)))
if not rule.get('destination_address_prefix'):
rule['destination_address_prefix'] = '*'
if not rule.get('source_address_prefix'):
rule['source_address_prefix'] = '*'
if not rule.get('protocol'):
rule['protocol'] = '*'
protocol_names = [member.value for member in SecurityRuleProtocol]
if rule['protocol'] not in protocol_names:
raise Exception("Rule protocol must be one of [{0}]".format(', '.join(protocol_names)))
if not rule.get('direction'):
rule['direction'] = 'Inbound'
direction_names = [member.value for member in SecurityRuleDirection]
if rule['direction'] not in direction_names:
raise Exception("Rule direction must be one of [{0}]".format(', '.join(direction_names)))
if not rule.get('source_port_range'):
rule['source_port_range'] = '*'
if not rule.get('destination_port_range'):
rule['destination_port_range'] = '*'
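# Illustrative sketch (not part of the module; the dict below is made up):
# validate_rule() mutates the rule in place, filling in the documented
# defaults --
#   rule = {'name': 'DenySSH', 'priority': 100}
#   validate_rule(rule)
#   # rule now also carries access='Allow', protocol='*', direction='Inbound',
#   # and '*' for the address prefixes and port ranges.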
def compare_rules(r, rule):
matched = False
changed = False
if r['name'] == rule['name']:
matched = True
if rule.get('description', None) != r['description']:
changed = True
r['description'] = rule['description']
if rule['protocol'] != r['protocol']:
changed = True
r['protocol'] = rule['protocol']
if str(rule['source_port_range']) != str(r['source_port_range']):
changed = True
r['source_port_range'] = str(rule['source_port_range'])
if str(rule['destination_port_range']) != str(r['destination_port_range']):
changed = True
r['destination_port_range'] = str(rule['destination_port_range'])
if rule['access'] != r['access']:
changed = True
r['access'] = rule['access']
if rule['priority'] != r['priority']:
changed = True
r['priority'] = rule['priority']
if rule['direction'] != r['direction']:
changed = True
r['direction'] = rule['direction']
        if rule['source_address_prefix'] != str(r['source_address_prefix']):
            changed = True
            r['source_address_prefix'] = rule['source_address_prefix']
        if rule['destination_address_prefix'] != str(r['destination_address_prefix']):
            changed = True
            r['destination_address_prefix'] = rule['destination_address_prefix']
    return matched, changed
def create_rule_instance(rule):
'''
Create an instance of SecurityRule from a dict.
:param rule: dict
:return: SecurityRule
'''
return SecurityRule(
protocol=rule['protocol'],
source_address_prefix=rule['source_address_prefix'],
destination_address_prefix=rule['destination_address_prefix'],
access=rule['access'],
direction=rule['direction'],
id=rule.get('id', None),
description=rule.get('description', None),
source_port_range=rule.get('source_port_range', None),
destination_port_range=rule.get('destination_port_range', None),
priority=rule.get('priority', None),
provisioning_state=rule.get('provisioning_state', None),
name=rule.get('name', None),
etag=rule.get('etag', None)
)
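# Illustrative follow-on (hypothetical data): a rule dict that has passed
# validate_rule() converts straight into an SDK object --
#   sdk_rule = create_rule_instance(rule)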
def create_rule_dict_from_obj(rule):
'''
Create a dict from an instance of a SecurityRule.
:param rule: SecurityRule
:return: dict
'''
return dict(
id=rule.id,
name=rule.name,
description=rule.description,
protocol=rule.protocol,
source_port_range=rule.source_port_range,
destination_port_range=rule.destination_port_range,
source_address_prefix=rule.source_address_prefix,
destination_address_prefix=rule.destination_address_prefix,
access=rule.access,
priority=rule.priority,
direction=rule.direction,
provisioning_state=rule.provisioning_state,
etag=rule.etag
)
def create_network_security_group_dict(nsg):
results = dict(
id=nsg.id,
name=nsg.name,
type=nsg.type,
location=nsg.location,
tags=nsg.tags,
)
results['rules'] = []
if nsg.security_rules:
for rule in nsg.security_rules:
results['rules'].append(create_rule_dict_from_obj(rule))
results['default_rules'] = []
if nsg.default_security_rules:
for rule in nsg.default_security_rules:
results['default_rules'].append(create_rule_dict_from_obj(rule))
results['network_interfaces'] = []
if nsg.network_interfaces:
for interface in nsg.network_interfaces:
results['network_interfaces'].append(interface.id)
results['subnets'] = []
if nsg.subnets:
for subnet in nsg.subnets:
results['subnets'].append(subnet.id)
return results
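# The dict built above is what the module reports back as `state` (see the
# RETURN block); `rules` and `default_rules` are lists of plain dicts.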
class AzureRMSecurityGroup(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
default_rules=dict(type='list'),
location=dict(type='str'),
name=dict(type='str', required=True),
purge_default_rules=dict(type='bool', default=False),
purge_rules=dict(type='bool', default=False),
resource_group=dict(required=True, type='str'),
rules=dict(type='list'),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
self.default_rules = None
self.location = None
self.name = None
self.purge_default_rules = None
self.purge_rules = None
self.resource_group = None
self.rules = None
self.state = None
self.tags = None
self.results = dict(
changed=False,
state=dict()
)
super(AzureRMSecurityGroup, self).__init__(self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
changed = False
results = dict()
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
if self.rules:
for rule in self.rules:
try:
validate_rule(rule)
except Exception as exc:
self.fail("Error validating rule {0} - {1}".format(rule, str(exc)))
if self.default_rules:
for rule in self.default_rules:
try:
validate_rule(rule, 'default')
except Exception as exc:
self.fail("Error validating default rule {0} - {1}".format(rule, str(exc)))
try:
nsg = self.network_client.network_security_groups.get(self.resource_group, self.name)
results = create_network_security_group_dict(nsg)
self.log("Found security group:")
self.log(results, pretty_print=True)
self.check_provisioning_state(nsg, self.state)
if self.state == 'present':
pass
elif self.state == 'absent':
self.log("CHANGED: security group found but state is 'absent'")
changed = True
except CloudError:
if self.state == 'present':
self.log("CHANGED: security group not found and state is 'present'")
changed = True
if self.state == 'present' and not changed:
# update the security group
self.log("Update security group {0}".format(self.name))
if self.rules:
for rule in self.rules:
rule_matched = False
for r in results['rules']:
                        match, rule_changed = compare_rules(r, rule)
                        if rule_changed:
                            changed = True
if match:
rule_matched = True
if not rule_matched:
changed = True
results['rules'].append(rule)
if self.purge_rules:
new_rules = []
for rule in results['rules']:
for r in self.rules:
if rule['name'] == r['name']:
new_rules.append(rule)
results['rules'] = new_rules
if self.default_rules:
for rule in self.default_rules:
rule_matched = False
for r in results['default_rules']:
                        match, rule_changed = compare_rules(r, rule)
                        if rule_changed:
                            changed = True
if match:
rule_matched = True
if not rule_matched:
changed = True
results['default_rules'].append(rule)
if self.purge_default_rules:
new_default_rules = []
for rule in results['default_rules']:
for r in self.default_rules:
if rule['name'] == r['name']:
new_default_rules.append(rule)
results['default_rules'] = new_default_rules
update_tags, results['tags'] = self.update_tags(results['tags'])
if update_tags:
changed = True
self.results['changed'] = changed
self.results['state'] = results
if not self.check_mode:
self.results['state'] = self.create_or_update(results)
elif self.state == 'present' and changed:
# create the security group
self.log("Create security group {0}".format(self.name))
if not self.location:
self.fail("Parameter error: location required when creating a security group.")
results['name'] = self.name
results['location'] = self.location
results['rules'] = []
results['default_rules'] = []
results['tags'] = {}
if self.rules:
results['rules'] = self.rules
if self.default_rules:
results['default_rules'] = self.default_rules
if self.tags:
results['tags'] = self.tags
self.results['changed'] = changed
self.results['state'] = results
if not self.check_mode:
self.results['state'] = self.create_or_update(results)
elif self.state == 'absent' and changed:
self.log("Delete security group {0}".format(self.name))
self.results['changed'] = changed
self.results['state'] = dict()
if not self.check_mode:
self.delete()
                # The delete call does not return anything; if no exception
                # was raised, assume it worked.
self.results['state']['status'] = 'Deleted'
return self.results
def create_or_update(self, results):
parameters = NetworkSecurityGroup()
if results.get('rules'):
parameters.security_rules = []
for rule in results.get('rules'):
parameters.security_rules.append(create_rule_instance(rule))
if results.get('default_rules'):
parameters.default_security_rules = []
for rule in results.get('default_rules'):
parameters.default_security_rules.append(create_rule_instance(rule))
parameters.tags = results.get('tags')
parameters.location = results.get('location')
try:
poller = self.network_client.network_security_groups.create_or_update(self.resource_group,
self.name,
parameters)
result = self.get_poller_result(poller)
except CloudError as exc:
self.fail("Error creating/updating security group {0} - {1}".format(self.name, str(exc)))
return create_network_security_group_dict(result)
def delete(self):
try:
poller = self.network_client.network_security_groups.delete(self.resource_group, self.name)
result = self.get_poller_result(poller)
except CloudError as exc:
raise Exception("Error deleting security group {0} - {1}".format(self.name, str(exc)))
return result
def main():
AzureRMSecurityGroup()
if __name__ == '__main__':
main()
| gpl-3.0 |
caisq/tensorflow | tensorflow/python/platform/app.py | 23 | 3660 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic entry point script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import errno as _errno
import sys as _sys
from tensorflow.python.platform import flags
from tensorflow.python.util.tf_export import tf_export
def _usage(shorthelp):
"""Writes __main__'s docstring to stdout with some help text.
Args:
shorthelp: bool, if True, prints only flags from the main module,
rather than all flags.
"""
doc = _sys.modules['__main__'].__doc__
if not doc:
doc = '\nUSAGE: %s [flags]\n' % _sys.argv[0]
doc = flags.text_wrap(doc, indent=' ', firstline_indent='')
else:
# Replace all '%s' with sys.argv[0], and all '%%' with '%'.
num_specifiers = doc.count('%') - 2 * doc.count('%%')
try:
doc %= (_sys.argv[0],) * num_specifiers
except (OverflowError, TypeError, ValueError):
# Just display the docstring as-is.
pass
if shorthelp:
flag_str = flags.FLAGS.main_module_help()
else:
flag_str = str(flags.FLAGS)
try:
_sys.stdout.write(doc)
if flag_str:
_sys.stdout.write('\nflags:\n')
_sys.stdout.write(flag_str)
_sys.stdout.write('\n')
except IOError as e:
# We avoid printing a huge backtrace if we get EPIPE, because
# "foo.par --help | less" is a frequent use case.
if e.errno != _errno.EPIPE:
raise
class _HelpFlag(flags.BooleanFlag):
"""Special boolean flag that displays usage and raises SystemExit."""
NAME = 'help'
SHORT_NAME = 'h'
def __init__(self):
super(_HelpFlag, self).__init__(
self.NAME, False, 'show this help', short_name=self.SHORT_NAME)
def parse(self, arg):
if arg:
_usage(shorthelp=True)
print()
print('Try --helpfull to get a list of all flags.')
_sys.exit(1)
class _HelpshortFlag(_HelpFlag):
"""--helpshort is an alias for --help."""
NAME = 'helpshort'
SHORT_NAME = None
class _HelpfullFlag(flags.BooleanFlag):
"""Display help for flags in main module and all dependent modules."""
def __init__(self):
super(_HelpfullFlag, self).__init__('helpfull', False, 'show full help')
def parse(self, arg):
if arg:
_usage(shorthelp=False)
_sys.exit(1)
_define_help_flags_called = False
def _define_help_flags():
global _define_help_flags_called
if not _define_help_flags_called:
flags.DEFINE_flag(_HelpFlag())
flags.DEFINE_flag(_HelpfullFlag())
flags.DEFINE_flag(_HelpshortFlag())
_define_help_flags_called = True
@tf_export('app.run')
def run(main=None, argv=None):
"""Runs the program with an optional 'main' function and 'argv' list."""
# Define help flags.
_define_help_flags()
# Parse known flags.
argv = flags.FLAGS(_sys.argv if argv is None else argv, known_only=True)
main = main or _sys.modules['__main__'].main
# Call the main function, passing through any arguments
# to the final program.
_sys.exit(main(argv))
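# Illustrative usage from a client script (not part of this module):
#
#   import tensorflow as tf
#
#   def main(argv):
#     # argv[0] is the program name; the rest are the unparsed arguments.
#     print(argv)
#
#   if __name__ == '__main__':
#     tf.app.run(main)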
| apache-2.0 |
ToonTownInfiniteRepo/ToontownInfinite | toontown/toon/DistributedBankerMgr.py | 1 | 6690 | from direct.gui.DirectGui import *
from pandac.PandaModules import *
from toontown.toonbase.ToontownGlobals import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.estate import BankGUI
from toontown.estate.BankGlobals import *
from toontown.toontowngui import TTDialog
from toontown.catalog.CatalogFurnitureItem import FurnitureTypes
from toontown.catalog.CatalogFurnitureItem import FTScale
from direct.distributed.DistributedObject import DistributedObject
class DistributedBankerMgr(DistributedObject):
notify = directNotify.newCategory('DistributedBanker')
def __init__(self, cr):
DistributedObject.__init__(self, cr)
self.bankGui = None
self.bankTrack = None
self.bankDialog = None
self.hasLocalAvatar = 0
self.hasJarOut = 0
self.jarLods = []
self.bankSphereEvent = 'bankSphere'
self.bankSphereEnterEvent = 'enter' + self.bankSphereEvent
self.bankSphereExitEvent = 'exit' + self.bankSphereEvent
self.bankGuiDoneEvent = 'bankGuiDone'
return
def announceGenerate(self):
self.notify.debug('announceGenerate')
DistributedObject.announceGenerate(self)
def delete(self):
self.notify.debug('delete')
DistributedObject.delete(self)
def enterBank(self):
self.sendUpdate('avatarEnter', [])
def __handleBankDone(self, transactionAmount):
        self.notify.debug('__handleBankDone(transactionAmount=%s)' % (transactionAmount,))
self.sendUpdate('transferMoney', [transactionAmount])
self.ignore(self.bankGuiDoneEvent)
self.ignore(self.bankSphereExitEvent)
if self.bankGui is not None:
self.bankGui.destroy()
self.bankGui = None
return
def freeAvatar(self):
self.notify.debug('freeAvatar()')
if self.hasLocalAvatar:
base.localAvatar.posCamera(0, 0)
            if base.cr.playGame.place is not None:
base.cr.playGame.getPlace().setState('walk')
self.hasLocalAvatar = 0
return
def showBankGui(self):
if self.bankGui:
self.bankGui.destroy()
self.bankGui = BankGUI.BankGui(self.bankGuiDoneEvent)
self.accept(self.bankGuiDoneEvent, self.__handleBankDone)
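    # setMovie() below is the server-driven dispatcher: the AI picks a
    # BANK_MOVIE_* mode and every client plays the matching animation, with
    # the GUI/dialog work done only when avId matches the local toon.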
def setMovie(self, mode, avId, timestamp):
self.notify.debug('setMovie(mode=%s, avId=%s, timestamp=%s)' % (mode, avId, timestamp))
timeStamp = globalClockDelta.localElapsedTime(timestamp)
isLocalToon = avId == base.localAvatar.doId
self.notify.info('setMovie: mode=%s, avId=%s, timeStamp=%s, isLocalToon=%s' % (mode,
avId,
timeStamp,
isLocalToon))
if mode == BANK_MOVIE_CLEAR:
self.notify.debug('setMovie: clear')
elif mode == BANK_MOVIE_GUI:
self.notify.debug('setMovie: gui')
track = Sequence()
track.append(Func(self.__takeOutToonJar, avId))
if isLocalToon:
track.append(Wait(3.0))
track.append(Func(self.showBankGui))
track.start()
self.bankTrack = track
elif mode == BANK_MOVIE_DEPOSIT:
self.notify.debug('setMovie: deposit')
self.__putAwayToonJar(avId)
elif mode == BANK_MOVIE_WITHDRAW:
self.notify.debug('setMovie: withdraw')
self.__putAwayToonJar(avId)
elif mode == BANK_MOVIE_NO_OP:
self.notify.debug('setMovie: no op')
self.__putAwayToonJar(avId)
elif mode == BANK_MOVIE_NOT_OWNER:
self.notify.debug('setMovie: not owner')
if isLocalToon:
self.bankDialog = TTDialog.TTDialog(dialogName='BankNotOwner', style=TTDialog.Acknowledge, text=TTLocalizer.DistributedBankNotOwner, text_wordwrap=15, fadeScreen=1, command=self.__clearDialog)
elif mode == BANK_MOVIE_NO_OWNER:
self.notify.debug('setMovie: no owner')
if isLocalToon:
self.bankDialog = TTDialog.TTDialog(dialogName='BankNoOwner', style=TTDialog.Acknowledge, text=TTLocalizer.DistributedBankNoOwner, text_wordwrap=15, fadeScreen=1, command=self.__clearDialog)
else:
self.notify.warning('unknown mode in setMovie: %s' % mode)
def __clearDialog(self, event):
self.notify.debug('__clearDialog(event=%s)' % (event,))
if self.bankDialog is not None:
self.bankDialog.cleanup()
self.bankDialog = None
self.freeAvatar()
return
def __attachToonJar(self, toon):
self.__removeToonJar()
for hand in toon.getRightHands():
self.jarLods.append(toon.jar.instanceTo(hand))
def __removeToonJar(self):
for jar in self.jarLods:
jar.removeNode()
self.jarLods = []
def __takeOutToonJar(self, avId):
self.notify.debug('__takeOutToonJar(avId=%s)' % (avId,))
toon = base.cr.doId2do.get(avId)
        if toon is None:
return
track = Sequence()
walkToBank = Sequence(Func(toon.stopSmooth), Func(toon.loop, 'walk'), Func(toon.loop, 'neutral'), Func(toon.startSmooth))
track.append(walkToBank)
if not toon.jar:
toon.getJar()
self.__attachToonJar(toon)
jarAndBank = Parallel(LerpScaleInterval(toon.jar, 1.5, 1.0, blendType='easeOut'), ActorInterval(base.cr.doId2do[avId], 'bank', endTime=3.8))
track.append(jarAndBank)
track.append(Func(base.cr.doId2do[avId].pingpong, 'bank', fromFrame=48, toFrame=92))
track.start()
self.hasJarOut = 1
return
def __putAwayToonJar(self, avId):
self.notify.debug('__putAwayToonJar(avId=%s)' % (avId,))
toon = base.cr.doId2do.get(avId)
if toon is None:
return
if not self.hasJarOut:
return
self.hasJarOut = 0
if not toon.jar:
toon.getJar()
track = Sequence()
jarAndBank = Parallel(ActorInterval(base.cr.doId2do[avId], 'bank', startTime=2.0, endTime=0.0), LerpScaleInterval(toon.jar, 2.0, 0.0, blendType='easeIn'))
track.append(jarAndBank)
track.append(Func(self.__removeToonJar))
track.append(Func(toon.removeJar))
track.append(Func(toon.loop, 'neutral'))
if avId == base.localAvatar.doId:
track.append(Func(self.freeAvatar))
track.start()
self.bankTrack = track
return
| mit |
adamtiger/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions_test.py | 59 | 13552 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
return {
key: val.tolist() if isinstance(val, np.ndarray) else val
for key, val in a.items()
}
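# For example (illustrative): vals_to_list({'a': np.array([1, 2]), 'b': 3})
# returns {'a': [1, 2], 'b': 3}, so feed results compare cleanly with ==.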
class _FeedingFunctionsTestCase(test.TestCase):
"""Tests for feeding functions."""
def testArrayFeedFnBatchOne(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 16
expected = {
"index_placeholder": [i],
"value_placeholder": [[2 * i, 2 * i + 1]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchFive(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [15, 0, 1, 2, 3],
"value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchTwoWithOneEpoch(self):
array = np.arange(5) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"value_placeholder": [10, 11]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"value_placeholder": [12, 13]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"value_placeholder": [14]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundred(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 100)
expected = {
"index_placeholder":
list(range(0, 16)) * 6 + list(range(0, 4)),
"value_placeholder":
np.arange(32).reshape([16, 2]).tolist() * 6 +
[[0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundredWithSmallerArrayAndMultipleEpochs(self):
array = np.arange(2) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"value_placeholder": [10, 11, 10, 11],
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOne(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 32
expected = {
"index_placeholder": [i + 96],
"a_placeholder": [32 + i],
"b_placeholder": [64 + i]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchFive(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [127, 96, 97, 98, 99],
"a_placeholder": [63, 32, 33, 34, 35],
"b_placeholder": [95, 64, 65, 66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchTwoWithOneEpoch(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 37)
array2 = np.arange(64, 69)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 101))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [96, 97],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [98, 99],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [100],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundred(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 100)
expected = {
"index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
"a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
"b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundredWithSmallDataArrayAndMultipleEpochs(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 34)
array2 = np.arange(64, 66)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 98))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [96, 97, 96, 97],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnBatchTwoWithOneEpoch(self):
a = np.arange(32, 37)
b = np.arange(64, 69)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnLargeBatchWithSmallArrayAndMultipleEpochs(self):
a = np.arange(32, 34)
b = np.arange(64, 66)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testFillArraySmall(self):
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[64, 36], dtype=np.int32)
ff._fill_array(actual, a)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testFillArrayLarge(self):
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
ff._fill_array(actual, a)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testFillArraySmallWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[64, 36], dtype=np.int32)
ff._fill_array(actual, a, fill_value)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testFillArrayLargeWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
ff._fill_array(actual, a, fill_value)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededSmall(self):
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededLarge(self):
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededSmallWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededLargeWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededSmallWithSpecifiedNonNumericValue(self):
fill_value = False
    a = (np.ones(shape=[32, 32], dtype=np.bool_).tolist() +
         np.ones(shape=[32, 36], dtype=np.bool_).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
    expected = np.ones(shape=[64, 36], dtype=np.bool_)
expected[:32, 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededLargeWithSpecifiedNonNumericValue(self):
fill_value = False
    a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.bool_).tolist() +
         np.ones(shape=[8, 8, 8, 8, 36], dtype=np.bool_).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
    expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.bool_)
expected[:8, ..., 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
if __name__ == "__main__":
test.main()
| apache-2.0 |
iglpdc/nipype | nipype/interfaces/freesurfer/tests/test_utils.py | 6 | 7427 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
from tempfile import mkdtemp
from shutil import rmtree
import numpy as np
import nibabel as nif
from nipype.testing import (assert_equal, assert_not_equal,
assert_raises, skipif)
from nipype.interfaces.base import TraitError
import nipype.interfaces.freesurfer as fs
def no_freesurfer():
if fs.Info().version is None:
return True
else:
return False
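# Test fixtures: the helpers below create dummy NIfTI files (and, in the
# first case, a fake registration file) inside a fresh temporary directory,
# chdir into it, and return the previous working directory so that
# clean_directory() can restore it afterwards.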
def create_files_in_directory():
outdir = os.path.realpath(mkdtemp())
cwd = os.getcwd()
os.chdir(outdir)
filelist = ['a.nii', 'b.nii']
for f in filelist:
hdr = nif.Nifti1Header()
shape = (3, 3, 3, 4)
hdr.set_data_shape(shape)
img = np.random.random(shape)
nif.save(nif.Nifti1Image(img, np.eye(4), hdr),
os.path.join(outdir, f))
with open(os.path.join(outdir, 'reg.dat'), 'wt') as fp:
fp.write('dummy file')
filelist.append('reg.dat')
return filelist, outdir, cwd
def create_surf_file():
outdir = os.path.realpath(mkdtemp())
cwd = os.getcwd()
os.chdir(outdir)
surf = 'lh.a.nii'
hdr = nif.Nifti1Header()
shape = (1, 100, 1)
hdr.set_data_shape(shape)
img = np.random.random(shape)
nif.save(nif.Nifti1Image(img, np.eye(4), hdr),
os.path.join(outdir, surf))
return surf, outdir, cwd
def clean_directory(outdir, old_wd):
if os.path.exists(outdir):
rmtree(outdir)
os.chdir(old_wd)
@skipif(no_freesurfer)
def test_sample2surf():
s2s = fs.SampleToSurface()
# Test underlying command
yield assert_equal, s2s.cmd, 'mri_vol2surf'
# Test mandatory args exception
yield assert_raises, ValueError, s2s.run
# Create testing files
files, cwd, oldwd = create_files_in_directory()
# Test input settings
s2s.inputs.source_file = files[0]
s2s.inputs.reference_file = files[1]
s2s.inputs.hemi = "lh"
s2s.inputs.reg_file = files[2]
s2s.inputs.sampling_range = .5
s2s.inputs.sampling_units = "frac"
s2s.inputs.sampling_method = "point"
# Test a basic command line
yield assert_equal, s2s.cmdline, ("mri_vol2surf "
"--hemi lh --o %s --ref %s --reg reg.dat --projfrac 0.500 --mov %s"
% (os.path.join(cwd, "lh.a.mgz"), files[1], files[0]))
# Test identity
s2sish = fs.SampleToSurface(source_file=files[1], reference_file=files[0], hemi="rh")
yield assert_not_equal, s2s, s2sish
# Test hits file name creation
s2s.inputs.hits_file = True
yield assert_equal, s2s._get_outfilename("hits_file"), os.path.join(cwd, "lh.a_hits.mgz")
# Test that a 2-tuple range raises an error
def set_illegal_range():
s2s.inputs.sampling_range = (.2, .5)
yield assert_raises, TraitError, set_illegal_range
# Clean up our mess
clean_directory(cwd, oldwd)
@skipif(no_freesurfer)
def test_surfsmooth():
smooth = fs.SurfaceSmooth()
# Test underlying command
yield assert_equal, smooth.cmd, "mri_surf2surf"
# Test mandatory args exception
yield assert_raises, ValueError, smooth.run
# Create testing files
surf, cwd, oldwd = create_surf_file()
# Test input settings
smooth.inputs.in_file = surf
smooth.inputs.subject_id = "fsaverage"
fwhm = 5
smooth.inputs.fwhm = fwhm
smooth.inputs.hemi = "lh"
# Test the command line
yield assert_equal, smooth.cmdline, \
("mri_surf2surf --cortex --fwhm 5.0000 --hemi lh --sval %s --tval %s/lh.a_smooth%d.nii --s fsaverage" %
(surf, cwd, fwhm))
# Test identity
shmooth = fs.SurfaceSmooth(
subject_id="fsaverage", fwhm=6, in_file=surf, hemi="lh", out_file="lh.a_smooth.nii")
yield assert_not_equal, smooth, shmooth
# Clean up
clean_directory(cwd, oldwd)
@skipif(no_freesurfer)
def test_surfxfm():
xfm = fs.SurfaceTransform()
# Test underlying command
yield assert_equal, xfm.cmd, "mri_surf2surf"
# Test mandatory args exception
yield assert_raises, ValueError, xfm.run
# Create testing files
surf, cwd, oldwd = create_surf_file()
# Test input settings
xfm.inputs.source_file = surf
xfm.inputs.source_subject = "my_subject"
xfm.inputs.target_subject = "fsaverage"
xfm.inputs.hemi = "lh"
# Test the command line
yield assert_equal, xfm.cmdline, \
("mri_surf2surf --hemi lh --tval %s/lh.a.fsaverage.nii --sval %s --srcsubject my_subject --trgsubject fsaverage" %
(cwd, surf))
# Test identity
xfmish = fs.SurfaceTransform(
source_subject="fsaverage", target_subject="my_subject", source_file=surf, hemi="lh")
yield assert_not_equal, xfm, xfmish
# Clean up
clean_directory(cwd, oldwd)
@skipif(no_freesurfer)
def test_applymask():
masker = fs.ApplyMask()
filelist, testdir, origdir = create_files_in_directory()
# Test underlying command
yield assert_equal, masker.cmd, "mri_mask"
# Test exception with mandatory args absent
yield assert_raises, ValueError, masker.run
for input in ["in_file", "mask_file"]:
indict = {input: filelist[0]}
willbreak = fs.ApplyMask(**indict)
yield assert_raises, ValueError, willbreak.run
# Now test a basic command line
masker.inputs.in_file = filelist[0]
masker.inputs.mask_file = filelist[1]
outfile = os.path.join(testdir, "a_masked.nii")
yield assert_equal, masker.cmdline, "mri_mask a.nii b.nii %s" % outfile
# Now test that optional inputs get formatted properly
masker.inputs.mask_thresh = 2
yield assert_equal, masker.cmdline, "mri_mask -T 2.0000 a.nii b.nii %s" % outfile
masker.inputs.use_abs = True
yield assert_equal, masker.cmdline, "mri_mask -T 2.0000 -abs a.nii b.nii %s" % outfile
# Now clean up
clean_directory(testdir, origdir)
@skipif(no_freesurfer)
def test_surfshots():
fotos = fs.SurfaceSnapshots()
# Test underlying command
yield assert_equal, fotos.cmd, "tksurfer"
# Test mandatory args exception
yield assert_raises, ValueError, fotos.run
# Create testing files
files, cwd, oldwd = create_files_in_directory()
    # Test input settings
fotos.inputs.subject_id = "fsaverage"
fotos.inputs.hemi = "lh"
fotos.inputs.surface = "pial"
# Test a basic command line
yield assert_equal, fotos.cmdline, "tksurfer fsaverage lh pial -tcl snapshots.tcl"
# Test identity
schmotos = fs.SurfaceSnapshots(subject_id="mysubject", hemi="rh", surface="white")
yield assert_not_equal, fotos, schmotos
# Test that the tcl script gets written
fotos._write_tcl_script()
yield assert_equal, True, os.path.exists("snapshots.tcl")
# Test that we can use a different tcl script
    open("other.tcl", "w").close()
fotos.inputs.tcl_script = "other.tcl"
yield assert_equal, fotos.cmdline, "tksurfer fsaverage lh pial -tcl other.tcl"
# Test that the interface crashes politely if graphics aren't enabled
try:
hold_display = os.environ["DISPLAY"]
del os.environ["DISPLAY"]
yield assert_raises, RuntimeError, fotos.run
os.environ["DISPLAY"] = hold_display
except KeyError:
pass
# Clean up our mess
clean_directory(cwd, oldwd)
| bsd-3-clause |
zvolsky/muzika | languages/zh.py | 152 | 10080 | # coding: utf8
{
'!langcode!': 'zh-tw',
'!langname!': '中文',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"更新" 是選擇性的條件式, 格式就像 "欄位1=\'值\'". 但是 JOIN 的資料不可以使用 update 或是 delete"',
'%s %%{row} deleted': '已刪除 %s 筆',
'%s %%{row} updated': '已更新 %s 筆',
'%s selected': '%s 已選擇',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(something like "it-it")': '(格式類似 "zh-tw")',
'A new version of web2py is available': '新版的 web2py 已發行',
'A new version of web2py is available: %s': '新版的 web2py 已發行: %s',
'about': '關於',
'About': '關於',
'About application': '關於本應用程式',
'Access Control': 'Access Control',
'Admin is disabled because insecure channel': '管理功能(Admin)在不安全連線環境下自動關閉',
'Admin is disabled because unsecure channel': '管理功能(Admin)在不安全連線環境下自動關閉',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': '點此處進入管理介面',
'Administrator Password:': '管理員密碼:',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': '因為來自非安全通道,管理介面關閉',
'Are you sure you want to delete file "%s"?': '確定要刪除檔案"%s"?',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Are you sure you want to uninstall application "%s"': '確定要移除應用程式 "%s"',
'Are you sure you want to uninstall application "%s"?': '確定要移除應用程式 "%s"',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': '注意: 登入管理帳號需要安全連線(HTTPS)或是在本機連線(localhost).',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': '注意: 因為在測試模式不保證多執行緒安全性,也就是說不可以同時執行多個測試案例',
'ATTENTION: you cannot edit the running application!': '注意:不可編輯正在執行的應用程式!',
'Authentication': '驗證',
'Available Databases and Tables': '可提供的資料庫和資料表',
'Buy this book': 'Buy this book',
'cache': '快取記憶體',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': '不可空白',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': '無法編譯:應用程式中含有錯誤,請除錯後再試一次.',
'Change Password': '變更密碼',
'change password': '變更密碼',
'Check to delete': '打勾代表刪除',
'Check to delete:': '點選以示刪除:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': '客戶端網址(IP)',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': '控件',
'Controllers': '控件',
'Copyright': '版權所有',
'Create new application': '創建應用程式',
'Current request': '目前網路資料要求(request)',
'Current response': '目前網路資料回應(response)',
'Current session': '目前網路連線資訊(session)',
'customize me!': '請調整我!',
'data uploaded': '資料已上傳',
'Database': '資料庫',
'Database %s select': '已選擇 %s 資料庫',
'Date and Time': '日期和時間',
'db': 'db',
'DB Model': '資料庫模組',
'Delete': '刪除',
'Delete:': '刪除:',
'Demo': 'Demo',
'Deploy on Google App Engine': '配置到 Google App Engine',
'Deployment Recipes': 'Deployment Recipes',
'Description': '描述',
'DESIGN': '設計',
'design': '設計',
'Design for': '設計為了',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': '完成!',
'Download': 'Download',
'E-mail': '電子郵件',
'EDIT': '編輯',
'Edit': '編輯',
'Edit application': '編輯應用程式',
'Edit current record': '編輯當前紀錄',
'edit profile': '編輯設定檔',
'Edit Profile': '編輯設定檔',
'Edit This App': '編輯本應用程式',
'Editing file': '編輯檔案',
'Editing file "%s"': '編輯檔案"%s"',
'Email and SMS': 'Email and SMS',
'Error logs for "%(app)s"': '"%(app)s"的錯誤紀錄',
'Errors': 'Errors',
'export as csv file': '以逗號分隔檔(csv)格式匯出',
'FAQ': 'FAQ',
'First name': '名',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Functions with no doctests will result in [passed] tests.': '沒有 doctests 的函式會顯示 [passed].',
'Group ID': '群組編號',
'Groups': 'Groups',
'Hello World': '嗨! 世界',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': '匯入/匯出',
'Index': '索引',
'insert new': '插入新資料',
'insert new %s': '插入新資料 %s',
'Installed applications': '已安裝應用程式',
'Internal State': '內部狀態',
'Introduction': 'Introduction',
'Invalid action': '不合法的動作(action)',
'Invalid email': '不合法的電子郵件',
'Invalid Query': '不合法的查詢',
'invalid request': '不合法的網路要求(request)',
'Key': 'Key',
'Language files (static strings) updated': '語言檔已更新',
'Languages': '各國語言',
'Last name': '姓',
'Last saved on:': '最後儲存時間:',
'Layout': '網頁配置',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'License for': '軟體版權為',
'Live Chat': 'Live Chat',
'login': '登入',
'Login': '登入',
'Login to the Administrative Interface': '登入到管理員介面',
'logout': '登出',
'Logout': '登出',
'Lost Password': '密碼遺忘',
'Main Menu': '主選單',
'Manage Cache': 'Manage Cache',
'Menu Model': '選單模組(menu)',
'Models': '資料模組',
'Modules': '程式模組',
'My Sites': 'My Sites',
'Name': '名字',
'New Record': '新紀錄',
'new record inserted': '已插入新紀錄',
'next 100 rows': '往後 100 筆',
'NO': '否',
'No databases in this application': '這應用程式不含資料庫',
'Online examples': '點此處進入線上範例',
'or import from csv file': '或是從逗號分隔檔(CSV)匯入',
'Origin': '原文',
'Original/Translation': '原文/翻譯',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': '密碼',
"Password fields don't match": '密碼欄不匹配',
'Peeking at file': '選擇檔案',
'Plugins': 'Plugins',
'Powered by': '基於以下技術構建:',
'Preface': 'Preface',
'previous 100 rows': '往前 100 筆',
'Python': 'Python',
'Query:': '查詢:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': '紀錄',
'record does not exist': '紀錄不存在',
'Record ID': '紀錄編號',
'Record id': '紀錄編號',
'Register': '註冊',
'register': '註冊',
'Registration key': '註冊金鑰',
'Remember me (for 30 days)': '記住我(30 天)',
'Reset Password key': '重設密碼',
'Resolve Conflict file': '解決衝突檔案',
'Role': '角色',
'Rows in Table': '在資料表裏的資料',
'Rows selected': '筆資料被選擇',
'Saved file hash:': '檔案雜湊值已紀錄:',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': '狀態',
'Static files': '靜態檔案',
'Statistics': 'Statistics',
'Stylesheet': '網頁風格檔',
'submit': 'submit',
'Submit': '傳送',
'Support': 'Support',
'Sure you want to delete this object?': '確定要刪除此物件?',
'Table': '資料表',
'Table name': '資料表名稱',
'Testing application': '測試中的應用程式',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"查詢"是一個像 "db.表1.欄位1==\'值\'" 的條件式. 以"db.表1.欄位1==db.表2.欄位2"方式則相當於執行 JOIN SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'There are no controllers': '沒有控件(controllers)',
'There are no models': '沒有資料庫模組(models)',
'There are no modules': '沒有程式模組(modules)',
'There are no static files': '沒有靜態檔案',
'There are no translators, only default language is supported': '沒有翻譯檔,只支援原始語言',
'There are no views': '沒有視圖',
'This App': 'This App',
'This is the %(filename)s template': '這是%(filename)s檔案的樣板(template)',
'Ticket': '問題單',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': '時間標記',
'Twitter': 'Twitter',
'Unable to check for upgrades': '無法做升級檢查',
'Unable to download': '無法下載',
'Unable to download app': '無法下載應用程式',
'unable to parse csv file': '無法解析逗號分隔檔(csv)',
'Update:': '更新:',
'Upload existing application': '更新存在的應用程式',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '使用下列方式來組合更複雜的條件式, (...)&(...) 代表同時存在的條件, (...)|(...) 代表擇一的條件, ~(...)則代表反向條件.',
'User %(id)s Logged-in': '使用者 %(id)s 已登入',
'User %(id)s Registered': '使用者 %(id)s 已註冊',
'User ID': '使用者編號',
'Verify Password': '驗證密碼',
'Videos': 'Videos',
'View': '視圖',
'Views': '視圖',
'Welcome %s': '歡迎 %s',
'Welcome to web2py': '歡迎使用 web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'YES': '是',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
| agpl-3.0 |
alexanderturner/ansible | lib/ansible/modules/cloud/univention/udm_group.py | 21 | 5387 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright (c) 2016, Adfinis SyGroup AG
# Tobias Rueetschi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: udm_group
version_added: "2.2"
author: "Tobias Rueetschi (@2-B)"
short_description: Manage posix groups
description:
- "This module allows to manage user groups on a univention corporate server (UCS).
It uses the python API of the UCS to create a new object or edit it."
requirements:
- Python >= 2.6
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the group is present or not.
name:
required: true
description:
- Name of the posix group.
description:
required: false
description:
- Group description.
position:
required: false
description:
- define the whole ldap position of the group, e.g.
C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
ou:
required: false
description:
- LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
subpath:
required: false
description:
- Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
'''
EXAMPLES = '''
# Create a POSIX group
- udm_group:
name: g123m-1A
# Create a POSIX group with the exact DN
# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com)
- udm_group:
name: g123m-1A
subpath: 'cn=classes,cn=students,cn=groups'
ou: school
# or
- udm_group:
name: g123m-1A
position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com'
'''
RETURN = '''# '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.univention_umc import (
umc_module_for_add,
umc_module_for_edit,
ldap_search,
base_dn,
)
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True,
type='str'),
description = dict(default=None,
type='str'),
position = dict(default='',
type='str'),
ou = dict(default='',
type='str'),
subpath = dict(default='cn=groups',
type='str'),
state = dict(default='present',
choices=['present', 'absent'],
type='str')
),
supports_check_mode=True
)
name = module.params['name']
description = module.params['description']
position = module.params['position']
ou = module.params['ou']
subpath = module.params['subpath']
state = module.params['state']
    changed = False
    diff = None  # ensure exit_json below never references an unbound name
groups = list(ldap_search(
'(&(objectClass=posixGroup)(cn={}))'.format(name),
attr=['cn']
))
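    # Assemble the LDAP container DN: either the explicit `position`, or
    # "<subpath>,ou=<ou>,<base_dn>" built from the individual parts.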
if position != '':
container = position
else:
if ou != '':
ou = 'ou={},'.format(ou)
if subpath != '':
subpath = '{},'.format(subpath)
container = '{}{}{}'.format(subpath, ou, base_dn())
group_dn = 'cn={},{}'.format(name, container)
exists = bool(len(groups))
if state == 'present':
try:
if not exists:
grp = umc_module_for_add('groups/group', container)
else:
grp = umc_module_for_edit('groups/group', group_dn)
grp['name'] = name
grp['description'] = description
diff = grp.diff()
            changed = diff != []
if not module.check_mode:
if not exists:
grp.create()
else:
grp.modify()
        except Exception:
module.fail_json(
msg="Creating/editing group {} in {} failed".format(name, container)
)
if state == 'absent' and exists:
try:
grp = umc_module_for_edit('groups/group', group_dn)
if not module.check_mode:
grp.remove()
changed = True
        except Exception:
module.fail_json(
msg="Removing group {} failed".format(name)
)
module.exit_json(
changed=changed,
name=name,
diff=diff,
container=container
)
if __name__ == '__main__':
main()
| gpl-3.0 |
venediktov/vanilla-rtb | examples/bidder/generator/ico.py | 1 | 2403 | import random
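# Generates three tab-separated data files for the example bidder:
# ico_domains (referrer domain -> id), ico_campaign (domain id -> targeted
# campaign id) and ico_ads (ad id, campaign id, width, height, position,
# max bid, creative markup).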
max_campaigns = 100
required = {'ref': ['www.coinbase.com'], 'size': [300, 100], 'campaigns': []}
domain_list = ['www.coinbase.com', 'www.coindesk.com' , 'www.cointelegraph.com', 'www.coinmarketcap.com']
ico_domains = []
for n, ref in enumerate(domain_list):
ico_domains.append([ref, n])
file = open("../data/ico_domains", "w")
for ref, n in ico_domains:
file.write("%s\t%d\n" % (ref,n))
file.close()
size = [
[88,31],[120,60],[120,240],[120,600],[125,125],[160,600],[180,150],
[200,200],[200,446],[220,90],[234,60],[240,133],[240,400],[250,250],
[250,360],[292,30],[300,31],[300,50],[300,100],[300,250],[300,600],
[300,1050],[320,50],[320,100],[336,280],[468,60],[580,400],[728,90],
[750,100],[750,200],[750,300],[930,180],[950,90],[960,90],[970,66],
[970,90],[970,250],[980,90],[980,120]
]
position = [0, 1, 2]
max_bid = [1, 1000]
code = """<script>alert(" ad %d!");</script>"""
file = open("../data/ico_campaign", "w")
for domain, ref_id in ico_domains:
max_targetings = random.randint(1, max_campaigns/10)
start_pos = random.randint(1, max_campaigns-max_targetings)
for i in range(start_pos, start_pos+max_targetings+1):
file.write("%d\t%d\n" % (ref_id, i))
if domain == required['ref'][0] :
for i in range(start_pos, start_pos+max_targetings+1):
required['campaigns'].append(i)
file.close()
max_ad = 1
max_ads_in_campaign = 30
file = open("../data/ico_ads", "w")
for campaign_id in range(1, max_campaigns+1):
ads_in_campaign = random.randint(1, max_ads_in_campaign)
for ad_id in range(max_ad, max_ad+ads_in_campaign+1):
        rand_size = random.choice(size)
        file.write("%u\t%u\t%u\t%u\t%u\t%u\t%s\n" % (
            ad_id,
            campaign_id,
            rand_size[0],
            rand_size[1],
random.choice(position),
random.randint(max_bid[0], max_bid[1]),
code % (ad_id)
))
if campaign_id in required['campaigns']:
ads_in_campaign += 1
file.write("%u\t%u\t%u\t%u\t%u\t%u\t%s\n" % (
max_ad+ads_in_campaign,
campaign_id,
required['size'][0],
required['size'][1],
random.choice(position),
random.randint(max_bid[0], max_bid[1]),
code % (max_ad+ads_in_campaign)
))
max_ad+=ads_in_campaign+1
file.close()
| gpl-3.0 |
se4u/pylearn2 | pylearn2/gui/graph_2D.py | 28 | 4942 | """
Classes for making simple 2D visualizations.
"""
import numpy as N
from theano.compat.six.moves import xrange
from theano import config
class Graph2D(object):
"""
A class for plotting simple graphs in two dimensions.
Parameters
----------
shape : tuple
The shape of the display of the graph in (rows, cols)
format. Units are pixels
xlim : tuple
A tuple specifying (xmin, xmax). This determines what
portion of the real numbers are displayed in the graph.
ycenter : float
The coordinate of the center pixel along the y axis.
"""
def __init__(self, shape, xlim, ycenter):
self.xmin = 0.
self.xmax = 0.
self.set_shape(shape)
self.set_xlim(xlim)
self.set_ycenter(ycenter)
self.components = []
def set_shape(self, shape):
"""
Sets the shape of the display (in pixels)
Parameters
----------
shape : tuple
The (rows, columns) of the display.
"""
self.rows = shape[0]
self.cols = shape[1]
def set_xlim(self, xlim):
"""
Sets the range of space that is plotted in the graph.
Parameters
----------
xlim : tuple
The range (xmin, xmax)
"""
# x coordinate of center of leftmost pixel
self.xmin = xlim[0]
# x coordinate of center of rightmost pixel
self.xmax = xlim[1]
self.delta_x = (self.xmax-self.xmin)/float(self.cols-1)
def set_ycenter(self, ycenter):
"""
Sets the y coordinate of the central pixel of the display.
Parameters
----------
ycenter : float
The desired coordinate.
"""
self.delta_y = self.delta_x
        self.ymin = ycenter - (self.rows // 2) * self.delta_y
self.ymax = self.ymin + (self.rows -1) * self.delta_y
def render(self):
"""
Renders the graph.
Returns
-------
output : ndarray
An ndarray in (rows, cols, RGB) format.
"""
rval = N.zeros((self.rows, self.cols, 3))
for component in self.components:
rval = component.render( prev_layer = rval, parent = self )
assert rval is not None
return rval
def get_coords_for_col(self, i):
"""
Returns the coordinates of every pixel in column i of the
graph.
Parameters
----------
i : int
Column index
Returns
-------
coords : ndarray
A vector containing the real-number coordinates of every
pixel in column i of the graph.
"""
X = N.zeros((self.rows,2),dtype=config.floatX)
X[:,0] = self.xmin + float(i) * self.delta_x
X[:,1] = self.ymin + N.cast[config.floatX](N.asarray(range(self.rows-1,-1,-1))) * self.delta_y
return X
class HeatMap(object):
"""
A class for plotting 2-D functions as heatmaps.
Parameters
----------
f : callable
A callable that takes a design matrix of 2D coordinates and returns a
vector containing the function value at those coordinates
normalizer : callable, optional
None or a callable that takes a 2D numpy array and returns a 2D numpy
array
render_mode : str
* 'o' : opaque.
* 'r' : render only to the (r)ed channel
"""
def __init__(self, f, normalizer=None, render_mode = 'o'):
self.f = f
self.normalizer = normalizer
self.render_mode = render_mode
def render(self, prev_layer, parent):
"""
Renders the heatmap.
Parameters
----------
prev_layer : numpy ndarray
An image that will be copied into the new output.
The new image will be rendered on top of the first
one, i.e., `prev_layer` will be visible through the
new heatmap if the new heatmap is not rendered in
fully opaque mode.
parent : Graph2D
A Graph2D object that defines the coordinate system
of the heatmap.
Returns
-------
img : The rendered heatmap
"""
my_img = prev_layer * 0.0
for i in xrange(prev_layer.shape[1]):
X = parent.get_coords_for_col(i)
f = self.f(X)
if len(f.shape) == 1:
for j in xrange(3):
my_img[:,i,j] = f
else:
my_img[:,i,:] = f
#end if
#end for i
if self.normalizer is not None:
my_img = self.normalizer(my_img)
assert my_img is not None
if self.render_mode == 'r':
my_img[:,:,1:] = prev_layer[:,:,1:]
elif self.render_mode == 'o':
pass
else:
raise NotImplementedError()
return my_img
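# A minimal usage sketch (illustrative, not part of the original module):
# render a 2-D Gaussian bump as a heatmap using only the classes above.
if __name__ == '__main__':
    def gaussian(X):
        # X is a (rows, 2) design matrix of (x, y) coordinates for one column
        return N.exp(-(X ** 2).sum(axis=1))
    graph = Graph2D(shape=(100, 100), xlim=(-3., 3.), ycenter=0.)
    graph.components.append(HeatMap(f=gaussian))
    img = graph.render()  # (100, 100, 3) array with values in (0, 1]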
| bsd-3-clause |
singingwolfboy/oauthlib | oauthlib/oauth2/rfc6749/clients/mobile_application.py | 85 | 8793 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
from .base import Client
from ..parameters import prepare_grant_uri
from ..parameters import parse_implicit_response
class MobileApplicationClient(Client):
"""A public client utilizing the implicit code grant workflow.
A user-agent-based application is a public client in which the
client code is downloaded from a web server and executes within a
user-agent (e.g. web browser) on the device used by the resource
owner. Protocol data and credentials are easily accessible (and
often visible) to the resource owner. Since such applications
reside within the user-agent, they can make seamless use of the
user-agent capabilities when requesting authorization.
The implicit grant type is used to obtain access tokens (it does not
support the issuance of refresh tokens) and is optimized for public
clients known to operate a particular redirection URI. These clients
are typically implemented in a browser using a scripting language
such as JavaScript.
As a redirection-based flow, the client must be capable of
interacting with the resource owner's user-agent (typically a web
browser) and capable of receiving incoming requests (via redirection)
from the authorization server.
Unlike the authorization code grant type in which the client makes
separate requests for authorization and access token, the client
receives the access token as the result of the authorization request.
The implicit grant type does not include client authentication, and
relies on the presence of the resource owner and the registration of
the redirection URI. Because the access token is encoded into the
redirection URI, it may be exposed to the resource owner and other
applications residing on the same device.
"""
def prepare_request_uri(self, uri, redirect_uri=None, scope=None,
state=None, **kwargs):
"""Prepare the implicit grant request URI.
The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the "application/x-www-form-urlencoded" format, per `Appendix B`_:
:param redirect_uri: OPTIONAL. The redirect URI must be an absolute URI
                             and it should have been registered with the OAuth
provider prior to use. As described in `Section 3.1.2`_.
:param scope: OPTIONAL. The scope of the access request as described by
                      `Section 3.3`_. These may be any string but are commonly
URIs or various categories such as ``videos`` or ``documents``.
:param state: RECOMMENDED. An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent back
to the client. The parameter SHOULD be used for preventing
cross-site request forgery as described in `Section 10.12`_.
:param kwargs: Extra arguments to include in the request URI.
In addition to supplied parameters, OAuthLib will append the ``client_id``
that was provided in the constructor as well as the mandatory ``response_type``
argument, set to ``token``::
>>> from oauthlib.oauth2 import MobileApplicationClient
>>> client = MobileApplicationClient('your_id')
>>> client.prepare_request_uri('https://example.com')
'https://example.com?client_id=your_id&response_type=token'
>>> client.prepare_request_uri('https://example.com', redirect_uri='https://a.b/callback')
'https://example.com?client_id=your_id&response_type=token&redirect_uri=https%3A%2F%2Fa.b%2Fcallback'
>>> client.prepare_request_uri('https://example.com', scope=['profile', 'pictures'])
'https://example.com?client_id=your_id&response_type=token&scope=profile+pictures'
>>> client.prepare_request_uri('https://example.com', foo='bar')
'https://example.com?client_id=your_id&response_type=token&foo=bar'
.. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B
.. _`Section 2.2`: http://tools.ietf.org/html/rfc6749#section-2.2
.. _`Section 3.1.2`: http://tools.ietf.org/html/rfc6749#section-3.1.2
.. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 10.12`: http://tools.ietf.org/html/rfc6749#section-10.12
"""
return prepare_grant_uri(uri, self.client_id, 'token',
redirect_uri=redirect_uri, state=state, scope=scope, **kwargs)
def parse_request_uri_response(self, uri, state=None, scope=None):
"""Parse the response URI fragment.
If the resource owner grants the access request, the authorization
server issues an access token and delivers it to the client by adding
the following parameters to the fragment component of the redirection
URI using the "application/x-www-form-urlencoded" format:
:param uri: The callback URI that resulted from the user being redirected
back from the provider to you, the client.
:param state: The state provided in the authorization request.
:param scope: The scopes provided in the authorization request.
:return: Dictionary of token parameters.
:raises: OAuth2Error if response is invalid.
A successful response should always contain
**access_token**
The access token issued by the authorization server. Often
a random string.
**token_type**
The type of the token issued as described in `Section 7.1`_.
Commonly ``Bearer``.
**state**
If you provided the state parameter in the authorization phase, then
the provider is required to include that exact state value in the
response.
While it is not mandated it is recommended that the provider include
**expires_in**
The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
Providers may supply this in all responses but are required to only
if it has changed since the authorization request.
A few example responses can be seen below::
>>> response_uri = 'https://example.com/callback#access_token=sdlfkj452&state=ss345asyht&token_type=Bearer&scope=hello+world'
>>> from oauthlib.oauth2 import MobileApplicationClient
>>> client = MobileApplicationClient('your_id')
>>> client.parse_request_uri_response(response_uri)
{
'access_token': 'sdlfkj452',
'token_type': 'Bearer',
'state': 'ss345asyht',
'scope': [u'hello', u'world']
}
>>> client.parse_request_uri_response(response_uri, state='other')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "oauthlib/oauth2/rfc6749/__init__.py", line 598, in parse_request_uri_response
**scope**
File "oauthlib/oauth2/rfc6749/parameters.py", line 197, in parse_implicit_response
raise ValueError("Mismatching or missing state in params.")
ValueError: Mismatching or missing state in params.
>>> def alert_scope_changed(message, old, new):
... print(message, old, new)
...
>>> oauthlib.signals.scope_changed.connect(alert_scope_changed)
            >>> client.parse_request_uri_response(response_uri, scope=['other'])
('Scope has changed from "other" to "hello world".', ['other'], ['hello', 'world'])
.. _`Section 7.1`: http://tools.ietf.org/html/rfc6749#section-7.1
.. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
"""
self.token = parse_implicit_response(uri, state=state, scope=scope)
self._populate_attributes(self.token)
return self.token
| bsd-3-clause |
yashsharan/sympy | sympy/sets/sets.py | 5 | 63689 | from __future__ import print_function, division
from itertools import product
from sympy.core.sympify import (_sympify, sympify, converter,
SympifyError)
from sympy.core.basic import Basic
from sympy.core.expr import Expr
from sympy.core.singleton import Singleton, S
from sympy.core.evalf import EvalfMixin
from sympy.core.numbers import Float
from sympy.core.compatibility import (iterable, with_metaclass,
ordered, range, PY3)
from sympy.core.evaluate import global_evaluate
from sympy.core.function import FunctionClass
from sympy.core.mul import Mul
from sympy.core.relational import Eq
from sympy.core.symbol import Symbol, Dummy
from sympy.sets.contains import Contains
from sympy.utilities.misc import func_name, filldedent
from mpmath import mpi, mpf
from sympy.logic.boolalg import And, Or, Not, true, false
from sympy.utilities import subsets
class Set(Basic):
"""
The base class for any kind of set.
This is not meant to be used directly as a container of items. It does not
behave like the builtin ``set``; see :class:`FiniteSet` for that.
Real intervals are represented by the :class:`Interval` class and unions of
sets by the :class:`Union` class. The empty set is represented by the
:class:`EmptySet` class and available as a singleton as ``S.EmptySet``.
"""
is_number = False
is_iterable = False
is_interval = False
is_FiniteSet = False
is_Interval = False
is_ProductSet = False
is_Union = False
is_Intersection = None
is_EmptySet = None
is_UniversalSet = None
is_Complement = None
is_ComplexRegion = False
@staticmethod
def _infimum_key(expr):
"""
Return infimum (if possible) else S.Infinity.
"""
try:
infimum = expr.inf
assert infimum.is_comparable
except (NotImplementedError,
AttributeError, AssertionError, ValueError):
infimum = S.Infinity
return infimum
def union(self, other):
"""
Returns the union of 'self' and 'other'.
Examples
========
As a shortcut it is possible to use the '+' operator:
>>> from sympy import Interval, FiniteSet
>>> Interval(0, 1).union(Interval(2, 3))
[0, 1] U [2, 3]
>>> Interval(0, 1) + Interval(2, 3)
[0, 1] U [2, 3]
>>> Interval(1, 2, True, True) + FiniteSet(2, 3)
(1, 2] U {3}
Similarly it is possible to use the '-' operator for set differences:
>>> Interval(0, 2) - Interval(0, 1)
(1, 2]
>>> Interval(1, 3) - FiniteSet(2)
[1, 2) U (2, 3]
"""
return Union(self, other)
def intersect(self, other):
"""
Returns the intersection of 'self' and 'other'.
>>> from sympy import Interval
>>> Interval(1, 3).intersect(Interval(1, 2))
[1, 2]
>>> from sympy import imageset, Lambda, symbols, S
>>> n, m = symbols('n m')
>>> a = imageset(Lambda(n, 2*n), S.Integers)
>>> a.intersect(imageset(Lambda(m, 2*m + 1), S.Integers))
EmptySet()
"""
return Intersection(self, other)
def intersection(self, other):
"""
Alias for :meth:`intersect()`
"""
return self.intersect(other)
def _intersect(self, other):
"""
This function should only be used internally
self._intersect(other) returns a new, intersected set if self knows how
to intersect itself with other, otherwise it returns ``None``
When making a new set class you can be assured that other will not
be a :class:`Union`, :class:`FiniteSet`, or :class:`EmptySet`
Used within the :class:`Intersection` class
"""
return None
def is_disjoint(self, other):
"""
Returns True if 'self' and 'other' are disjoint
Examples
========
>>> from sympy import Interval
>>> Interval(0, 2).is_disjoint(Interval(1, 2))
False
>>> Interval(0, 2).is_disjoint(Interval(3, 4))
True
References
==========
.. [1] http://en.wikipedia.org/wiki/Disjoint_sets
"""
return self.intersect(other) == S.EmptySet
def isdisjoint(self, other):
"""
Alias for :meth:`is_disjoint()`
"""
return self.is_disjoint(other)
def _union(self, other):
"""
This function should only be used internally
self._union(other) returns a new, joined set if self knows how
to join itself with other, otherwise it returns ``None``.
It may also return a python set of SymPy Sets if they are somehow
simpler. If it does this it must be idempotent i.e. the sets returned
        must return ``None`` when _union'ed with each other.
Used within the :class:`Union` class
"""
return None
def complement(self, universe):
"""
        The complement of 'self' w.r.t. the given universe.
Examples
========
>>> from sympy import Interval, S
>>> Interval(0, 1).complement(S.Reals)
(-oo, 0) U (1, oo)
>>> Interval(0, 1).complement(S.UniversalSet)
UniversalSet() \ [0, 1]
"""
return Complement(universe, self)
def _complement(self, other):
# this behaves as other - self
if isinstance(other, ProductSet):
# For each set consider it or it's complement
# We need at least one of the sets to be complemented
# Consider all 2^n combinations.
# We can conveniently represent these options easily using a
# ProductSet
            # XXX: this doesn't work if the dimensions of the sets aren't the same.
            # A - B is essentially the same as A if B has a different
            # dimensionality than A
switch_sets = ProductSet(FiniteSet(o, o - s) for s, o in
zip(self.sets, other.sets))
product_sets = (ProductSet(*set) for set in switch_sets)
# Union of all combinations but this one
return Union(p for p in product_sets if p != other)
elif isinstance(other, Interval):
if isinstance(self, Interval) or isinstance(self, FiniteSet):
return Intersection(other, self.complement(S.Reals))
elif isinstance(other, Union):
return Union(o - self for o in other.args)
elif isinstance(other, Complement):
return Complement(other.args[0], Union(other.args[1], self), evaluate=False)
elif isinstance(other, EmptySet):
return S.EmptySet
elif isinstance(other, FiniteSet):
return FiniteSet(*[el for el in other if self.contains(el) != True])
def symmetric_difference(self, other):
"""
Returns symmetric difference of `self` and `other`.
Examples
========
>>> from sympy import Interval, S
>>> Interval(1, 3).symmetric_difference(S.Reals)
(-oo, 1) U (3, oo)
>>> Interval(1, 10).symmetric_difference(S.Reals)
(-oo, 1) U (10, oo)
>>> from sympy import S, EmptySet
>>> S.Reals.symmetric_difference(EmptySet())
(-oo, oo)
References
==========
.. [1] https://en.wikipedia.org/wiki/Symmetric_difference
"""
return SymmetricDifference(self, other)
def _symmetric_difference(self, other):
return Union(Complement(self, other), Complement(other, self))
@property
def inf(self):
"""
The infimum of 'self'
Examples
========
>>> from sympy import Interval, Union
>>> Interval(0, 1).inf
0
>>> Union(Interval(0, 1), Interval(2, 3)).inf
0
"""
return self._inf
@property
def _inf(self):
raise NotImplementedError("(%s)._inf" % self)
@property
def sup(self):
"""
The supremum of 'self'
Examples
========
>>> from sympy import Interval, Union
>>> Interval(0, 1).sup
1
>>> Union(Interval(0, 1), Interval(2, 3)).sup
3
"""
return self._sup
@property
def _sup(self):
raise NotImplementedError("(%s)._sup" % self)
def contains(self, other):
"""
Returns True if 'other' is contained in 'self' as an element.
As a shortcut it is possible to use the 'in' operator:
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).contains(0.5)
True
>>> 0.5 in Interval(0, 1)
True
"""
other = sympify(other, strict=True)
ret = sympify(self._contains(other))
if ret is None:
ret = Contains(other, self, evaluate=False)
return ret
def _contains(self, other):
raise NotImplementedError("(%s)._contains(%s)" % (self, other))
def is_subset(self, other):
"""
Returns True if 'self' is a subset of 'other'.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 0.5).is_subset(Interval(0, 1))
True
>>> Interval(0, 1).is_subset(Interval(0, 1, left_open=True))
False
"""
if isinstance(other, Set):
return self.intersect(other) == self
else:
raise ValueError("Unknown argument '%s'" % other)
def issubset(self, other):
"""
Alias for :meth:`is_subset()`
"""
return self.is_subset(other)
def is_proper_subset(self, other):
"""
Returns True if 'self' is a proper subset of 'other'.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 0.5).is_proper_subset(Interval(0, 1))
True
>>> Interval(0, 1).is_proper_subset(Interval(0, 1))
False
"""
if isinstance(other, Set):
return self != other and self.is_subset(other)
else:
raise ValueError("Unknown argument '%s'" % other)
def is_superset(self, other):
"""
Returns True if 'self' is a superset of 'other'.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 0.5).is_superset(Interval(0, 1))
False
>>> Interval(0, 1).is_superset(Interval(0, 1, left_open=True))
True
"""
if isinstance(other, Set):
return other.is_subset(self)
else:
raise ValueError("Unknown argument '%s'" % other)
def issuperset(self, other):
"""
Alias for :meth:`is_superset()`
"""
return self.is_superset(other)
def is_proper_superset(self, other):
"""
Returns True if 'self' is a proper superset of 'other'.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).is_proper_superset(Interval(0, 0.5))
True
>>> Interval(0, 1).is_proper_superset(Interval(0, 1))
False
"""
if isinstance(other, Set):
return self != other and self.is_superset(other)
else:
raise ValueError("Unknown argument '%s'" % other)
def _eval_powerset(self):
raise NotImplementedError('Power set not defined for: %s' % self.func)
def powerset(self):
"""
Find the Power set of 'self'.
Examples
========
>>> from sympy import FiniteSet, EmptySet
>>> A = EmptySet()
>>> A.powerset()
{EmptySet()}
>>> A = FiniteSet(1, 2)
>>> a, b, c = FiniteSet(1), FiniteSet(2), FiniteSet(1, 2)
>>> A.powerset() == FiniteSet(a, b, c, EmptySet())
True
References
==========
.. [1] http://en.wikipedia.org/wiki/Power_set
"""
return self._eval_powerset()
@property
def measure(self):
"""
The (Lebesgue) measure of 'self'
Examples
========
>>> from sympy import Interval, Union
>>> Interval(0, 1).measure
1
>>> Union(Interval(0, 1), Interval(2, 3)).measure
2
"""
return self._measure
@property
def boundary(self):
"""
The boundary or frontier of a set
A point x is on the boundary of a set S if
1. x is in the closure of S.
I.e. Every neighborhood of x contains a point in S.
2. x is not in the interior of S.
I.e. There does not exist an open set centered on x contained
entirely within S.
        These are the points on the outer rim of S. If S is open then these
points need not actually be contained within S.
For example, the boundary of an interval is its start and end points.
This is true regardless of whether or not the interval is open.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).boundary
{0, 1}
>>> Interval(0, 1, True, False).boundary
{0, 1}
"""
return self._boundary
@property
def is_open(self):
"""
Property method to check whether a set is open.
A set is open if and only if it has an empty intersection with its
boundary.
Examples
========
>>> from sympy import S
>>> S.Reals.is_open
True
"""
if not Intersection(self, self.boundary):
return True
# We can't confidently claim that an intersection exists
return None
@property
def is_closed(self):
"""
A property method to check whether a set is closed. A set is closed
        if its complement is an open set.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).is_closed
True
"""
return self.boundary.is_subset(self)
@property
def closure(self):
"""
Property method which returns the closure of a set.
The closure is defined as the union of the set itself and its
boundary.
Examples
========
>>> from sympy import S, Interval
>>> S.Reals.closure
(-oo, oo)
>>> Interval(0, 1).closure
[0, 1]
"""
return self + self.boundary
@property
def interior(self):
"""
Property method which returns the interior of a set.
        The interior of a set S consists of all points of S that do not
belong to the boundary of S.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).interior
(0, 1)
>>> Interval(0, 1).boundary.interior
EmptySet()
"""
return self - self.boundary
@property
def _boundary(self):
raise NotImplementedError()
def _eval_imageset(self, f):
from sympy.sets.fancysets import ImageSet
return ImageSet(f, self)
@property
def _measure(self):
raise NotImplementedError("(%s)._measure" % self)
def __add__(self, other):
return self.union(other)
def __or__(self, other):
return self.union(other)
def __and__(self, other):
return self.intersect(other)
def __mul__(self, other):
return ProductSet(self, other)
def __xor__(self, other):
return SymmetricDifference(self, other)
def __pow__(self, exp):
        if not (sympify(exp).is_Integer and exp >= 0):
            raise ValueError("%s: Exponent must be a non-negative Integer" % exp)
return ProductSet([self]*exp)
def __sub__(self, other):
return Complement(self, other)
def __contains__(self, other):
symb = sympify(self.contains(other))
if not (symb is S.true or symb is S.false):
raise TypeError('contains did not evaluate to a bool: %r' % symb)
return bool(symb)
class ProductSet(Set):
"""
Represents a Cartesian Product of Sets.
Returns a Cartesian product given several sets as either an iterable
or individual arguments.
Can use '*' operator on any sets for convenient shorthand.
Examples
========
>>> from sympy import Interval, FiniteSet, ProductSet
>>> I = Interval(0, 5); S = FiniteSet(1, 2, 3)
>>> ProductSet(I, S)
[0, 5] x {1, 2, 3}
>>> (2, 2) in ProductSet(I, S)
True
>>> Interval(0, 1) * Interval(0, 1) # The unit square
[0, 1] x [0, 1]
>>> coin = FiniteSet('H', 'T')
>>> set(coin**2)
{(H, H), (H, T), (T, H), (T, T)}
Notes
=====
- Passes most operations down to the argument sets
- Flattens Products of ProductSets
References
==========
.. [1] http://en.wikipedia.org/wiki/Cartesian_product
"""
is_ProductSet = True
def __new__(cls, *sets, **assumptions):
def flatten(arg):
if isinstance(arg, Set):
if arg.is_ProductSet:
return sum(map(flatten, arg.args), [])
else:
return [arg]
elif iterable(arg):
return sum(map(flatten, arg), [])
raise TypeError("Input must be Sets or iterables of Sets")
sets = flatten(list(sets))
if EmptySet() in sets or len(sets) == 0:
return EmptySet()
if len(sets) == 1:
return sets[0]
return Basic.__new__(cls, *sets, **assumptions)
def _eval_Eq(self, other):
if not other.is_ProductSet:
return
if len(self.args) != len(other.args):
return false
return And(*(Eq(x, y) for x, y in zip(self.args, other.args)))
def _contains(self, element):
"""
'in' operator for ProductSets
Examples
========
>>> from sympy import Interval
>>> (2, 3) in Interval(0, 5) * Interval(0, 5)
True
>>> (10, 10) in Interval(0, 5) * Interval(0, 5)
False
Passes operation on to constituent sets
"""
try:
if len(element) != len(self.args):
return false
except TypeError: # maybe element isn't an iterable
return false
return And(*
[set.contains(item) for set, item in zip(self.sets, element)])
def _intersect(self, other):
"""
This function should only be used internally
See Set._intersect for docstring
"""
if not other.is_ProductSet:
return None
if len(other.args) != len(self.args):
return S.EmptySet
return ProductSet(a.intersect(b)
for a, b in zip(self.sets, other.sets))
def _union(self, other):
if other.is_subset(self):
return self
if not other.is_ProductSet:
return None
if len(other.args) != len(self.args):
return None
if self.args[0] == other.args[0]:
return self.args[0] * Union(ProductSet(self.args[1:]),
ProductSet(other.args[1:]))
if self.args[-1] == other.args[-1]:
return Union(ProductSet(self.args[:-1]),
ProductSet(other.args[:-1])) * self.args[-1]
return None
@property
def sets(self):
return self.args
@property
def _boundary(self):
return Union(ProductSet(b + b.boundary if i != j else b.boundary
for j, b in enumerate(self.sets))
for i, a in enumerate(self.sets))
@property
def is_iterable(self):
return all(set.is_iterable for set in self.sets)
def __iter__(self):
if self.is_iterable:
return product(*self.sets)
else:
raise TypeError("Not all constituent sets are iterable")
@property
def _measure(self):
measure = 1
for set in self.sets:
measure *= set.measure
return measure
def __len__(self):
return Mul(*[len(s) for s in self.args])
def __bool__(self):
return all([bool(s) for s in self.args])
__nonzero__ = __bool__
class Interval(Set, EvalfMixin):
"""
Represents a real interval as a Set.
Usage:
Returns an interval with end points "start" and "end".
For left_open=True (default left_open is False) the interval
will be open on the left. Similarly, for right_open=True the interval
will be open on the right.
Examples
========
>>> from sympy import Symbol, Interval
>>> Interval(0, 1)
[0, 1]
>>> Interval(0, 1, False, True)
[0, 1)
>>> Interval.Ropen(0, 1)
[0, 1)
>>> Interval.Lopen(0, 1)
(0, 1]
>>> Interval.open(0, 1)
(0, 1)
>>> a = Symbol('a', real=True)
>>> Interval(0, a)
[0, a]
Notes
=====
- Only real end points are supported
- Interval(a, b) with a > b will return the empty set
- Use the evalf() method to turn an Interval into an mpmath
'mpi' interval instance
References
==========
.. [1] http://en.wikipedia.org/wiki/Interval_%28mathematics%29
"""
is_Interval = True
def __new__(cls, start, end, left_open=False, right_open=False):
start = _sympify(start)
end = _sympify(end)
left_open = _sympify(left_open)
right_open = _sympify(right_open)
if not all(isinstance(a, (type(true), type(false)))
for a in [left_open, right_open]):
raise NotImplementedError(
"left_open and right_open can have only true/false values, "
"got %s and %s" % (left_open, right_open))
inftys = [S.Infinity, S.NegativeInfinity]
# Only allow real intervals (use symbols with 'is_real=True').
if not all(i.is_real is not False or i in inftys for i in (start, end)):
raise ValueError("Non-real intervals are not supported")
# evaluate if possible
if (end < start) == True:
return S.EmptySet
elif (end - start).is_negative:
return S.EmptySet
if end == start and (left_open or right_open):
return S.EmptySet
if end == start and not (left_open or right_open):
if start == S.Infinity or start == S.NegativeInfinity:
return S.EmptySet
return FiniteSet(end)
# Make sure infinite interval end points are open.
if start == S.NegativeInfinity:
left_open = true
if end == S.Infinity:
right_open = true
return Basic.__new__(cls, start, end, left_open, right_open)
@property
def start(self):
"""
The left end point of 'self'.
This property takes the same value as the 'inf' property.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).start
0
"""
return self._args[0]
_inf = left = start
@classmethod
def open(cls, a, b):
"""Return an interval including neither boundary."""
return cls(a, b, True, True)
@classmethod
def Lopen(cls, a, b):
"""Return an interval not including the left boundary."""
return cls(a, b, True, False)
@classmethod
def Ropen(cls, a, b):
"""Return an interval not including the right boundary."""
return cls(a, b, False, True)
@property
def end(self):
"""
The right end point of 'self'.
This property takes the same value as the 'sup' property.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).end
1
"""
return self._args[1]
_sup = right = end
@property
def left_open(self):
"""
True if 'self' is left-open.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1, left_open=True).left_open
True
>>> Interval(0, 1, left_open=False).left_open
False
"""
return self._args[2]
@property
def right_open(self):
"""
True if 'self' is right-open.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1, right_open=True).right_open
True
>>> Interval(0, 1, right_open=False).right_open
False
"""
return self._args[3]
def _intersect(self, other):
"""
This function should only be used internally
See Set._intersect for docstring
"""
if other.is_EmptySet:
return other
# We only know how to intersect with other intervals
if not other.is_Interval:
return None
# handle (-oo, oo)
infty = S.NegativeInfinity, S.Infinity
if self == Interval(*infty):
l, r = self.left, self.right
if l.is_real or l in infty or r.is_real or r in infty:
return other
# We can't intersect [0,3] with [x,6] -- we don't know if x>0 or x<0
if not self._is_comparable(other):
return None
empty = False
if self.start <= other.end and other.start <= self.end:
# Get topology right.
if self.start < other.start:
start = other.start
left_open = other.left_open
elif self.start > other.start:
start = self.start
left_open = self.left_open
else:
start = self.start
left_open = self.left_open or other.left_open
if self.end < other.end:
end = self.end
right_open = self.right_open
elif self.end > other.end:
end = other.end
right_open = other.right_open
else:
end = self.end
right_open = self.right_open or other.right_open
if end - start == 0 and (left_open or right_open):
empty = True
else:
empty = True
if empty:
return S.EmptySet
return Interval(start, end, left_open, right_open)
def _complement(self, other):
if other == S.Reals:
a = Interval(S.NegativeInfinity, self.start,
True, not self.left_open)
b = Interval(self.end, S.Infinity, not self.right_open, True)
return Union(a, b)
if isinstance(other, FiniteSet):
nums = [m for m in other.args if m.is_number]
if nums == []:
return None
return Set._complement(self, other)
def _union(self, other):
"""
This function should only be used internally
See Set._union for docstring
"""
if other.is_UniversalSet:
return S.UniversalSet
if other.is_Interval and self._is_comparable(other):
from sympy.functions.elementary.miscellaneous import Min, Max
# Non-overlapping intervals
end = Min(self.end, other.end)
start = Max(self.start, other.start)
if (end < start or
(end == start and (end not in self and end not in other))):
return None
else:
start = Min(self.start, other.start)
end = Max(self.end, other.end)
left_open = ((self.start != start or self.left_open) and
(other.start != start or other.left_open))
right_open = ((self.end != end or self.right_open) and
(other.end != end or other.right_open))
return Interval(start, end, left_open, right_open)
# If I have open end points and these endpoints are contained in other.
        # But only when the endpoints are finite, because an
        # interval does not contain oo or -oo.
open_left_in_other_and_finite = (self.left_open and
sympify(other.contains(self.start)) is S.true and
self.start.is_finite)
open_right_in_other_and_finite = (self.right_open and
sympify(other.contains(self.end)) is S.true and
self.end.is_finite)
if open_left_in_other_and_finite or open_right_in_other_and_finite:
# Fill in my end points and return
open_left = self.left_open and self.start not in other
open_right = self.right_open and self.end not in other
new_self = Interval(self.start, self.end, open_left, open_right)
return set((new_self, other))
return None
@property
def _boundary(self):
finite_points = [p for p in (self.start, self.end)
if abs(p) != S.Infinity]
return FiniteSet(*finite_points)
def _contains(self, other):
if not isinstance(other, Expr) or (
other is S.Infinity or
other is S.NegativeInfinity or
other is S.NaN or
other is S.ComplexInfinity) or other.is_real is False:
return false
if self.start is S.NegativeInfinity and self.end is S.Infinity:
            if other.is_real is not None:
return other.is_real
if self.left_open:
expr = other > self.start
else:
expr = other >= self.start
if self.right_open:
expr = And(expr, other < self.end)
else:
expr = And(expr, other <= self.end)
return _sympify(expr)
def _eval_imageset(self, f):
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.solvers.solveset import solveset
from sympy.core.function import diff, Lambda
from sympy.series import limit
from sympy.calculus.singularities import singularities
# TODO: handle functions with infinitely many solutions (eg, sin, tan)
# TODO: handle multivariate functions
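        # Illustrative sketch (added; assumes standard sympy semantics):
        #   imageset(Lambda(x, x**2), Interval(-1, 2)) -> [0, 4]
        # (the interior minimum at x = 0 beats both endpoint images)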
expr = f.expr
if len(expr.free_symbols) > 1 or len(f.variables) != 1:
return
var = f.variables[0]
if expr.is_Piecewise:
result = S.EmptySet
domain_set = self
for (p_expr, p_cond) in expr.args:
if p_cond is true:
intrvl = domain_set
else:
intrvl = p_cond.as_set()
intrvl = Intersection(domain_set, intrvl)
if p_expr.is_Number:
image = FiniteSet(p_expr)
else:
image = imageset(Lambda(var, p_expr), intrvl)
result = Union(result, image)
# remove the part which has been `imaged`
domain_set = Complement(domain_set, intrvl)
if domain_set.is_EmptySet:
break
return result
if not self.start.is_comparable or not self.end.is_comparable:
return
try:
sing = [x for x in singularities(expr, var)
if x.is_real and x in self]
except NotImplementedError:
return
if self.left_open:
_start = limit(expr, var, self.start, dir="+")
elif self.start not in sing:
_start = f(self.start)
if self.right_open:
_end = limit(expr, var, self.end, dir="-")
elif self.end not in sing:
_end = f(self.end)
if len(sing) == 0:
solns = list(solveset(diff(expr, var), var))
extr = [_start, _end] + [f(x) for x in solns
if x.is_real and x in self]
start, end = Min(*extr), Max(*extr)
left_open, right_open = False, False
if _start <= _end:
# the minimum or maximum value can occur simultaneously
# on both the edge of the interval and in some interior
# point
if start == _start and start not in solns:
left_open = self.left_open
if end == _end and end not in solns:
right_open = self.right_open
else:
if start == _end and start not in solns:
left_open = self.right_open
if end == _start and end not in solns:
right_open = self.left_open
return Interval(start, end, left_open, right_open)
else:
return imageset(f, Interval(self.start, sing[0],
self.left_open, True)) + \
Union(*[imageset(f, Interval(sing[i], sing[i + 1], True, True))
for i in range(0, len(sing) - 1)]) + \
imageset(f, Interval(sing[-1], self.end, True, self.right_open))
@property
def _measure(self):
return self.end - self.start
def to_mpi(self, prec=53):
return mpi(mpf(self.start._eval_evalf(prec)),
mpf(self.end._eval_evalf(prec)))
def _eval_evalf(self, prec):
return Interval(self.left._eval_evalf(prec),
self.right._eval_evalf(prec),
left_open=self.left_open, right_open=self.right_open)
def _is_comparable(self, other):
is_comparable = self.start.is_comparable
is_comparable &= self.end.is_comparable
is_comparable &= other.start.is_comparable
is_comparable &= other.end.is_comparable
return is_comparable
@property
def is_left_unbounded(self):
"""Return ``True`` if the left endpoint is negative infinity. """
return self.left is S.NegativeInfinity or self.left == Float("-inf")
@property
def is_right_unbounded(self):
"""Return ``True`` if the right endpoint is positive infinity. """
return self.right is S.Infinity or self.right == Float("+inf")
def as_relational(self, x):
"""Rewrite an interval in terms of inequalities and logic operators."""
x = sympify(x)
if self.right_open:
right = x < self.end
else:
right = x <= self.end
if self.left_open:
left = self.start < x
else:
left = self.start <= x
return And(left, right)
def _eval_Eq(self, other):
if not other.is_Interval:
if (other.is_Union or other.is_Complement or
other.is_Intersection or other.is_ProductSet):
return
return false
return And(Eq(self.left, other.left),
Eq(self.right, other.right),
self.left_open == other.left_open,
self.right_open == other.right_open)
class Union(Set, EvalfMixin):
"""
Represents a union of sets as a :class:`Set`.
Examples
========
>>> from sympy import Union, Interval
>>> Union(Interval(1, 2), Interval(3, 4))
[1, 2] U [3, 4]
The Union constructor will always try to merge overlapping intervals,
if possible. For example:
>>> Union(Interval(1, 2), Interval(2, 3))
[1, 3]
See Also
========
Intersection
References
==========
.. [1] http://en.wikipedia.org/wiki/Union_%28set_theory%29
"""
is_Union = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_evaluate[0])
# flatten inputs to merge intersections and iterables
args = list(args)
def flatten(arg):
if isinstance(arg, Set):
if arg.is_Union:
return sum(map(flatten, arg.args), [])
else:
return [arg]
if iterable(arg): # and not isinstance(arg, Set) (implicit)
return sum(map(flatten, arg), [])
raise TypeError("Input must be Sets or iterables of Sets")
args = flatten(args)
# Union of no sets is EmptySet
if len(args) == 0:
return S.EmptySet
# Reduce sets using known rules
if evaluate:
return Union.reduce(args)
args = list(ordered(args, Set._infimum_key))
return Basic.__new__(cls, *args)
@staticmethod
def reduce(args):
"""
Simplify a :class:`Union` using known rules
We first start with global rules like
'Merge all FiniteSets'
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent
"""
# ===== Global Rules =====
# Merge all finite sets
finite_sets = [x for x in args if x.is_FiniteSet]
if len(finite_sets) > 1:
a = (x for set in finite_sets for x in set)
finite_set = FiniteSet(*a)
args = [finite_set] + [x for x in args if not x.is_FiniteSet]
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
args = set(args)
new_args = True
        while new_args:
for s in args:
new_args = False
for t in args - set((s,)):
new_set = s._union(t)
                    # This returns None if s does not know how to union
                    # with t. Returns the newly unioned set otherwise
if new_set is not None:
if not isinstance(new_set, set):
new_set = set((new_set, ))
new_args = (args - set((s, t))).union(new_set)
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return Union(args, evaluate=False)
def _complement(self, universe):
# DeMorgan's Law
return Intersection(s.complement(universe) for s in self.args)
@property
def _inf(self):
        # We use Min so that inf is meaningful in combination with symbolic
        # interval end points.
from sympy.functions.elementary.miscellaneous import Min
return Min(*[set.inf for set in self.args])
@property
def _sup(self):
# We use Max so that sup is meaningful in combination with symbolic
# end points.
from sympy.functions.elementary.miscellaneous import Max
return Max(*[set.sup for set in self.args])
def _contains(self, other):
return Or(*[set.contains(other) for set in self.args])
@property
def _measure(self):
# Measure of a union is the sum of the measures of the sets minus
# the sum of their pairwise intersections plus the sum of their
# triple-wise intersections minus ... etc...
# Sets is a collection of intersections and a set of elementary
# sets which made up those intersections (called "sos" for set of sets)
        # An example element of this list might be:
# ( {A,B,C}, A.intersect(B).intersect(C) )
# Start with just elementary sets ( ({A}, A), ({B}, B), ... )
# Then get and subtract ( ({A,B}, (A int B), ... ) while non-zero
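        # Illustrative (added): for an unevaluated [0, 2] U [1, 3] the loop
        # adds 2 + 2, then subtracts the pairwise overlap |[1, 2]| = 1,
        # giving the expected measure 3.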
sets = [(FiniteSet(s), s) for s in self.args]
measure = 0
parity = 1
while sets:
# Add up the measure of these sets and add or subtract it to total
measure += parity * sum(inter.measure for sos, inter in sets)
# For each intersection in sets, compute the intersection with every
# other set not already part of the intersection.
sets = ((sos + FiniteSet(newset), newset.intersect(intersection))
for sos, intersection in sets for newset in self.args
if newset not in sos)
# Clear out sets with no measure
sets = [(sos, inter) for sos, inter in sets if inter.measure != 0]
# Clear out duplicates
sos_list = []
sets_list = []
for set in sets:
if set[0] in sos_list:
continue
else:
sos_list.append(set[0])
sets_list.append(set)
sets = sets_list
# Flip Parity - next time subtract/add if we added/subtracted here
parity *= -1
return measure
@property
def _boundary(self):
def boundary_of_set(i):
""" The boundary of set i minus interior of all other sets """
b = self.args[i].boundary
for j, a in enumerate(self.args):
if j != i:
b = b - a.interior
return b
return Union(map(boundary_of_set, range(len(self.args))))
def _eval_imageset(self, f):
return Union(imageset(f, arg) for arg in self.args)
def as_relational(self, symbol):
"""Rewrite a Union in terms of equalities and logic operators. """
return Or(*[set.as_relational(symbol) for set in self.args])
@property
def is_iterable(self):
return all(arg.is_iterable for arg in self.args)
def _eval_evalf(self, prec):
try:
return Union(set._eval_evalf(prec) for set in self.args)
except Exception:
raise TypeError("Not all sets are evalf-able")
def __iter__(self):
import itertools
# roundrobin recipe taken from itertools documentation:
# https://docs.python.org/2/library/itertools.html#recipes
def roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to George Sakkis
pending = len(iterables)
if PY3:
nexts = itertools.cycle(iter(it).__next__ for it in iterables)
else:
nexts = itertools.cycle(iter(it).next for it in iterables)
while pending:
try:
for next in nexts:
yield next()
except StopIteration:
pending -= 1
nexts = itertools.cycle(itertools.islice(nexts, pending))
if all(set.is_iterable for set in self.args):
return roundrobin(*(iter(arg) for arg in self.args))
else:
raise TypeError("Not all constituent sets are iterable")
class Intersection(Set):
"""
Represents an intersection of sets as a :class:`Set`.
Examples
========
>>> from sympy import Intersection, Interval
>>> Intersection(Interval(1, 3), Interval(2, 4))
[2, 3]
We often use the .intersect method
>>> Interval(1,3).intersect(Interval(2,4))
[2, 3]
See Also
========
Union
References
==========
.. [1] http://en.wikipedia.org/wiki/Intersection_%28set_theory%29
"""
is_Intersection = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_evaluate[0])
# flatten inputs to merge intersections and iterables
args = list(args)
def flatten(arg):
if isinstance(arg, Set):
if arg.is_Intersection:
return sum(map(flatten, arg.args), [])
else:
return [arg]
if iterable(arg): # and not isinstance(arg, Set) (implicit)
return sum(map(flatten, arg), [])
raise TypeError("Input must be Sets or iterables of Sets")
args = flatten(args)
if len(args) == 0:
return S.EmptySet
        # args can't be ordered for Partition; see issue #9608
if 'Partition' not in [type(a).__name__ for a in args]:
args = list(ordered(args, Set._infimum_key))
# Reduce sets using known rules
if evaluate:
return Intersection.reduce(args)
return Basic.__new__(cls, *args)
@property
def is_iterable(self):
return any(arg.is_iterable for arg in self.args)
@property
def _inf(self):
raise NotImplementedError()
@property
def _sup(self):
raise NotImplementedError()
def _eval_imageset(self, f):
return Intersection(imageset(f, arg) for arg in self.args)
def _contains(self, other):
return And(*[set.contains(other) for set in self.args])
def __iter__(self):
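        # Added note: for each iterable constituent, yield its elements that
        # the remaining constituents contain; when membership cannot be
        # decided, the symbolic condition itself is yielded instead.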
no_iter = True
for s in self.args:
if s.is_iterable:
no_iter = False
other_sets = set(self.args) - set((s,))
other = Intersection(other_sets, evaluate=False)
for x in s:
c = sympify(other.contains(x))
if c is S.true:
yield x
elif c is S.false:
pass
else:
yield c
if no_iter:
raise ValueError("None of the constituent sets are iterable")
@staticmethod
def _handle_finite_sets(args):
from sympy.core.logic import fuzzy_and, fuzzy_bool
from sympy.core.compatibility import zip_longest
from sympy.utilities.iterables import sift
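        # Illustrative sketch (added; assumes standard sympy semantics): for
        # args = [FiniteSet(x, 1), Interval(0, 2)] the loop below keeps 1
        # (definitely contained) and defers the symbol x to an unevaluated
        # Intersection with the remaining sets.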
sifted = sift(args, lambda x: x.is_FiniteSet)
fs_args = sifted.pop(True, [])
if not fs_args:
return
s = fs_args[0]
fs_args = fs_args[1:]
other = sifted.pop(False, [])
res = []
unk = []
for x in s:
c = fuzzy_and(fuzzy_bool(o.contains(x))
for o in fs_args + other)
if c:
res.append(x)
elif c is None:
unk.append(x)
else:
pass # drop arg
res = FiniteSet(
*res, evaluate=False) if res else S.EmptySet
if unk:
symbolic_s_list = [x for x in s if x.has(Symbol)]
non_symbolic_s = s - FiniteSet(
*symbolic_s_list, evaluate=False)
while fs_args:
v = fs_args.pop()
if all(i == j for i, j in zip_longest(
symbolic_s_list,
(x for x in v if x.has(Symbol)))):
# all the symbolic elements of `v` are the same
# as in `s` so remove the non-symbol containing
# expressions from `unk`, since they cannot be
# contained
for x in non_symbolic_s:
if x in unk:
unk.remove(x)
else:
# if only a subset of elements in `s` are
# contained in `v` then remove them from `v`
# and add this as a new arg
contained = [x for x in symbolic_s_list
if sympify(v.contains(x)) is S.true]
if contained != symbolic_s_list:
other.append(
v - FiniteSet(
*contained, evaluate=False))
else:
pass # for coverage
other_sets = Intersection(*other)
if not other_sets:
return S.EmptySet # b/c we use evaluate=False below
res += Intersection(
FiniteSet(*unk),
other_sets, evaluate=False)
return res
@staticmethod
def reduce(args):
"""
Return a simplified intersection by applying rules.
We first start with global rules like
'if any empty sets, return empty set' and 'distribute unions'.
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent
"""
from sympy.simplify.simplify import clear_coefficients
# ===== Global Rules =====
# If any EmptySets return EmptySet
if any(s.is_EmptySet for s in args):
return S.EmptySet
# Handle Finite sets
rv = Intersection._handle_finite_sets(args)
if rv is not None:
return rv
# If any of the sets are unions, return a Union of Intersections
for s in args:
if s.is_Union:
other_sets = set(args) - set((s,))
if len(other_sets) > 0:
other = Intersection(other_sets)
return Union(Intersection(arg, other) for arg in s.args)
else:
return Union(arg for arg in s.args)
for s in args:
if s.is_Complement:
args.remove(s)
other_sets = args + [s.args[0]]
return Complement(Intersection(*other_sets), s.args[1])
# At this stage we are guaranteed not to have any
# EmptySets, FiniteSets, or Unions in the intersection
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
args = set(args)
new_args = True
        while new_args:
for s in args:
new_args = False
for t in args - set((s,)):
new_set = s._intersect(t)
# This returns None if s does not know how to intersect
# with t. Returns the newly intersected set otherwise
if new_set is not None:
new_args = (args - set((s, t))).union(set((new_set, )))
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return Intersection(args, evaluate=False)
def as_relational(self, symbol):
"""Rewrite an Intersection in terms of equalities and logic operators"""
return And(*[set.as_relational(symbol) for set in self.args])
class Complement(Set, EvalfMixin):
"""Represents the set difference or relative complement of a set with
another set.
`A - B = \{x \in A| x \\notin B\}`
Examples
========
>>> from sympy import Complement, FiniteSet
>>> Complement(FiniteSet(0, 1, 2), FiniteSet(1))
{0, 2}
See Also
=========
Intersection, Union
References
==========
.. [1] http://mathworld.wolfram.com/ComplementSet.html
"""
is_Complement = True
def __new__(cls, a, b, evaluate=True):
if evaluate:
return Complement.reduce(a, b)
return Basic.__new__(cls, a, b)
@staticmethod
def reduce(A, B):
"""
Simplify a :class:`Complement`.
"""
if B == S.UniversalSet or A.is_subset(B):
return EmptySet()
if isinstance(B, Union):
return Intersection(s.complement(A) for s in B.args)
result = B._complement(A)
        if result is not None:
return result
else:
return Complement(A, B, evaluate=False)
def _contains(self, other):
A = self.args[0]
B = self.args[1]
return And(A.contains(other), Not(B.contains(other)))
class EmptySet(with_metaclass(Singleton, Set)):
"""
Represents the empty set. The empty set is available as a singleton
as S.EmptySet.
Examples
========
>>> from sympy import S, Interval
>>> S.EmptySet
EmptySet()
>>> Interval(1, 2).intersect(S.EmptySet)
EmptySet()
See Also
========
UniversalSet
References
==========
.. [1] http://en.wikipedia.org/wiki/Empty_set
"""
is_EmptySet = True
is_FiniteSet = True
def _intersect(self, other):
return S.EmptySet
@property
def _measure(self):
return 0
def _contains(self, other):
return false
def as_relational(self, symbol):
return false
def __len__(self):
return 0
def _union(self, other):
return other
def __iter__(self):
return iter([])
def _eval_imageset(self, f):
return self
def _eval_powerset(self):
return FiniteSet(self)
@property
def _boundary(self):
return self
def _complement(self, other):
return other
def _symmetric_difference(self, other):
return other
class UniversalSet(with_metaclass(Singleton, Set)):
"""
Represents the set of all things.
The universal set is available as a singleton as S.UniversalSet
Examples
========
>>> from sympy import S, Interval
>>> S.UniversalSet
UniversalSet()
>>> Interval(1, 2).intersect(S.UniversalSet)
[1, 2]
See Also
========
EmptySet
References
==========
.. [1] http://en.wikipedia.org/wiki/Universal_set
"""
is_UniversalSet = True
def _intersect(self, other):
return other
def _complement(self, other):
return S.EmptySet
def _symmetric_difference(self, other):
return other
@property
def _measure(self):
return S.Infinity
def _contains(self, other):
return true
def as_relational(self, symbol):
return true
def _union(self, other):
return self
@property
def _boundary(self):
return EmptySet()
class FiniteSet(Set, EvalfMixin):
"""
Represents a finite set of discrete numbers
Examples
========
>>> from sympy import FiniteSet
>>> FiniteSet(1, 2, 3, 4)
{1, 2, 3, 4}
>>> 3 in FiniteSet(1, 2, 3, 4)
True
>>> members = [1, 2, 3, 4]
>>> FiniteSet(*members)
{1, 2, 3, 4}
References
==========
.. [1] http://en.wikipedia.org/wiki/Finite_set
"""
is_FiniteSet = True
is_iterable = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_evaluate[0])
if evaluate:
args = list(map(sympify, args))
if len(args) == 0:
return EmptySet()
else:
args = list(map(sympify, args))
args = list(ordered(frozenset(tuple(args)), Set._infimum_key))
obj = Basic.__new__(cls, *args)
obj._elements = frozenset(args)
return obj
def _eval_Eq(self, other):
if not other.is_FiniteSet:
if (other.is_Union or other.is_Complement or
other.is_Intersection or other.is_ProductSet):
return
return false
if len(self) != len(other):
return false
return And(*(Eq(x, y) for x, y in zip(self.args, other.args)))
def __iter__(self):
return iter(self.args)
def _intersect(self, other):
"""
This function should only be used internally
See Set._intersect for docstring
"""
if isinstance(other, self.__class__):
return self.__class__(*(self._elements & other._elements))
return self.__class__(*[el for el in self if el in other])
def _complement(self, other):
if isinstance(other, Interval):
nums = sorted(m for m in self.args if m.is_number)
if other == S.Reals and nums != []:
syms = [m for m in self.args if m.is_Symbol]
# Reals cannot contain elements other than numbers and symbols.
intervals = [] # Build up a list of intervals between the elements
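                # Sketch (added): for nums == [1, 2] this builds
                # (-oo, 1), (1, 2), (2, oo) -- all open intervals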
intervals += [Interval(S.NegativeInfinity, nums[0], True, True)]
for a, b in zip(nums[:-1], nums[1:]):
intervals.append(Interval(a, b, True, True)) # both open
intervals.append(Interval(nums[-1], S.Infinity, True, True))
if syms != []:
return Complement(Union(intervals, evaluate=False),
FiniteSet(*syms), evaluate=False)
else:
return Union(intervals, evaluate=False)
elif nums == []:
return None
elif isinstance(other, FiniteSet):
unk = []
for i in self:
c = sympify(other.contains(i))
if c is not S.true and c is not S.false:
unk.append(i)
unk = FiniteSet(*unk)
if unk == self:
return
not_true = []
for i in other:
c = sympify(self.contains(i))
if c is not S.true:
not_true.append(i)
return Complement(FiniteSet(*not_true), unk)
return Set._complement(self, other)
def _union(self, other):
"""
This function should only be used internally
See Set._union for docstring
"""
if other.is_FiniteSet:
return FiniteSet(*(self._elements | other._elements))
# If other set contains one of my elements, remove it from myself
if any(sympify(other.contains(x)) is S.true for x in self):
return set((
FiniteSet(*[x for x in self
                            if sympify(other.contains(x)) is not S.true]), other))
return None
def _contains(self, other):
"""
Tests whether an element, other, is in the set.
        Relies on Python's set class. This tests for object equality.
        All inputs are sympified.
Examples
========
>>> from sympy import FiniteSet
>>> 1 in FiniteSet(1, 2)
True
>>> 5 in FiniteSet(1, 2)
False
"""
r = false
for e in self._elements:
t = Eq(e, other, evaluate=True)
if isinstance(t, Eq):
t = t.simplify()
if t == true:
return t
elif t != false:
r = None
return r
def _eval_imageset(self, f):
return FiniteSet(*map(f, self))
@property
def _boundary(self):
return self
@property
def _inf(self):
from sympy.functions.elementary.miscellaneous import Min
return Min(*self)
@property
def _sup(self):
from sympy.functions.elementary.miscellaneous import Max
return Max(*self)
@property
def measure(self):
return 0
def __len__(self):
return len(self.args)
def as_relational(self, symbol):
"""Rewrite a FiniteSet in terms of equalities and logic operators. """
from sympy.core.relational import Eq
return Or(*[Eq(symbol, elem) for elem in self])
def compare(self, other):
return (hash(self) - hash(other))
def _eval_evalf(self, prec):
return FiniteSet(*[elem._eval_evalf(prec) for elem in self])
def _hashable_content(self):
return (self._elements,)
@property
def _sorted_args(self):
return tuple(ordered(self.args, Set._infimum_key))
def _eval_powerset(self):
return self.func(*[self.func(*s) for s in subsets(self.args)])
def __ge__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return other.is_subset(self)
def __gt__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_proper_superset(other)
def __le__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_subset(other)
def __lt__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_proper_subset(other)
converter[set] = lambda x: FiniteSet(*x)
converter[frozenset] = lambda x: FiniteSet(*x)
class SymmetricDifference(Set):
"""Represents the set of elements which are in either of the
sets and not in their intersection.
Examples
========
>>> from sympy import SymmetricDifference, FiniteSet
>>> SymmetricDifference(FiniteSet(1, 2, 3), FiniteSet(3, 4, 5))
{1, 2, 4, 5}
See Also
========
Complement, Union
References
==========
.. [1] http://en.wikipedia.org/wiki/Symmetric_difference
"""
is_SymmetricDifference = True
def __new__(cls, a, b, evaluate=True):
if evaluate:
return SymmetricDifference.reduce(a, b)
return Basic.__new__(cls, a, b)
@staticmethod
def reduce(A, B):
result = B._symmetric_difference(A)
if result is not None:
return result
else:
return SymmetricDifference(A, B, evaluate=False)
def imageset(*args):
r"""
Return an image of the set under transformation ``f``.
If this function can't compute the image, it returns an
unevaluated ImageSet object.
.. math::
{ f(x) | x \in self }
Examples
========
>>> from sympy import S, Interval, Symbol, imageset, sin, Lambda
>>> from sympy.abc import x, y
>>> imageset(x, 2*x, Interval(0, 2))
[0, 4]
>>> imageset(lambda x: 2*x, Interval(0, 2))
[0, 4]
>>> imageset(Lambda(x, sin(x)), Interval(-2, 1))
ImageSet(Lambda(x, sin(x)), [-2, 1])
>>> imageset(sin, Interval(-2, 1))
ImageSet(Lambda(x, sin(x)), [-2, 1])
>>> imageset(lambda y: x + y, Interval(-2, 1))
ImageSet(Lambda(_x, _x + x), [-2, 1])
Expressions applied to the set of Integers are simplified
to show as few negatives as possible and linear expressions
are converted to a canonical form. If this is not desirable
then the unevaluated ImageSet should be used.
>>> imageset(x, -2*x + 5, S.Integers)
ImageSet(Lambda(x, 2*x + 1), Integers())
See Also
========
sympy.sets.fancysets.ImageSet
"""
from sympy.core import Lambda
from sympy.sets.fancysets import ImageSet
from sympy.geometry.util import _uniquely_named_symbol
if len(args) not in (2, 3):
raise ValueError('imageset expects 2 or 3 args, got: %s' % len(args))
set = args[-1]
if not isinstance(set, Set):
name = func_name(set)
raise ValueError(
'last argument should be a set, not %s' % name)
if len(args) == 3:
f = Lambda(*args[:2])
elif len(args) == 2:
f = args[0]
if isinstance(f, Lambda):
pass
elif (
isinstance(f, FunctionClass) # like cos
or func_name(f) == '<lambda>'
):
var = _uniquely_named_symbol(Symbol('x'), f(Dummy()))
expr = f(var)
f = Lambda(var, expr)
else:
raise TypeError(filldedent('''
expecting lambda, Lambda, or FunctionClass, not \'%s\'''' %
func_name(f)))
r = set._eval_imageset(f)
if isinstance(r, ImageSet):
f, set = r.args
if f.variables[0] == f.expr:
return set
if isinstance(set, ImageSet):
if len(set.lamda.variables) == 1 and len(f.variables) == 1:
return imageset(Lambda(set.lamda.variables[0],
f.expr.subs(f.variables[0], set.lamda.expr)),
set.base_set)
if r is not None:
return r
return ImageSet(f, set)
| bsd-3-clause |
apache/airflow | airflow/kubernetes/volume.py | 2 | 1156 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`kubernetes.client.models.V1Volume`."""
# flake8: noqa
import warnings
with warnings.catch_warnings():
from airflow.providers.cncf.kubernetes.backcompat.volume import Volume
warnings.warn(
"This module is deprecated. Please use `kubernetes.client.models.V1Volume`.",
DeprecationWarning,
stacklevel=2,
)
| apache-2.0 |
FlipperPA/wagtail | wagtail/tests/testapp/migrations/0042_simplechildpage_simpleparentpage.py | 17 | 1179 | # Generated by Django 2.1.7 on 2019-03-15 10:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
('tests', '0041_secretpage'),
]
operations = [
migrations.CreateModel(
name='SimpleChildPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SimpleParentPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
| bsd-3-clause |
Tesora-Release/tesora-horizon | openstack_dashboard/dashboards/admin/instances/views.py | 18 | 7798 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.instances \
import forms as project_forms
from openstack_dashboard.dashboards.admin.instances \
import tables as project_tables
from openstack_dashboard.dashboards.project.instances import views
from openstack_dashboard.dashboards.project.instances.workflows \
import update_instance
# re-use console from project.instances.views to make reflection work
def console(args, **kvargs):
return views.console(args, **kvargs)
# re-use vnc from project.instances.views to make reflection work
def vnc(args, **kvargs):
return views.vnc(args, **kvargs)
# re-use spice from project.instances.views to make reflection work
def spice(args, **kvargs):
return views.spice(args, **kvargs)
# re-use rdp from project.instances.views to make reflection work
def rdp(args, **kvargs):
return views.rdp(args, **kvargs)
class AdminUpdateView(views.UpdateView):
workflow_class = update_instance.AdminUpdateInstance
success_url = reverse_lazy("horizon:admin:instances:index")
class AdminIndexView(tables.DataTableView):
table_class = project_tables.AdminInstancesTable
template_name = 'admin/instances/index.html'
page_title = _("Instances")
def has_more_data(self, table):
return self._more
def get_data(self):
instances = []
marker = self.request.GET.get(
project_tables.AdminInstancesTable._meta.pagination_param, None)
search_opts = self.get_filters({'marker': marker, 'paginate': True})
# Gather our tenants to correlate against IDs
try:
tenants, has_more = api.keystone.tenant_list(self.request)
except Exception:
tenants = []
msg = _('Unable to retrieve instance project information.')
exceptions.handle(self.request, msg)
if 'project' in search_opts:
ten_filter_ids = [t.id for t in tenants
if t.name == search_opts['project']]
del search_opts['project']
if len(ten_filter_ids) > 0:
search_opts['tenant_id'] = ten_filter_ids[0]
else:
self._more = False
return []
try:
instances, self._more = api.nova.server_list(
self.request,
search_opts=search_opts,
all_tenants=True)
except Exception:
self._more = False
exceptions.handle(self.request,
_('Unable to retrieve instance list.'))
if instances:
try:
api.network.servers_update_addresses(self.request, instances,
all_tenants=True)
except Exception:
exceptions.handle(
self.request,
message=_('Unable to retrieve IP addresses from Neutron.'),
ignore=True)
# Gather our flavors to correlate against IDs
try:
flavors = api.nova.flavor_list(self.request)
except Exception:
            # If we fail to retrieve the flavor list, fall back to an empty list.
flavors = []
full_flavors = OrderedDict([(f.id, f) for f in flavors])
tenant_dict = OrderedDict([(t.id, t) for t in tenants])
# Loop through instances to get flavor and tenant info.
for inst in instances:
flavor_id = inst.flavor["id"]
try:
if flavor_id in full_flavors:
inst.full_flavor = full_flavors[flavor_id]
else:
# If the flavor_id is not in full_flavors list,
# gets it via nova api.
inst.full_flavor = api.nova.flavor_get(
self.request, flavor_id)
except Exception:
msg = _('Unable to retrieve instance size information.')
exceptions.handle(self.request, msg)
tenant = tenant_dict.get(inst.tenant_id, None)
inst.tenant_name = getattr(tenant, "name", None)
return instances
def get_filters(self, filters):
filter_field = self.table.get_filter_field()
filter_action = self.table._meta._filter_action
if filter_action.is_api_filter(filter_field):
filter_string = self.table.get_filter_string()
if filter_field and filter_string:
filters[filter_field] = filter_string
return filters
class LiveMigrateView(forms.ModalFormView):
form_class = project_forms.LiveMigrateForm
template_name = 'admin/instances/live_migrate.html'
context_object_name = 'instance'
success_url = reverse_lazy("horizon:admin:instances:index")
page_title = _("Live Migrate")
def get_context_data(self, **kwargs):
context = super(LiveMigrateView, self).get_context_data(**kwargs)
context["instance_id"] = self.kwargs['instance_id']
return context
@memoized.memoized_method
def get_hosts(self, *args, **kwargs):
try:
return api.nova.host_list(self.request)
except Exception:
redirect = reverse("horizon:admin:instances:index")
msg = _('Unable to retrieve host information.')
exceptions.handle(self.request, msg, redirect=redirect)
@memoized.memoized_method
def get_object(self, *args, **kwargs):
instance_id = self.kwargs['instance_id']
try:
return api.nova.server_get(self.request, instance_id)
except Exception:
redirect = reverse("horizon:admin:instances:index")
msg = _('Unable to retrieve instance details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
initial = super(LiveMigrateView, self).get_initial()
_object = self.get_object()
if _object:
current_host = getattr(_object, 'OS-EXT-SRV-ATTR:host', '')
initial.update({'instance_id': self.kwargs['instance_id'],
'current_host': current_host,
'hosts': self.get_hosts()})
return initial
class DetailView(views.DetailView):
redirect_url = 'horizon:admin:instances:index'
image_url = 'horizon:admin:images:detail'
volume_url = 'horizon:admin:volumes:volumes:detail'
def _get_actions(self, instance):
table = project_tables.AdminInstancesTable(self.request)
return table.render_row_actions(instance)
| apache-2.0 |
chain/chain | vendor/github.com/facebook/rocksdb/tools/write_stress_runner.py | 42 | 2296 | #! /usr/bin/env python
import subprocess
import argparse
import random
import time
import sys
def generate_runtimes(total_runtime):
# combination of short runtimes and long runtimes, with heavier
# weight on short runtimes
possible_runtimes_sec = range(1, 10) + range(1, 20) + [100, 1000]
runtimes = []
while total_runtime > 0:
chosen = random.choice(possible_runtimes_sec)
chosen = min(chosen, total_runtime)
runtimes.append(chosen)
total_runtime -= chosen
return runtimes
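# Illustrative only (added): generate_runtimes(12) might return e.g.
# [5, 3, 4] -- random short runs clipped so they sum to the requested total.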
def main(args):
runtimes = generate_runtimes(int(args.runtime_sec))
print "Going to execute write stress for " + str(runtimes)
first_time = True
for runtime in runtimes:
kill = random.choice([False, True])
cmd = './write_stress --runtime_sec=' + \
("-1" if kill else str(runtime))
if len(args.db) > 0:
cmd = cmd + ' --db=' + args.db
if first_time:
first_time = False
else:
# use current db
cmd = cmd + ' --destroy_db=false'
if random.choice([False, True]):
cmd = cmd + ' --delete_obsolete_files_with_fullscan=true'
if random.choice([False, True]):
cmd = cmd + ' --low_open_files_mode=true'
print("Running write_stress for %d seconds (%s): %s" %
(runtime, ("kill-mode" if kill else "clean-shutdown-mode"),
cmd))
child = subprocess.Popen([cmd], shell=True)
killtime = time.time() + runtime
while not kill or time.time() < killtime:
time.sleep(1)
if child.poll() is not None:
if child.returncode == 0:
break
else:
print("ERROR: write_stress died with exitcode=%d\n"
% child.returncode)
sys.exit(1)
if kill:
child.kill()
# breathe
time.sleep(3)
if __name__ == '__main__':
random.seed(time.time())
parser = argparse.ArgumentParser(description="This script runs and kills \
write_stress multiple times")
parser.add_argument("--runtime_sec", default='1000')
parser.add_argument("--db", default='')
args = parser.parse_args()
main(args)
| agpl-3.0 |
NCI-Cloud/horizon | openstack_dashboard/dashboards/project/vpn/tests.py | 22 | 33688 | # Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mox import IsA # noqa
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django import http
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.vpn import workflows
class VPNTests(test.TestCase):
class AttributeDict(dict):
def __getattr__(self, attr):
return self[attr]
def __setattr__(self, attr, value):
self[attr] = value
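        # Added note: allows attribute-style access to dict keys, e.g.
        # d = AttributeDict(); d.foo = 1; assert d['foo'] == d.foo == 1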
DASHBOARD = 'project'
INDEX_URL = reverse_lazy('horizon:%s:vpn:index' % DASHBOARD)
ADDIKEPOLICY_PATH = 'horizon:%s:vpn:addikepolicy' % DASHBOARD
ADDIPSECPOLICY_PATH = 'horizon:%s:vpn:addipsecpolicy' % DASHBOARD
ADDVPNSERVICE_PATH = 'horizon:%s:vpn:addvpnservice' % DASHBOARD
ADDVPNCONNECTION_PATH = 'horizon:%s:vpn:addipsecsiteconnection' % DASHBOARD
IKEPOLICY_DETAIL_PATH = 'horizon:%s:vpn:ikepolicydetails' % DASHBOARD
IPSECPOLICY_DETAIL_PATH = 'horizon:%s:vpn:ipsecpolicydetails' % DASHBOARD
VPNSERVICE_DETAIL_PATH = 'horizon:%s:vpn:vpnservicedetails' % DASHBOARD
VPNCONNECTION_DETAIL_PATH = 'horizon:%s:vpn:ipsecsiteconnectiondetails' %\
DASHBOARD
UPDATEIKEPOLICY_PATH = 'horizon:%s:vpn:update_ikepolicy' % DASHBOARD
UPDATEIPSECPOLICY_PATH = 'horizon:%s:vpn:update_ipsecpolicy' % DASHBOARD
UPDATEVPNSERVICE_PATH = 'horizon:%s:vpn:update_vpnservice' % DASHBOARD
UPDATEVPNCONNECTION_PATH = 'horizon:%s:vpn:update_ipsecsiteconnection' %\
DASHBOARD
def set_up_expect(self):
# retrieves vpnservices
api.vpn.vpnservice_list(
IsA(http.HttpRequest), tenant_id=self.tenant.id) \
.AndReturn(self.vpnservices.list())
# retrieves ikepolicies
api.vpn.ikepolicy_list(
IsA(http.HttpRequest), tenant_id=self.tenant.id) \
.AndReturn(self.ikepolicies.list())
# retrieves ipsecpolicies
api.vpn.ipsecpolicy_list(
IsA(http.HttpRequest), tenant_id=self.tenant.id) \
.AndReturn(self.ipsecpolicies.list())
# retrieves ipsecsiteconnections
api.vpn.ipsecsiteconnection_list(
IsA(http.HttpRequest), tenant_id=self.tenant.id) \
.AndReturn(self.ipsecsiteconnections.list())
def set_up_expect_with_exception(self):
api.vpn.vpnservice_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndRaise(self.exceptions.neutron)
api.vpn.ikepolicy_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndRaise(self.exceptions.neutron)
api.vpn.ipsecpolicy_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndRaise(self.exceptions.neutron)
api.vpn.ipsecsiteconnection_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndRaise(self.exceptions.neutron)
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_list')})
def test_index_vpnservices(self):
self.set_up_expect()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['table'].data),
len(self.vpnservices.list()))
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_list')})
def test_index_ikepolicies(self):
self.set_up_expect()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL + '?tab=vpntabs__ikepolicies')
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['ikepoliciestable_table'].data),
len(self.ikepolicies.list()))
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_list')})
def test_index_ipsecpolicies(self):
self.set_up_expect()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL + '?tab=vpntabs__ipsecpolicies')
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['ipsecpoliciestable_table'].data),
len(self.ipsecpolicies.list()))
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_list')})
def test_index_ipsecsiteconnections(self):
self.set_up_expect()
self.mox.ReplayAll()
res = self.client.get(
self.INDEX_URL + '?tab=vpntabs__ipsecsiteconnections')
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertEqual(
len(res.context['ipsecsiteconnectionstable_table'].data),
len(self.ipsecsiteconnections.list()))
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_list')})
def test_index_exception_vpnservices(self):
self.set_up_expect_with_exception()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res,
'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['table'].data), 0)
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_list')})
def test_index_exception_ikepolicies(self):
self.set_up_expect_with_exception()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL + '?tab=vpntabs__ikepolicies')
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res,
'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['table'].data), 0)
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_list')})
def test_index_exception_ipsecpolicies(self):
self.set_up_expect_with_exception()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL + '?tab=vpntabs__ipsecpolicies')
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res,
'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['table'].data), 0)
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_list')})
def test_index_exception_ipsecsiteconnections(self):
self.set_up_expect_with_exception()
self.mox.ReplayAll()
res = self.client.get(
self.INDEX_URL + '?tab=vpntabs__ipsecsiteconnections')
self.assertTemplateUsed(res, '%s/vpn/index.html'
% self.DASHBOARD)
self.assertTemplateUsed(res,
'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['table'].data), 0)
@test.create_stubs({api.neutron: ('network_list_for_tenant',
'router_list')})
def test_add_vpnservice_get(self):
networks = [{'subnets': [self.subnets.first(), ]}, ]
routers = self.routers.list()
api.neutron.network_list_for_tenant(
IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)
api.neutron.router_list(
IsA(http.HttpRequest), tenant_id=self.tenant.id).AndReturn(routers)
self.mox.ReplayAll()
res = self.client.get(reverse(self.ADDVPNSERVICE_PATH))
workflow = res.context['workflow']
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertEqual(workflow.name, workflows.AddVPNService.name)
expected_objs = ['<AddVPNServiceStep: addvpnserviceaction>', ]
self.assertQuerysetEqual(workflow.steps, expected_objs)
@test.create_stubs({api.neutron: ('router_list',
'network_list_for_tenant'),
api.vpn: ('vpnservice_create', )})
def test_add_vpnservice_post(self):
vpnservice = self.vpnservices.first()
networks = [{'subnets': [self.subnets.first(), ]}, ]
routers = self.routers.list()
api.neutron.network_list_for_tenant(
IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)
api.neutron.router_list(
IsA(http.HttpRequest), tenant_id=self.tenant.id).AndReturn(routers)
form_data = {'name': vpnservice['name'],
'description': vpnservice['description'],
'subnet_id': vpnservice['subnet_id'],
'router_id': vpnservice['router_id'],
'admin_state_up': vpnservice['admin_state_up']}
api.vpn.vpnservice_create(
IsA(http.HttpRequest), **form_data).AndReturn(vpnservice)
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDVPNSERVICE_PATH), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.neutron: ('router_list',
'network_list_for_tenant')})
def test_add_vpnservice_post_error(self):
vpnservice = self.vpnservices.first()
networks = [{'subnets': [self.subnets.first(), ]}, ]
routers = self.routers.list()
api.neutron.network_list_for_tenant(
IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)
api.neutron.router_list(
IsA(http.HttpRequest), tenant_id=self.tenant.id).AndReturn(routers)
self.mox.ReplayAll()
form_data = {'name': vpnservice['name'],
'description': vpnservice['description'],
'subnet_id': '',
'router_id': '',
'admin_state_up': vpnservice['admin_state_up']}
res = self.client.post(reverse(self.ADDVPNSERVICE_PATH), form_data)
self.assertFormErrors(res, 2)
def test_add_ikepolicy_get(self):
res = self.client.get(reverse(self.ADDIKEPOLICY_PATH))
workflow = res.context['workflow']
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertEqual(workflow.name, workflows.AddIKEPolicy.name)
expected_objs = ['<AddIKEPolicyStep: addikepolicyaction>', ]
self.assertQuerysetEqual(workflow.steps, expected_objs)
@test.create_stubs({api.vpn: ('ikepolicy_create', )})
def test_add_ikepolicy_post(self):
ikepolicy = self.ikepolicies.first()
form_data = {'name': ikepolicy['name'],
'description': ikepolicy['description'],
'auth_algorithm': ikepolicy['auth_algorithm'],
'encryption_algorithm': ikepolicy[
'encryption_algorithm'],
'ike_version': ikepolicy['ike_version'],
'lifetime_units': ikepolicy['lifetime']['units'],
'lifetime_value': ikepolicy['lifetime']['value'],
'phase1_negotiation_mode': ikepolicy[
'phase1_negotiation_mode'],
'pfs': ikepolicy['pfs']}
api.vpn.ikepolicy_create(
IsA(http.HttpRequest), **form_data).AndReturn(ikepolicy)
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDIKEPOLICY_PATH), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
def test_add_ikepolicy_post_error(self):
ikepolicy = self.ikepolicies.first()
form_data = {'name': ikepolicy['name'],
'description': ikepolicy['description'],
'auth_algorithm': ikepolicy['auth_algorithm'],
'encryption_algorithm': ikepolicy[
'encryption_algorithm'],
'ike_version': ikepolicy['ike_version'],
'lifetime_units': ikepolicy['lifetime']['units'],
'lifetime_value': 10,
'phase1_negotiation_mode': ikepolicy[
'phase1_negotiation_mode'],
'pfs': ikepolicy['pfs']}
res = self.client.post(reverse(self.ADDIKEPOLICY_PATH), form_data)
self.assertFormErrors(res, 1)
def test_add_ipsecpolicy_get(self):
res = self.client.get(reverse(self.ADDIPSECPOLICY_PATH))
workflow = res.context['workflow']
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertEqual(workflow.name, workflows.AddIPSecPolicy.name)
expected_objs = ['<AddIPSecPolicyStep: addipsecpolicyaction>', ]
self.assertQuerysetEqual(workflow.steps, expected_objs)
@test.create_stubs({api.vpn: ('ipsecpolicy_create', )})
def test_add_ipsecpolicy_post(self):
ipsecpolicy = self.ipsecpolicies.first()
form_data = {'name': ipsecpolicy['name'],
'description': ipsecpolicy['description'],
'auth_algorithm': ipsecpolicy['auth_algorithm'],
'encryption_algorithm': ipsecpolicy[
'encryption_algorithm'],
'encapsulation_mode': ipsecpolicy[
'encapsulation_mode'],
'lifetime_units': ipsecpolicy['lifetime']['units'],
'lifetime_value': ipsecpolicy['lifetime']['value'],
'pfs': ipsecpolicy['pfs'],
'transform_protocol': ipsecpolicy[
'transform_protocol']}
api.vpn.ipsecpolicy_create(
IsA(http.HttpRequest), **form_data).AndReturn(ipsecpolicy)
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDIPSECPOLICY_PATH), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
def test_add_ipsecpolicy_post_error(self):
ipsecpolicy = self.ipsecpolicies.first()
form_data = {'name': ipsecpolicy['name'],
'description': ipsecpolicy['description'],
'auth_algorithm': ipsecpolicy['auth_algorithm'],
'encryption_algorithm': ipsecpolicy[
'encryption_algorithm'],
'encapsulation_mode': ipsecpolicy[
'encapsulation_mode'],
'lifetime_units': ipsecpolicy['lifetime']['units'],
'lifetime_value': 10,
'pfs': ipsecpolicy['pfs'],
'transform_protocol': ipsecpolicy[
'transform_protocol']}
res = self.client.post(reverse(self.ADDIPSECPOLICY_PATH), form_data)
self.assertFormErrors(res, 1)
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list')})
def test_add_ipsecsiteconnection_get(self):
ikepolicies = self.ikepolicies.list()
ipsecpolicies = self.ipsecpolicies.list()
vpnservices = self.vpnservices.list()
api.vpn.ikepolicy_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(ikepolicies)
api.vpn.ipsecpolicy_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(ipsecpolicies)
api.vpn.vpnservice_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(vpnservices)
self.mox.ReplayAll()
res = self.client.get(reverse(self.ADDVPNCONNECTION_PATH))
workflow = res.context['workflow']
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertEqual(workflow.name, workflows.AddIPSecSiteConnection.name)
expected_objs = ['<AddIPSecSiteConnectionStep: '
'addipsecsiteconnectionaction>',
'<AddIPSecSiteConnectionOptionalStep: '
'addipsecsiteconnectionoptionalaction>', ]
self.assertQuerysetEqual(workflow.steps, expected_objs)
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_create')})
def test_add_ipsecsiteconnection_post(self):
self._test_add_ipsecsiteconnection_post()
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_create')})
def test_add_ipsecsiteconnection_post_single_subnet(self):
self._test_add_ipsecsiteconnection_post(subnet_list=False)
def _test_add_ipsecsiteconnection_post(self, subnet_list=True):
if subnet_list:
ipsecsiteconnection = self.ipsecsiteconnections.first()
else:
ipsecsiteconnection = self.ipsecsiteconnections.list()[1]
ikepolicies = self.ikepolicies.list()
ipsecpolicies = self.ipsecpolicies.list()
vpnservices = self.vpnservices.list()
api.vpn.ikepolicy_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(ikepolicies)
api.vpn.ipsecpolicy_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(ipsecpolicies)
api.vpn.vpnservice_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(vpnservices)
form_data = {'name': ipsecsiteconnection['name'],
'description': ipsecsiteconnection['description'],
'dpd_action': ipsecsiteconnection['dpd']['action'],
'dpd_interval': ipsecsiteconnection['dpd']['interval'],
'dpd_timeout': ipsecsiteconnection['dpd']['timeout'],
'ikepolicy_id': ipsecsiteconnection['ikepolicy_id'],
'initiator': ipsecsiteconnection['initiator'],
'ipsecpolicy_id': ipsecsiteconnection[
'ipsecpolicy_id'],
'mtu': ipsecsiteconnection['mtu'],
'peer_address': ipsecsiteconnection['peer_address'],
'peer_cidrs': ipsecsiteconnection['peer_cidrs'],
'peer_id': ipsecsiteconnection['peer_id'],
'psk': ipsecsiteconnection['psk'],
'vpnservice_id': ipsecsiteconnection['vpnservice_id'],
'admin_state_up': ipsecsiteconnection[
'admin_state_up']}
api.vpn.ipsecsiteconnection_create(
IsA(http.HttpRequest), **form_data).AndReturn(ipsecsiteconnection)
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDVPNCONNECTION_PATH), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_create')})
def test_add_ipsecsiteconnection_post_required_fields_error(self):
self._test_add_ipsecsiteconnection_post_error()
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_create')})
def test_add_ipsecsiteconnection_post_peer_cidrs_error(self):
self._test_add_ipsecsiteconnection_post_error(subnets=True)
def _test_add_ipsecsiteconnection_post_error(self, subnets=False):
ipsecsiteconnection = self.ipsecsiteconnections.first()
ikepolicies = self.ikepolicies.list()
ipsecpolicies = self.ipsecpolicies.list()
vpnservices = self.vpnservices.list()
api.vpn.ikepolicy_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(ikepolicies)
api.vpn.ipsecpolicy_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(ipsecpolicies)
api.vpn.vpnservice_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(vpnservices)
self.mox.ReplayAll()
form_data = {'name': '',
'description': ipsecsiteconnection['description'],
'dpd_action': ipsecsiteconnection['dpd']['action'],
'dpd_interval': ipsecsiteconnection['dpd']['interval'],
'dpd_timeout': ipsecsiteconnection['dpd']['timeout'],
'ikepolicy_id': '',
'initiator': ipsecsiteconnection['initiator'],
'ipsecpolicy_id': '',
'mtu': ipsecsiteconnection['mtu'],
'peer_address': '',
'peer_cidrs': '',
'peer_id': '',
'psk': '',
'vpnservice_id': '',
'admin_state_up': ipsecsiteconnection[
'admin_state_up']}
if subnets:
form_data['peer_cidrs'] = '20.1.0.0/24; 21.1.0.0/24'
res = self.client.post(reverse(self.ADDVPNCONNECTION_PATH), form_data)
self.assertFormErrors(res, 8)
@test.create_stubs({api.vpn: ('vpnservice_get', )})
def test_update_vpnservice_get(self):
vpnservice = self.vpnservices.first()
api.vpn.vpnservice_get(IsA(http.HttpRequest), vpnservice.id)\
.AndReturn(vpnservice)
self.mox.ReplayAll()
res = self.client.get(
reverse(self.UPDATEVPNSERVICE_PATH, args=(vpnservice.id,)))
self.assertTemplateUsed(
res, 'project/vpn/update_vpnservice.html')
@test.create_stubs({api.vpn: ('vpnservice_get', 'vpnservice_update')})
def test_update_vpnservice_post(self):
vpnservice = self.vpnservices.first()
api.vpn.vpnservice_get(IsA(http.HttpRequest), vpnservice.id)\
.AndReturn(vpnservice)
data = {'name': vpnservice.name,
'description': vpnservice.description,
'admin_state_up': vpnservice.admin_state_up}
api.vpn.vpnservice_update(IsA(http.HttpRequest), vpnservice.id,
vpnservice=data).AndReturn(vpnservice)
self.mox.ReplayAll()
form_data = data.copy()
form_data['vpnservice_id'] = vpnservice.id
res = self.client.post(reverse(
self.UPDATEVPNSERVICE_PATH, args=(vpnservice.id,)), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.vpn: ('ikepolicy_get', )})
def test_update_ikepolicy_get(self):
ikepolicy = self.ikepolicies.first()
api.vpn.ikepolicy_get(IsA(http.HttpRequest), ikepolicy.id)\
.AndReturn(ikepolicy)
self.mox.ReplayAll()
res = self.client.get(
reverse(self.UPDATEIKEPOLICY_PATH, args=(ikepolicy.id,)))
self.assertTemplateUsed(
res, 'project/vpn/update_ikepolicy.html')
@test.create_stubs({api.vpn: ('ikepolicy_get', 'ikepolicy_update')})
def test_update_ikepolicy_post(self):
ikepolicy = self.ikepolicies.first()
api.vpn.ikepolicy_get(IsA(http.HttpRequest), ikepolicy.id)\
.AndReturn(ikepolicy)
data = {'name': ikepolicy.name,
'description': ikepolicy.description,
'auth_algorithm': ikepolicy.auth_algorithm,
'encryption_algorithm': ikepolicy.encryption_algorithm,
'ike_version': ikepolicy.ike_version,
'lifetime': ikepolicy.lifetime,
'pfs': ikepolicy.pfs,
'phase1_negotiation_mode': ikepolicy.phase1_negotiation_mode}
api.vpn.ikepolicy_update(IsA(http.HttpRequest), ikepolicy.id,
ikepolicy=data).AndReturn(ikepolicy)
self.mox.ReplayAll()
form_data = data.copy()
form_data.update({'lifetime_units': form_data['lifetime']['units'],
'lifetime_value': form_data['lifetime']['value'],
'ikepolicy_id': ikepolicy.id})
form_data.pop('lifetime')
res = self.client.post(reverse(
self.UPDATEIKEPOLICY_PATH, args=(ikepolicy.id,)), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.vpn: ('ipsecpolicy_get', )})
def test_update_ipsecpolicy_get(self):
ipsecpolicy = self.ipsecpolicies.first()
api.vpn.ipsecpolicy_get(IsA(http.HttpRequest), ipsecpolicy.id)\
.AndReturn(ipsecpolicy)
self.mox.ReplayAll()
res = self.client.get(
reverse(self.UPDATEIPSECPOLICY_PATH, args=(ipsecpolicy.id,)))
self.assertTemplateUsed(
res, 'project/vpn/update_ipsecpolicy.html')
@test.create_stubs({api.vpn: ('ipsecpolicy_get', 'ipsecpolicy_update')})
def test_update_ipsecpolicy_post(self):
ipsecpolicy = self.ipsecpolicies.first()
api.vpn.ipsecpolicy_get(IsA(http.HttpRequest), ipsecpolicy.id)\
.AndReturn(ipsecpolicy)
data = {'name': ipsecpolicy.name,
'description': ipsecpolicy.description,
'auth_algorithm': ipsecpolicy.auth_algorithm,
'encapsulation_mode': ipsecpolicy.encapsulation_mode,
'encryption_algorithm': ipsecpolicy.encryption_algorithm,
'lifetime': ipsecpolicy.lifetime,
'pfs': ipsecpolicy.pfs,
'transform_protocol': ipsecpolicy.transform_protocol}
api.vpn.ipsecpolicy_update(IsA(http.HttpRequest), ipsecpolicy.id,
ipsecpolicy=data).AndReturn(ipsecpolicy)
self.mox.ReplayAll()
form_data = data.copy()
form_data.update({'lifetime_units': form_data['lifetime']['units'],
'lifetime_value': form_data['lifetime']['value'],
'ipsecpolicy_id': ipsecpolicy.id})
form_data.pop('lifetime')
res = self.client.post(reverse(
self.UPDATEIPSECPOLICY_PATH, args=(ipsecpolicy.id,)), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.vpn: ('ipsecsiteconnection_get', )})
def test_update_ipsecsiteconnection_get(self):
ipsecsiteconnection = self.ipsecsiteconnections.first()
api.vpn.ipsecsiteconnection_get(
IsA(http.HttpRequest), ipsecsiteconnection.id)\
.AndReturn(ipsecsiteconnection)
self.mox.ReplayAll()
res = self.client.get(
reverse(self.UPDATEVPNCONNECTION_PATH,
args=(ipsecsiteconnection.id,)))
self.assertTemplateUsed(
res, 'project/vpn/update_ipsecsiteconnection.html')
@test.create_stubs({api.vpn: ('ipsecsiteconnection_get',
'ipsecsiteconnection_update')})
def test_update_ipsecsiteconnection_post(self):
ipsecsiteconnection = self.ipsecsiteconnections.first()
api.vpn.ipsecsiteconnection_get(
IsA(http.HttpRequest), ipsecsiteconnection.id)\
.AndReturn(ipsecsiteconnection)
data = {'name': ipsecsiteconnection.name,
'description': ipsecsiteconnection.description,
'peer_address': ipsecsiteconnection.peer_address,
'peer_id': ipsecsiteconnection.peer_id,
'peer_cidrs': ipsecsiteconnection.peer_cidrs,
'psk': ipsecsiteconnection.psk,
'mtu': ipsecsiteconnection.mtu,
'dpd': ipsecsiteconnection.dpd,
'initiator': ipsecsiteconnection.initiator,
'admin_state_up': ipsecsiteconnection.admin_state_up}
api.vpn.ipsecsiteconnection_update(
IsA(http.HttpRequest), ipsecsiteconnection.id,
ipsec_site_connection=data).AndReturn(ipsecsiteconnection)
self.mox.ReplayAll()
form_data = data.copy()
form_data.update({
'dpd_action': form_data['dpd']['action'],
'dpd_interval': form_data['dpd']['interval'],
'dpd_timeout': form_data['dpd']['timeout'],
'peer_cidrs': ", ".join(ipsecsiteconnection['peer_cidrs']),
'ipsecsiteconnection_id': ipsecsiteconnection.id,
})
form_data.pop('dpd')
res = self.client.post(
reverse(self.UPDATEVPNCONNECTION_PATH,
args=(ipsecsiteconnection.id,)), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_list',
'vpnservice_delete',)})
def test_delete_vpnservice(self):
self.set_up_expect()
vpnservice = self.vpnservices.first()
api.vpn.vpnservice_delete(IsA(http.HttpRequest), vpnservice.id)
self.mox.ReplayAll()
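        # Horizon table actions are posted as "<table>__<action>__<row id>";
        # the form_data below follows that convention (clarifying comment,
        # not part of the original test).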
form_data = {"action":
"vpnservicestable__deletevpnservice__%s" % vpnservice.id}
res = self.client.post(self.INDEX_URL, form_data)
self.assertNoFormErrors(res)
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_list',
'ikepolicy_delete',)})
def test_delete_ikepolicy(self):
self.set_up_expect()
ikepolicy = self.ikepolicies.first()
api.vpn.ikepolicy_delete(IsA(http.HttpRequest), ikepolicy.id)
self.mox.ReplayAll()
form_data = {"action":
"ikepoliciestable__deleteikepolicy__%s" % ikepolicy.id}
res = self.client.post(self.INDEX_URL, form_data)
self.assertNoFormErrors(res)
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_list',
'ipsecpolicy_delete',)})
def test_delete_ipsecpolicy(self):
self.set_up_expect()
ipsecpolicy = self.ipsecpolicies.first()
api.vpn.ipsecpolicy_delete(IsA(http.HttpRequest), ipsecpolicy.id)
self.mox.ReplayAll()
form_data = {"action":
"ipsecpoliciestable__deleteipsecpolicy__%s"
% ipsecpolicy.id}
res = self.client.post(self.INDEX_URL, form_data)
self.assertNoFormErrors(res)
@test.create_stubs({api.vpn: ('ikepolicy_list', 'ipsecpolicy_list',
'vpnservice_list',
'ipsecsiteconnection_list',
'ipsecsiteconnection_delete',)})
def test_delete_ipsecsiteconnection(self):
self.set_up_expect()
ipsecsiteconnection = self.ipsecsiteconnections.first()
api.vpn.ipsecsiteconnection_delete(
IsA(http.HttpRequest), ipsecsiteconnection.id)
self.mox.ReplayAll()
form_data = {"action":
"ipsecsiteconnectionstable__deleteipsecsiteconnection__%s"
% ipsecsiteconnection.id}
res = self.client.post(self.INDEX_URL, form_data)
self.assertNoFormErrors(res)
| apache-2.0 |
dharmabumstead/ansible | lib/ansible/module_utils/six/__init__.py | 60 | 31144 | # This code is strewn with things that are not defined on Python3 (unicode,
# long, etc) but they are all shielded by version checks. This is also an
# upstream vendored file that we're not going to modify on our own
# pylint: disable=undefined-variable
# Copyright (c) 2010-2017 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utilities for writing code that runs on Python 2 and 3"""
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.11.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python 3.
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
try:
raise tp, value, tb
finally:
tb = None
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
try:
if from_value is None:
raise value
raise value from from_value
finally:
value = None
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
finally:
value = None
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
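# Illustrative usage of with_metaclass (a sketch added for clarity; the
# class names are made up and not part of the vendored module):
#
#   class Meta(type):
#       pass
#
#   class MyClass(with_metaclass(Meta, object)):
#       pass
#
#   # type(MyClass) is Meta on both Python 2 and Python 3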
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
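# Illustrative sketch of the decorator above (class name is made up and not
# part of the vendored module):
#
#   @python_2_unicode_compatible
#   class Greeting(object):
#       def __str__(self):
#           return u'caf\xe9'  # always return text; the decorator handles
#                              # UTF-8 encoding of __str__ on Python 2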
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| gpl-3.0 |
marianosimone/listal-widgets | listal.py | 1 | 2180 | from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
_image_finder = re.compile(r"img src='(.*)'")
_statuses = {
'Yes': 'used',
'Using': 'using',
'No': 'unused',
'used': 'Yes',
'using': 'Using',
'unused': 'No',
'': 'unused'
}
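# _statuses deliberately maps in both directions: Listal's RSS "used" values
# ('Yes'/'Using'/'No') to the internal names and back (clarifying comment,
# inferred from the lookups in guess_attributes and _get_data below).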
_status = {
'books': {
'wanted': ('wanted',),
'owned': ('owned',),
'used': ('read',),
'all': ('owned', 'wanted', 'read')
},
'movies': {
'wanted': ('wanted',),
'used': ('watched',),
'all': ('wanted', 'watched')
}
}
def guess_attributes(item):
return {
'title': item.find('title').text,
'image': _image_finder.search(item.find('description').text).groups()[0],
'status': _statuses[item.find('listal:used').text],
'since': item.find('pubdate').text,
'url': item.find('link').text,
}
def _get_items(url):
data = BeautifulSoup(urlopen(url).read(), "html.parser")
items = data.rss.find_all('item')
return [guess_attributes(item) for item in items]
def _get_data(user, collection, ownership, status):
base_url = 'http://%s.listal.com/rss/%s/%s/1?sortby=dateadded-desc' # TODO: Handle paging
items = []
    used_query = ('&used=%s' % _statuses[status]) if status != 'all' else ''
for s in _status[collection][ownership]:
items.extend(_get_items((base_url % (user, collection, s)) + used_query))
return items
def reading(user):
return _get_data(user, 'books', 'all', 'using')
def read(user):
return _get_data(user, 'books', 'used', 'all')
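# Minimal usage sketch (the user name is a placeholder):
#
#   for item in reading('some_user'):
#       print(item['title'], item['status'], item['since'])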
def _extract_list_element_data(row):
image, content = row.find_all('div', recursive=False)
first, second = content.find_all('div', recursive=False)
return {
'image': image.find('img')['src'],
'title': first.find('a').text,
'url': first.find('a')['href'],
'comment': second.find('div').decode_contents(formatter='html')
}
def list_details(name):
    data = BeautifulSoup(urlopen('http://www.listal.com/list/%s' % name).read(), "html.parser")
rows = data.body.find_all(attrs={'class': 'notesrow'})
return [_extract_list_element_data(row) for row in rows]
| unlicense |
adminneyk/codificacionproyectando | application/views/Generacion/Generacion/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/lib-tk/ScrolledText.py | 20 | 1638 | # A ScrolledText widget feels like a text widget but also has a
# vertical scroll bar on its right. (Later, options may be added to
# add a horizontal bar as well, to make the bars disappear
# automatically when not needed, to move them to the other side of the
# window, etc.)
#
# Configuration options are passed to the Text widget.
# A Frame widget is inserted between the master and the text, to hold
# the Scrollbar widget.
# Most method calls are inherited from the Text widget; Pack methods
# are redirected to the Frame widget however.
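# Minimal usage sketch of the widget described above (illustrative only):
#
#     root = Tk()
#     stext = ScrolledText(root, bg='white', height=10)
#     stext.insert(END, 'Hello, world\n')
#     stext.pack(fill=BOTH, expand=1)
#     root.mainloop()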
from Tkinter import *
from Tkinter import _cnfmerge
class ScrolledText(Text):
def __init__(self, master=None, cnf=None, **kw):
if cnf is None:
cnf = {}
if kw:
cnf = _cnfmerge((cnf, kw))
fcnf = {}
for k in cnf.keys():
if type(k) == ClassType or k == 'name':
fcnf[k] = cnf[k]
del cnf[k]
self.frame = Frame(master, **fcnf)
self.vbar = Scrollbar(self.frame, name='vbar')
self.vbar.pack(side=RIGHT, fill=Y)
cnf['name'] = 'text'
Text.__init__(self, self.frame, **cnf)
self.pack(side=LEFT, fill=BOTH, expand=1)
self['yscrollcommand'] = self.vbar.set
self.vbar['command'] = self.yview
# Copy geometry methods of self.frame -- hack!
methods = Pack.__dict__.keys()
methods = methods + Grid.__dict__.keys()
methods = methods + Place.__dict__.keys()
for m in methods:
if m[0] != '_' and m != 'config' and m != 'configure':
setattr(self, m, getattr(self.frame, m))
| mit |
StructuralNeurobiologyLab/SyConnFS | syconnfs/representations/connectivity_helper.py | 1 | 4439 | import numpy as np
import super_segmentation as ss
import segmentation
import connectivity
def extract_connectivity_thread(args):
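    # args is expected to unpack as (sj_obj_ids, sj_version, ssd_version,
    # working_dir); comment added for readability, inferred from the
    # assignments below.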
sj_obj_ids = args[0]
sj_version = args[1]
ssd_version = args[2]
working_dir = args[3]
ssd = ss.SuperSegmentationDataset(working_dir,
version=ssd_version)
sd = segmentation.SegmentationDataset("sj",
version=sj_version,
working_dir=working_dir)
cons = []
for sj_obj_id in sj_obj_ids:
sj = sd.get_segmentation_object(sj_obj_id)
con = extract_connectivity_information(sj, ssd)
if con is not None:
if len(cons) == 0:
cons = con
else:
cons = np.concatenate((cons, con))
return cons
def sv_id_to_partner_ids_vec(cs_ids):
sv_ids = np.right_shift(cs_ids, 32)
sv_ids = np.concatenate((sv_ids[:, None],
(cs_ids - np.left_shift(sv_ids, 32))[:, None]),
axis=1)
return sv_ids
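# Sketch of the id packing convention inverted above: a contact-site id keeps
# one partner id in the high 32 bits and the other in the low 32 bits.
# Hypothetical values:
#   cs_id = (7 << 32) + 3
#   sv_id_to_partner_ids_vec(np.array([cs_id]))  # -> array([[7, 3]])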
def extract_connectivity_information(sj, ssd):
sj.load_attr_dict()
if not "connectivity" in sj.attr_dict:
return
ss_con_ids = ssd.id_changer[np.array(sj.attr_dict["connectivity"].keys(),
dtype=np.int)]
if len(ss_con_ids) == 0:
return
con_cnts = np.array(sj.attr_dict["connectivity"].values(), dtype=np.int)
# Removing intracellular sjs
ss_con_cnts = con_cnts[ss_con_ids[:, 0] != ss_con_ids[:, 1]]
if len(ss_con_cnts) == 0:
return
ss_con_ids = ss_con_ids[ss_con_ids[:, 0] != ss_con_ids[:, 1]]
# Adding the counts up
cs_ids = np.left_shift(np.max(ss_con_ids, axis=1), 32) + \
np.min(ss_con_ids, axis=1)
unique_cs_ids, idx = np.unique(cs_ids, return_inverse=True)
cs_con_cnts = np.bincount(idx, ss_con_cnts)
cs_con_cnts = cs_con_cnts / np.sum(cs_con_cnts)
# Going back to ssd domain
sso_ids = np.right_shift(unique_cs_ids, 32)
sso_ids = np.concatenate((sso_ids[:, None],
(unique_cs_ids -
np.left_shift(sso_ids, 32))[:, None]), axis=1)
# Threshold overlap
sso_ids = sso_ids[cs_con_cnts > .3]
if len(sso_ids) == 0:
return
cs_con_cnts = cs_con_cnts[cs_con_cnts > .3]
cs_con_cnts /= np.sum(cs_con_cnts)
sizes = sj.size * cs_con_cnts * np.product(sj.scaling) / 1e9
sj_ids = np.array([sj.id] * len(sizes))
sj_types = np.array([sj.attr_dict["type_ratio"]] * len(sizes))
sj_coords = np.array([sj.rep_coord] * len(sizes))
return np.concatenate([sso_ids, sj_ids[:, None], sizes[:, None], sj_types[:, None], sj_coords], axis=1)
def get_sso_specific_info_thread(args):
sso_ids = args[0]
sj_version = args[1]
ssd_version = args[2]
working_dir = args[3]
version = args[4]
ssd = ss.SuperSegmentationDataset(working_dir,
version=ssd_version)
cm = connectivity.ConnectivityMatrix(working_dir,
version=version,
sj_version=sj_version,
create=False)
axoness_entries = []
cell_types = {}
blacklist = []
shapes = {}
for sso_id in sso_ids:
print sso_id
sso = ssd.get_super_segmentation_object(sso_id)
if not sso.load_skeleton():
blacklist.append(sso_id)
continue
if "axoness" not in sso.skeleton:
blacklist.append(sso_id)
continue
if sso.cell_type is None:
blacklist.append(sso_id)
continue
con_mask, pos = np.where(cm.connectivity[:, :2] == sso_id)
sj_coords = cm.connectivity[con_mask, -3:]
sj_axoness = sso.axoness_for_coords(sj_coords)
con_ax = np.concatenate([con_mask[:, None], pos[:, None],
sj_axoness[:, None]], axis=1)
if len(axoness_entries) == 0:
axoness_entries = con_ax
else:
axoness_entries = np.concatenate((axoness_entries, con_ax))
cell_types[sso_id] = sso.cell_type
shapes[sso_id] = sso.shape
axoness_entries = np.array(axoness_entries, dtype=np.int)
return axoness_entries, cell_types, shapes, blacklist
| gpl-2.0 |
pjh5/nml502_spring2016 | porter_stemmer.py | 3 | 12210 | #!/usr/bin/env python
"""Porter Stemming Algorithm
This is the Porter stemming algorithm, ported to Python from the
version coded up in ANSI C by the author. It may be regarded
as canonical, in that it follows the algorithm presented in
Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,
no. 3, pp 130-137,
only differing from it at the points marked --DEPARTURE-- below.
See also http://www.tartarus.org/~martin/PorterStemmer
The algorithm as described in the paper could be exactly replicated
by adjusting the points of DEPARTURE, but this is barely necessary,
because (a) the points of DEPARTURE are definitely improvements, and
(b) no encoding of the Porter stemmer I have seen is anything like
as exact as this version, even with the points of DEPARTURE!
Vivake Gupta ([email protected])
Release 1: January 2001
Further adjustments by Santiago Bruno ([email protected])
to allow word input not restricted to one word per line, leading
to:
release 2: July 2008
"""
import sys
class PorterStemmer:
def __init__(self):
"""The main part of the stemming algorithm starts here.
b is a buffer holding a word to be stemmed. The letters are in b[k0],
b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
readjusted downwards as the stemming progresses. Zero termination is
not in fact used in the algorithm.
Note that only lower case sequences are stemmed. Forcing to lower case
should be done before stem(...) is called.
"""
self.b = "" # buffer for word to be stemmed
self.k = 0
self.k0 = 0
self.j = 0 # j is a general offset into the string
def cons(self, i):
"""cons(i) is TRUE <=> b[i] is a consonant."""
if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or self.b[i] == 'o' or self.b[i] == 'u':
return 0
if self.b[i] == 'y':
if i == self.k0:
return 1
else:
return (not self.cons(i - 1))
return 1
def m(self):
"""m() measures the number of consonant sequences between k0 and j.
if c is a consonant sequence and v a vowel sequence, and <..>
indicates arbitrary presence,
<c><v> gives 0
<c>vc<v> gives 1
<c>vcvc<v> gives 2
<c>vcvcvc<v> gives 3
....
"""
n = 0
i = self.k0
while 1:
if i > self.j:
return n
if not self.cons(i):
break
i = i + 1
i = i + 1
while 1:
while 1:
if i > self.j:
return n
if self.cons(i):
break
i = i + 1
i = i + 1
n = n + 1
while 1:
if i > self.j:
return n
if not self.cons(i):
break
i = i + 1
i = i + 1
def vowelinstem(self):
"""vowelinstem() is TRUE <=> k0,...j contains a vowel"""
for i in range(self.k0, self.j + 1):
if not self.cons(i):
return 1
return 0
def doublec(self, j):
"""doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
if j < (self.k0 + 1):
return 0
if (self.b[j] != self.b[j-1]):
return 0
return self.cons(j)
def cvc(self, i):
"""cvc(i) is TRUE <=> i-2,i-1,i has the form consonant - vowel - consonant
and also if the second c is not w,x or y. this is used when trying to
restore an e at the end of a short e.g.
cav(e), lov(e), hop(e), crim(e), but
snow, box, tray.
"""
if i < (self.k0 + 2) or not self.cons(i) or self.cons(i-1) or not self.cons(i-2):
return 0
ch = self.b[i]
if ch == 'w' or ch == 'x' or ch == 'y':
return 0
return 1
def ends(self, s):
"""ends(s) is TRUE <=> k0,...k ends with the string s."""
length = len(s)
if s[length - 1] != self.b[self.k]: # tiny speed-up
return 0
if length > (self.k - self.k0 + 1):
return 0
if self.b[self.k-length+1:self.k+1] != s:
return 0
self.j = self.k - length
return 1
def setto(self, s):
"""setto(s) sets (j+1),...k to the characters in the string s, readjusting k."""
length = len(s)
self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:]
self.k = self.j + length
def r(self, s):
"""r(s) is used further down."""
if self.m() > 0:
self.setto(s)
def step1ab(self):
"""step1ab() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
"""
if self.b[self.k] == 's':
if self.ends("sses"):
self.k = self.k - 2
elif self.ends("ies"):
self.setto("i")
elif self.b[self.k - 1] != 's':
self.k = self.k - 1
if self.ends("eed"):
if self.m() > 0:
self.k = self.k - 1
elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
self.k = self.j
if self.ends("at"): self.setto("ate")
elif self.ends("bl"): self.setto("ble")
elif self.ends("iz"): self.setto("ize")
elif self.doublec(self.k):
self.k = self.k - 1
ch = self.b[self.k]
if ch == 'l' or ch == 's' or ch == 'z':
self.k = self.k + 1
elif (self.m() == 1 and self.cvc(self.k)):
self.setto("e")
def step1c(self):
"""step1c() turns terminal y to i when there is another vowel in the stem."""
if (self.ends("y") and self.vowelinstem()):
self.b = self.b[:self.k] + 'i' + self.b[self.k+1:]
def step2(self):
"""step2() maps double suffices to single ones.
so -ization ( = -ize plus -ation) maps to -ize etc. note that the
string before the suffix must give m() > 0.
"""
if self.b[self.k - 1] == 'a':
if self.ends("ational"): self.r("ate")
elif self.ends("tional"): self.r("tion")
elif self.b[self.k - 1] == 'c':
if self.ends("enci"): self.r("ence")
elif self.ends("anci"): self.r("ance")
elif self.b[self.k - 1] == 'e':
if self.ends("izer"): self.r("ize")
elif self.b[self.k - 1] == 'l':
if self.ends("bli"): self.r("ble") # --DEPARTURE--
# To match the published algorithm, replace this phrase with
# if self.ends("abli"): self.r("able")
elif self.ends("alli"): self.r("al")
elif self.ends("entli"): self.r("ent")
elif self.ends("eli"): self.r("e")
elif self.ends("ousli"): self.r("ous")
elif self.b[self.k - 1] == 'o':
if self.ends("ization"): self.r("ize")
elif self.ends("ation"): self.r("ate")
elif self.ends("ator"): self.r("ate")
elif self.b[self.k - 1] == 's':
if self.ends("alism"): self.r("al")
elif self.ends("iveness"): self.r("ive")
elif self.ends("fulness"): self.r("ful")
elif self.ends("ousness"): self.r("ous")
elif self.b[self.k - 1] == 't':
if self.ends("aliti"): self.r("al")
elif self.ends("iviti"): self.r("ive")
elif self.ends("biliti"): self.r("ble")
elif self.b[self.k - 1] == 'g': # --DEPARTURE--
if self.ends("logi"): self.r("log")
# To match the published algorithm, delete this phrase
def step3(self):
"""step3() dels with -ic-, -full, -ness etc. similar strategy to step2."""
if self.b[self.k] == 'e':
if self.ends("icate"): self.r("ic")
elif self.ends("ative"): self.r("")
elif self.ends("alize"): self.r("al")
elif self.b[self.k] == 'i':
if self.ends("iciti"): self.r("ic")
elif self.b[self.k] == 'l':
if self.ends("ical"): self.r("ic")
elif self.ends("ful"): self.r("")
elif self.b[self.k] == 's':
if self.ends("ness"): self.r("")
def step4(self):
"""step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
if self.b[self.k - 1] == 'a':
if self.ends("al"): pass
else: return
elif self.b[self.k - 1] == 'c':
if self.ends("ance"): pass
elif self.ends("ence"): pass
else: return
elif self.b[self.k - 1] == 'e':
if self.ends("er"): pass
else: return
elif self.b[self.k - 1] == 'i':
if self.ends("ic"): pass
else: return
elif self.b[self.k - 1] == 'l':
if self.ends("able"): pass
elif self.ends("ible"): pass
else: return
elif self.b[self.k - 1] == 'n':
if self.ends("ant"): pass
elif self.ends("ement"): pass
elif self.ends("ment"): pass
elif self.ends("ent"): pass
else: return
elif self.b[self.k - 1] == 'o':
if self.ends("ion") and (self.b[self.j] == 's' or self.b[self.j] == 't'): pass
elif self.ends("ou"): pass
# takes care of -ous
else: return
elif self.b[self.k - 1] == 's':
if self.ends("ism"): pass
else: return
elif self.b[self.k - 1] == 't':
if self.ends("ate"): pass
elif self.ends("iti"): pass
else: return
elif self.b[self.k - 1] == 'u':
if self.ends("ous"): pass
else: return
elif self.b[self.k - 1] == 'v':
if self.ends("ive"): pass
else: return
elif self.b[self.k - 1] == 'z':
if self.ends("ize"): pass
else: return
else:
return
if self.m() > 1:
self.k = self.j
def step5(self):
"""step5() removes a final -e if m() > 1, and changes -ll to -l if
m() > 1.
"""
self.j = self.k
if self.b[self.k] == 'e':
a = self.m()
if a > 1 or (a == 1 and not self.cvc(self.k-1)):
self.k = self.k - 1
if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
self.k = self.k -1
def stem(self, p, i, j):
"""In stem(p,i,j), p is a char pointer, and the string to be stemmed
is from p[i] to p[j] inclusive. Typically i is zero and j is the
offset to the last character of a string, (p[j+1] == '\0'). The
stemmer adjusts the characters p[i] ... p[j] and returns the new
end-point of the string, k. Stemming never increases word length, so
i <= k <= j. To turn the stemmer into a module, declare 'stem' as
extern, and delete the remainder of this file.
"""
# copy the parameters into statics
self.b = p
self.k = j
self.k0 = i
if self.k <= self.k0 + 1:
return self.b # --DEPARTURE--
# With this line, strings of length 1 or 2 don't go through the
# stemming process, although no mention is made of this in the
# published algorithm. Remove the line to match the published
# algorithm.
self.step1ab()
self.step1c()
self.step2()
self.step3()
self.step4()
self.step5()
return self.b[self.k0:self.k+1]
| mit |
michelts/lettuce | tests/integration/lib/Django-1.3/tests/modeltests/m2m_through/tests.py | 90 | 12896 | from datetime import datetime
from operator import attrgetter
from django.test import TestCase
from models import Person, Group, Membership, CustomMembership, \
TestNoDefaultsOrNulls, PersonSelfRefM2M, Friendship
class M2mThroughTests(TestCase):
def setUp(self):
self.bob = Person.objects.create(name='Bob')
self.jim = Person.objects.create(name='Jim')
self.jane = Person.objects.create(name='Jane')
self.rock = Group.objects.create(name='Rock')
self.roll = Group.objects.create(name='Roll')
def test_m2m_through(self):
# We start out by making sure that the Group 'rock' has no members.
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
# To make Jim a member of Group Rock, simply create a Membership object.
m1 = Membership.objects.create(person=self.jim, group=self.rock)
# We can do the same for Jane and Rock.
m2 = Membership.objects.create(person=self.jane, group=self.rock)
# Let's check to make sure that it worked. Jane and Jim should be members of Rock.
self.assertQuerysetEqual(
self.rock.members.all(), [
'Jane',
'Jim'
],
attrgetter("name")
)
# Now we can add a bunch more Membership objects to test with.
m3 = Membership.objects.create(person=self.bob, group=self.roll)
m4 = Membership.objects.create(person=self.jim, group=self.roll)
m5 = Membership.objects.create(person=self.jane, group=self.roll)
# We can get Jim's Group membership as with any ForeignKey.
self.assertQuerysetEqual(
self.jim.group_set.all(), [
'Rock',
'Roll'
],
attrgetter("name")
)
# Querying the intermediary model works like normal.
self.assertEqual(
repr(Membership.objects.get(person=self.jane, group=self.rock)),
'<Membership: Jane is a member of Rock>'
)
# It's not only get that works. Filter works like normal as well.
self.assertQuerysetEqual(
Membership.objects.filter(person=self.jim), [
'<Membership: Jim is a member of Rock>',
'<Membership: Jim is a member of Roll>'
]
)
self.rock.members.clear()
# Now there will be no members of Rock.
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_forward_descriptors(self):
# Due to complications with adding via an intermediary model,
# the add method is not provided.
self.assertRaises(AttributeError, lambda: self.rock.members.add(self.bob))
# Create is also disabled as it suffers from the same problems as add.
self.assertRaises(AttributeError, lambda: self.rock.members.create(name='Anne'))
# Remove has similar complications, and is not provided either.
self.assertRaises(AttributeError, lambda: self.rock.members.remove(self.jim))
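        # With a through model, the supported way to add a relation is to
        # create the intermediary instance directly, as done below
        # (clarifying comment, not part of the original test).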
m1 = Membership.objects.create(person=self.jim, group=self.rock)
m2 = Membership.objects.create(person=self.jane, group=self.rock)
# Here we back up the list of all members of Rock.
backup = list(self.rock.members.all())
# ...and we verify that it has worked.
self.assertEqual(
[p.name for p in backup],
['Jane', 'Jim']
)
# The clear function should still work.
self.rock.members.clear()
# Now there will be no members of Rock.
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
# Assignment should not work with models specifying a through model for many of
# the same reasons as adding.
self.assertRaises(AttributeError, setattr, self.rock, "members", backup)
# Let's re-save those instances that we've cleared.
m1.save()
m2.save()
# Verifying that those instances were re-saved successfully.
self.assertQuerysetEqual(
self.rock.members.all(),[
'Jane',
'Jim'
],
attrgetter("name")
)
def test_reverse_descriptors(self):
# Due to complications with adding via an intermediary model,
# the add method is not provided.
self.assertRaises(AttributeError, lambda: self.bob.group_set.add(self.rock))
# Create is also disabled as it suffers from the same problems as add.
self.assertRaises(AttributeError, lambda: self.bob.group_set.create(name="funk"))
# Remove has similar complications, and is not provided either.
self.assertRaises(AttributeError, lambda: self.jim.group_set.remove(self.rock))
m1 = Membership.objects.create(person=self.jim, group=self.rock)
m2 = Membership.objects.create(person=self.jim, group=self.roll)
# Here we back up the list of all of Jim's groups.
backup = list(self.jim.group_set.all())
self.assertEqual(
[g.name for g in backup],
['Rock', 'Roll']
)
# The clear function should still work.
self.jim.group_set.clear()
# Now Jim will be in no groups.
self.assertQuerysetEqual(
self.jim.group_set.all(),
[]
)
# Assignment should not work with models specifying a through model for many of
# the same reasons as adding.
self.assertRaises(AttributeError, setattr, self.jim, "group_set", backup)
# Let's re-save those instances that we've cleared.
m1.save()
m2.save()
# Verifying that those instances were re-saved successfully.
self.assertQuerysetEqual(
self.jim.group_set.all(),[
'Rock',
'Roll'
],
attrgetter("name")
)
def test_custom_tests(self):
# Let's see if we can query through our second relationship.
self.assertQuerysetEqual(
self.rock.custom_members.all(),
[]
)
# We can query in the opposite direction as well.
self.assertQuerysetEqual(
self.bob.custom.all(),
[]
)
cm1 = CustomMembership.objects.create(person=self.bob, group=self.rock)
cm2 = CustomMembership.objects.create(person=self.jim, group=self.rock)
# If we get the number of people in Rock, it should be both Bob and Jim.
self.assertQuerysetEqual(
self.rock.custom_members.all(),[
'Bob',
'Jim'
],
attrgetter("name")
)
# Bob should only be in one custom group.
self.assertQuerysetEqual(
self.bob.custom.all(),[
'Rock'
],
attrgetter("name")
)
# Let's make sure our new descriptors don't conflict with the FK related_name.
self.assertQuerysetEqual(
self.bob.custom_person_related_name.all(),[
'<CustomMembership: Bob is a member of Rock>'
]
)
def test_self_referential_tests(self):
# Let's first create a person who has no friends.
tony = PersonSelfRefM2M.objects.create(name="Tony")
self.assertQuerysetEqual(
tony.friends.all(),
[]
)
chris = PersonSelfRefM2M.objects.create(name="Chris")
f = Friendship.objects.create(first=tony, second=chris, date_friended=datetime.now())
# Tony should now show that Chris is his friend.
self.assertQuerysetEqual(
tony.friends.all(),[
'Chris'
],
attrgetter("name")
)
# But we haven't established that Chris is Tony's Friend.
self.assertQuerysetEqual(
chris.friends.all(),
[]
)
f2 = Friendship.objects.create(first=chris, second=tony, date_friended=datetime.now())
# Having added Chris as a friend, let's make sure that his friend set reflects
# that addition.
self.assertQuerysetEqual(
chris.friends.all(),[
'Tony'
],
attrgetter("name")
)
# Chris gets mad and wants to get rid of all of his friends.
chris.friends.clear()
# Now he should not have any more friends.
self.assertQuerysetEqual(
chris.friends.all(),
[]
)
# Since this isn't a symmetrical relation, Tony's friend link still exists.
self.assertQuerysetEqual(
tony.friends.all(),[
'Chris'
],
attrgetter("name")
)
def test_query_tests(self):
m1 = Membership.objects.create(person=self.jim, group=self.rock)
m2 = Membership.objects.create(person=self.jane, group=self.rock)
m3 = Membership.objects.create(person=self.bob, group=self.roll)
m4 = Membership.objects.create(person=self.jim, group=self.roll)
m5 = Membership.objects.create(person=self.jane, group=self.roll)
m2.invite_reason = "She was just awesome."
m2.date_joined = datetime(2006, 1, 1)
m2.save()
m3.date_joined = datetime(2004, 1, 1)
m3.save()
m5.date_joined = datetime(2004, 1, 1)
m5.save()
# We can query for the related model by using its attribute name (members, in
# this case).
self.assertQuerysetEqual(
Group.objects.filter(members__name='Bob'),[
'Roll'
],
attrgetter("name")
)
# To query through the intermediary model, we specify its model name.
# In this case, membership.
self.assertQuerysetEqual(
Group.objects.filter(membership__invite_reason="She was just awesome."),[
'Rock'
],
attrgetter("name")
)
# If we want to query in the reverse direction by the related model, use its
# model name (group, in this case).
self.assertQuerysetEqual(
Person.objects.filter(group__name="Rock"),[
'Jane',
'Jim'
],
attrgetter("name")
)
cm1 = CustomMembership.objects.create(person=self.bob, group=self.rock)
cm2 = CustomMembership.objects.create(person=self.jim, group=self.rock)
# If the m2m field has specified a related_name, using that will work.
self.assertQuerysetEqual(
Person.objects.filter(custom__name="Rock"),[
'Bob',
'Jim'
],
attrgetter("name")
)
# To query through the intermediary model in the reverse direction, we again
# specify its model name (membership, in this case).
self.assertQuerysetEqual(
Person.objects.filter(membership__invite_reason="She was just awesome."),[
'Jane'
],
attrgetter("name")
)
# Let's see all of the groups that Jane joined after 1 Jan 2005:
self.assertQuerysetEqual(
Group.objects.filter(membership__date_joined__gt=datetime(2005, 1, 1), membership__person=self.jane),[
'Rock'
],
attrgetter("name")
)
# Queries also work in the reverse direction: Now let's see all of the people
# that have joined Rock since 1 Jan 2005:
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gt=datetime(2005, 1, 1), membership__group=self.rock),[
'Jane',
'Jim'
],
attrgetter("name")
)
# Conceivably, queries through membership could return correct, but non-unique
# querysets. To demonstrate this, we query for all people who have joined a
# group after 2004:
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gt=datetime(2004, 1, 1)),[
'Jane',
'Jim',
'Jim'
],
attrgetter("name")
)
# Jim showed up twice, because he joined two groups ('Rock' and 'Roll'):
self.assertEqual(
[(m.person.name, m.group.name) for m in Membership.objects.filter(date_joined__gt=datetime(2004, 1, 1))],
[(u'Jane', u'Rock'), (u'Jim', u'Rock'), (u'Jim', u'Roll')]
)
# QuerySet's distinct() method can correct this problem.
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gt=datetime(2004, 1, 1)).distinct(),[
'Jane',
'Jim'
],
attrgetter("name")
)
| gpl-3.0 |
Metallice/android_kernel_samsung_espressowifi | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count the idle task."""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
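# find_time_slice() above is a plain binary search over the sorted,
# non-overlapping slices. A worked example, assuming three slices covering
# [0,10), [10,20) and [20,30): find_time_slice(15) probes the middle slice,
# sees start <= 15 <= end, and returns its index; a timestamp outside every
# slice leaves found at -1.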
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
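# How this script is typically driven (illustrative; the exact subcommands
# depend on your perf build):
#
#   perf record -e 'sched:*' -a sleep 10   # capture scheduler events
#   perf script -s sched-migration.py      # replay them through the
#                                          # handlers defined below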
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
yqm/sl4a | python/gdata/tests/atom_tests/service_test.py | 89 | 6868 | #!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '[email protected] (Jeff Scudder)'
import unittest
import atom.service
import atom.mock_http_core
import gdata.test_config as conf
class AtomServiceUnitTest(unittest.TestCase):
def testBuildUriWithNoParams(self):
x = atom.service.BuildUri('/base/feeds/snippets')
self.assert_(x == '/base/feeds/snippets')
def testBuildUriWithParams(self):
# Add parameters to a URI
x = atom.service.BuildUri('/base/feeds/snippets', url_params={'foo': 'bar',
'bq': 'digital camera'})
self.assert_(x == '/base/feeds/snippets?foo=bar&bq=digital+camera')
self.assert_(x.startswith('/base/feeds/snippets'))
self.assert_(x.count('?') == 1)
self.assert_(x.count('&') == 1)
self.assert_(x.index('?') < x.index('&'))
self.assert_(x.index('bq=digital+camera') != -1)
# Add parameters to a URI that already has parameters
x = atom.service.BuildUri('/base/feeds/snippets?bq=digital+camera',
url_params={'foo': 'bar', 'max-results': '250'})
self.assert_(x.startswith('/base/feeds/snippets?bq=digital+camera'))
self.assert_(x.count('?') == 1)
self.assert_(x.count('&') == 2)
self.assert_(x.index('?') < x.index('&'))
self.assert_(x.index('max-results=250') != -1)
self.assert_(x.index('foo=bar') != -1)
def testBuildUriWithoutParameterEscaping(self):
x = atom.service.BuildUri('/base/feeds/snippets',
url_params={'foo': ' bar', 'bq': 'digital camera'},
escape_params=False)
self.assert_(x.index('foo= bar') != -1)
self.assert_(x.index('bq=digital camera') != -1)
def testParseHttpUrl(self):
atom_service = atom.service.AtomService('code.google.com')
self.assertEquals(atom_service.server, 'code.google.com')
(host, port, ssl, path) = atom.service.ProcessUrl(atom_service,
'http://www.google.com/service/subservice?name=value')
self.assertEquals(ssl, False)
self.assertEquals(host, 'www.google.com')
self.assertEquals(port, 80)
self.assertEquals(path, '/service/subservice?name=value')
def testParseHttpUrlWithPort(self):
atom_service = atom.service.AtomService('code.google.com')
self.assertEquals(atom_service.server, 'code.google.com')
(host, port, ssl, path) = atom.service.ProcessUrl(atom_service,
'http://www.google.com:12/service/subservice?name=value&newname=newvalue')
self.assertEquals(ssl, False)
self.assertEquals(host, 'www.google.com')
self.assertEquals(port, 12)
self.assert_(path.startswith('/service/subservice?'))
self.assert_(path.find('name=value') >= len('/service/subservice?'))
self.assert_(path.find('newname=newvalue') >= len('/service/subservice?'))
def testParseHttpsUrl(self):
atom_service = atom.service.AtomService('code.google.com')
self.assertEquals(atom_service.server, 'code.google.com')
(host, port, ssl, path) = atom.service.ProcessUrl(atom_service,
'https://www.google.com/service/subservice?name=value&newname=newvalue')
self.assertEquals(ssl, True)
self.assertEquals(host, 'www.google.com')
self.assertEquals(port, 443)
self.assert_(path.startswith('/service/subservice?'))
self.assert_(path.find('name=value') >= len('/service/subservice?'))
self.assert_(path.find('newname=newvalue') >= len('/service/subservice?'))
def testParseHttpsUrlWithPort(self):
atom_service = atom.service.AtomService('code.google.com')
self.assertEquals(atom_service.server, 'code.google.com')
(host, port, ssl, path) = atom.service.ProcessUrl(atom_service,
'https://www.google.com:13981/service/subservice?name=value&newname=newvalue')
self.assertEquals(ssl, True)
self.assertEquals(host, 'www.google.com')
self.assertEquals(port, 13981)
self.assert_(path.startswith('/service/subservice?'))
self.assert_(path.find('name=value') >= len('/service/subservice?'))
self.assert_(path.find('newname=newvalue') >= len('/service/subservice?'))
def testSetBasicAuth(self):
client = atom.service.AtomService()
client.UseBasicAuth('foo', 'bar')
token = client.token_store.find_token('http://')
self.assert_(isinstance(token, atom.service.BasicAuthToken))
self.assertEquals(token.auth_header, 'Basic Zm9vOmJhcg==')
client.UseBasicAuth('','')
token = client.token_store.find_token('http://')
self.assert_(isinstance(token, atom.service.BasicAuthToken))
self.assertEquals(token.auth_header, 'Basic Og==')
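# The expected header values above are just base64('user:password'); a
# quick sanity check using only the standard library (sketch, Python 2
# like the rest of this file):
#
#   import base64
#   base64.b64encode('foo:bar')   # -> 'Zm9vOmJhcg=='
#   base64.b64encode(':')         # -> 'Og=='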
def testProcessUrlWithStringForService(self):
(server, port, ssl, uri) = atom.service.ProcessUrl(
service='www.google.com', url='/base/feeds/items')
self.assertEquals(server, 'www.google.com')
self.assertEquals(port, 80)
self.assertEquals(ssl, False)
self.assert_(uri.startswith('/base/feeds/items'))
client = atom.service.AtomService()
client.server = 'www.google.com'
client.ssl = True
(server, port, ssl, uri) = atom.service.ProcessUrl(
service=client, url='/base/feeds/items')
self.assertEquals(server, 'www.google.com')
self.assertEquals(ssl, True)
self.assert_(uri.startswith('/base/feeds/items'))
(server, port, ssl, uri) = atom.service.ProcessUrl(service=None,
url='https://www.google.com/base/feeds/items')
self.assertEquals(server, 'www.google.com')
self.assertEquals(port, 443)
self.assertEquals(ssl, True)
self.assert_(uri.startswith('/base/feeds/items'))
def testHostHeaderContainsNonDefaultPort(self):
client = atom.service.AtomService()
client.http_client.v2_http_client = atom.mock_http_core.EchoHttpClient()
response = client.Get('http://example.com')
self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
response = client.Get('https://example.com')
self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
response = client.Get('https://example.com:8080')
self.assertEqual(response.getheader('Echo-Host'), 'example.com:8080')
response = client.Get('http://example.com:1234')
self.assertEqual(response.getheader('Echo-Host'), 'example.com:1234')
def suite():
return conf.build_suite([AtomServiceUnitTest])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
dfang/odoo | addons/sale_stock/models/account_invoice.py | 15 | 2982 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
incoterms_id = fields.Many2one('stock.incoterms', string="Incoterms",
help="Incoterms are series of sales terms. They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices.",
readonly=True, states={'draft': [('readonly', False)]})
class AccountInvoiceLine(models.Model):
_inherit = "account.invoice.line"
def _get_anglo_saxon_price_unit(self):
price_unit = super(AccountInvoiceLine,self)._get_anglo_saxon_price_unit()
# in case of anglo saxon with a product configured as invoiced based on delivery, with perpetual
# valuation and real price costing method, we must find the real price for the cost of good sold
if self.product_id.invoice_policy == "delivery":
for s_line in self.sale_line_ids:
# qtys already invoiced
qty_done = sum([x.uom_id._compute_quantity(x.quantity, x.product_id.uom_id) for x in s_line.invoice_lines if x.invoice_id.state in ('open', 'paid')])
quantity = self.uom_id._compute_quantity(self.quantity, self.product_id.uom_id)
# Put moves in fixed order by date executed
moves = self.env['stock.move']
for procurement in s_line.procurement_ids:
moves |= procurement.move_ids
# recordset.sorted() returns a new recordset rather than sorting in
# place, so the result must be reassigned for the ordering to stick
moves = moves.sorted(lambda x: x.date)
# Go through all the moves and do nothing until you get to qty_done
# Beyond qty_done we need to calculate the average of the price_unit
# on the moves we encounter.
average_price_unit = self._compute_average_price(qty_done, quantity, moves)
price_unit = average_price_unit or price_unit
price_unit = self.product_id.uom_id._compute_price(price_unit, self.uom_id)
return price_unit
def _compute_average_price(self, qty_done, quantity, moves):
average_price_unit = 0
qty_delivered = 0
invoiced_qty = 0
for move in moves:
if move.state != 'done':
continue
invoiced_qty += move.product_qty
if invoiced_qty <= qty_done:
continue
qty_to_consider = move.product_qty
if invoiced_qty - move.product_qty < qty_done:
qty_to_consider = invoiced_qty - qty_done
qty_to_consider = min(qty_to_consider, quantity - qty_delivered)
qty_delivered += qty_to_consider
average_price_unit = (average_price_unit * (qty_delivered - qty_to_consider) + move.price_unit * qty_to_consider) / qty_delivered
if qty_delivered == quantity:
break
return average_price_unit
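# A worked example of the weighted average above, with hypothetical
# numbers: qty_done=2 units already invoiced, quantity=3 to invoice now,
# and three done moves of 2, 2 and 1 units at 10.0, 12.0 and 20.0:
#   move 1: invoiced_qty=2 -> skipped (<= qty_done)
#   move 2: invoiced_qty=4 -> 2 units considered at 12.0
#   move 3: invoiced_qty=5 -> 1 unit considered at 20.0
#   average_price_unit = (2 * 12.0 + 1 * 20.0) / 3 = 14.67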
| agpl-3.0 |
akash1808/nova_test_latest | nova/tests/unit/api/openstack/compute/contrib/test_availability_zone.py | 33 | 15688 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
from nova.api.openstack.compute.contrib import availability_zone as az_v2
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import availability_zone as az_v21
from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
from nova.api.openstack.compute import servers as servers_v2
from nova.api.openstack import extensions
from nova import availability_zones
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova import servicegroup
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_service
from oslo_config import cfg
FAKE_UUID = fakes.FAKE_UUID
def fake_service_get_all(context, disabled=None):
def __fake_service(binary, availability_zone,
created_at, updated_at, host, disabled):
return dict(test_service.fake_service,
binary=binary,
availability_zone=availability_zone,
available_zones=availability_zone,
created_at=created_at,
updated_at=updated_at,
host=host,
disabled=disabled)
if disabled:
return [__fake_service("nova-compute", "zone-2",
datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", True),
__fake_service("nova-scheduler", "internal",
datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", True),
__fake_service("nova-network", "internal",
datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
"fake_host-2", True)]
else:
return [__fake_service("nova-compute", "zone-1",
datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
__fake_service("nova-sched", "internal",
datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
__fake_service("nova-network", "internal",
datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
"fake_host-2", False)]
def fake_service_is_up(self, service):
return service['binary'] != u"nova-network"
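# fake_service_is_up() deliberately reports nova-network as down; this is
# why the detail tests below expect 'available': False for the nova-network
# entry while every other service shows up as available.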
def fake_set_availability_zones(context, services):
return services
def fake_get_availability_zones(context):
return ['nova'], []
CONF = cfg.CONF
class AvailabilityZoneApiTestV21(test.NoDBTestCase):
availability_zone = az_v21
def setUp(self):
super(AvailabilityZoneApiTestV21, self).setUp()
availability_zones.reset_cache()
self.stubs.Set(db, 'service_get_all', fake_service_get_all)
self.stubs.Set(availability_zones, 'set_availability_zones',
fake_set_availability_zones)
self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
self.controller = self.availability_zone.AvailabilityZoneController()
self.req = fakes.HTTPRequest.blank('')
def test_filtered_availability_zones(self):
zones = ['zone1', 'internal']
expected = [{'zoneName': 'zone1',
'zoneState': {'available': True},
"hosts": None}]
result = self.controller._get_filtered_availability_zones(zones, True)
self.assertEqual(result, expected)
expected = [{'zoneName': 'zone1',
'zoneState': {'available': False},
"hosts": None}]
result = self.controller._get_filtered_availability_zones(zones,
False)
self.assertEqual(result, expected)
def test_availability_zone_index(self):
resp_dict = self.controller.index(self.req)
self.assertIn('availabilityZoneInfo', resp_dict)
zones = resp_dict['availabilityZoneInfo']
self.assertEqual(len(zones), 2)
self.assertEqual(zones[0]['zoneName'], u'zone-1')
self.assertTrue(zones[0]['zoneState']['available'])
self.assertIsNone(zones[0]['hosts'])
self.assertEqual(zones[1]['zoneName'], u'zone-2')
self.assertFalse(zones[1]['zoneState']['available'])
self.assertIsNone(zones[1]['hosts'])
def test_availability_zone_detail(self):
resp_dict = self.controller.detail(self.req)
self.assertIn('availabilityZoneInfo', resp_dict)
zones = resp_dict['availabilityZoneInfo']
self.assertEqual(len(zones), 3)
timestamp = iso8601.parse_date("2012-12-26T14:45:25Z")
nova_network_timestamp = iso8601.parse_date("2012-12-26T14:45:24Z")
expected = [{'zoneName': 'zone-1',
'zoneState': {'available': True},
'hosts': {'fake_host-1': {
'nova-compute': {'active': True, 'available': True,
'updated_at': timestamp}}}},
{'zoneName': 'internal',
'zoneState': {'available': True},
'hosts': {'fake_host-1': {
'nova-sched': {'active': True, 'available': True,
'updated_at': timestamp}},
'fake_host-2': {
'nova-network': {
'active': True,
'available': False,
'updated_at': nova_network_timestamp}}}},
{'zoneName': 'zone-2',
'zoneState': {'available': False},
'hosts': None}]
self.assertEqual(expected, zones)
def test_availability_zone_detail_no_services(self):
expected_response = {'availabilityZoneInfo':
[{'zoneState': {'available': True},
'hosts': {},
'zoneName': 'nova'}]}
self.stubs.Set(availability_zones, 'get_availability_zones',
fake_get_availability_zones)
resp_dict = self.controller.detail(self.req)
self.assertThat(resp_dict,
matchers.DictMatches(expected_response))
class AvailabilityZoneApiTestV2(AvailabilityZoneApiTestV21):
availability_zone = az_v2
def setUp(self):
super(AvailabilityZoneApiTestV2, self).setUp()
self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.non_admin_req = fakes.HTTPRequest.blank('')
def test_availability_zone_detail_with_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.detail, self.non_admin_req)
class ServersControllerCreateTestV21(test.TestCase):
base_url = '/v2/fake/'
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTestV21, self).setUp()
self.instance_cache_num = 0
self._set_up_controller()
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'availability_zone': 'nova',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
})
return instance
fake.stub_out_image_service(self.stubs)
self.stubs.Set(db, 'instance_create', instance_create)
self.req = fakes.HTTPRequest.blank('')
def _set_up_controller(self):
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist',
'os-availability-zone',
'osapi_v3')
self.no_availability_zone_controller = servers_v21.ServersController(
extension_info=ext_info)
def _verify_no_availability_zone(self, **kwargs):
self.assertNotIn('availability_zone', kwargs)
def _test_create_extra(self, params, controller):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
server.update(params)
body = dict(server=server)
server = controller.create(self.req, body=body).obj['server']
def test_create_instance_with_availability_zone_disabled(self):
params = {'availability_zone': 'foo'}
old_create = compute_api.API.create
def create(*args, **kwargs):
self._verify_no_availability_zone(**kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params, self.no_availability_zone_controller)
def _create_instance_with_availability_zone(self, zone_name):
def create(*args, **kwargs):
self.assertIn('availability_zone', kwargs)
self.assertEqual('nova', kwargs['availability_zone'])
return old_create(*args, **kwargs)
old_create = compute_api.API.create
self.stubs.Set(compute_api.API, 'create', create)
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
body = {
'server': {
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
'availability_zone': zone_name,
},
}
admin_context = context.get_admin_context()
db.service_create(admin_context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
agg = db.aggregate_create(admin_context,
{'name': 'agg1'}, {'availability_zone': 'nova'})
db.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
return self.req, body
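# Nova derives a compute host's availability zone from host-aggregate
# metadata, which is why the helper above creates an aggregate carrying
# {'availability_zone': 'nova'} and attaches 'host1_zones' to it before
# the create request is issued.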
def test_create_instance_with_availability_zone(self):
zone_name = 'nova'
req, body = self._create_instance_with_availability_zone(zone_name)
res = self.controller.create(req, body=body).obj
server = res['server']
self.assertEqual(fakes.FAKE_UUID, server['id'])
def test_create_instance_with_invalid_availability_zone_too_long(self):
zone_name = 'a' * 256
req, body = self._create_instance_with_availability_zone(zone_name)
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_instance_with_invalid_availability_zone_too_short(self):
zone_name = ''
req, body = self._create_instance_with_availability_zone(zone_name)
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_instance_with_invalid_availability_zone_not_str(self):
zone_name = 111
req, body = self._create_instance_with_availability_zone(zone_name)
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_instance_without_availability_zone(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
body = {
'server': {
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
res = self.controller.create(self.req, body=body).obj
server = res['server']
self.assertEqual(fakes.FAKE_UUID, server['id'])
class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
def _set_up_controller(self):
ext_mgr = extensions.ExtensionManager()
ext_mgr.extensions = {'os-availability-zone': 'fake'}
self.controller = servers_v2.Controller(ext_mgr)
ext_mgr_no_az = extensions.ExtensionManager()
ext_mgr_no_az.extensions = {}
self.no_availability_zone_controller = servers_v2.Controller(
ext_mgr_no_az)
def _verify_no_availability_zone(self, **kwargs):
self.assertIsNone(kwargs['availability_zone'])
def test_create_instance_with_invalid_availability_zone_too_long(self):
# NOTE: v2.0 API does not check this bad request case.
# So we skip this test for v2.0 API.
pass
def test_create_instance_with_invalid_availability_zone_too_short(self):
# NOTE: v2.0 API does not check this bad request case.
# So we skip this test for v2.0 API.
pass
def test_create_instance_with_invalid_availability_zone_not_str(self):
# NOTE: v2.0 API does not check this bad request case.
# So we skip this test for v2.0 API.
pass
| apache-2.0 |
caphrim007/ansible | lib/ansible/plugins/strategy/free.py | 8 | 13129 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
strategy: free
short_description: Executes tasks without waiting for all hosts
description:
- Task execution is as fast as possible per batch as defined by C(serial) (default all).
Ansible will not wait for other hosts to finish the current task before queuing more tasks for other hosts.
All hosts are still attempted for the current task, but it prevents blocking new tasks for hosts that have already finished.
- With the free strategy, unlike the default linear strategy, a host that is slow or stuck on a specific task
won't hold up the rest of the hosts and tasks.
version_added: "2.0"
author: Ansible Core Team
'''
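# A minimal play using this strategy (illustrative):
#
#   - hosts: all
#     strategy: free
#     tasks:
#       - name: each host proceeds as soon as its previous task finishes
#         command: /bin/true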
import time
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.playbook.included_file import IncludedFile
from ansible.plugins.loader import action_loader
from ansible.plugins.strategy import StrategyBase
from ansible.template import Templar
from ansible.module_utils._text import to_text
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class StrategyModule(StrategyBase):
def _filter_notified_hosts(self, notified_hosts):
'''
Filter notified hosts according to the strategy
'''
# We act only on hosts that are ready to flush handlers
return [host for host in notified_hosts
if host in self._flushed_hosts and self._flushed_hosts[host]]
def __init__(self, tqm):
super(StrategyModule, self).__init__(tqm)
self._host_pinned = False
def run(self, iterator, play_context):
'''
The "free" strategy is a bit more complex, in that it allows tasks to
be sent to hosts as quickly as they can be processed. This means that
some hosts may finish very quickly if the tasks they run result in little
or no work being done, compared with other systems.
The algorithm used here also tries to be more "fair" when iterating
through hosts by remembering the last host in the list to be given a task
and starting the search from there as opposed to the top of the hosts
list again, which would end up favoring hosts near the beginning of the
list.
'''
# the last host to be given a task
last_host = 0
result = self._tqm.RUN_OK
# start with all workers being counted as being free
workers_free = len(self._workers)
work_to_do = True
while work_to_do and not self._tqm._terminated:
hosts_left = self.get_hosts_left(iterator)
if len(hosts_left) == 0:
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
break
work_to_do = False # assume we have no more work to do
starting_host = last_host # save current position so we know when we've looped back around and need to break
# try and find an unblocked host with a task to run
host_results = []
while True:
host = hosts_left[last_host]
display.debug("next free host: %s" % host)
host_name = host.get_name()
# peek at the next task for the host, to see if there's
# anything to do for this host
(state, task) = iterator.get_next_task_for_host(host, peek=True)
display.debug("free host state: %s" % state, host=host_name)
display.debug("free host task: %s" % task, host=host_name)
if host_name not in self._tqm._unreachable_hosts and task:
# set the flag so the outer loop knows we've still found
# some work which needs to be done
work_to_do = True
display.debug("this host has work to do", host=host_name)
# check to see if this host is blocked (still executing a previous task)
if host_name not in self._blocked_hosts or not self._blocked_hosts[host_name]:
# pop the task, mark the host blocked, and queue it
self._blocked_hosts[host_name] = True
(state, task) = iterator.get_next_task_for_host(host)
try:
action = action_loader.get(task.action, class_only=True)
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
action = None
display.debug("getting variables", host=host_name)
task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task)
self.add_tqm_variables(task_vars, play=iterator._play)
templar = Templar(loader=self._loader, variables=task_vars)
display.debug("done getting variables", host=host_name)
try:
task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
display.debug("done templating", host=host_name)
except:
# just ignore any errors during task name templating,
# we don't care if it just shows the raw name
display.debug("templating failed for some reason", host=host_name)
run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
if run_once:
if action and getattr(action, 'BYPASS_HOST_LOOP', False):
raise AnsibleError("The '%s' module bypasses the host loop, which is currently not supported in the free strategy "
"and would instead execute for every host in the inventory list." % task.action, obj=task._ds)
else:
display.warning("Using run_once with the free strategy is not currently supported. This task will still be "
"executed for every host in the inventory list.")
# check to see if this task should be skipped, due to it being a member of a
# role which has already run (and whether that role allows duplicate execution)
if task._role and task._role.has_run(host):
# If there is no metadata, the default behavior is to not allow duplicates,
# if there is metadata, check to see if the allow_duplicates flag was set to true
if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
display.debug("'%s' skipped because role has already run" % task, host=host_name)
del self._blocked_hosts[host_name]
continue
if task.action == 'meta':
self._execute_meta(task, play_context, iterator, target_host=host)
self._blocked_hosts[host_name] = False
else:
# handle step if needed, skip meta actions as they are used internally
if not self._step or self._take_step(task, host_name):
if task.any_errors_fatal:
display.warning("Using any_errors_fatal with the free strategy is not supported, "
"as tasks are executed independently on each host")
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
self._queue_task(host, task, task_vars, play_context)
# each task is counted as a worker being busy
workers_free -= 1
del task_vars
else:
display.debug("%s is blocked, skipping for now" % host_name)
# all workers have tasks to do (and the current host isn't done with the play).
# loop back to starting host and break out
if self._host_pinned and workers_free == 0 and work_to_do:
last_host = starting_host
break
# move on to the next host and make sure we
# haven't gone past the end of our hosts list
last_host += 1
if last_host > len(hosts_left) - 1:
last_host = 0
# if we've looped around back to the start, break out
if last_host == starting_host:
break
results = self._process_pending_results(iterator)
host_results.extend(results)
# each result is counted as a worker being free again
workers_free += len(results)
self.update_active_connections(results)
try:
included_files = IncludedFile.process_include_results(
host_results,
iterator=iterator,
loader=self._loader,
variable_manager=self._variable_manager
)
except AnsibleError as e:
return self._tqm.RUN_ERROR
if len(included_files) > 0:
all_blocks = dict((host, []) for host in hosts_left)
for included_file in included_files:
display.debug("collecting new blocks for %s" % included_file)
try:
if included_file._is_role:
new_ir = self._copy_included_file(included_file)
new_blocks, handler_blocks = new_ir.get_block_list(
play=iterator._play,
variable_manager=self._variable_manager,
loader=self._loader,
)
self._tqm.update_handler_list([handler for handler_block in handler_blocks for handler in handler_block.block])
else:
new_blocks = self._load_included_file(included_file, iterator=iterator)
except AnsibleError as e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
display.warning(str(e))
continue
for new_block in new_blocks:
task_vars = self._variable_manager.get_vars(play=iterator._play, task=included_file._task)
final_block = new_block.filter_tagged_tasks(play_context, task_vars)
for host in hosts_left:
if host in included_file._hosts:
all_blocks[host].append(final_block)
display.debug("done collecting new blocks for %s" % included_file)
display.debug("adding all collected blocks from %d included file(s) to iterator" % len(included_files))
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
display.debug("done adding collected blocks to iterator")
# pause briefly so we don't spin lock
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
# collect all the final results
results = self._wait_on_pending_results(iterator)
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
return super(StrategyModule, self).run(iterator, play_context, result)
| gpl-3.0 |
nan86150/ImageFusion | lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/cp949prober.py | 2801 | 1782 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(CP949SMModel)
# NOTE: CP949 is a superset of EUC-KR, so the character distribution
# should not differ.
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "CP949"
| mit |
JFriel/honours_project | venv/lib/python2.7/site-packages/nltk/sem/chat80.py | 7 | 25797 | # Natural Language Toolkit: Chat-80 KB Reader
# See http://www.w3.org/TR/swbp-skos-core-guide/
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Ewan Klein <[email protected]>,
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
"""
Overview
========
Chat-80 was a natural language system which allowed the user to
interrogate a Prolog knowledge base in the domain of world
geography. It was developed in the early '80s by Warren and Pereira; see
``http://www.aclweb.org/anthology/J82-3002.pdf`` for a description and
``http://www.cis.upenn.edu/~pereira/oldies.html`` for the source
files.
This module contains functions to extract data from the Chat-80
relation files ('the world database'), and convert then into a format
that can be incorporated in the FOL models of
``nltk.sem.evaluate``. The code assumes that the Prolog
input files are available in the NLTK corpora directory.
The Chat-80 World Database consists of the following files::
world0.pl
rivers.pl
cities.pl
countries.pl
contain.pl
borders.pl
This module uses a slightly modified version of ``world0.pl``, in which
a set of Prolog rules have been omitted. The modified file is named
``world1.pl``. Currently, the file ``rivers.pl`` is not read in, since
it uses a list rather than a string in the second field.
Reading Chat-80 Files
=====================
Chat-80 relations are like tables in a relational database. The
relation acts as the name of the table; the first argument acts as the
'primary key'; and subsequent arguments are further fields in the
table. In general, the name of the table provides a label for a unary
predicate whose extension is all the primary keys. For example,
relations in ``cities.pl`` are of the following form::
'city(athens,greece,1368).'
Here, ``'athens'`` is the key, and will be mapped to a member of the
unary predicate *city*.
The fields in the table are mapped to binary predicates. The first
argument of the predicate is the primary key, while the second
argument is the data in the relevant field. Thus, in the above
example, the third field is mapped to the binary predicate
*population_of*, whose extension is a set of pairs such as
``'(athens, 1368)'``.
An exception to this general framework is required by the relations in
the files ``borders.pl`` and ``contains.pl``. These contain facts of the
following form::
'borders(albania,greece).'
'contains0(africa,central_africa).'
We do not want to form a unary concept out of the element in
the first field of these records, and we want the label of the binary
relation just to be ``'border'``/``'contain'`` respectively.
In order to drive the extraction process, we use 'relation metadata bundles'
which are Python dictionaries such as the following::
city = {'label': 'city',
'closures': [],
'schema': ['city', 'country', 'population'],
'filename': 'cities.pl'}
According to this, the file ``city['filename']`` contains a list of
relational tuples (or more accurately, the corresponding strings in
Prolog form) whose predicate symbol is ``city['label']`` and whose
relational schema is ``city['schema']``. The notion of a ``closure`` is
discussed in the next section.
Concepts
========
In order to encapsulate the results of the extraction, a class of
``Concept`` objects is introduced. A ``Concept`` object has a number of
attributes, in particular a ``prefLabel`` and ``extension``, which make
it easier to inspect the output of the extraction. In addition, the
``extension`` can be further processed: in the case of the ``'border'``
relation, we check that the relation is symmetric, and in the case
of the ``'contain'`` relation, we carry out the transitive
closure. The closure properties associated with a concept is
indicated in the relation metadata, as indicated earlier.
The ``extension`` of a ``Concept`` object is then incorporated into a
``Valuation`` object.
Persistence
===========
The functions ``val_dump`` and ``val_load`` are provided to allow a
valuation to be stored in a persistent database and re-loaded, rather
than having to be re-computed each time.
Individuals and Lexical Items
=============================
As well as deriving relations from the Chat-80 data, we also create a
set of individual constants, one for each entity in the domain. The
individual constants are string-identical to the entities. For
example, given a data item such as ``'zloty'``, we add to the valuation
a pair ``('zloty', 'zloty')``. In order to parse English sentences that
refer to these entities, we also create a lexical item such as the
following for each individual constant::
PropN[num=sg, sem=<\P.(P zloty)>] -> 'Zloty'
The set of rules is written to the file ``chat_pnames.cfg`` in the
current directory.
"""
from __future__ import print_function, unicode_literals
import re
import shelve
import os
import sys
import nltk.data
from nltk.compat import string_types, python_2_unicode_compatible
###########################################################################
# Chat-80 relation metadata bundles needed to build the valuation
###########################################################################
borders = {'rel_name': 'borders',
'closures': ['symmetric'],
'schema': ['region', 'border'],
'filename': 'borders.pl'}
contains = {'rel_name': 'contains0',
'closures': ['transitive'],
'schema': ['region', 'contain'],
'filename': 'contain.pl'}
city = {'rel_name': 'city',
'closures': [],
'schema': ['city', 'country', 'population'],
'filename': 'cities.pl'}
country = {'rel_name': 'country',
'closures': [],
'schema': ['country', 'region', 'latitude', 'longitude',
'area', 'population', 'capital', 'currency'],
'filename': 'countries.pl'}
circle_of_lat = {'rel_name': 'circle_of_latitude',
'closures': [],
'schema': ['circle_of_latitude', 'degrees'],
'filename': 'world1.pl'}
circle_of_long = {'rel_name': 'circle_of_longitude',
'closures': [],
'schema': ['circle_of_longitude', 'degrees'],
'filename': 'world1.pl'}
continent = {'rel_name': 'continent',
'closures': [],
'schema': ['continent'],
'filename': 'world1.pl'}
region = {'rel_name': 'in_continent',
'closures': [],
'schema': ['region', 'continent'],
'filename': 'world1.pl'}
ocean = {'rel_name': 'ocean',
'closures': [],
'schema': ['ocean'],
'filename': 'world1.pl'}
sea = {'rel_name': 'sea',
'closures': [],
'schema': ['sea'],
'filename': 'world1.pl'}
items = ['borders', 'contains', 'city', 'country', 'circle_of_lat',
'circle_of_long', 'continent', 'region', 'ocean', 'sea']
items = tuple(sorted(items))
item_metadata = {
'borders': borders,
'contains': contains,
'city': city,
'country': country,
'circle_of_lat': circle_of_lat,
'circle_of_long': circle_of_long,
'continent': continent,
'region': region,
'ocean': ocean,
'sea': sea
}
rels = item_metadata.values()
not_unary = ['borders.pl', 'contain.pl']
###########################################################################
@python_2_unicode_compatible
class Concept(object):
"""
A Concept class, loosely based on SKOS
(http://www.w3.org/TR/swbp-skos-core-guide/).
"""
def __init__(self, prefLabel, arity, altLabels=[], closures=[], extension=set()):
"""
:param prefLabel: the preferred label for the concept
:type prefLabel: str
:param arity: the arity of the concept
:type arity: int
@keyword altLabels: other (related) labels
:type altLabels: list
@keyword closures: closure properties of the extension \
(list items can be ``symmetric``, ``reflexive``, ``transitive``)
:type closures: list
@keyword extension: the extensional value of the concept
:type extension: set
"""
self.prefLabel = prefLabel
self.arity = arity
self.altLabels = altLabels
self.closures = closures
#keep _extension internally as a set
self._extension = extension
#public access is via a list (for slicing)
self.extension = sorted(list(extension))
def __str__(self):
#_extension = ''
#for element in sorted(self.extension):
#if isinstance(element, tuple):
#element = '(%s, %s)' % (element)
#_extension += element + ', '
#_extension = _extension[:-1]
return "Label = '%s'\nArity = %s\nExtension = %s" % \
(self.prefLabel, self.arity, self.extension)
def __repr__(self):
return "Concept('%s')" % self.prefLabel
def augment(self, data):
"""
Add more data to the ``Concept``'s extension set.
:param data: a new semantic value
:type data: string or pair of strings
:rtype: set
"""
self._extension.add(data)
self.extension = sorted(list(self._extension))
return self._extension
def _make_graph(self, s):
"""
Convert a set of pairs into an adjacency linked list encoding of a graph.
"""
g = {}
for (x, y) in s:
if x in g:
g[x].append(y)
else:
g[x] = [y]
return g
def _transclose(self, g):
"""
Compute the transitive closure of a graph represented as a linked list.
"""
for x in g:
for adjacent in g[x]:
# check that adjacent is a key
if adjacent in g:
for y in g[adjacent]:
if y not in g[x]:
g[x].append(y)
return g
def _make_pairs(self, g):
"""
Convert an adjacency linked list back into a set of pairs.
"""
pairs = []
for node in g:
for adjacent in g[node]:
pairs.append((node, adjacent))
return set(pairs)
def close(self):
"""
Close a binary relation in the ``Concept``'s extension set.
:return: a new extension for the ``Concept`` in which the
relation is closed under a given property
"""
from nltk.sem import is_rel
assert is_rel(self._extension)
if 'symmetric' in self.closures:
pairs = []
for (x, y) in self._extension:
pairs.append((y, x))
sym = set(pairs)
self._extension = self._extension.union(sym)
if 'transitive' in self.closures:
all = self._make_graph(self._extension)
closed = self._transclose(all)
trans = self._make_pairs(closed)
#print sorted(trans)
self._extension = self._extension.union(trans)
self.extension = sorted(list(self._extension))
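# A small illustration of close(), with hypothetical data:
#
#   c = Concept('contain', arity=2, closures=['transitive'],
#               extension=set([('africa', 'central_africa'),
#                              ('central_africa', 'chad')]))
#   c.close()
#   # ('africa', 'chad') is now in c.extension via transitivity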
def clause2concepts(filename, rel_name, schema, closures=[]):
"""
Convert a file of Prolog clauses into a list of ``Concept`` objects.
:param filename: filename containing the relations
:type filename: str
:param rel_name: name of the relation
:type rel_name: str
:param schema: the schema used in a set of relational tuples
:type schema: list
:param closures: closure properties for the extension of the concept
:type closures: list
:return: a list of ``Concept`` objects
:rtype: list
"""
concepts = []
# position of the subject of a binary relation
subj = 0
# label of the 'primary key'
pkey = schema[0]
# fields other than the primary key
fields = schema[1:]
# convert a file into a list of lists
records = _str2records(filename, rel_name)
# add a unary concept corresponding to the set of entities
# in the primary key position
# relations in 'not_unary' are more like ordinary binary relations
if not filename in not_unary:
concepts.append(unary_concept(pkey, subj, records))
# add a binary concept for each non-key field
for field in fields:
obj = schema.index(field)
concepts.append(binary_concept(field, closures, subj, obj, records))
return concepts
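# Typical use of clause2concepts(), mirroring the 'city' metadata bundle
# defined above (illustrative):
#
#   concepts = clause2concepts('cities.pl', 'city',
#                              ['city', 'country', 'population'])
#   # -> [Concept('city'), Concept('country_of'), Concept('population_of')]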
def cities2table(filename, rel_name, dbname, verbose=False, setup=False):
"""
Convert a file of Prolog clauses into a database table.
This is not generic, since it doesn't allow arbitrary
schemas to be set as a parameter.
Intended usage::
cities2table('cities.pl', 'city', 'city.db', verbose=True, setup=True)
:param filename: filename containing the relations
:type filename: str
:param rel_name: name of the relation
:type rel_name: str
:param dbname: filename of persistent store
:type schema: str
"""
import sqlite3
records = _str2records(filename, rel_name)
connection = sqlite3.connect(dbname)
cur = connection.cursor()
if setup:
cur.execute('''CREATE TABLE city_table
(City text, Country text, Population int)''')
table_name = "city_table"
for t in records:
cur.execute('insert into %s values (?,?,?)' % table_name, t)
if verbose:
print("inserting values into %s: " % table_name, t)
connection.commit()
if verbose:
print("Committing update to %s" % dbname)
cur.close()
def sql_query(dbname, query):
"""
Execute an SQL query over a database.
:param dbname: filename of persistent store
    :type dbname: str
    :param query: SQL query
    :type query: str
"""
import sqlite3
try:
path = nltk.data.find(dbname)
connection = sqlite3.connect(str(path))
cur = connection.cursor()
return cur.execute(query)
except (ValueError, sqlite3.OperationalError):
import warnings
warnings.warn("Make sure the database file %s is installed and uncompressed." % dbname)
raise
def _str2records(filename, rel):
"""
Read a file into memory and convert each relation clause into a list.
"""
recs = []
contents = nltk.data.load("corpora/chat80/%s" % filename, format="text")
for line in contents.splitlines():
if line.startswith(rel):
line = re.sub(rel+r'\(', '', line)
line = re.sub(r'\)\.$', '', line)
record = line.split(',')
recs.append(record)
return recs
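# Illustrative sketch: a Prolog clause such as
#
#     city(athens,greece,1368).
#
# is reduced by the two substitutions above to the record
# ['athens', 'greece', '1368'].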
def unary_concept(label, subj, records):
"""
Make a unary concept out of the primary key in a record.
A record is a list of entities in some relation, such as
``['france', 'paris']``, where ``'france'`` is acting as the primary
key.
:param label: the preferred label for the concept
:type label: string
:param subj: position in the record of the subject of the predicate
:type subj: int
:param records: a list of records
:type records: list of lists
:return: ``Concept`` of arity 1
:rtype: Concept
"""
c = Concept(label, arity=1, extension=set())
for record in records:
c.augment(record[subj])
return c
def binary_concept(label, closures, subj, obj, records):
"""
Make a binary concept out of the primary key and another field in a record.
A record is a list of entities in some relation, such as
``['france', 'paris']``, where ``'france'`` is acting as the primary
key, and ``'paris'`` stands in the ``'capital_of'`` relation to
``'france'``.
More generally, given a record such as ``['a', 'b', 'c']``, where
label is bound to ``'B'``, and ``obj`` bound to 1, the derived
binary concept will have label ``'B_of'``, and its extension will
be a set of pairs such as ``('a', 'b')``.
:param label: the base part of the preferred label for the concept
:type label: str
:param closures: closure properties for the extension of the concept
:type closures: list
:param subj: position in the record of the subject of the predicate
:type subj: int
:param obj: position in the record of the object of the predicate
:type obj: int
:param records: a list of records
:type records: list of lists
:return: ``Concept`` of arity 2
:rtype: Concept
"""
    if label not in ('border', 'contain'):
label = label + '_of'
c = Concept(label, arity=2, closures=closures, extension=set())
for record in records:
c.augment((record[subj], record[obj]))
# close the concept's extension according to the properties in closures
c.close()
return c
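# Sketch with a hypothetical record:
#
#     >>> c = binary_concept('capital', [], 0, 1, [['france', 'paris']])
#     >>> c.prefLabel, c.extension
#     ('capital_of', [('france', 'paris')])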
def process_bundle(rels):
"""
Given a list of relation metadata bundles, make a corresponding
dictionary of concepts, indexed by the relation name.
:param rels: bundle of metadata needed for constructing a concept
:type rels: list(dict)
:return: a dictionary of concepts, indexed by the relation name.
:rtype: dict(str): Concept
"""
concepts = {}
for rel in rels:
rel_name = rel['rel_name']
closures = rel['closures']
schema = rel['schema']
filename = rel['filename']
concept_list = clause2concepts(filename, rel_name, schema, closures)
for c in concept_list:
label = c.prefLabel
            if label in concepts:
for data in c.extension:
concepts[label].augment(data)
concepts[label].close()
else:
concepts[label] = c
return concepts
def make_valuation(concepts, read=False, lexicon=False):
"""
Convert a list of ``Concept`` objects into a list of (label, extension) pairs;
optionally create a ``Valuation`` object.
:param concepts: concepts
:type concepts: list(Concept)
:param read: if ``True``, ``(symbol, set)`` pairs are read into a ``Valuation``
    :type read: bool
    :param lexicon: if ``True``, also write out lexical rules for the
        individual constants (implies ``read``)
    :type lexicon: bool
    :rtype: list or Valuation
"""
vals = []
for c in concepts:
vals.append((c.prefLabel, c.extension))
    if lexicon:
        read = True
if read:
from nltk.sem import Valuation
val = Valuation({})
val.update(vals)
# add labels for individuals
val = label_indivs(val, lexicon=lexicon)
return val
else:
return vals
def val_dump(rels, db):
"""
Make a ``Valuation`` from a list of relation metadata bundles and dump to
persistent database.
:param rels: bundle of metadata needed for constructing a concept
:type rels: list of dict
:param db: name of file to which data is written.
The suffix '.db' will be automatically appended.
:type db: str
"""
concepts = process_bundle(rels).values()
valuation = make_valuation(concepts, read=True)
db_out = shelve.open(db, 'n')
db_out.update(valuation)
db_out.close()
def val_load(db):
"""
Load a ``Valuation`` from a persistent database.
:param db: name of file from which data is read.
The suffix '.db' should be omitted from the name.
:type db: str
"""
dbname = db+".db"
if not os.access(dbname, os.R_OK):
sys.exit("Cannot read file: %s" % dbname)
else:
db_in = shelve.open(db)
from nltk.sem import Valuation
val = Valuation(db_in)
# val.read(db_in.items())
return val
#def alpha(str):
#"""
#Utility to filter out non-alphabetic constants.
#:param str: candidate constant
#:type str: string
#:rtype: bool
#"""
#try:
#int(str)
#return False
#except ValueError:
## some unknown values in records are labeled '?'
#if not str == '?':
#return True
def label_indivs(valuation, lexicon=False):
"""
Assign individual constants to the individuals in the domain of a ``Valuation``.
Given a valuation with an entry of the form ``{'rel': {'a': True}}``,
add a new entry ``{'a': 'a'}``.
:type valuation: Valuation
:rtype: Valuation
"""
# collect all the individuals into a domain
domain = valuation.domain
# convert the domain into a sorted list of alphabetic terms
# use the same string as a label
pairs = [(e, e) for e in domain]
if lexicon:
lex = make_lex(domain)
with open("chat_pnames.cfg", 'w') as outfile:
outfile.writelines(lex)
# read the pairs into the valuation
valuation.update(pairs)
return valuation
def make_lex(symbols):
"""
Create lexical CFG rules for each individual symbol.
Given a valuation with an entry of the form ``{'zloty': 'zloty'}``,
create a lexical rule for the proper name 'Zloty'.
:param symbols: a list of individual constants in the semantic representation
:type symbols: sequence -- set(str)
:rtype: list(str)
"""
lex = []
header = """
##################################################################
# Lexical rules automatically generated by running 'chat80.py -x'.
##################################################################
"""
lex.append(header)
template = "PropN[num=sg, sem=<\P.(P %s)>] -> '%s'\n"
for s in symbols:
parts = s.split('_')
caps = [p.capitalize() for p in parts]
pname = '_'.join(caps)
rule = template % (s, pname)
lex.append(rule)
return lex
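# Sketch: for a symbol such as 'new_york' the template above produces
#
#     PropN[num=sg, sem=<\P.(P new_york)>] -> 'New_York'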
###########################################################################
# Interface function to emulate other corpus readers
###########################################################################
def concepts(items=items):
"""
Build a list of concepts corresponding to the relation names in ``items``.
:param items: names of the Chat-80 relations to extract
:type items: list(str)
:return: the ``Concept`` objects which are extracted from the relations
:rtype: list(Concept)
"""
    if isinstance(items, string_types):
        items = (items,)
rels = [item_metadata[r] for r in items]
concept_map = process_bundle(rels)
return concept_map.values()
###########################################################################
def main():
import sys
from optparse import OptionParser
description = \
"""
Extract data from the Chat-80 Prolog files and convert them into a
Valuation object for use in the NLTK semantics package.
"""
opts = OptionParser(description=description)
opts.set_defaults(verbose=True, lex=False, vocab=False)
opts.add_option("-s", "--store", dest="outdb",
help="store a valuation in DB", metavar="DB")
opts.add_option("-l", "--load", dest="indb",
help="load a stored valuation from DB", metavar="DB")
opts.add_option("-c", "--concepts", action="store_true",
help="print concepts instead of a valuation")
opts.add_option("-r", "--relation", dest="label",
help="print concept with label REL (check possible labels with '-v' option)", metavar="REL")
opts.add_option("-q", "--quiet", action="store_false", dest="verbose",
help="don't print out progress info")
opts.add_option("-x", "--lex", action="store_true", dest="lex",
help="write a file of lexical entries for country names, then exit")
opts.add_option("-v", "--vocab", action="store_true", dest="vocab",
help="print out the vocabulary of concept labels and their arity, then exit")
(options, args) = opts.parse_args()
if options.outdb and options.indb:
opts.error("Options --store and --load are mutually exclusive")
if options.outdb:
# write the valuation to a persistent database
if options.verbose:
outdb = options.outdb+".db"
print("Dumping a valuation to %s" % outdb)
val_dump(rels, options.outdb)
sys.exit(0)
else:
# try to read in a valuation from a database
if options.indb is not None:
dbname = options.indb+".db"
if not os.access(dbname, os.R_OK):
sys.exit("Cannot read file: %s" % dbname)
else:
valuation = val_load(options.indb)
# we need to create the valuation from scratch
else:
# build some concepts
concept_map = process_bundle(rels)
concepts = concept_map.values()
# just print out the vocabulary
if options.vocab:
items = sorted([(c.arity, c.prefLabel) for c in concepts])
for (arity, label) in items:
print(label, arity)
sys.exit(0)
# show all the concepts
if options.concepts:
for c in concepts:
print(c)
print()
if options.label:
print(concept_map[options.label])
sys.exit(0)
else:
# turn the concepts into a Valuation
if options.lex:
if options.verbose:
print("Writing out lexical rules")
make_valuation(concepts, lexicon=True)
else:
valuation = make_valuation(concepts, read=True)
print(valuation)
def sql_demo():
"""
Print out every row from the 'city.db' database.
"""
print()
print("Using SQL to extract rows from 'city.db' RDB.")
for row in sql_query('corpora/city_database/city.db', "SELECT * FROM city_table"):
print(row)
if __name__ == '__main__':
main()
sql_demo()
| gpl-3.0 |
adlius/osf.io | api_tests/registrations/filters/test_filters.py | 6 | 5027 | # -*- coding: utf-8 -*-
from nose.tools import * # noqa:
from osf.models import Node, Registration
from framework.auth.core import Auth
from osf_tests.factories import (
AuthUserFactory,
NodeFactory,
ProjectFactory,
RegistrationFactory,
)
class RegistrationListFilteringMixin(object):
def setUp(self):
super(RegistrationListFilteringMixin, self).setUp()
assert self.url, 'Subclasses of RegistrationListFilteringMixin must define self.url'
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.A = ProjectFactory(creator=self.user)
self.B1 = NodeFactory(parent=self.A, creator=self.user)
self.B2 = NodeFactory(parent=self.A, creator=self.user)
self.C1 = NodeFactory(parent=self.B1, creator=self.user)
self.C2 = NodeFactory(parent=self.B2, creator=self.user)
self.D2 = NodeFactory(parent=self.C2, creator=self.user)
self.A.add_contributor(self.user_two, save=True)
self.node_A = RegistrationFactory(project=self.A, creator=self.user)
self.node_B2 = RegistrationFactory(project=self.B2, creator=self.user)
self.parent_url = '{}filter[parent]='.format(self.url)
self.parent_url_ne = '{}filter[parent][ne]=null'.format(self.url)
self.root_url = '{}filter[root]='.format(self.url)
self.tags_url = '{}filter[tags]='.format(self.url)
self.contributors_url = '{}filter[contributors]='.format(self.url)
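        # Node tree built above (registrations exist for A and B2):
        #
        #         A
        #        / \
        #      B1    B2
        #      |     |
        #      C1    C2
        #            |
        #            D2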
def test_parent_filter_null(self):
expected = [self.node_A._id, self.node_B2._id]
res = self.app.get(
'{}null'.format(
self.parent_url),
auth=self.user.auth)
actual = [node['id'] for node in res.json['data']]
assert_equal(set(expected), set(actual))
def test_parent_filter_ne_null(self):
expected = list(Registration.objects.exclude(parent_nodes=None).values_list('guids___id', flat=True))
res = self.app.get(self.parent_url_ne,
auth=self.user.auth)
actual = [node['id'] for node in res.json['data']]
assert_equal(set(expected), set(actual))
def test_parent_filter_equals_returns_one(self):
expected = [n._id for n in self.node_B2.get_nodes()]
res = self.app.get(
'{}{}'.format(
self.parent_url,
self.node_B2._id),
auth=self.user.auth)
actual = [node['id'] for node in res.json['data']]
assert_equal(len(actual), 1)
assert_equal(expected, actual)
def test_parent_filter_equals_returns_multiple(self):
expected = [n._id for n in self.node_A.get_nodes()]
res = self.app.get(
'{}{}'.format(
self.parent_url,
self.node_A._id),
auth=self.user.auth)
actual = [node['id'] for node in res.json['data']]
assert_equal(len(actual), 2)
assert_equal(set(expected), set(actual))
def test_root_filter_null(self):
res = self.app.get(
'{}null'.format(
self.root_url),
auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['source']['parameter'], 'filter')
def test_root_filter_equals_returns_branch(self):
expected = [n._id for n in Node.objects.get_children(self.node_B2)]
expected.append(self.node_B2._id)
res = self.app.get(
'{}{}'.format(
self.root_url,
self.node_B2._id),
auth=self.user.auth)
actual = [node['id'] for node in res.json['data']]
assert_equal(set(expected), set(actual))
def test_root_filter_equals_returns_tree(self):
expected = [n._id for n in Node.objects.get_children(self.node_A)]
expected.append(self.node_A._id)
res = self.app.get(
'{}{}'.format(
self.root_url,
self.node_A._id),
auth=self.user.auth)
actual = [node['id'] for node in res.json['data']]
assert_equal(len(actual), 6)
assert_equal(set(expected), set(actual))
def test_tag_filter(self):
self.node_A.add_tag('nerd', auth=Auth(self.node_A.creator), save=True)
expected = [self.node_A._id]
res = self.app.get('{}nerd'.format(self.tags_url), auth=self.user.auth)
actual = [node['id'] for node in res.json['data']]
assert_equal(expected, actual)
res = self.app.get('{}bird'.format(self.tags_url), auth=self.user.auth)
actual = [node['id'] for node in res.json['data']]
assert_equal([], actual)
def test_contributor_filter(self):
expected = [self.node_A._id]
res = self.app.get(
'{}{}'.format(
self.contributors_url,
self.user_two._id),
auth=self.user.auth)
actual = [node['id'] for node in res.json['data']]
assert_equal(expected, actual)
| apache-2.0 |
AleksNeStu/ggrc-core | test/selenium/src/lib/constants/objects.py | 1 | 3523 | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Constants and methods for work with objects."""
import sys
# objects
PROGRAMS = "programs"
WORKFLOWS = "workflows"
AUDITS = "audits"
ASSESSMENTS = "assessments"
ASSESSMENT_TEMPLATES = "assessment_templates"
ISSUES = "issues"
DIRECTIVES = "directives"
REGULATIONS = "regulations"
POLICIES = "policies"
STANDARDS = "standards"
CONTRACTS = "contracts"
CLAUSES = "clauses"
SECTIONS = "sections"
CONTROLS = "controls"
OBJECTIVES = "objectives"
PEOPLE = "people"
ORG_GROUPS = "org_groups"
VENDORS = "vendors"
ACCESS_GROUPS = "access_groups"
SYSTEMS = "systems"
PROCESSES = "processes"
DATA_ASSETS = "data_assets"
PRODUCTS = "products"
PROJECTS = "projects"
FACILITIES = "facilities"
MARKETS = "markets"
RISKS = "risks"
THREATS = "threats"
RISK_ASSESSMENTS = "risk_assessments"
CUSTOM_ATTRIBUTES = "custom_attribute_definitions"
ALL_SNAPSHOTABLE_OBJS = (
ACCESS_GROUPS, CLAUSES, CONTRACTS, CONTROLS, DATA_ASSETS, FACILITIES,
MARKETS, OBJECTIVES, ORG_GROUPS, POLICIES, PROCESSES, PRODUCTS,
REGULATIONS, SECTIONS, STANDARDS, SYSTEMS, VENDORS, RISKS, THREATS,
RISK_ASSESSMENTS, PROJECTS
)
ALL_CA_OBJS = ALL_SNAPSHOTABLE_OBJS + (
WORKFLOWS, PROGRAMS, AUDITS, ISSUES, ASSESSMENTS, PEOPLE)
def _get_singular(plurals):
"""
Return: list of basestring: Capitalized object names in singular form
"""
singulars = []
for name in plurals:
name = name.lower()
if name == PEOPLE:
singular = "person"
elif name == POLICIES:
singular = "policy"
elif name == PROCESSES:
singular = "process"
elif name == FACILITIES:
singular = "facility"
else:
singular = name[:-1]
singulars.append(singular.upper())
return singulars
def _get_plural(singulars):
"""
Return: list of basestring: Capitalized object names in plural form
"""
plurals = []
for name in singulars:
name = name.lower()
if name == "people":
plural = PEOPLE
elif name == "policy":
plural = POLICIES
elif name == "process":
plural = PROCESSES
elif name == "facility":
plural = FACILITIES
else:
plural = name + "s"
plurals.append(plural.upper())
return plurals
def get_singular(plural, title=False):
"""Transform object name to singular and lower or title form.
Example: risk_assessments -> risk_assessment
"""
_singular = _get_singular([plural])[0]
if title:
_singular = _singular.title()
else:
_singular = _singular.lower()
return _singular
def get_plural(singular, title=False):
"""Transform object name to plural and lower form or title form.
Example: risk_assessment -> risk_assessments
"""
_plural = _get_plural([singular])[0]
if title:
_plural = _plural.title()
else:
_plural = _plural.lower()
return _plural
def get_normal_form(obj_name, with_space=True):
"""Transform object name to title form.
Example:
if with_space=True then risk_assessments -> Risk Assessments
if with_space=False then risk_assessments -> RiskAssessments
"""
normal = obj_name.replace("_", " ").title()
if with_space is True:
return normal
elif with_space is False:
return normal.replace(" ", "")
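# Illustrative checks for the helpers above:
#   get_singular("risk_assessments") -> "risk_assessment"
#   get_plural("facility", title=True) -> "Facilities"
#   get_normal_form("risk_assessments") -> "Risk Assessments"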
ALL_PLURAL = [k for k in globals().keys() if
not k.startswith("_") and "ALL" not in k and k.isupper()]
ALL_SINGULAR = _get_singular(ALL_PLURAL)
ALL_OBJS = [getattr(sys.modules[__name__], obj) for obj in
sys.modules[__name__].ALL_PLURAL]
| apache-2.0 |
corradio/electricitymap | parsers/CH.py | 1 | 2907 | #!/usr/bin/env python3
import arrow
from . import ENTSOE
import logging
import requests
def fetch_swiss_exchanges(session, target_datetime, logger):
"""Returns the total exchanges of Switzerland with its neighboring countries."""
swiss_transmissions = {}
for exchange_key in ['AT', 'DE', 'IT', 'FR']:
exchanges = ENTSOE.fetch_exchange(zone_key1='CH',
zone_key2=exchange_key,
session=session,
target_datetime=target_datetime,
logger=logger)
if not exchanges:
continue
for exchange in exchanges:
datetime = exchange['datetime']
if datetime not in swiss_transmissions:
swiss_transmissions[datetime] = exchange['netFlow']
else:
swiss_transmissions[datetime] += exchange['netFlow']
return swiss_transmissions
def fetch_swiss_consumption(session, target_datetime, logger):
"""Returns the total consumption of Switzerland."""
consumptions = ENTSOE.fetch_consumption(zone_key='CH',
session=session,
target_datetime=target_datetime,
logger=logger)
return {c['datetime']: c['consumption'] for c in consumptions}
def fetch_production(zone_key='CH', session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Returns the total production by type for Switzerland.
Currently the majority of the run-of-river production is missing.
The difference between the sum of all production types and the total production is allocated as 'unknown'.
The total production is calculated as sum of the consumption, storage and net imports.
"""
now = arrow.get(target_datetime, 'Europe/Zurich') if target_datetime else arrow.now(tz='Europe/Zurich')
r = session or requests.session()
exchanges = fetch_swiss_exchanges(r, now, logger)
consumptions = fetch_swiss_consumption(r, now, logger)
productions = ENTSOE.fetch_production(zone_key=zone_key, session=r, target_datetime=now, logger=logger)
if not productions:
return
for p in productions:
dt = p['datetime']
if dt not in exchanges or dt not in consumptions:
continue
known_production = sum([x or 0 for x in p['production'].values()])
storage = sum([x or 0 for x in p['storage'].values()])
total_production = consumptions[dt] + storage + exchanges[dt]
unknown_production = total_production - known_production
p['production']['unknown'] = unknown_production if unknown_production > 0 else 0
return productions
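# Worked sketch of the balance above (hypothetical MW figures): with
# consumptions[dt] = 7000, storage = -500 and exchanges[dt] = 1500,
# total_production = 7000 + (-500) + 1500 = 8000, and any gap between
# that figure and the summed known production types is reported as
# 'unknown'.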
if __name__ == '__main__':
print(fetch_production())
| gpl-3.0 |
indhub/mxnet | example/reinforcement-learning/parallel_actor_critic/envs.py | 52 | 1875 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
class Atari8080Preprocessor(object):
def __init__(self):
self.prev = None
self.obs_size = 80*80
def reset(self):
self.prev = None
def preprocess(self, img):
"""
Preprocess a 210x160x3 uint8 frame into a 6400 (80x80) (1 x input_size)
float vector.
"""
# Crop, down-sample, erase background and set foreground to 1.
# See https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5
img = img[35:195]
img = img[::2, ::2, 0]
img[img == 144] = 0
img[img == 109] = 0
img[img != 0] = 1
curr = np.expand_dims(img.astype(np.float).ravel(), axis=0)
# Subtract the last preprocessed image.
diff = (curr - self.prev if self.prev is not None
else np.zeros((1, curr.shape[1])))
self.prev = curr
return diff
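# Sketch of intended use (assumes a 210x160x3 uint8 Atari frame `obs`,
# e.g. from gym's Pong environment):
#
#     pre = Atari8080Preprocessor()
#     x = pre.preprocess(obs)  # -> (1, 6400) float array of frame diffs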
class IdentityPreprocessor(object):
def __init__(self, obs_size):
self.obs_size = obs_size
def reset(self):
pass
def preprocess(self, x):
return x
| apache-2.0 |
vng/omim | 3party/freetype/src/tools/glnames.py | 165 | 105237 | #!/usr/bin/env python
#
#
# FreeType 2 glyph name builder
#
# Copyright 1996-2015 by
# David Turner, Robert Wilhelm, and Werner Lemberg.
#
# This file is part of the FreeType project, and may only be used, modified,
# and distributed under the terms of the FreeType project license,
# LICENSE.TXT. By continuing to use, modify, or distribute this file you
# indicate that you have read the license and understand and accept it
# fully.
"""\
usage: %s <output-file>
This python script generates the glyph names tables defined in the
`psnames' module.
Its single argument is the name of the header file to be created.
"""
import sys, string, struct, re, os.path
# This table lists the glyphs according to the Macintosh specification.
# It is used by the TrueType Postscript names table.
#
# See
#
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6post.html
#
# for the official list.
#
mac_standard_names = \
[
# 0
".notdef", ".null", "nonmarkingreturn", "space", "exclam",
"quotedbl", "numbersign", "dollar", "percent", "ampersand",
# 10
"quotesingle", "parenleft", "parenright", "asterisk", "plus",
"comma", "hyphen", "period", "slash", "zero",
# 20
"one", "two", "three", "four", "five",
"six", "seven", "eight", "nine", "colon",
# 30
"semicolon", "less", "equal", "greater", "question",
"at", "A", "B", "C", "D",
# 40
"E", "F", "G", "H", "I",
"J", "K", "L", "M", "N",
# 50
"O", "P", "Q", "R", "S",
"T", "U", "V", "W", "X",
# 60
"Y", "Z", "bracketleft", "backslash", "bracketright",
"asciicircum", "underscore", "grave", "a", "b",
# 70
"c", "d", "e", "f", "g",
"h", "i", "j", "k", "l",
# 80
"m", "n", "o", "p", "q",
"r", "s", "t", "u", "v",
# 90
"w", "x", "y", "z", "braceleft",
"bar", "braceright", "asciitilde", "Adieresis", "Aring",
# 100
"Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis",
"aacute", "agrave", "acircumflex", "adieresis", "atilde",
# 110
"aring", "ccedilla", "eacute", "egrave", "ecircumflex",
"edieresis", "iacute", "igrave", "icircumflex", "idieresis",
# 120
"ntilde", "oacute", "ograve", "ocircumflex", "odieresis",
"otilde", "uacute", "ugrave", "ucircumflex", "udieresis",
# 130
"dagger", "degree", "cent", "sterling", "section",
"bullet", "paragraph", "germandbls", "registered", "copyright",
# 140
"trademark", "acute", "dieresis", "notequal", "AE",
"Oslash", "infinity", "plusminus", "lessequal", "greaterequal",
# 150
"yen", "mu", "partialdiff", "summation", "product",
"pi", "integral", "ordfeminine", "ordmasculine", "Omega",
# 160
"ae", "oslash", "questiondown", "exclamdown", "logicalnot",
"radical", "florin", "approxequal", "Delta", "guillemotleft",
# 170
"guillemotright", "ellipsis", "nonbreakingspace", "Agrave", "Atilde",
"Otilde", "OE", "oe", "endash", "emdash",
# 180
"quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide",
"lozenge", "ydieresis", "Ydieresis", "fraction", "currency",
# 190
"guilsinglleft", "guilsinglright", "fi", "fl", "daggerdbl",
"periodcentered", "quotesinglbase", "quotedblbase", "perthousand",
"Acircumflex",
# 200
"Ecircumflex", "Aacute", "Edieresis", "Egrave", "Iacute",
"Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex",
# 210
"apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave",
"dotlessi", "circumflex", "tilde", "macron", "breve",
# 220
"dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek",
"caron", "Lslash", "lslash", "Scaron", "scaron",
# 230
"Zcaron", "zcaron", "brokenbar", "Eth", "eth",
"Yacute", "yacute", "Thorn", "thorn", "minus",
# 240
"multiply", "onesuperior", "twosuperior", "threesuperior", "onehalf",
"onequarter", "threequarters", "franc", "Gbreve", "gbreve",
# 250
"Idotaccent", "Scedilla", "scedilla", "Cacute", "cacute",
"Ccaron", "ccaron", "dcroat"
]
# The list of standard `SID' glyph names. For the official list,
# see Annex A of document at
#
# http://partners.adobe.com/public/developer/en/font/5176.CFF.pdf .
#
sid_standard_names = \
[
# 0
".notdef", "space", "exclam", "quotedbl", "numbersign",
"dollar", "percent", "ampersand", "quoteright", "parenleft",
# 10
"parenright", "asterisk", "plus", "comma", "hyphen",
"period", "slash", "zero", "one", "two",
# 20
"three", "four", "five", "six", "seven",
"eight", "nine", "colon", "semicolon", "less",
# 30
"equal", "greater", "question", "at", "A",
"B", "C", "D", "E", "F",
# 40
"G", "H", "I", "J", "K",
"L", "M", "N", "O", "P",
# 50
"Q", "R", "S", "T", "U",
"V", "W", "X", "Y", "Z",
# 60
"bracketleft", "backslash", "bracketright", "asciicircum", "underscore",
"quoteleft", "a", "b", "c", "d",
# 70
"e", "f", "g", "h", "i",
"j", "k", "l", "m", "n",
# 80
"o", "p", "q", "r", "s",
"t", "u", "v", "w", "x",
# 90
"y", "z", "braceleft", "bar", "braceright",
"asciitilde", "exclamdown", "cent", "sterling", "fraction",
# 100
"yen", "florin", "section", "currency", "quotesingle",
"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi",
# 110
"fl", "endash", "dagger", "daggerdbl", "periodcentered",
"paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright",
# 120
"guillemotright", "ellipsis", "perthousand", "questiondown", "grave",
"acute", "circumflex", "tilde", "macron", "breve",
# 130
"dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
"ogonek", "caron", "emdash", "AE", "ordfeminine",
# 140
"Lslash", "Oslash", "OE", "ordmasculine", "ae",
"dotlessi", "lslash", "oslash", "oe", "germandbls",
# 150
"onesuperior", "logicalnot", "mu", "trademark", "Eth",
"onehalf", "plusminus", "Thorn", "onequarter", "divide",
# 160
"brokenbar", "degree", "thorn", "threequarters", "twosuperior",
"registered", "minus", "eth", "multiply", "threesuperior",
# 170
"copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex",
# 180
"Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis",
"Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis",
# 190
"Ograve", "Otilde", "Scaron", "Uacute", "Ucircumflex",
"Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron",
# 200
"aacute", "acircumflex", "adieresis", "agrave", "aring",
"atilde", "ccedilla", "eacute", "ecircumflex", "edieresis",
# 210
"egrave", "iacute", "icircumflex", "idieresis", "igrave",
"ntilde", "oacute", "ocircumflex", "odieresis", "ograve",
# 220
"otilde", "scaron", "uacute", "ucircumflex", "udieresis",
"ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall",
# 230
"Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall",
"Acutesmall",
"parenleftsuperior", "parenrightsuperior", "twodotenleader",
"onedotenleader", "zerooldstyle",
# 240
"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle",
"fiveoldstyle",
"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle",
"commasuperior",
# 250
"threequartersemdash", "periodsuperior", "questionsmall", "asuperior",
"bsuperior",
"centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior",
# 260
"msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
"tsuperior", "ff", "ffi", "ffl", "parenleftinferior",
# 270
"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
"Asmall",
"Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall",
# 280
"Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall",
"Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
# 290
"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall",
"Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall",
# 300
"colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall",
"centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
"Dieresissmall",
# 310
"Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", "figuredash",
"hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
"questiondownsmall",
# 320
"oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird",
"twothirds", "zerosuperior", "foursuperior", "fivesuperior",
"sixsuperior",
# 330
"sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior",
"oneinferior",
"twoinferior", "threeinferior", "fourinferior", "fiveinferior",
"sixinferior",
# 340
"seveninferior", "eightinferior", "nineinferior", "centinferior",
"dollarinferior",
"periodinferior", "commainferior", "Agravesmall", "Aacutesmall",
"Acircumflexsmall",
# 350
"Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall",
"Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall",
"Igravesmall",
# 360
"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall",
"Ntildesmall",
"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
"Odieresissmall",
# 370
"OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall",
"Ucircumflexsmall",
"Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall",
"001.000",
# 380
"001.001", "001.002", "001.003", "Black", "Bold",
"Book", "Light", "Medium", "Regular", "Roman",
# 390
"Semibold"
]
# This table maps character codes of the Adobe Standard Type 1
# encoding to glyph indices in the sid_standard_names table.
#
t1_standard_encoding = \
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
89, 90, 91, 92, 93, 94, 95, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 96, 97, 98, 99, 100, 101, 102, 103, 104,
105, 106, 107, 108, 109, 110, 0, 111, 112, 113,
114, 0, 115, 116, 117, 118, 119, 120, 121, 122,
0, 123, 0, 124, 125, 126, 127, 128, 129, 130,
131, 0, 132, 133, 0, 134, 135, 136, 137, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 138, 0, 139, 0, 0,
0, 0, 140, 141, 142, 143, 0, 0, 0, 0,
0, 144, 0, 0, 0, 145, 0, 0, 146, 147,
148, 149, 0, 0, 0, 0
]
# This table maps character codes of the Adobe Expert Type 1
# encoding to glyph indices in the sid_standard_names table.
#
t1_expert_encoding = \
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 229, 230, 0, 231, 232, 233, 234,
235, 236, 237, 238, 13, 14, 15, 99, 239, 240,
241, 242, 243, 244, 245, 246, 247, 248, 27, 28,
249, 250, 251, 252, 0, 253, 254, 255, 256, 257,
0, 0, 0, 258, 0, 0, 259, 260, 261, 262,
0, 0, 263, 264, 265, 0, 266, 109, 110, 267,
268, 269, 0, 270, 271, 272, 273, 274, 275, 276,
277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
297, 298, 299, 300, 301, 302, 303, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 304, 305, 306, 0, 0, 307, 308, 309, 310,
311, 0, 312, 0, 0, 313, 0, 0, 314, 315,
0, 0, 316, 317, 318, 0, 0, 0, 158, 155,
163, 319, 320, 321, 322, 323, 324, 325, 0, 0,
326, 150, 164, 169, 327, 328, 329, 330, 331, 332,
333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
353, 354, 355, 356, 357, 358, 359, 360, 361, 362,
363, 364, 365, 366, 367, 368, 369, 370, 371, 372,
373, 374, 375, 376, 377, 378
]
# This data has been taken literally from the files `glyphlist.txt'
# and `zapfdingbats.txt' version 2.0, Sept 2002. It is available from
#
# http://sourceforge.net/adobe/aglfn/
#
adobe_glyph_list = """\
A;0041
AE;00C6
AEacute;01FC
AEmacron;01E2
AEsmall;F7E6
Aacute;00C1
Aacutesmall;F7E1
Abreve;0102
Abreveacute;1EAE
Abrevecyrillic;04D0
Abrevedotbelow;1EB6
Abrevegrave;1EB0
Abrevehookabove;1EB2
Abrevetilde;1EB4
Acaron;01CD
Acircle;24B6
Acircumflex;00C2
Acircumflexacute;1EA4
Acircumflexdotbelow;1EAC
Acircumflexgrave;1EA6
Acircumflexhookabove;1EA8
Acircumflexsmall;F7E2
Acircumflextilde;1EAA
Acute;F6C9
Acutesmall;F7B4
Acyrillic;0410
Adblgrave;0200
Adieresis;00C4
Adieresiscyrillic;04D2
Adieresismacron;01DE
Adieresissmall;F7E4
Adotbelow;1EA0
Adotmacron;01E0
Agrave;00C0
Agravesmall;F7E0
Ahookabove;1EA2
Aiecyrillic;04D4
Ainvertedbreve;0202
Alpha;0391
Alphatonos;0386
Amacron;0100
Amonospace;FF21
Aogonek;0104
Aring;00C5
Aringacute;01FA
Aringbelow;1E00
Aringsmall;F7E5
Asmall;F761
Atilde;00C3
Atildesmall;F7E3
Aybarmenian;0531
B;0042
Bcircle;24B7
Bdotaccent;1E02
Bdotbelow;1E04
Becyrillic;0411
Benarmenian;0532
Beta;0392
Bhook;0181
Blinebelow;1E06
Bmonospace;FF22
Brevesmall;F6F4
Bsmall;F762
Btopbar;0182
C;0043
Caarmenian;053E
Cacute;0106
Caron;F6CA
Caronsmall;F6F5
Ccaron;010C
Ccedilla;00C7
Ccedillaacute;1E08
Ccedillasmall;F7E7
Ccircle;24B8
Ccircumflex;0108
Cdot;010A
Cdotaccent;010A
Cedillasmall;F7B8
Chaarmenian;0549
Cheabkhasiancyrillic;04BC
Checyrillic;0427
Chedescenderabkhasiancyrillic;04BE
Chedescendercyrillic;04B6
Chedieresiscyrillic;04F4
Cheharmenian;0543
Chekhakassiancyrillic;04CB
Cheverticalstrokecyrillic;04B8
Chi;03A7
Chook;0187
Circumflexsmall;F6F6
Cmonospace;FF23
Coarmenian;0551
Csmall;F763
D;0044
DZ;01F1
DZcaron;01C4
Daarmenian;0534
Dafrican;0189
Dcaron;010E
Dcedilla;1E10
Dcircle;24B9
Dcircumflexbelow;1E12
Dcroat;0110
Ddotaccent;1E0A
Ddotbelow;1E0C
Decyrillic;0414
Deicoptic;03EE
Delta;2206
Deltagreek;0394
Dhook;018A
Dieresis;F6CB
DieresisAcute;F6CC
DieresisGrave;F6CD
Dieresissmall;F7A8
Digammagreek;03DC
Djecyrillic;0402
Dlinebelow;1E0E
Dmonospace;FF24
Dotaccentsmall;F6F7
Dslash;0110
Dsmall;F764
Dtopbar;018B
Dz;01F2
Dzcaron;01C5
Dzeabkhasiancyrillic;04E0
Dzecyrillic;0405
Dzhecyrillic;040F
E;0045
Eacute;00C9
Eacutesmall;F7E9
Ebreve;0114
Ecaron;011A
Ecedillabreve;1E1C
Echarmenian;0535
Ecircle;24BA
Ecircumflex;00CA
Ecircumflexacute;1EBE
Ecircumflexbelow;1E18
Ecircumflexdotbelow;1EC6
Ecircumflexgrave;1EC0
Ecircumflexhookabove;1EC2
Ecircumflexsmall;F7EA
Ecircumflextilde;1EC4
Ecyrillic;0404
Edblgrave;0204
Edieresis;00CB
Edieresissmall;F7EB
Edot;0116
Edotaccent;0116
Edotbelow;1EB8
Efcyrillic;0424
Egrave;00C8
Egravesmall;F7E8
Eharmenian;0537
Ehookabove;1EBA
Eightroman;2167
Einvertedbreve;0206
Eiotifiedcyrillic;0464
Elcyrillic;041B
Elevenroman;216A
Emacron;0112
Emacronacute;1E16
Emacrongrave;1E14
Emcyrillic;041C
Emonospace;FF25
Encyrillic;041D
Endescendercyrillic;04A2
Eng;014A
Enghecyrillic;04A4
Enhookcyrillic;04C7
Eogonek;0118
Eopen;0190
Epsilon;0395
Epsilontonos;0388
Ercyrillic;0420
Ereversed;018E
Ereversedcyrillic;042D
Escyrillic;0421
Esdescendercyrillic;04AA
Esh;01A9
Esmall;F765
Eta;0397
Etarmenian;0538
Etatonos;0389
Eth;00D0
Ethsmall;F7F0
Etilde;1EBC
Etildebelow;1E1A
Euro;20AC
Ezh;01B7
Ezhcaron;01EE
Ezhreversed;01B8
F;0046
Fcircle;24BB
Fdotaccent;1E1E
Feharmenian;0556
Feicoptic;03E4
Fhook;0191
Fitacyrillic;0472
Fiveroman;2164
Fmonospace;FF26
Fourroman;2163
Fsmall;F766
G;0047
GBsquare;3387
Gacute;01F4
Gamma;0393
Gammaafrican;0194
Gangiacoptic;03EA
Gbreve;011E
Gcaron;01E6
Gcedilla;0122
Gcircle;24BC
Gcircumflex;011C
Gcommaaccent;0122
Gdot;0120
Gdotaccent;0120
Gecyrillic;0413
Ghadarmenian;0542
Ghemiddlehookcyrillic;0494
Ghestrokecyrillic;0492
Gheupturncyrillic;0490
Ghook;0193
Gimarmenian;0533
Gjecyrillic;0403
Gmacron;1E20
Gmonospace;FF27
Grave;F6CE
Gravesmall;F760
Gsmall;F767
Gsmallhook;029B
Gstroke;01E4
H;0048
H18533;25CF
H18543;25AA
H18551;25AB
H22073;25A1
HPsquare;33CB
Haabkhasiancyrillic;04A8
Hadescendercyrillic;04B2
Hardsigncyrillic;042A
Hbar;0126
Hbrevebelow;1E2A
Hcedilla;1E28
Hcircle;24BD
Hcircumflex;0124
Hdieresis;1E26
Hdotaccent;1E22
Hdotbelow;1E24
Hmonospace;FF28
Hoarmenian;0540
Horicoptic;03E8
Hsmall;F768
Hungarumlaut;F6CF
Hungarumlautsmall;F6F8
Hzsquare;3390
I;0049
IAcyrillic;042F
IJ;0132
IUcyrillic;042E
Iacute;00CD
Iacutesmall;F7ED
Ibreve;012C
Icaron;01CF
Icircle;24BE
Icircumflex;00CE
Icircumflexsmall;F7EE
Icyrillic;0406
Idblgrave;0208
Idieresis;00CF
Idieresisacute;1E2E
Idieresiscyrillic;04E4
Idieresissmall;F7EF
Idot;0130
Idotaccent;0130
Idotbelow;1ECA
Iebrevecyrillic;04D6
Iecyrillic;0415
Ifraktur;2111
Igrave;00CC
Igravesmall;F7EC
Ihookabove;1EC8
Iicyrillic;0418
Iinvertedbreve;020A
Iishortcyrillic;0419
Imacron;012A
Imacroncyrillic;04E2
Imonospace;FF29
Iniarmenian;053B
Iocyrillic;0401
Iogonek;012E
Iota;0399
Iotaafrican;0196
Iotadieresis;03AA
Iotatonos;038A
Ismall;F769
Istroke;0197
Itilde;0128
Itildebelow;1E2C
Izhitsacyrillic;0474
Izhitsadblgravecyrillic;0476
J;004A
Jaarmenian;0541
Jcircle;24BF
Jcircumflex;0134
Jecyrillic;0408
Jheharmenian;054B
Jmonospace;FF2A
Jsmall;F76A
K;004B
KBsquare;3385
KKsquare;33CD
Kabashkircyrillic;04A0
Kacute;1E30
Kacyrillic;041A
Kadescendercyrillic;049A
Kahookcyrillic;04C3
Kappa;039A
Kastrokecyrillic;049E
Kaverticalstrokecyrillic;049C
Kcaron;01E8
Kcedilla;0136
Kcircle;24C0
Kcommaaccent;0136
Kdotbelow;1E32
Keharmenian;0554
Kenarmenian;053F
Khacyrillic;0425
Kheicoptic;03E6
Khook;0198
Kjecyrillic;040C
Klinebelow;1E34
Kmonospace;FF2B
Koppacyrillic;0480
Koppagreek;03DE
Ksicyrillic;046E
Ksmall;F76B
L;004C
LJ;01C7
LL;F6BF
Lacute;0139
Lambda;039B
Lcaron;013D
Lcedilla;013B
Lcircle;24C1
Lcircumflexbelow;1E3C
Lcommaaccent;013B
Ldot;013F
Ldotaccent;013F
Ldotbelow;1E36
Ldotbelowmacron;1E38
Liwnarmenian;053C
Lj;01C8
Ljecyrillic;0409
Llinebelow;1E3A
Lmonospace;FF2C
Lslash;0141
Lslashsmall;F6F9
Lsmall;F76C
M;004D
MBsquare;3386
Macron;F6D0
Macronsmall;F7AF
Macute;1E3E
Mcircle;24C2
Mdotaccent;1E40
Mdotbelow;1E42
Menarmenian;0544
Mmonospace;FF2D
Msmall;F76D
Mturned;019C
Mu;039C
N;004E
NJ;01CA
Nacute;0143
Ncaron;0147
Ncedilla;0145
Ncircle;24C3
Ncircumflexbelow;1E4A
Ncommaaccent;0145
Ndotaccent;1E44
Ndotbelow;1E46
Nhookleft;019D
Nineroman;2168
Nj;01CB
Njecyrillic;040A
Nlinebelow;1E48
Nmonospace;FF2E
Nowarmenian;0546
Nsmall;F76E
Ntilde;00D1
Ntildesmall;F7F1
Nu;039D
O;004F
OE;0152
OEsmall;F6FA
Oacute;00D3
Oacutesmall;F7F3
Obarredcyrillic;04E8
Obarreddieresiscyrillic;04EA
Obreve;014E
Ocaron;01D1
Ocenteredtilde;019F
Ocircle;24C4
Ocircumflex;00D4
Ocircumflexacute;1ED0
Ocircumflexdotbelow;1ED8
Ocircumflexgrave;1ED2
Ocircumflexhookabove;1ED4
Ocircumflexsmall;F7F4
Ocircumflextilde;1ED6
Ocyrillic;041E
Odblacute;0150
Odblgrave;020C
Odieresis;00D6
Odieresiscyrillic;04E6
Odieresissmall;F7F6
Odotbelow;1ECC
Ogoneksmall;F6FB
Ograve;00D2
Ogravesmall;F7F2
Oharmenian;0555
Ohm;2126
Ohookabove;1ECE
Ohorn;01A0
Ohornacute;1EDA
Ohorndotbelow;1EE2
Ohorngrave;1EDC
Ohornhookabove;1EDE
Ohorntilde;1EE0
Ohungarumlaut;0150
Oi;01A2
Oinvertedbreve;020E
Omacron;014C
Omacronacute;1E52
Omacrongrave;1E50
Omega;2126
Omegacyrillic;0460
Omegagreek;03A9
Omegaroundcyrillic;047A
Omegatitlocyrillic;047C
Omegatonos;038F
Omicron;039F
Omicrontonos;038C
Omonospace;FF2F
Oneroman;2160
Oogonek;01EA
Oogonekmacron;01EC
Oopen;0186
Oslash;00D8
Oslashacute;01FE
Oslashsmall;F7F8
Osmall;F76F
Ostrokeacute;01FE
Otcyrillic;047E
Otilde;00D5
Otildeacute;1E4C
Otildedieresis;1E4E
Otildesmall;F7F5
P;0050
Pacute;1E54
Pcircle;24C5
Pdotaccent;1E56
Pecyrillic;041F
Peharmenian;054A
Pemiddlehookcyrillic;04A6
Phi;03A6
Phook;01A4
Pi;03A0
Piwrarmenian;0553
Pmonospace;FF30
Psi;03A8
Psicyrillic;0470
Psmall;F770
Q;0051
Qcircle;24C6
Qmonospace;FF31
Qsmall;F771
R;0052
Raarmenian;054C
Racute;0154
Rcaron;0158
Rcedilla;0156
Rcircle;24C7
Rcommaaccent;0156
Rdblgrave;0210
Rdotaccent;1E58
Rdotbelow;1E5A
Rdotbelowmacron;1E5C
Reharmenian;0550
Rfraktur;211C
Rho;03A1
Ringsmall;F6FC
Rinvertedbreve;0212
Rlinebelow;1E5E
Rmonospace;FF32
Rsmall;F772
Rsmallinverted;0281
Rsmallinvertedsuperior;02B6
S;0053
SF010000;250C
SF020000;2514
SF030000;2510
SF040000;2518
SF050000;253C
SF060000;252C
SF070000;2534
SF080000;251C
SF090000;2524
SF100000;2500
SF110000;2502
SF190000;2561
SF200000;2562
SF210000;2556
SF220000;2555
SF230000;2563
SF240000;2551
SF250000;2557
SF260000;255D
SF270000;255C
SF280000;255B
SF360000;255E
SF370000;255F
SF380000;255A
SF390000;2554
SF400000;2569
SF410000;2566
SF420000;2560
SF430000;2550
SF440000;256C
SF450000;2567
SF460000;2568
SF470000;2564
SF480000;2565
SF490000;2559
SF500000;2558
SF510000;2552
SF520000;2553
SF530000;256B
SF540000;256A
Sacute;015A
Sacutedotaccent;1E64
Sampigreek;03E0
Scaron;0160
Scarondotaccent;1E66
Scaronsmall;F6FD
Scedilla;015E
Schwa;018F
Schwacyrillic;04D8
Schwadieresiscyrillic;04DA
Scircle;24C8
Scircumflex;015C
Scommaaccent;0218
Sdotaccent;1E60
Sdotbelow;1E62
Sdotbelowdotaccent;1E68
Seharmenian;054D
Sevenroman;2166
Shaarmenian;0547
Shacyrillic;0428
Shchacyrillic;0429
Sheicoptic;03E2
Shhacyrillic;04BA
Shimacoptic;03EC
Sigma;03A3
Sixroman;2165
Smonospace;FF33
Softsigncyrillic;042C
Ssmall;F773
Stigmagreek;03DA
T;0054
Tau;03A4
Tbar;0166
Tcaron;0164
Tcedilla;0162
Tcircle;24C9
Tcircumflexbelow;1E70
Tcommaaccent;0162
Tdotaccent;1E6A
Tdotbelow;1E6C
Tecyrillic;0422
Tedescendercyrillic;04AC
Tenroman;2169
Tetsecyrillic;04B4
Theta;0398
Thook;01AC
Thorn;00DE
Thornsmall;F7FE
Threeroman;2162
Tildesmall;F6FE
Tiwnarmenian;054F
Tlinebelow;1E6E
Tmonospace;FF34
Toarmenian;0539
Tonefive;01BC
Tonesix;0184
Tonetwo;01A7
Tretroflexhook;01AE
Tsecyrillic;0426
Tshecyrillic;040B
Tsmall;F774
Twelveroman;216B
Tworoman;2161
U;0055
Uacute;00DA
Uacutesmall;F7FA
Ubreve;016C
Ucaron;01D3
Ucircle;24CA
Ucircumflex;00DB
Ucircumflexbelow;1E76
Ucircumflexsmall;F7FB
Ucyrillic;0423
Udblacute;0170
Udblgrave;0214
Udieresis;00DC
Udieresisacute;01D7
Udieresisbelow;1E72
Udieresiscaron;01D9
Udieresiscyrillic;04F0
Udieresisgrave;01DB
Udieresismacron;01D5
Udieresissmall;F7FC
Udotbelow;1EE4
Ugrave;00D9
Ugravesmall;F7F9
Uhookabove;1EE6
Uhorn;01AF
Uhornacute;1EE8
Uhorndotbelow;1EF0
Uhorngrave;1EEA
Uhornhookabove;1EEC
Uhorntilde;1EEE
Uhungarumlaut;0170
Uhungarumlautcyrillic;04F2
Uinvertedbreve;0216
Ukcyrillic;0478
Umacron;016A
Umacroncyrillic;04EE
Umacrondieresis;1E7A
Umonospace;FF35
Uogonek;0172
Upsilon;03A5
Upsilon1;03D2
Upsilonacutehooksymbolgreek;03D3
Upsilonafrican;01B1
Upsilondieresis;03AB
Upsilondieresishooksymbolgreek;03D4
Upsilonhooksymbol;03D2
Upsilontonos;038E
Uring;016E
Ushortcyrillic;040E
Usmall;F775
Ustraightcyrillic;04AE
Ustraightstrokecyrillic;04B0
Utilde;0168
Utildeacute;1E78
Utildebelow;1E74
V;0056
Vcircle;24CB
Vdotbelow;1E7E
Vecyrillic;0412
Vewarmenian;054E
Vhook;01B2
Vmonospace;FF36
Voarmenian;0548
Vsmall;F776
Vtilde;1E7C
W;0057
Wacute;1E82
Wcircle;24CC
Wcircumflex;0174
Wdieresis;1E84
Wdotaccent;1E86
Wdotbelow;1E88
Wgrave;1E80
Wmonospace;FF37
Wsmall;F777
X;0058
Xcircle;24CD
Xdieresis;1E8C
Xdotaccent;1E8A
Xeharmenian;053D
Xi;039E
Xmonospace;FF38
Xsmall;F778
Y;0059
Yacute;00DD
Yacutesmall;F7FD
Yatcyrillic;0462
Ycircle;24CE
Ycircumflex;0176
Ydieresis;0178
Ydieresissmall;F7FF
Ydotaccent;1E8E
Ydotbelow;1EF4
Yericyrillic;042B
Yerudieresiscyrillic;04F8
Ygrave;1EF2
Yhook;01B3
Yhookabove;1EF6
Yiarmenian;0545
Yicyrillic;0407
Yiwnarmenian;0552
Ymonospace;FF39
Ysmall;F779
Ytilde;1EF8
Yusbigcyrillic;046A
Yusbigiotifiedcyrillic;046C
Yuslittlecyrillic;0466
Yuslittleiotifiedcyrillic;0468
Z;005A
Zaarmenian;0536
Zacute;0179
Zcaron;017D
Zcaronsmall;F6FF
Zcircle;24CF
Zcircumflex;1E90
Zdot;017B
Zdotaccent;017B
Zdotbelow;1E92
Zecyrillic;0417
Zedescendercyrillic;0498
Zedieresiscyrillic;04DE
Zeta;0396
Zhearmenian;053A
Zhebrevecyrillic;04C1
Zhecyrillic;0416
Zhedescendercyrillic;0496
Zhedieresiscyrillic;04DC
Zlinebelow;1E94
Zmonospace;FF3A
Zsmall;F77A
Zstroke;01B5
a;0061
aabengali;0986
aacute;00E1
aadeva;0906
aagujarati;0A86
aagurmukhi;0A06
aamatragurmukhi;0A3E
aarusquare;3303
aavowelsignbengali;09BE
aavowelsigndeva;093E
aavowelsigngujarati;0ABE
abbreviationmarkarmenian;055F
abbreviationsigndeva;0970
abengali;0985
abopomofo;311A
abreve;0103
abreveacute;1EAF
abrevecyrillic;04D1
abrevedotbelow;1EB7
abrevegrave;1EB1
abrevehookabove;1EB3
abrevetilde;1EB5
acaron;01CE
acircle;24D0
acircumflex;00E2
acircumflexacute;1EA5
acircumflexdotbelow;1EAD
acircumflexgrave;1EA7
acircumflexhookabove;1EA9
acircumflextilde;1EAB
acute;00B4
acutebelowcmb;0317
acutecmb;0301
acutecomb;0301
acutedeva;0954
acutelowmod;02CF
acutetonecmb;0341
acyrillic;0430
adblgrave;0201
addakgurmukhi;0A71
adeva;0905
adieresis;00E4
adieresiscyrillic;04D3
adieresismacron;01DF
adotbelow;1EA1
adotmacron;01E1
ae;00E6
aeacute;01FD
aekorean;3150
aemacron;01E3
afii00208;2015
afii08941;20A4
afii10017;0410
afii10018;0411
afii10019;0412
afii10020;0413
afii10021;0414
afii10022;0415
afii10023;0401
afii10024;0416
afii10025;0417
afii10026;0418
afii10027;0419
afii10028;041A
afii10029;041B
afii10030;041C
afii10031;041D
afii10032;041E
afii10033;041F
afii10034;0420
afii10035;0421
afii10036;0422
afii10037;0423
afii10038;0424
afii10039;0425
afii10040;0426
afii10041;0427
afii10042;0428
afii10043;0429
afii10044;042A
afii10045;042B
afii10046;042C
afii10047;042D
afii10048;042E
afii10049;042F
afii10050;0490
afii10051;0402
afii10052;0403
afii10053;0404
afii10054;0405
afii10055;0406
afii10056;0407
afii10057;0408
afii10058;0409
afii10059;040A
afii10060;040B
afii10061;040C
afii10062;040E
afii10063;F6C4
afii10064;F6C5
afii10065;0430
afii10066;0431
afii10067;0432
afii10068;0433
afii10069;0434
afii10070;0435
afii10071;0451
afii10072;0436
afii10073;0437
afii10074;0438
afii10075;0439
afii10076;043A
afii10077;043B
afii10078;043C
afii10079;043D
afii10080;043E
afii10081;043F
afii10082;0440
afii10083;0441
afii10084;0442
afii10085;0443
afii10086;0444
afii10087;0445
afii10088;0446
afii10089;0447
afii10090;0448
afii10091;0449
afii10092;044A
afii10093;044B
afii10094;044C
afii10095;044D
afii10096;044E
afii10097;044F
afii10098;0491
afii10099;0452
afii10100;0453
afii10101;0454
afii10102;0455
afii10103;0456
afii10104;0457
afii10105;0458
afii10106;0459
afii10107;045A
afii10108;045B
afii10109;045C
afii10110;045E
afii10145;040F
afii10146;0462
afii10147;0472
afii10148;0474
afii10192;F6C6
afii10193;045F
afii10194;0463
afii10195;0473
afii10196;0475
afii10831;F6C7
afii10832;F6C8
afii10846;04D9
afii299;200E
afii300;200F
afii301;200D
afii57381;066A
afii57388;060C
afii57392;0660
afii57393;0661
afii57394;0662
afii57395;0663
afii57396;0664
afii57397;0665
afii57398;0666
afii57399;0667
afii57400;0668
afii57401;0669
afii57403;061B
afii57407;061F
afii57409;0621
afii57410;0622
afii57411;0623
afii57412;0624
afii57413;0625
afii57414;0626
afii57415;0627
afii57416;0628
afii57417;0629
afii57418;062A
afii57419;062B
afii57420;062C
afii57421;062D
afii57422;062E
afii57423;062F
afii57424;0630
afii57425;0631
afii57426;0632
afii57427;0633
afii57428;0634
afii57429;0635
afii57430;0636
afii57431;0637
afii57432;0638
afii57433;0639
afii57434;063A
afii57440;0640
afii57441;0641
afii57442;0642
afii57443;0643
afii57444;0644
afii57445;0645
afii57446;0646
afii57448;0648
afii57449;0649
afii57450;064A
afii57451;064B
afii57452;064C
afii57453;064D
afii57454;064E
afii57455;064F
afii57456;0650
afii57457;0651
afii57458;0652
afii57470;0647
afii57505;06A4
afii57506;067E
afii57507;0686
afii57508;0698
afii57509;06AF
afii57511;0679
afii57512;0688
afii57513;0691
afii57514;06BA
afii57519;06D2
afii57534;06D5
afii57636;20AA
afii57645;05BE
afii57658;05C3
afii57664;05D0
afii57665;05D1
afii57666;05D2
afii57667;05D3
afii57668;05D4
afii57669;05D5
afii57670;05D6
afii57671;05D7
afii57672;05D8
afii57673;05D9
afii57674;05DA
afii57675;05DB
afii57676;05DC
afii57677;05DD
afii57678;05DE
afii57679;05DF
afii57680;05E0
afii57681;05E1
afii57682;05E2
afii57683;05E3
afii57684;05E4
afii57685;05E5
afii57686;05E6
afii57687;05E7
afii57688;05E8
afii57689;05E9
afii57690;05EA
afii57694;FB2A
afii57695;FB2B
afii57700;FB4B
afii57705;FB1F
afii57716;05F0
afii57717;05F1
afii57718;05F2
afii57723;FB35
afii57793;05B4
afii57794;05B5
afii57795;05B6
afii57796;05BB
afii57797;05B8
afii57798;05B7
afii57799;05B0
afii57800;05B2
afii57801;05B1
afii57802;05B3
afii57803;05C2
afii57804;05C1
afii57806;05B9
afii57807;05BC
afii57839;05BD
afii57841;05BF
afii57842;05C0
afii57929;02BC
afii61248;2105
afii61289;2113
afii61352;2116
afii61573;202C
afii61574;202D
afii61575;202E
afii61664;200C
afii63167;066D
afii64937;02BD
agrave;00E0
agujarati;0A85
agurmukhi;0A05
ahiragana;3042
ahookabove;1EA3
aibengali;0990
aibopomofo;311E
aideva;0910
aiecyrillic;04D5
aigujarati;0A90
aigurmukhi;0A10
aimatragurmukhi;0A48
ainarabic;0639
ainfinalarabic;FECA
aininitialarabic;FECB
ainmedialarabic;FECC
ainvertedbreve;0203
aivowelsignbengali;09C8
aivowelsigndeva;0948
aivowelsigngujarati;0AC8
akatakana;30A2
akatakanahalfwidth;FF71
akorean;314F
alef;05D0
alefarabic;0627
alefdageshhebrew;FB30
aleffinalarabic;FE8E
alefhamzaabovearabic;0623
alefhamzaabovefinalarabic;FE84
alefhamzabelowarabic;0625
alefhamzabelowfinalarabic;FE88
alefhebrew;05D0
aleflamedhebrew;FB4F
alefmaddaabovearabic;0622
alefmaddaabovefinalarabic;FE82
alefmaksuraarabic;0649
alefmaksurafinalarabic;FEF0
alefmaksurainitialarabic;FEF3
alefmaksuramedialarabic;FEF4
alefpatahhebrew;FB2E
alefqamatshebrew;FB2F
aleph;2135
allequal;224C
alpha;03B1
alphatonos;03AC
amacron;0101
amonospace;FF41
ampersand;0026
ampersandmonospace;FF06
ampersandsmall;F726
amsquare;33C2
anbopomofo;3122
angbopomofo;3124
angkhankhuthai;0E5A
angle;2220
anglebracketleft;3008
anglebracketleftvertical;FE3F
anglebracketright;3009
anglebracketrightvertical;FE40
angleleft;2329
angleright;232A
angstrom;212B
anoteleia;0387
anudattadeva;0952
anusvarabengali;0982
anusvaradeva;0902
anusvaragujarati;0A82
aogonek;0105
apaatosquare;3300
aparen;249C
apostrophearmenian;055A
apostrophemod;02BC
apple;F8FF
approaches;2250
approxequal;2248
approxequalorimage;2252
approximatelyequal;2245
araeaekorean;318E
araeakorean;318D
arc;2312
arighthalfring;1E9A
aring;00E5
aringacute;01FB
aringbelow;1E01
arrowboth;2194
arrowdashdown;21E3
arrowdashleft;21E0
arrowdashright;21E2
arrowdashup;21E1
arrowdblboth;21D4
arrowdbldown;21D3
arrowdblleft;21D0
arrowdblright;21D2
arrowdblup;21D1
arrowdown;2193
arrowdownleft;2199
arrowdownright;2198
arrowdownwhite;21E9
arrowheaddownmod;02C5
arrowheadleftmod;02C2
arrowheadrightmod;02C3
arrowheadupmod;02C4
arrowhorizex;F8E7
arrowleft;2190
arrowleftdbl;21D0
arrowleftdblstroke;21CD
arrowleftoverright;21C6
arrowleftwhite;21E6
arrowright;2192
arrowrightdblstroke;21CF
arrowrightheavy;279E
arrowrightoverleft;21C4
arrowrightwhite;21E8
arrowtableft;21E4
arrowtabright;21E5
arrowup;2191
arrowupdn;2195
arrowupdnbse;21A8
arrowupdownbase;21A8
arrowupleft;2196
arrowupleftofdown;21C5
arrowupright;2197
arrowupwhite;21E7
arrowvertex;F8E6
asciicircum;005E
asciicircummonospace;FF3E
asciitilde;007E
asciitildemonospace;FF5E
ascript;0251
ascriptturned;0252
asmallhiragana;3041
asmallkatakana;30A1
asmallkatakanahalfwidth;FF67
asterisk;002A
asteriskaltonearabic;066D
asteriskarabic;066D
asteriskmath;2217
asteriskmonospace;FF0A
asterisksmall;FE61
asterism;2042
asuperior;F6E9
asymptoticallyequal;2243
at;0040
atilde;00E3
atmonospace;FF20
atsmall;FE6B
aturned;0250
aubengali;0994
aubopomofo;3120
audeva;0914
augujarati;0A94
augurmukhi;0A14
aulengthmarkbengali;09D7
aumatragurmukhi;0A4C
auvowelsignbengali;09CC
auvowelsigndeva;094C
auvowelsigngujarati;0ACC
avagrahadeva;093D
aybarmenian;0561
ayin;05E2
ayinaltonehebrew;FB20
ayinhebrew;05E2
b;0062
babengali;09AC
backslash;005C
backslashmonospace;FF3C
badeva;092C
bagujarati;0AAC
bagurmukhi;0A2C
bahiragana;3070
bahtthai;0E3F
bakatakana;30D0
bar;007C
barmonospace;FF5C
bbopomofo;3105
bcircle;24D1
bdotaccent;1E03
bdotbelow;1E05
beamedsixteenthnotes;266C
because;2235
becyrillic;0431
beharabic;0628
behfinalarabic;FE90
behinitialarabic;FE91
behiragana;3079
behmedialarabic;FE92
behmeeminitialarabic;FC9F
behmeemisolatedarabic;FC08
behnoonfinalarabic;FC6D
bekatakana;30D9
benarmenian;0562
bet;05D1
beta;03B2
betasymbolgreek;03D0
betdagesh;FB31
betdageshhebrew;FB31
bethebrew;05D1
betrafehebrew;FB4C
bhabengali;09AD
bhadeva;092D
bhagujarati;0AAD
bhagurmukhi;0A2D
bhook;0253
bihiragana;3073
bikatakana;30D3
bilabialclick;0298
bindigurmukhi;0A02
birusquare;3331
blackcircle;25CF
blackdiamond;25C6
blackdownpointingtriangle;25BC
blackleftpointingpointer;25C4
blackleftpointingtriangle;25C0
blacklenticularbracketleft;3010
blacklenticularbracketleftvertical;FE3B
blacklenticularbracketright;3011
blacklenticularbracketrightvertical;FE3C
blacklowerlefttriangle;25E3
blacklowerrighttriangle;25E2
blackrectangle;25AC
blackrightpointingpointer;25BA
blackrightpointingtriangle;25B6
blacksmallsquare;25AA
blacksmilingface;263B
blacksquare;25A0
blackstar;2605
blackupperlefttriangle;25E4
blackupperrighttriangle;25E5
blackuppointingsmalltriangle;25B4
blackuppointingtriangle;25B2
blank;2423
blinebelow;1E07
block;2588
bmonospace;FF42
bobaimaithai;0E1A
bohiragana;307C
bokatakana;30DC
bparen;249D
bqsquare;33C3
braceex;F8F4
braceleft;007B
braceleftbt;F8F3
braceleftmid;F8F2
braceleftmonospace;FF5B
braceleftsmall;FE5B
bracelefttp;F8F1
braceleftvertical;FE37
braceright;007D
bracerightbt;F8FE
bracerightmid;F8FD
bracerightmonospace;FF5D
bracerightsmall;FE5C
bracerighttp;F8FC
bracerightvertical;FE38
bracketleft;005B
bracketleftbt;F8F0
bracketleftex;F8EF
bracketleftmonospace;FF3B
bracketlefttp;F8EE
bracketright;005D
bracketrightbt;F8FB
bracketrightex;F8FA
bracketrightmonospace;FF3D
bracketrighttp;F8F9
breve;02D8
brevebelowcmb;032E
brevecmb;0306
breveinvertedbelowcmb;032F
breveinvertedcmb;0311
breveinverteddoublecmb;0361
bridgebelowcmb;032A
bridgeinvertedbelowcmb;033A
brokenbar;00A6
bstroke;0180
bsuperior;F6EA
btopbar;0183
buhiragana;3076
bukatakana;30D6
bullet;2022
bulletinverse;25D8
bulletoperator;2219
bullseye;25CE
c;0063
caarmenian;056E
cabengali;099A
cacute;0107
cadeva;091A
cagujarati;0A9A
cagurmukhi;0A1A
calsquare;3388
candrabindubengali;0981
candrabinducmb;0310
candrabindudeva;0901
candrabindugujarati;0A81
capslock;21EA
careof;2105
caron;02C7
caronbelowcmb;032C
caroncmb;030C
carriagereturn;21B5
cbopomofo;3118
ccaron;010D
ccedilla;00E7
ccedillaacute;1E09
ccircle;24D2
ccircumflex;0109
ccurl;0255
cdot;010B
cdotaccent;010B
cdsquare;33C5
cedilla;00B8
cedillacmb;0327
cent;00A2
centigrade;2103
centinferior;F6DF
centmonospace;FFE0
centoldstyle;F7A2
centsuperior;F6E0
chaarmenian;0579
chabengali;099B
chadeva;091B
chagujarati;0A9B
chagurmukhi;0A1B
chbopomofo;3114
cheabkhasiancyrillic;04BD
checkmark;2713
checyrillic;0447
chedescenderabkhasiancyrillic;04BF
chedescendercyrillic;04B7
chedieresiscyrillic;04F5
cheharmenian;0573
chekhakassiancyrillic;04CC
cheverticalstrokecyrillic;04B9
chi;03C7
chieuchacirclekorean;3277
chieuchaparenkorean;3217
chieuchcirclekorean;3269
chieuchkorean;314A
chieuchparenkorean;3209
chochangthai;0E0A
chochanthai;0E08
chochingthai;0E09
chochoethai;0E0C
chook;0188
cieucacirclekorean;3276
cieucaparenkorean;3216
cieuccirclekorean;3268
cieuckorean;3148
cieucparenkorean;3208
cieucuparenkorean;321C
circle;25CB
circlemultiply;2297
circleot;2299
circleplus;2295
circlepostalmark;3036
circlewithlefthalfblack;25D0
circlewithrighthalfblack;25D1
circumflex;02C6
circumflexbelowcmb;032D
circumflexcmb;0302
clear;2327
clickalveolar;01C2
clickdental;01C0
clicklateral;01C1
clickretroflex;01C3
club;2663
clubsuitblack;2663
clubsuitwhite;2667
cmcubedsquare;33A4
cmonospace;FF43
cmsquaredsquare;33A0
coarmenian;0581
colon;003A
colonmonetary;20A1
colonmonospace;FF1A
colonsign;20A1
colonsmall;FE55
colontriangularhalfmod;02D1
colontriangularmod;02D0
comma;002C
commaabovecmb;0313
commaaboverightcmb;0315
commaaccent;F6C3
commaarabic;060C
commaarmenian;055D
commainferior;F6E1
commamonospace;FF0C
commareversedabovecmb;0314
commareversedmod;02BD
commasmall;FE50
commasuperior;F6E2
commaturnedabovecmb;0312
commaturnedmod;02BB
compass;263C
congruent;2245
contourintegral;222E
control;2303
controlACK;0006
controlBEL;0007
controlBS;0008
controlCAN;0018
controlCR;000D
controlDC1;0011
controlDC2;0012
controlDC3;0013
controlDC4;0014
controlDEL;007F
controlDLE;0010
controlEM;0019
controlENQ;0005
controlEOT;0004
controlESC;001B
controlETB;0017
controlETX;0003
controlFF;000C
controlFS;001C
controlGS;001D
controlHT;0009
controlLF;000A
controlNAK;0015
controlRS;001E
controlSI;000F
controlSO;000E
controlSOT;0002
controlSTX;0001
controlSUB;001A
controlSYN;0016
controlUS;001F
controlVT;000B
copyright;00A9
copyrightsans;F8E9
copyrightserif;F6D9
cornerbracketleft;300C
cornerbracketlefthalfwidth;FF62
cornerbracketleftvertical;FE41
cornerbracketright;300D
cornerbracketrighthalfwidth;FF63
cornerbracketrightvertical;FE42
corporationsquare;337F
cosquare;33C7
coverkgsquare;33C6
cparen;249E
cruzeiro;20A2
cstretched;0297
curlyand;22CF
curlyor;22CE
currency;00A4
cyrBreve;F6D1
cyrFlex;F6D2
cyrbreve;F6D4
cyrflex;F6D5
d;0064
daarmenian;0564
dabengali;09A6
dadarabic;0636
dadeva;0926
dadfinalarabic;FEBE
dadinitialarabic;FEBF
dadmedialarabic;FEC0
dagesh;05BC
dageshhebrew;05BC
dagger;2020
daggerdbl;2021
dagujarati;0AA6
dagurmukhi;0A26
dahiragana;3060
dakatakana;30C0
dalarabic;062F
dalet;05D3
daletdagesh;FB33
daletdageshhebrew;FB33
dalethatafpatah;05D3 05B2
dalethatafpatahhebrew;05D3 05B2
dalethatafsegol;05D3 05B1
dalethatafsegolhebrew;05D3 05B1
dalethebrew;05D3
dalethiriq;05D3 05B4
dalethiriqhebrew;05D3 05B4
daletholam;05D3 05B9
daletholamhebrew;05D3 05B9
daletpatah;05D3 05B7
daletpatahhebrew;05D3 05B7
daletqamats;05D3 05B8
daletqamatshebrew;05D3 05B8
daletqubuts;05D3 05BB
daletqubutshebrew;05D3 05BB
daletsegol;05D3 05B6
daletsegolhebrew;05D3 05B6
daletsheva;05D3 05B0
daletshevahebrew;05D3 05B0
dalettsere;05D3 05B5
dalettserehebrew;05D3 05B5
dalfinalarabic;FEAA
dammaarabic;064F
dammalowarabic;064F
dammatanaltonearabic;064C
dammatanarabic;064C
danda;0964
dargahebrew;05A7
dargalefthebrew;05A7
dasiapneumatacyrilliccmb;0485
dblGrave;F6D3
dblanglebracketleft;300A
dblanglebracketleftvertical;FE3D
dblanglebracketright;300B
dblanglebracketrightvertical;FE3E
dblarchinvertedbelowcmb;032B
dblarrowleft;21D4
dblarrowright;21D2
dbldanda;0965
dblgrave;F6D6
dblgravecmb;030F
dblintegral;222C
dbllowline;2017
dbllowlinecmb;0333
dbloverlinecmb;033F
dblprimemod;02BA
dblverticalbar;2016
dblverticallineabovecmb;030E
dbopomofo;3109
dbsquare;33C8
dcaron;010F
dcedilla;1E11
dcircle;24D3
dcircumflexbelow;1E13
dcroat;0111
ddabengali;09A1
ddadeva;0921
ddagujarati;0AA1
ddagurmukhi;0A21
ddalarabic;0688
ddalfinalarabic;FB89
dddhadeva;095C
ddhabengali;09A2
ddhadeva;0922
ddhagujarati;0AA2
ddhagurmukhi;0A22
ddotaccent;1E0B
ddotbelow;1E0D
decimalseparatorarabic;066B
decimalseparatorpersian;066B
decyrillic;0434
degree;00B0
dehihebrew;05AD
dehiragana;3067
deicoptic;03EF
dekatakana;30C7
deleteleft;232B
deleteright;2326
delta;03B4
deltaturned;018D
denominatorminusonenumeratorbengali;09F8
dezh;02A4
dhabengali;09A7
dhadeva;0927
dhagujarati;0AA7
dhagurmukhi;0A27
dhook;0257
dialytikatonos;0385
dialytikatonoscmb;0344
diamond;2666
diamondsuitwhite;2662
dieresis;00A8
dieresisacute;F6D7
dieresisbelowcmb;0324
dieresiscmb;0308
dieresisgrave;F6D8
dieresistonos;0385
dihiragana;3062
dikatakana;30C2
dittomark;3003
divide;00F7
divides;2223
divisionslash;2215
djecyrillic;0452
dkshade;2593
dlinebelow;1E0F
dlsquare;3397
dmacron;0111
dmonospace;FF44
dnblock;2584
dochadathai;0E0E
dodekthai;0E14
dohiragana;3069
dokatakana;30C9
dollar;0024
dollarinferior;F6E3
dollarmonospace;FF04
dollaroldstyle;F724
dollarsmall;FE69
dollarsuperior;F6E4
dong;20AB
dorusquare;3326
dotaccent;02D9
dotaccentcmb;0307
dotbelowcmb;0323
dotbelowcomb;0323
dotkatakana;30FB
dotlessi;0131
dotlessj;F6BE
dotlessjstrokehook;0284
dotmath;22C5
dottedcircle;25CC
doubleyodpatah;FB1F
doubleyodpatahhebrew;FB1F
downtackbelowcmb;031E
downtackmod;02D5
dparen;249F
dsuperior;F6EB
dtail;0256
dtopbar;018C
duhiragana;3065
dukatakana;30C5
dz;01F3
dzaltone;02A3
dzcaron;01C6
dzcurl;02A5
dzeabkhasiancyrillic;04E1
dzecyrillic;0455
dzhecyrillic;045F
e;0065
eacute;00E9
earth;2641
ebengali;098F
ebopomofo;311C
ebreve;0115
ecandradeva;090D
ecandragujarati;0A8D
ecandravowelsigndeva;0945
ecandravowelsigngujarati;0AC5
ecaron;011B
ecedillabreve;1E1D
echarmenian;0565
echyiwnarmenian;0587
ecircle;24D4
ecircumflex;00EA
ecircumflexacute;1EBF
ecircumflexbelow;1E19
ecircumflexdotbelow;1EC7
ecircumflexgrave;1EC1
ecircumflexhookabove;1EC3
ecircumflextilde;1EC5
ecyrillic;0454
edblgrave;0205
edeva;090F
edieresis;00EB
edot;0117
edotaccent;0117
edotbelow;1EB9
eegurmukhi;0A0F
eematragurmukhi;0A47
efcyrillic;0444
egrave;00E8
egujarati;0A8F
eharmenian;0567
ehbopomofo;311D
ehiragana;3048
ehookabove;1EBB
eibopomofo;311F
eight;0038
eightarabic;0668
eightbengali;09EE
eightcircle;2467
eightcircleinversesansserif;2791
eightdeva;096E
eighteencircle;2471
eighteenparen;2485
eighteenperiod;2499
eightgujarati;0AEE
eightgurmukhi;0A6E
eighthackarabic;0668
eighthangzhou;3028
eighthnotebeamed;266B
eightideographicparen;3227
eightinferior;2088
eightmonospace;FF18
eightoldstyle;F738
eightparen;247B
eightperiod;248F
eightpersian;06F8
eightroman;2177
eightsuperior;2078
eightthai;0E58
einvertedbreve;0207
eiotifiedcyrillic;0465
ekatakana;30A8
ekatakanahalfwidth;FF74
ekonkargurmukhi;0A74
ekorean;3154
elcyrillic;043B
element;2208
elevencircle;246A
elevenparen;247E
elevenperiod;2492
elevenroman;217A
ellipsis;2026
ellipsisvertical;22EE
emacron;0113
emacronacute;1E17
emacrongrave;1E15
emcyrillic;043C
emdash;2014
emdashvertical;FE31
emonospace;FF45
emphasismarkarmenian;055B
emptyset;2205
enbopomofo;3123
encyrillic;043D
endash;2013
endashvertical;FE32
endescendercyrillic;04A3
eng;014B
engbopomofo;3125
enghecyrillic;04A5
enhookcyrillic;04C8
enspace;2002
eogonek;0119
eokorean;3153
eopen;025B
eopenclosed;029A
eopenreversed;025C
eopenreversedclosed;025E
eopenreversedhook;025D
eparen;24A0
epsilon;03B5
epsilontonos;03AD
equal;003D
equalmonospace;FF1D
equalsmall;FE66
equalsuperior;207C
equivalence;2261
erbopomofo;3126
ercyrillic;0440
ereversed;0258
ereversedcyrillic;044D
escyrillic;0441
esdescendercyrillic;04AB
esh;0283
eshcurl;0286
eshortdeva;090E
eshortvowelsigndeva;0946
eshreversedloop;01AA
eshsquatreversed;0285
esmallhiragana;3047
esmallkatakana;30A7
esmallkatakanahalfwidth;FF6A
estimated;212E
esuperior;F6EC
eta;03B7
etarmenian;0568
etatonos;03AE
eth;00F0
etilde;1EBD
etildebelow;1E1B
etnahtafoukhhebrew;0591
etnahtafoukhlefthebrew;0591
etnahtahebrew;0591
etnahtalefthebrew;0591
eturned;01DD
eukorean;3161
euro;20AC
evowelsignbengali;09C7
evowelsigndeva;0947
evowelsigngujarati;0AC7
exclam;0021
exclamarmenian;055C
exclamdbl;203C
exclamdown;00A1
exclamdownsmall;F7A1
exclammonospace;FF01
exclamsmall;F721
existential;2203
ezh;0292
ezhcaron;01EF
ezhcurl;0293
ezhreversed;01B9
ezhtail;01BA
f;0066
fadeva;095E
fagurmukhi;0A5E
fahrenheit;2109
fathaarabic;064E
fathalowarabic;064E
fathatanarabic;064B
fbopomofo;3108
fcircle;24D5
fdotaccent;1E1F
feharabic;0641
feharmenian;0586
fehfinalarabic;FED2
fehinitialarabic;FED3
fehmedialarabic;FED4
feicoptic;03E5
female;2640
ff;FB00
ffi;FB03
ffl;FB04
fi;FB01
fifteencircle;246E
fifteenparen;2482
fifteenperiod;2496
figuredash;2012
filledbox;25A0
filledrect;25AC
finalkaf;05DA
finalkafdagesh;FB3A
finalkafdageshhebrew;FB3A
finalkafhebrew;05DA
finalkafqamats;05DA 05B8
finalkafqamatshebrew;05DA 05B8
finalkafsheva;05DA 05B0
finalkafshevahebrew;05DA 05B0
finalmem;05DD
finalmemhebrew;05DD
finalnun;05DF
finalnunhebrew;05DF
finalpe;05E3
finalpehebrew;05E3
finaltsadi;05E5
finaltsadihebrew;05E5
firsttonechinese;02C9
fisheye;25C9
fitacyrillic;0473
five;0035
fivearabic;0665
fivebengali;09EB
fivecircle;2464
fivecircleinversesansserif;278E
fivedeva;096B
fiveeighths;215D
fivegujarati;0AEB
fivegurmukhi;0A6B
fivehackarabic;0665
fivehangzhou;3025
fiveideographicparen;3224
fiveinferior;2085
fivemonospace;FF15
fiveoldstyle;F735
fiveparen;2478
fiveperiod;248C
fivepersian;06F5
fiveroman;2174
fivesuperior;2075
fivethai;0E55
fl;FB02
florin;0192
fmonospace;FF46
fmsquare;3399
fofanthai;0E1F
fofathai;0E1D
fongmanthai;0E4F
forall;2200
four;0034
fourarabic;0664
fourbengali;09EA
fourcircle;2463
fourcircleinversesansserif;278D
fourdeva;096A
fourgujarati;0AEA
fourgurmukhi;0A6A
fourhackarabic;0664
fourhangzhou;3024
fourideographicparen;3223
fourinferior;2084
fourmonospace;FF14
fournumeratorbengali;09F7
fouroldstyle;F734
fourparen;2477
fourperiod;248B
fourpersian;06F4
fourroman;2173
foursuperior;2074
fourteencircle;246D
fourteenparen;2481
fourteenperiod;2495
fourthai;0E54
fourthtonechinese;02CB
fparen;24A1
fraction;2044
franc;20A3
g;0067
gabengali;0997
gacute;01F5
gadeva;0917
gafarabic;06AF
gaffinalarabic;FB93
gafinitialarabic;FB94
gafmedialarabic;FB95
gagujarati;0A97
gagurmukhi;0A17
gahiragana;304C
gakatakana;30AC
gamma;03B3
gammalatinsmall;0263
gammasuperior;02E0
gangiacoptic;03EB
gbopomofo;310D
gbreve;011F
gcaron;01E7
gcedilla;0123
gcircle;24D6
gcircumflex;011D
gcommaaccent;0123
gdot;0121
gdotaccent;0121
gecyrillic;0433
gehiragana;3052
gekatakana;30B2
geometricallyequal;2251
gereshaccenthebrew;059C
gereshhebrew;05F3
gereshmuqdamhebrew;059D
germandbls;00DF
gershayimaccenthebrew;059E
gershayimhebrew;05F4
getamark;3013
ghabengali;0998
ghadarmenian;0572
ghadeva;0918
ghagujarati;0A98
ghagurmukhi;0A18
ghainarabic;063A
ghainfinalarabic;FECE
ghaininitialarabic;FECF
ghainmedialarabic;FED0
ghemiddlehookcyrillic;0495
ghestrokecyrillic;0493
gheupturncyrillic;0491
ghhadeva;095A
ghhagurmukhi;0A5A
ghook;0260
ghzsquare;3393
gihiragana;304E
gikatakana;30AE
gimarmenian;0563
gimel;05D2
gimeldagesh;FB32
gimeldageshhebrew;FB32
gimelhebrew;05D2
gjecyrillic;0453
glottalinvertedstroke;01BE
glottalstop;0294
glottalstopinverted;0296
glottalstopmod;02C0
glottalstopreversed;0295
glottalstopreversedmod;02C1
glottalstopreversedsuperior;02E4
glottalstopstroke;02A1
glottalstopstrokereversed;02A2
gmacron;1E21
gmonospace;FF47
gohiragana;3054
gokatakana;30B4
gparen;24A2
gpasquare;33AC
gradient;2207
grave;0060
gravebelowcmb;0316
gravecmb;0300
gravecomb;0300
gravedeva;0953
gravelowmod;02CE
gravemonospace;FF40
gravetonecmb;0340
greater;003E
greaterequal;2265
greaterequalorless;22DB
greatermonospace;FF1E
greaterorequivalent;2273
greaterorless;2277
greateroverequal;2267
greatersmall;FE65
gscript;0261
gstroke;01E5
guhiragana;3050
guillemotleft;00AB
guillemotright;00BB
guilsinglleft;2039
guilsinglright;203A
gukatakana;30B0
guramusquare;3318
gysquare;33C9
h;0068
haabkhasiancyrillic;04A9
haaltonearabic;06C1
habengali;09B9
hadescendercyrillic;04B3
hadeva;0939
hagujarati;0AB9
hagurmukhi;0A39
haharabic;062D
hahfinalarabic;FEA2
hahinitialarabic;FEA3
hahiragana;306F
hahmedialarabic;FEA4
haitusquare;332A
hakatakana;30CF
hakatakanahalfwidth;FF8A
halantgurmukhi;0A4D
hamzaarabic;0621
hamzadammaarabic;0621 064F
hamzadammatanarabic;0621 064C
hamzafathaarabic;0621 064E
hamzafathatanarabic;0621 064B
hamzalowarabic;0621
hamzalowkasraarabic;0621 0650
hamzalowkasratanarabic;0621 064D
hamzasukunarabic;0621 0652
hangulfiller;3164
hardsigncyrillic;044A
harpoonleftbarbup;21BC
harpoonrightbarbup;21C0
hasquare;33CA
hatafpatah;05B2
hatafpatah16;05B2
hatafpatah23;05B2
hatafpatah2f;05B2
hatafpatahhebrew;05B2
hatafpatahnarrowhebrew;05B2
hatafpatahquarterhebrew;05B2
hatafpatahwidehebrew;05B2
hatafqamats;05B3
hatafqamats1b;05B3
hatafqamats28;05B3
hatafqamats34;05B3
hatafqamatshebrew;05B3
hatafqamatsnarrowhebrew;05B3
hatafqamatsquarterhebrew;05B3
hatafqamatswidehebrew;05B3
hatafsegol;05B1
hatafsegol17;05B1
hatafsegol24;05B1
hatafsegol30;05B1
hatafsegolhebrew;05B1
hatafsegolnarrowhebrew;05B1
hatafsegolquarterhebrew;05B1
hatafsegolwidehebrew;05B1
hbar;0127
hbopomofo;310F
hbrevebelow;1E2B
hcedilla;1E29
hcircle;24D7
hcircumflex;0125
hdieresis;1E27
hdotaccent;1E23
hdotbelow;1E25
he;05D4
heart;2665
heartsuitblack;2665
heartsuitwhite;2661
hedagesh;FB34
hedageshhebrew;FB34
hehaltonearabic;06C1
heharabic;0647
hehebrew;05D4
hehfinalaltonearabic;FBA7
hehfinalalttwoarabic;FEEA
hehfinalarabic;FEEA
hehhamzaabovefinalarabic;FBA5
hehhamzaaboveisolatedarabic;FBA4
hehinitialaltonearabic;FBA8
hehinitialarabic;FEEB
hehiragana;3078
hehmedialaltonearabic;FBA9
hehmedialarabic;FEEC
heiseierasquare;337B
hekatakana;30D8
hekatakanahalfwidth;FF8D
hekutaarusquare;3336
henghook;0267
herutusquare;3339
het;05D7
hethebrew;05D7
hhook;0266
hhooksuperior;02B1
hieuhacirclekorean;327B
hieuhaparenkorean;321B
hieuhcirclekorean;326D
hieuhkorean;314E
hieuhparenkorean;320D
hihiragana;3072
hikatakana;30D2
hikatakanahalfwidth;FF8B
hiriq;05B4
hiriq14;05B4
hiriq21;05B4
hiriq2d;05B4
hiriqhebrew;05B4
hiriqnarrowhebrew;05B4
hiriqquarterhebrew;05B4
hiriqwidehebrew;05B4
hlinebelow;1E96
hmonospace;FF48
hoarmenian;0570
hohipthai;0E2B
hohiragana;307B
hokatakana;30DB
hokatakanahalfwidth;FF8E
holam;05B9
holam19;05B9
holam26;05B9
holam32;05B9
holamhebrew;05B9
holamnarrowhebrew;05B9
holamquarterhebrew;05B9
holamwidehebrew;05B9
honokhukthai;0E2E
hookabovecomb;0309
hookcmb;0309
hookpalatalizedbelowcmb;0321
hookretroflexbelowcmb;0322
hoonsquare;3342
horicoptic;03E9
horizontalbar;2015
horncmb;031B
hotsprings;2668
house;2302
hparen;24A3
hsuperior;02B0
hturned;0265
huhiragana;3075
huiitosquare;3333
hukatakana;30D5
hukatakanahalfwidth;FF8C
hungarumlaut;02DD
hungarumlautcmb;030B
hv;0195
hyphen;002D
hypheninferior;F6E5
hyphenmonospace;FF0D
hyphensmall;FE63
hyphensuperior;F6E6
hyphentwo;2010
i;0069
iacute;00ED
iacyrillic;044F
ibengali;0987
ibopomofo;3127
ibreve;012D
icaron;01D0
icircle;24D8
icircumflex;00EE
icyrillic;0456
idblgrave;0209
ideographearthcircle;328F
ideographfirecircle;328B
ideographicallianceparen;323F
ideographiccallparen;323A
ideographiccentrecircle;32A5
ideographicclose;3006
ideographiccomma;3001
ideographiccommaleft;FF64
ideographiccongratulationparen;3237
ideographiccorrectcircle;32A3
ideographicearthparen;322F
ideographicenterpriseparen;323D
ideographicexcellentcircle;329D
ideographicfestivalparen;3240
ideographicfinancialcircle;3296
ideographicfinancialparen;3236
ideographicfireparen;322B
ideographichaveparen;3232
ideographichighcircle;32A4
ideographiciterationmark;3005
ideographiclaborcircle;3298
ideographiclaborparen;3238
ideographicleftcircle;32A7
ideographiclowcircle;32A6
ideographicmedicinecircle;32A9
ideographicmetalparen;322E
ideographicmoonparen;322A
ideographicnameparen;3234
ideographicperiod;3002
ideographicprintcircle;329E
ideographicreachparen;3243
ideographicrepresentparen;3239
ideographicresourceparen;323E
ideographicrightcircle;32A8
ideographicsecretcircle;3299
ideographicselfparen;3242
ideographicsocietyparen;3233
ideographicspace;3000
ideographicspecialparen;3235
ideographicstockparen;3231
ideographicstudyparen;323B
ideographicsunparen;3230
ideographicsuperviseparen;323C
ideographicwaterparen;322C
ideographicwoodparen;322D
ideographiczero;3007
ideographmetalcircle;328E
ideographmooncircle;328A
ideographnamecircle;3294
ideographsuncircle;3290
ideographwatercircle;328C
ideographwoodcircle;328D
ideva;0907
idieresis;00EF
idieresisacute;1E2F
idieresiscyrillic;04E5
idotbelow;1ECB
iebrevecyrillic;04D7
iecyrillic;0435
ieungacirclekorean;3275
ieungaparenkorean;3215
ieungcirclekorean;3267
ieungkorean;3147
ieungparenkorean;3207
igrave;00EC
igujarati;0A87
igurmukhi;0A07
ihiragana;3044
ihookabove;1EC9
iibengali;0988
iicyrillic;0438
iideva;0908
iigujarati;0A88
iigurmukhi;0A08
iimatragurmukhi;0A40
iinvertedbreve;020B
iishortcyrillic;0439
iivowelsignbengali;09C0
iivowelsigndeva;0940
iivowelsigngujarati;0AC0
ij;0133
ikatakana;30A4
ikatakanahalfwidth;FF72
ikorean;3163
ilde;02DC
iluyhebrew;05AC
imacron;012B
imacroncyrillic;04E3
imageorapproximatelyequal;2253
imatragurmukhi;0A3F
imonospace;FF49
increment;2206
infinity;221E
iniarmenian;056B
integral;222B
integralbottom;2321
integralbt;2321
integralex;F8F5
integraltop;2320
integraltp;2320
intersection;2229
intisquare;3305
invbullet;25D8
invcircle;25D9
invsmileface;263B
iocyrillic;0451
iogonek;012F
iota;03B9
iotadieresis;03CA
iotadieresistonos;0390
iotalatin;0269
iotatonos;03AF
iparen;24A4
irigurmukhi;0A72
ismallhiragana;3043
ismallkatakana;30A3
ismallkatakanahalfwidth;FF68
issharbengali;09FA
istroke;0268
isuperior;F6ED
iterationhiragana;309D
iterationkatakana;30FD
itilde;0129
itildebelow;1E2D
iubopomofo;3129
iucyrillic;044E
ivowelsignbengali;09BF
ivowelsigndeva;093F
ivowelsigngujarati;0ABF
izhitsacyrillic;0475
izhitsadblgravecyrillic;0477
j;006A
jaarmenian;0571
jabengali;099C
jadeva;091C
jagujarati;0A9C
jagurmukhi;0A1C
jbopomofo;3110
jcaron;01F0
jcircle;24D9
jcircumflex;0135
jcrossedtail;029D
jdotlessstroke;025F
jecyrillic;0458
jeemarabic;062C
jeemfinalarabic;FE9E
jeeminitialarabic;FE9F
jeemmedialarabic;FEA0
jeharabic;0698
jehfinalarabic;FB8B
jhabengali;099D
jhadeva;091D
jhagujarati;0A9D
jhagurmukhi;0A1D
jheharmenian;057B
jis;3004
jmonospace;FF4A
jparen;24A5
jsuperior;02B2
k;006B
kabashkircyrillic;04A1
kabengali;0995
kacute;1E31
kacyrillic;043A
kadescendercyrillic;049B
kadeva;0915
kaf;05DB
kafarabic;0643
kafdagesh;FB3B
kafdageshhebrew;FB3B
kaffinalarabic;FEDA
kafhebrew;05DB
kafinitialarabic;FEDB
kafmedialarabic;FEDC
kafrafehebrew;FB4D
kagujarati;0A95
kagurmukhi;0A15
kahiragana;304B
kahookcyrillic;04C4
kakatakana;30AB
kakatakanahalfwidth;FF76
kappa;03BA
kappasymbolgreek;03F0
kapyeounmieumkorean;3171
kapyeounphieuphkorean;3184
kapyeounpieupkorean;3178
kapyeounssangpieupkorean;3179
karoriisquare;330D
kashidaautoarabic;0640
kashidaautonosidebearingarabic;0640
kasmallkatakana;30F5
kasquare;3384
kasraarabic;0650
kasratanarabic;064D
kastrokecyrillic;049F
katahiraprolongmarkhalfwidth;FF70
kaverticalstrokecyrillic;049D
kbopomofo;310E
kcalsquare;3389
kcaron;01E9
kcedilla;0137
kcircle;24DA
kcommaaccent;0137
kdotbelow;1E33
keharmenian;0584
kehiragana;3051
kekatakana;30B1
kekatakanahalfwidth;FF79
kenarmenian;056F
kesmallkatakana;30F6
kgreenlandic;0138
khabengali;0996
khacyrillic;0445
khadeva;0916
khagujarati;0A96
khagurmukhi;0A16
khaharabic;062E
khahfinalarabic;FEA6
khahinitialarabic;FEA7
khahmedialarabic;FEA8
kheicoptic;03E7
khhadeva;0959
khhagurmukhi;0A59
khieukhacirclekorean;3278
khieukhaparenkorean;3218
khieukhcirclekorean;326A
khieukhkorean;314B
khieukhparenkorean;320A
khokhaithai;0E02
khokhonthai;0E05
khokhuatthai;0E03
khokhwaithai;0E04
khomutthai;0E5B
khook;0199
khorakhangthai;0E06
khzsquare;3391
kihiragana;304D
kikatakana;30AD
kikatakanahalfwidth;FF77
kiroguramusquare;3315
kiromeetorusquare;3316
kirosquare;3314
kiyeokacirclekorean;326E
kiyeokaparenkorean;320E
kiyeokcirclekorean;3260
kiyeokkorean;3131
kiyeokparenkorean;3200
kiyeoksioskorean;3133
kjecyrillic;045C
klinebelow;1E35
klsquare;3398
kmcubedsquare;33A6
kmonospace;FF4B
kmsquaredsquare;33A2
kohiragana;3053
kohmsquare;33C0
kokaithai;0E01
kokatakana;30B3
kokatakanahalfwidth;FF7A
kooposquare;331E
koppacyrillic;0481
koreanstandardsymbol;327F
koroniscmb;0343
kparen;24A6
kpasquare;33AA
ksicyrillic;046F
ktsquare;33CF
kturned;029E
kuhiragana;304F
kukatakana;30AF
kukatakanahalfwidth;FF78
kvsquare;33B8
kwsquare;33BE
l;006C
labengali;09B2
lacute;013A
ladeva;0932
lagujarati;0AB2
lagurmukhi;0A32
lakkhangyaothai;0E45
lamaleffinalarabic;FEFC
lamalefhamzaabovefinalarabic;FEF8
lamalefhamzaaboveisolatedarabic;FEF7
lamalefhamzabelowfinalarabic;FEFA
lamalefhamzabelowisolatedarabic;FEF9
lamalefisolatedarabic;FEFB
lamalefmaddaabovefinalarabic;FEF6
lamalefmaddaaboveisolatedarabic;FEF5
lamarabic;0644
lambda;03BB
lambdastroke;019B
lamed;05DC
lameddagesh;FB3C
lameddageshhebrew;FB3C
lamedhebrew;05DC
lamedholam;05DC 05B9
lamedholamdagesh;05DC 05B9 05BC
lamedholamdageshhebrew;05DC 05B9 05BC
lamedholamhebrew;05DC 05B9
lamfinalarabic;FEDE
lamhahinitialarabic;FCCA
laminitialarabic;FEDF
lamjeeminitialarabic;FCC9
lamkhahinitialarabic;FCCB
lamlamhehisolatedarabic;FDF2
lammedialarabic;FEE0
lammeemhahinitialarabic;FD88
lammeeminitialarabic;FCCC
lammeemjeeminitialarabic;FEDF FEE4 FEA0
lammeemkhahinitialarabic;FEDF FEE4 FEA8
largecircle;25EF
lbar;019A
lbelt;026C
lbopomofo;310C
lcaron;013E
lcedilla;013C
lcircle;24DB
lcircumflexbelow;1E3D
lcommaaccent;013C
ldot;0140
ldotaccent;0140
ldotbelow;1E37
ldotbelowmacron;1E39
leftangleabovecmb;031A
lefttackbelowcmb;0318
less;003C
lessequal;2264
lessequalorgreater;22DA
lessmonospace;FF1C
lessorequivalent;2272
lessorgreater;2276
lessoverequal;2266
lesssmall;FE64
lezh;026E
lfblock;258C
lhookretroflex;026D
lira;20A4
liwnarmenian;056C
lj;01C9
ljecyrillic;0459
ll;F6C0
lladeva;0933
llagujarati;0AB3
llinebelow;1E3B
llladeva;0934
llvocalicbengali;09E1
llvocalicdeva;0961
llvocalicvowelsignbengali;09E3
llvocalicvowelsigndeva;0963
lmiddletilde;026B
lmonospace;FF4C
lmsquare;33D0
lochulathai;0E2C
logicaland;2227
logicalnot;00AC
logicalnotreversed;2310
logicalor;2228
lolingthai;0E25
longs;017F
lowlinecenterline;FE4E
lowlinecmb;0332
lowlinedashed;FE4D
lozenge;25CA
lparen;24A7
lslash;0142
lsquare;2113
lsuperior;F6EE
ltshade;2591
luthai;0E26
lvocalicbengali;098C
lvocalicdeva;090C
lvocalicvowelsignbengali;09E2
lvocalicvowelsigndeva;0962
lxsquare;33D3
m;006D
mabengali;09AE
macron;00AF
macronbelowcmb;0331
macroncmb;0304
macronlowmod;02CD
macronmonospace;FFE3
macute;1E3F
madeva;092E
magujarati;0AAE
magurmukhi;0A2E
mahapakhhebrew;05A4
mahapakhlefthebrew;05A4
mahiragana;307E
maichattawalowleftthai;F895
maichattawalowrightthai;F894
maichattawathai;0E4B
maichattawaupperleftthai;F893
maieklowleftthai;F88C
maieklowrightthai;F88B
maiekthai;0E48
maiekupperleftthai;F88A
maihanakatleftthai;F884
maihanakatthai;0E31
maitaikhuleftthai;F889
maitaikhuthai;0E47
maitholowleftthai;F88F
maitholowrightthai;F88E
maithothai;0E49
maithoupperleftthai;F88D
maitrilowleftthai;F892
maitrilowrightthai;F891
maitrithai;0E4A
maitriupperleftthai;F890
maiyamokthai;0E46
makatakana;30DE
makatakanahalfwidth;FF8F
male;2642
mansyonsquare;3347
maqafhebrew;05BE
mars;2642
masoracirclehebrew;05AF
masquare;3383
mbopomofo;3107
mbsquare;33D4
mcircle;24DC
mcubedsquare;33A5
mdotaccent;1E41
mdotbelow;1E43
meemarabic;0645
meemfinalarabic;FEE2
meeminitialarabic;FEE3
meemmedialarabic;FEE4
meemmeeminitialarabic;FCD1
meemmeemisolatedarabic;FC48
meetorusquare;334D
mehiragana;3081
meizierasquare;337E
mekatakana;30E1
mekatakanahalfwidth;FF92
mem;05DE
memdagesh;FB3E
memdageshhebrew;FB3E
memhebrew;05DE
menarmenian;0574
merkhahebrew;05A5
merkhakefulahebrew;05A6
merkhakefulalefthebrew;05A6
merkhalefthebrew;05A5
mhook;0271
mhzsquare;3392
middledotkatakanahalfwidth;FF65
middot;00B7
mieumacirclekorean;3272
mieumaparenkorean;3212
mieumcirclekorean;3264
mieumkorean;3141
mieumpansioskorean;3170
mieumparenkorean;3204
mieumpieupkorean;316E
mieumsioskorean;316F
mihiragana;307F
mikatakana;30DF
mikatakanahalfwidth;FF90
minus;2212
minusbelowcmb;0320
minuscircle;2296
minusmod;02D7
minusplus;2213
minute;2032
miribaarusquare;334A
mirisquare;3349
mlonglegturned;0270
mlsquare;3396
mmcubedsquare;33A3
mmonospace;FF4D
mmsquaredsquare;339F
mohiragana;3082
mohmsquare;33C1
mokatakana;30E2
mokatakanahalfwidth;FF93
molsquare;33D6
momathai;0E21
moverssquare;33A7
moverssquaredsquare;33A8
mparen;24A8
mpasquare;33AB
mssquare;33B3
msuperior;F6EF
mturned;026F
mu;00B5
mu1;00B5
muasquare;3382
muchgreater;226B
muchless;226A
mufsquare;338C
mugreek;03BC
mugsquare;338D
muhiragana;3080
mukatakana;30E0
mukatakanahalfwidth;FF91
mulsquare;3395
multiply;00D7
mumsquare;339B
munahhebrew;05A3
munahlefthebrew;05A3
musicalnote;266A
musicalnotedbl;266B
musicflatsign;266D
musicsharpsign;266F
mussquare;33B2
muvsquare;33B6
muwsquare;33BC
mvmegasquare;33B9
mvsquare;33B7
mwmegasquare;33BF
mwsquare;33BD
n;006E
nabengali;09A8
nabla;2207
nacute;0144
nadeva;0928
nagujarati;0AA8
nagurmukhi;0A28
nahiragana;306A
nakatakana;30CA
nakatakanahalfwidth;FF85
napostrophe;0149
nasquare;3381
nbopomofo;310B
nbspace;00A0
ncaron;0148
ncedilla;0146
ncircle;24DD
ncircumflexbelow;1E4B
ncommaaccent;0146
ndotaccent;1E45
ndotbelow;1E47
nehiragana;306D
nekatakana;30CD
nekatakanahalfwidth;FF88
newsheqelsign;20AA
nfsquare;338B
ngabengali;0999
ngadeva;0919
ngagujarati;0A99
ngagurmukhi;0A19
ngonguthai;0E07
nhiragana;3093
nhookleft;0272
nhookretroflex;0273
nieunacirclekorean;326F
nieunaparenkorean;320F
nieuncieuckorean;3135
nieuncirclekorean;3261
nieunhieuhkorean;3136
nieunkorean;3134
nieunpansioskorean;3168
nieunparenkorean;3201
nieunsioskorean;3167
nieuntikeutkorean;3166
nihiragana;306B
nikatakana;30CB
nikatakanahalfwidth;FF86
nikhahitleftthai;F899
nikhahitthai;0E4D
nine;0039
ninearabic;0669
ninebengali;09EF
ninecircle;2468
ninecircleinversesansserif;2792
ninedeva;096F
ninegujarati;0AEF
ninegurmukhi;0A6F
ninehackarabic;0669
ninehangzhou;3029
nineideographicparen;3228
nineinferior;2089
ninemonospace;FF19
nineoldstyle;F739
nineparen;247C
nineperiod;2490
ninepersian;06F9
nineroman;2178
ninesuperior;2079
nineteencircle;2472
nineteenparen;2486
nineteenperiod;249A
ninethai;0E59
nj;01CC
njecyrillic;045A
nkatakana;30F3
nkatakanahalfwidth;FF9D
nlegrightlong;019E
nlinebelow;1E49
nmonospace;FF4E
nmsquare;339A
nnabengali;09A3
nnadeva;0923
nnagujarati;0AA3
nnagurmukhi;0A23
nnnadeva;0929
nohiragana;306E
nokatakana;30CE
nokatakanahalfwidth;FF89
nonbreakingspace;00A0
nonenthai;0E13
nonuthai;0E19
noonarabic;0646
noonfinalarabic;FEE6
noonghunnaarabic;06BA
noonghunnafinalarabic;FB9F
noonhehinitialarabic;FEE7 FEEC
nooninitialarabic;FEE7
noonjeeminitialarabic;FCD2
noonjeemisolatedarabic;FC4B
noonmedialarabic;FEE8
noonmeeminitialarabic;FCD5
noonmeemisolatedarabic;FC4E
noonnoonfinalarabic;FC8D
notcontains;220C
notelement;2209
notelementof;2209
notequal;2260
notgreater;226F
notgreaternorequal;2271
notgreaternorless;2279
notidentical;2262
notless;226E
notlessnorequal;2270
notparallel;2226
notprecedes;2280
notsubset;2284
notsucceeds;2281
notsuperset;2285
nowarmenian;0576
nparen;24A9
nssquare;33B1
nsuperior;207F
ntilde;00F1
nu;03BD
nuhiragana;306C
nukatakana;30CC
nukatakanahalfwidth;FF87
nuktabengali;09BC
nuktadeva;093C
nuktagujarati;0ABC
nuktagurmukhi;0A3C
numbersign;0023
numbersignmonospace;FF03
numbersignsmall;FE5F
numeralsigngreek;0374
numeralsignlowergreek;0375
numero;2116
nun;05E0
nundagesh;FB40
nundageshhebrew;FB40
nunhebrew;05E0
nvsquare;33B5
nwsquare;33BB
nyabengali;099E
nyadeva;091E
nyagujarati;0A9E
nyagurmukhi;0A1E
o;006F
oacute;00F3
oangthai;0E2D
obarred;0275
obarredcyrillic;04E9
obarreddieresiscyrillic;04EB
obengali;0993
obopomofo;311B
obreve;014F
ocandradeva;0911
ocandragujarati;0A91
ocandravowelsigndeva;0949
ocandravowelsigngujarati;0AC9
ocaron;01D2
ocircle;24DE
ocircumflex;00F4
ocircumflexacute;1ED1
ocircumflexdotbelow;1ED9
ocircumflexgrave;1ED3
ocircumflexhookabove;1ED5
ocircumflextilde;1ED7
ocyrillic;043E
odblacute;0151
odblgrave;020D
odeva;0913
odieresis;00F6
odieresiscyrillic;04E7
odotbelow;1ECD
oe;0153
oekorean;315A
ogonek;02DB
ogonekcmb;0328
ograve;00F2
ogujarati;0A93
oharmenian;0585
ohiragana;304A
ohookabove;1ECF
ohorn;01A1
ohornacute;1EDB
ohorndotbelow;1EE3
ohorngrave;1EDD
ohornhookabove;1EDF
ohorntilde;1EE1
ohungarumlaut;0151
oi;01A3
oinvertedbreve;020F
okatakana;30AA
okatakanahalfwidth;FF75
okorean;3157
olehebrew;05AB
omacron;014D
omacronacute;1E53
omacrongrave;1E51
omdeva;0950
omega;03C9
omega1;03D6
omegacyrillic;0461
omegalatinclosed;0277
omegaroundcyrillic;047B
omegatitlocyrillic;047D
omegatonos;03CE
omgujarati;0AD0
omicron;03BF
omicrontonos;03CC
omonospace;FF4F
one;0031
onearabic;0661
onebengali;09E7
onecircle;2460
onecircleinversesansserif;278A
onedeva;0967
onedotenleader;2024
oneeighth;215B
onefitted;F6DC
onegujarati;0AE7
onegurmukhi;0A67
onehackarabic;0661
onehalf;00BD
onehangzhou;3021
oneideographicparen;3220
oneinferior;2081
onemonospace;FF11
onenumeratorbengali;09F4
oneoldstyle;F731
oneparen;2474
oneperiod;2488
onepersian;06F1
onequarter;00BC
oneroman;2170
onesuperior;00B9
onethai;0E51
onethird;2153
oogonek;01EB
oogonekmacron;01ED
oogurmukhi;0A13
oomatragurmukhi;0A4B
oopen;0254
oparen;24AA
openbullet;25E6
option;2325
ordfeminine;00AA
ordmasculine;00BA
orthogonal;221F
oshortdeva;0912
oshortvowelsigndeva;094A
oslash;00F8
oslashacute;01FF
osmallhiragana;3049
osmallkatakana;30A9
osmallkatakanahalfwidth;FF6B
ostrokeacute;01FF
osuperior;F6F0
otcyrillic;047F
otilde;00F5
otildeacute;1E4D
otildedieresis;1E4F
oubopomofo;3121
overline;203E
overlinecenterline;FE4A
overlinecmb;0305
overlinedashed;FE49
overlinedblwavy;FE4C
overlinewavy;FE4B
overscore;00AF
ovowelsignbengali;09CB
ovowelsigndeva;094B
ovowelsigngujarati;0ACB
p;0070
paampssquare;3380
paasentosquare;332B
pabengali;09AA
pacute;1E55
padeva;092A
pagedown;21DF
pageup;21DE
pagujarati;0AAA
pagurmukhi;0A2A
pahiragana;3071
paiyannoithai;0E2F
pakatakana;30D1
palatalizationcyrilliccmb;0484
palochkacyrillic;04C0
pansioskorean;317F
paragraph;00B6
parallel;2225
parenleft;0028
parenleftaltonearabic;FD3E
parenleftbt;F8ED
parenleftex;F8EC
parenleftinferior;208D
parenleftmonospace;FF08
parenleftsmall;FE59
parenleftsuperior;207D
parenlefttp;F8EB
parenleftvertical;FE35
parenright;0029
parenrightaltonearabic;FD3F
parenrightbt;F8F8
parenrightex;F8F7
parenrightinferior;208E
parenrightmonospace;FF09
parenrightsmall;FE5A
parenrightsuperior;207E
parenrighttp;F8F6
parenrightvertical;FE36
partialdiff;2202
paseqhebrew;05C0
pashtahebrew;0599
pasquare;33A9
patah;05B7
patah11;05B7
patah1d;05B7
patah2a;05B7
patahhebrew;05B7
patahnarrowhebrew;05B7
patahquarterhebrew;05B7
patahwidehebrew;05B7
pazerhebrew;05A1
pbopomofo;3106
pcircle;24DF
pdotaccent;1E57
pe;05E4
pecyrillic;043F
pedagesh;FB44
pedageshhebrew;FB44
peezisquare;333B
pefinaldageshhebrew;FB43
peharabic;067E
peharmenian;057A
pehebrew;05E4
pehfinalarabic;FB57
pehinitialarabic;FB58
pehiragana;307A
pehmedialarabic;FB59
pekatakana;30DA
pemiddlehookcyrillic;04A7
perafehebrew;FB4E
percent;0025
percentarabic;066A
percentmonospace;FF05
percentsmall;FE6A
period;002E
periodarmenian;0589
periodcentered;00B7
periodhalfwidth;FF61
periodinferior;F6E7
periodmonospace;FF0E
periodsmall;FE52
periodsuperior;F6E8
perispomenigreekcmb;0342
perpendicular;22A5
perthousand;2030
peseta;20A7
pfsquare;338A
phabengali;09AB
phadeva;092B
phagujarati;0AAB
phagurmukhi;0A2B
phi;03C6
phi1;03D5
phieuphacirclekorean;327A
phieuphaparenkorean;321A
phieuphcirclekorean;326C
phieuphkorean;314D
phieuphparenkorean;320C
philatin;0278
phinthuthai;0E3A
phisymbolgreek;03D5
phook;01A5
phophanthai;0E1E
phophungthai;0E1C
phosamphaothai;0E20
pi;03C0
pieupacirclekorean;3273
pieupaparenkorean;3213
pieupcieuckorean;3176
pieupcirclekorean;3265
pieupkiyeokkorean;3172
pieupkorean;3142
pieupparenkorean;3205
pieupsioskiyeokkorean;3174
pieupsioskorean;3144
pieupsiostikeutkorean;3175
pieupthieuthkorean;3177
pieuptikeutkorean;3173
pihiragana;3074
pikatakana;30D4
pisymbolgreek;03D6
piwrarmenian;0583
plus;002B
plusbelowcmb;031F
pluscircle;2295
plusminus;00B1
plusmod;02D6
plusmonospace;FF0B
plussmall;FE62
plussuperior;207A
pmonospace;FF50
pmsquare;33D8
pohiragana;307D
pointingindexdownwhite;261F
pointingindexleftwhite;261C
pointingindexrightwhite;261E
pointingindexupwhite;261D
pokatakana;30DD
poplathai;0E1B
postalmark;3012
postalmarkface;3020
pparen;24AB
precedes;227A
prescription;211E
primemod;02B9
primereversed;2035
product;220F
projective;2305
prolongedkana;30FC
propellor;2318
propersubset;2282
propersuperset;2283
proportion;2237
proportional;221D
psi;03C8
psicyrillic;0471
psilipneumatacyrilliccmb;0486
pssquare;33B0
puhiragana;3077
pukatakana;30D7
pvsquare;33B4
pwsquare;33BA
q;0071
qadeva;0958
qadmahebrew;05A8
qafarabic;0642
qaffinalarabic;FED6
qafinitialarabic;FED7
qafmedialarabic;FED8
qamats;05B8
qamats10;05B8
qamats1a;05B8
qamats1c;05B8
qamats27;05B8
qamats29;05B8
qamats33;05B8
qamatsde;05B8
qamatshebrew;05B8
qamatsnarrowhebrew;05B8
qamatsqatanhebrew;05B8
qamatsqatannarrowhebrew;05B8
qamatsqatanquarterhebrew;05B8
qamatsqatanwidehebrew;05B8
qamatsquarterhebrew;05B8
qamatswidehebrew;05B8
qarneyparahebrew;059F
qbopomofo;3111
qcircle;24E0
qhook;02A0
qmonospace;FF51
qof;05E7
qofdagesh;FB47
qofdageshhebrew;FB47
qofhatafpatah;05E7 05B2
qofhatafpatahhebrew;05E7 05B2
qofhatafsegol;05E7 05B1
qofhatafsegolhebrew;05E7 05B1
qofhebrew;05E7
qofhiriq;05E7 05B4
qofhiriqhebrew;05E7 05B4
qofholam;05E7 05B9
qofholamhebrew;05E7 05B9
qofpatah;05E7 05B7
qofpatahhebrew;05E7 05B7
qofqamats;05E7 05B8
qofqamatshebrew;05E7 05B8
qofqubuts;05E7 05BB
qofqubutshebrew;05E7 05BB
qofsegol;05E7 05B6
qofsegolhebrew;05E7 05B6
qofsheva;05E7 05B0
qofshevahebrew;05E7 05B0
qoftsere;05E7 05B5
qoftserehebrew;05E7 05B5
qparen;24AC
quarternote;2669
qubuts;05BB
qubuts18;05BB
qubuts25;05BB
qubuts31;05BB
qubutshebrew;05BB
qubutsnarrowhebrew;05BB
qubutsquarterhebrew;05BB
qubutswidehebrew;05BB
question;003F
questionarabic;061F
questionarmenian;055E
questiondown;00BF
questiondownsmall;F7BF
questiongreek;037E
questionmonospace;FF1F
questionsmall;F73F
quotedbl;0022
quotedblbase;201E
quotedblleft;201C
quotedblmonospace;FF02
quotedblprime;301E
quotedblprimereversed;301D
quotedblright;201D
quoteleft;2018
quoteleftreversed;201B
quotereversed;201B
quoteright;2019
quoterightn;0149
quotesinglbase;201A
quotesingle;0027
quotesinglemonospace;FF07
r;0072
raarmenian;057C
rabengali;09B0
racute;0155
radeva;0930
radical;221A
radicalex;F8E5
radoverssquare;33AE
radoverssquaredsquare;33AF
radsquare;33AD
rafe;05BF
rafehebrew;05BF
ragujarati;0AB0
ragurmukhi;0A30
rahiragana;3089
rakatakana;30E9
rakatakanahalfwidth;FF97
ralowerdiagonalbengali;09F1
ramiddlediagonalbengali;09F0
ramshorn;0264
ratio;2236
rbopomofo;3116
rcaron;0159
rcedilla;0157
rcircle;24E1
rcommaaccent;0157
rdblgrave;0211
rdotaccent;1E59
rdotbelow;1E5B
rdotbelowmacron;1E5D
referencemark;203B
reflexsubset;2286
reflexsuperset;2287
registered;00AE
registersans;F8E8
registerserif;F6DA
reharabic;0631
reharmenian;0580
rehfinalarabic;FEAE
rehiragana;308C
rehyehaleflamarabic;0631 FEF3 FE8E 0644
rekatakana;30EC
rekatakanahalfwidth;FF9A
resh;05E8
reshdageshhebrew;FB48
reshhatafpatah;05E8 05B2
reshhatafpatahhebrew;05E8 05B2
reshhatafsegol;05E8 05B1
reshhatafsegolhebrew;05E8 05B1
reshhebrew;05E8
reshhiriq;05E8 05B4
reshhiriqhebrew;05E8 05B4
reshholam;05E8 05B9
reshholamhebrew;05E8 05B9
reshpatah;05E8 05B7
reshpatahhebrew;05E8 05B7
reshqamats;05E8 05B8
reshqamatshebrew;05E8 05B8
reshqubuts;05E8 05BB
reshqubutshebrew;05E8 05BB
reshsegol;05E8 05B6
reshsegolhebrew;05E8 05B6
reshsheva;05E8 05B0
reshshevahebrew;05E8 05B0
reshtsere;05E8 05B5
reshtserehebrew;05E8 05B5
reversedtilde;223D
reviahebrew;0597
reviamugrashhebrew;0597
revlogicalnot;2310
rfishhook;027E
rfishhookreversed;027F
rhabengali;09DD
rhadeva;095D
rho;03C1
rhook;027D
rhookturned;027B
rhookturnedsuperior;02B5
rhosymbolgreek;03F1
rhotichookmod;02DE
rieulacirclekorean;3271
rieulaparenkorean;3211
rieulcirclekorean;3263
rieulhieuhkorean;3140
rieulkiyeokkorean;313A
rieulkiyeoksioskorean;3169
rieulkorean;3139
rieulmieumkorean;313B
rieulpansioskorean;316C
rieulparenkorean;3203
rieulphieuphkorean;313F
rieulpieupkorean;313C
rieulpieupsioskorean;316B
rieulsioskorean;313D
rieulthieuthkorean;313E
rieultikeutkorean;316A
rieulyeorinhieuhkorean;316D
rightangle;221F
righttackbelowcmb;0319
righttriangle;22BF
rihiragana;308A
rikatakana;30EA
rikatakanahalfwidth;FF98
ring;02DA
ringbelowcmb;0325
ringcmb;030A
ringhalfleft;02BF
ringhalfleftarmenian;0559
ringhalfleftbelowcmb;031C
ringhalfleftcentered;02D3
ringhalfright;02BE
ringhalfrightbelowcmb;0339
ringhalfrightcentered;02D2
rinvertedbreve;0213
rittorusquare;3351
rlinebelow;1E5F
rlongleg;027C
rlonglegturned;027A
rmonospace;FF52
rohiragana;308D
rokatakana;30ED
rokatakanahalfwidth;FF9B
roruathai;0E23
rparen;24AD
rrabengali;09DC
rradeva;0931
rragurmukhi;0A5C
rreharabic;0691
rrehfinalarabic;FB8D
rrvocalicbengali;09E0
rrvocalicdeva;0960
rrvocalicgujarati;0AE0
rrvocalicvowelsignbengali;09C4
rrvocalicvowelsigndeva;0944
rrvocalicvowelsigngujarati;0AC4
rsuperior;F6F1
rtblock;2590
rturned;0279
rturnedsuperior;02B4
ruhiragana;308B
rukatakana;30EB
rukatakanahalfwidth;FF99
rupeemarkbengali;09F2
rupeesignbengali;09F3
rupiah;F6DD
ruthai;0E24
rvocalicbengali;098B
rvocalicdeva;090B
rvocalicgujarati;0A8B
rvocalicvowelsignbengali;09C3
rvocalicvowelsigndeva;0943
rvocalicvowelsigngujarati;0AC3
s;0073
sabengali;09B8
sacute;015B
sacutedotaccent;1E65
sadarabic;0635
sadeva;0938
sadfinalarabic;FEBA
sadinitialarabic;FEBB
sadmedialarabic;FEBC
sagujarati;0AB8
sagurmukhi;0A38
sahiragana;3055
sakatakana;30B5
sakatakanahalfwidth;FF7B
sallallahoualayhewasallamarabic;FDFA
samekh;05E1
samekhdagesh;FB41
samekhdageshhebrew;FB41
samekhhebrew;05E1
saraaathai;0E32
saraaethai;0E41
saraaimaimalaithai;0E44
saraaimaimuanthai;0E43
saraamthai;0E33
saraathai;0E30
saraethai;0E40
saraiileftthai;F886
saraiithai;0E35
saraileftthai;F885
saraithai;0E34
saraothai;0E42
saraueeleftthai;F888
saraueethai;0E37
saraueleftthai;F887
sarauethai;0E36
sarauthai;0E38
sarauuthai;0E39
sbopomofo;3119
scaron;0161
scarondotaccent;1E67
scedilla;015F
schwa;0259
schwacyrillic;04D9
schwadieresiscyrillic;04DB
schwahook;025A
scircle;24E2
scircumflex;015D
scommaaccent;0219
sdotaccent;1E61
sdotbelow;1E63
sdotbelowdotaccent;1E69
seagullbelowcmb;033C
second;2033
secondtonechinese;02CA
section;00A7
seenarabic;0633
seenfinalarabic;FEB2
seeninitialarabic;FEB3
seenmedialarabic;FEB4
segol;05B6
segol13;05B6
segol1f;05B6
segol2c;05B6
segolhebrew;05B6
segolnarrowhebrew;05B6
segolquarterhebrew;05B6
segoltahebrew;0592
segolwidehebrew;05B6
seharmenian;057D
sehiragana;305B
sekatakana;30BB
sekatakanahalfwidth;FF7E
semicolon;003B
semicolonarabic;061B
semicolonmonospace;FF1B
semicolonsmall;FE54
semivoicedmarkkana;309C
semivoicedmarkkanahalfwidth;FF9F
sentisquare;3322
sentosquare;3323
seven;0037
sevenarabic;0667
sevenbengali;09ED
sevencircle;2466
sevencircleinversesansserif;2790
sevendeva;096D
seveneighths;215E
sevengujarati;0AED
sevengurmukhi;0A6D
sevenhackarabic;0667
sevenhangzhou;3027
sevenideographicparen;3226
seveninferior;2087
sevenmonospace;FF17
sevenoldstyle;F737
sevenparen;247A
sevenperiod;248E
sevenpersian;06F7
sevenroman;2176
sevensuperior;2077
seventeencircle;2470
seventeenparen;2484
seventeenperiod;2498
seventhai;0E57
sfthyphen;00AD
shaarmenian;0577
shabengali;09B6
shacyrillic;0448
shaddaarabic;0651
shaddadammaarabic;FC61
shaddadammatanarabic;FC5E
shaddafathaarabic;FC60
shaddafathatanarabic;0651 064B
shaddakasraarabic;FC62
shaddakasratanarabic;FC5F
shade;2592
shadedark;2593
shadelight;2591
shademedium;2592
shadeva;0936
shagujarati;0AB6
shagurmukhi;0A36
shalshelethebrew;0593
shbopomofo;3115
shchacyrillic;0449
sheenarabic;0634
sheenfinalarabic;FEB6
sheeninitialarabic;FEB7
sheenmedialarabic;FEB8
sheicoptic;03E3
sheqel;20AA
sheqelhebrew;20AA
sheva;05B0
sheva115;05B0
sheva15;05B0
sheva22;05B0
sheva2e;05B0
shevahebrew;05B0
shevanarrowhebrew;05B0
shevaquarterhebrew;05B0
shevawidehebrew;05B0
shhacyrillic;04BB
shimacoptic;03ED
shin;05E9
shindagesh;FB49
shindageshhebrew;FB49
shindageshshindot;FB2C
shindageshshindothebrew;FB2C
shindageshsindot;FB2D
shindageshsindothebrew;FB2D
shindothebrew;05C1
shinhebrew;05E9
shinshindot;FB2A
shinshindothebrew;FB2A
shinsindot;FB2B
shinsindothebrew;FB2B
shook;0282
sigma;03C3
sigma1;03C2
sigmafinal;03C2
sigmalunatesymbolgreek;03F2
sihiragana;3057
sikatakana;30B7
sikatakanahalfwidth;FF7C
siluqhebrew;05BD
siluqlefthebrew;05BD
similar;223C
sindothebrew;05C2
siosacirclekorean;3274
siosaparenkorean;3214
sioscieuckorean;317E
sioscirclekorean;3266
sioskiyeokkorean;317A
sioskorean;3145
siosnieunkorean;317B
siosparenkorean;3206
siospieupkorean;317D
siostikeutkorean;317C
six;0036
sixarabic;0666
sixbengali;09EC
sixcircle;2465
sixcircleinversesansserif;278F
sixdeva;096C
sixgujarati;0AEC
sixgurmukhi;0A6C
sixhackarabic;0666
sixhangzhou;3026
sixideographicparen;3225
sixinferior;2086
sixmonospace;FF16
sixoldstyle;F736
sixparen;2479
sixperiod;248D
sixpersian;06F6
sixroman;2175
sixsuperior;2076
sixteencircle;246F
sixteencurrencydenominatorbengali;09F9
sixteenparen;2483
sixteenperiod;2497
sixthai;0E56
slash;002F
slashmonospace;FF0F
slong;017F
slongdotaccent;1E9B
smileface;263A
smonospace;FF53
sofpasuqhebrew;05C3
softhyphen;00AD
softsigncyrillic;044C
sohiragana;305D
sokatakana;30BD
sokatakanahalfwidth;FF7F
soliduslongoverlaycmb;0338
solidusshortoverlaycmb;0337
sorusithai;0E29
sosalathai;0E28
sosothai;0E0B
sosuathai;0E2A
space;0020
spacehackarabic;0020
spade;2660
spadesuitblack;2660
spadesuitwhite;2664
sparen;24AE
squarebelowcmb;033B
squarecc;33C4
squarecm;339D
squarediagonalcrosshatchfill;25A9
squarehorizontalfill;25A4
squarekg;338F
squarekm;339E
squarekmcapital;33CE
squareln;33D1
squarelog;33D2
squaremg;338E
squaremil;33D5
squaremm;339C
squaremsquared;33A1
squareorthogonalcrosshatchfill;25A6
squareupperlefttolowerrightfill;25A7
squareupperrighttolowerleftfill;25A8
squareverticalfill;25A5
squarewhitewithsmallblack;25A3
srsquare;33DB
ssabengali;09B7
ssadeva;0937
ssagujarati;0AB7
ssangcieuckorean;3149
ssanghieuhkorean;3185
ssangieungkorean;3180
ssangkiyeokkorean;3132
ssangnieunkorean;3165
ssangpieupkorean;3143
ssangsioskorean;3146
ssangtikeutkorean;3138
ssuperior;F6F2
sterling;00A3
sterlingmonospace;FFE1
strokelongoverlaycmb;0336
strokeshortoverlaycmb;0335
subset;2282
subsetnotequal;228A
subsetorequal;2286
succeeds;227B
suchthat;220B
suhiragana;3059
sukatakana;30B9
sukatakanahalfwidth;FF7D
sukunarabic;0652
summation;2211
sun;263C
superset;2283
supersetnotequal;228B
supersetorequal;2287
svsquare;33DC
syouwaerasquare;337C
t;0074
tabengali;09A4
tackdown;22A4
tackleft;22A3
tadeva;0924
tagujarati;0AA4
tagurmukhi;0A24
taharabic;0637
tahfinalarabic;FEC2
tahinitialarabic;FEC3
tahiragana;305F
tahmedialarabic;FEC4
taisyouerasquare;337D
takatakana;30BF
takatakanahalfwidth;FF80
tatweelarabic;0640
tau;03C4
tav;05EA
tavdages;FB4A
tavdagesh;FB4A
tavdageshhebrew;FB4A
tavhebrew;05EA
tbar;0167
tbopomofo;310A
tcaron;0165
tccurl;02A8
tcedilla;0163
tcheharabic;0686
tchehfinalarabic;FB7B
tchehinitialarabic;FB7C
tchehmedialarabic;FB7D
tchehmeeminitialarabic;FB7C FEE4
tcircle;24E3
tcircumflexbelow;1E71
tcommaaccent;0163
tdieresis;1E97
tdotaccent;1E6B
tdotbelow;1E6D
tecyrillic;0442
tedescendercyrillic;04AD
teharabic;062A
tehfinalarabic;FE96
tehhahinitialarabic;FCA2
tehhahisolatedarabic;FC0C
tehinitialarabic;FE97
tehiragana;3066
tehjeeminitialarabic;FCA1
tehjeemisolatedarabic;FC0B
tehmarbutaarabic;0629
tehmarbutafinalarabic;FE94
tehmedialarabic;FE98
tehmeeminitialarabic;FCA4
tehmeemisolatedarabic;FC0E
tehnoonfinalarabic;FC73
tekatakana;30C6
tekatakanahalfwidth;FF83
telephone;2121
telephoneblack;260E
telishagedolahebrew;05A0
telishaqetanahebrew;05A9
tencircle;2469
tenideographicparen;3229
tenparen;247D
tenperiod;2491
tenroman;2179
tesh;02A7
tet;05D8
tetdagesh;FB38
tetdageshhebrew;FB38
tethebrew;05D8
tetsecyrillic;04B5
tevirhebrew;059B
tevirlefthebrew;059B
thabengali;09A5
thadeva;0925
thagujarati;0AA5
thagurmukhi;0A25
thalarabic;0630
thalfinalarabic;FEAC
thanthakhatlowleftthai;F898
thanthakhatlowrightthai;F897
thanthakhatthai;0E4C
thanthakhatupperleftthai;F896
theharabic;062B
thehfinalarabic;FE9A
thehinitialarabic;FE9B
thehmedialarabic;FE9C
thereexists;2203
therefore;2234
theta;03B8
theta1;03D1
thetasymbolgreek;03D1
thieuthacirclekorean;3279
thieuthaparenkorean;3219
thieuthcirclekorean;326B
thieuthkorean;314C
thieuthparenkorean;320B
thirteencircle;246C
thirteenparen;2480
thirteenperiod;2494
thonangmonthothai;0E11
thook;01AD
thophuthaothai;0E12
thorn;00FE
thothahanthai;0E17
thothanthai;0E10
thothongthai;0E18
thothungthai;0E16
thousandcyrillic;0482
thousandsseparatorarabic;066C
thousandsseparatorpersian;066C
three;0033
threearabic;0663
threebengali;09E9
threecircle;2462
threecircleinversesansserif;278C
threedeva;0969
threeeighths;215C
threegujarati;0AE9
threegurmukhi;0A69
threehackarabic;0663
threehangzhou;3023
threeideographicparen;3222
threeinferior;2083
threemonospace;FF13
threenumeratorbengali;09F6
threeoldstyle;F733
threeparen;2476
threeperiod;248A
threepersian;06F3
threequarters;00BE
threequartersemdash;F6DE
threeroman;2172
threesuperior;00B3
threethai;0E53
thzsquare;3394
tihiragana;3061
tikatakana;30C1
tikatakanahalfwidth;FF81
tikeutacirclekorean;3270
tikeutaparenkorean;3210
tikeutcirclekorean;3262
tikeutkorean;3137
tikeutparenkorean;3202
tilde;02DC
tildebelowcmb;0330
tildecmb;0303
tildecomb;0303
tildedoublecmb;0360
tildeoperator;223C
tildeoverlaycmb;0334
tildeverticalcmb;033E
timescircle;2297
tipehahebrew;0596
tipehalefthebrew;0596
tippigurmukhi;0A70
titlocyrilliccmb;0483
tiwnarmenian;057F
tlinebelow;1E6F
tmonospace;FF54
toarmenian;0569
tohiragana;3068
tokatakana;30C8
tokatakanahalfwidth;FF84
tonebarextrahighmod;02E5
tonebarextralowmod;02E9
tonebarhighmod;02E6
tonebarlowmod;02E8
tonebarmidmod;02E7
tonefive;01BD
tonesix;0185
tonetwo;01A8
tonos;0384
tonsquare;3327
topatakthai;0E0F
tortoiseshellbracketleft;3014
tortoiseshellbracketleftsmall;FE5D
tortoiseshellbracketleftvertical;FE39
tortoiseshellbracketright;3015
tortoiseshellbracketrightsmall;FE5E
tortoiseshellbracketrightvertical;FE3A
totaothai;0E15
tpalatalhook;01AB
tparen;24AF
trademark;2122
trademarksans;F8EA
trademarkserif;F6DB
tretroflexhook;0288
triagdn;25BC
triaglf;25C4
triagrt;25BA
triagup;25B2
ts;02A6
tsadi;05E6
tsadidagesh;FB46
tsadidageshhebrew;FB46
tsadihebrew;05E6
tsecyrillic;0446
tsere;05B5
tsere12;05B5
tsere1e;05B5
tsere2b;05B5
tserehebrew;05B5
tserenarrowhebrew;05B5
tserequarterhebrew;05B5
tserewidehebrew;05B5
tshecyrillic;045B
tsuperior;F6F3
ttabengali;099F
ttadeva;091F
ttagujarati;0A9F
ttagurmukhi;0A1F
tteharabic;0679
ttehfinalarabic;FB67
ttehinitialarabic;FB68
ttehmedialarabic;FB69
tthabengali;09A0
tthadeva;0920
tthagujarati;0AA0
tthagurmukhi;0A20
tturned;0287
tuhiragana;3064
tukatakana;30C4
tukatakanahalfwidth;FF82
tusmallhiragana;3063
tusmallkatakana;30C3
tusmallkatakanahalfwidth;FF6F
twelvecircle;246B
twelveparen;247F
twelveperiod;2493
twelveroman;217B
twentycircle;2473
twentyhangzhou;5344
twentyparen;2487
twentyperiod;249B
two;0032
twoarabic;0662
twobengali;09E8
twocircle;2461
twocircleinversesansserif;278B
twodeva;0968
twodotenleader;2025
twodotleader;2025
twodotleadervertical;FE30
twogujarati;0AE8
twogurmukhi;0A68
twohackarabic;0662
twohangzhou;3022
twoideographicparen;3221
twoinferior;2082
twomonospace;FF12
twonumeratorbengali;09F5
twooldstyle;F732
twoparen;2475
twoperiod;2489
twopersian;06F2
tworoman;2171
twostroke;01BB
twosuperior;00B2
twothai;0E52
twothirds;2154
u;0075
uacute;00FA
ubar;0289
ubengali;0989
ubopomofo;3128
ubreve;016D
ucaron;01D4
ucircle;24E4
ucircumflex;00FB
ucircumflexbelow;1E77
ucyrillic;0443
udattadeva;0951
udblacute;0171
udblgrave;0215
udeva;0909
udieresis;00FC
udieresisacute;01D8
udieresisbelow;1E73
udieresiscaron;01DA
udieresiscyrillic;04F1
udieresisgrave;01DC
udieresismacron;01D6
udotbelow;1EE5
ugrave;00F9
ugujarati;0A89
ugurmukhi;0A09
uhiragana;3046
uhookabove;1EE7
uhorn;01B0
uhornacute;1EE9
uhorndotbelow;1EF1
uhorngrave;1EEB
uhornhookabove;1EED
uhorntilde;1EEF
uhungarumlaut;0171
uhungarumlautcyrillic;04F3
uinvertedbreve;0217
ukatakana;30A6
ukatakanahalfwidth;FF73
ukcyrillic;0479
ukorean;315C
umacron;016B
umacroncyrillic;04EF
umacrondieresis;1E7B
umatragurmukhi;0A41
umonospace;FF55
underscore;005F
underscoredbl;2017
underscoremonospace;FF3F
underscorevertical;FE33
underscorewavy;FE4F
union;222A
universal;2200
uogonek;0173
uparen;24B0
upblock;2580
upperdothebrew;05C4
upsilon;03C5
upsilondieresis;03CB
upsilondieresistonos;03B0
upsilonlatin;028A
upsilontonos;03CD
uptackbelowcmb;031D
uptackmod;02D4
uragurmukhi;0A73
uring;016F
ushortcyrillic;045E
usmallhiragana;3045
usmallkatakana;30A5
usmallkatakanahalfwidth;FF69
ustraightcyrillic;04AF
ustraightstrokecyrillic;04B1
utilde;0169
utildeacute;1E79
utildebelow;1E75
uubengali;098A
uudeva;090A
uugujarati;0A8A
uugurmukhi;0A0A
uumatragurmukhi;0A42
uuvowelsignbengali;09C2
uuvowelsigndeva;0942
uuvowelsigngujarati;0AC2
uvowelsignbengali;09C1
uvowelsigndeva;0941
uvowelsigngujarati;0AC1
v;0076
vadeva;0935
vagujarati;0AB5
vagurmukhi;0A35
vakatakana;30F7
vav;05D5
vavdagesh;FB35
vavdagesh65;FB35
vavdageshhebrew;FB35
vavhebrew;05D5
vavholam;FB4B
vavholamhebrew;FB4B
vavvavhebrew;05F0
vavyodhebrew;05F1
vcircle;24E5
vdotbelow;1E7F
vecyrillic;0432
veharabic;06A4
vehfinalarabic;FB6B
vehinitialarabic;FB6C
vehmedialarabic;FB6D
vekatakana;30F9
venus;2640
verticalbar;007C
verticallineabovecmb;030D
verticallinebelowcmb;0329
verticallinelowmod;02CC
verticallinemod;02C8
vewarmenian;057E
vhook;028B
vikatakana;30F8
viramabengali;09CD
viramadeva;094D
viramagujarati;0ACD
visargabengali;0983
visargadeva;0903
visargagujarati;0A83
vmonospace;FF56
voarmenian;0578
voicediterationhiragana;309E
voicediterationkatakana;30FE
voicedmarkkana;309B
voicedmarkkanahalfwidth;FF9E
vokatakana;30FA
vparen;24B1
vtilde;1E7D
vturned;028C
vuhiragana;3094
vukatakana;30F4
w;0077
wacute;1E83
waekorean;3159
wahiragana;308F
wakatakana;30EF
wakatakanahalfwidth;FF9C
wakorean;3158
wasmallhiragana;308E
wasmallkatakana;30EE
wattosquare;3357
wavedash;301C
wavyunderscorevertical;FE34
wawarabic;0648
wawfinalarabic;FEEE
wawhamzaabovearabic;0624
wawhamzaabovefinalarabic;FE86
wbsquare;33DD
wcircle;24E6
wcircumflex;0175
wdieresis;1E85
wdotaccent;1E87
wdotbelow;1E89
wehiragana;3091
weierstrass;2118
wekatakana;30F1
wekorean;315E
weokorean;315D
wgrave;1E81
whitebullet;25E6
whitecircle;25CB
whitecircleinverse;25D9
whitecornerbracketleft;300E
whitecornerbracketleftvertical;FE43
whitecornerbracketright;300F
whitecornerbracketrightvertical;FE44
whitediamond;25C7
whitediamondcontainingblacksmalldiamond;25C8
whitedownpointingsmalltriangle;25BF
whitedownpointingtriangle;25BD
whiteleftpointingsmalltriangle;25C3
whiteleftpointingtriangle;25C1
whitelenticularbracketleft;3016
whitelenticularbracketright;3017
whiterightpointingsmalltriangle;25B9
whiterightpointingtriangle;25B7
whitesmallsquare;25AB
whitesmilingface;263A
whitesquare;25A1
whitestar;2606
whitetelephone;260F
whitetortoiseshellbracketleft;3018
whitetortoiseshellbracketright;3019
whiteuppointingsmalltriangle;25B5
whiteuppointingtriangle;25B3
wihiragana;3090
wikatakana;30F0
wikorean;315F
wmonospace;FF57
wohiragana;3092
wokatakana;30F2
wokatakanahalfwidth;FF66
won;20A9
wonmonospace;FFE6
wowaenthai;0E27
wparen;24B2
wring;1E98
wsuperior;02B7
wturned;028D
wynn;01BF
x;0078
xabovecmb;033D
xbopomofo;3112
xcircle;24E7
xdieresis;1E8D
xdotaccent;1E8B
xeharmenian;056D
xi;03BE
xmonospace;FF58
xparen;24B3
xsuperior;02E3
y;0079
yaadosquare;334E
yabengali;09AF
yacute;00FD
yadeva;092F
yaekorean;3152
yagujarati;0AAF
yagurmukhi;0A2F
yahiragana;3084
yakatakana;30E4
yakatakanahalfwidth;FF94
yakorean;3151
yamakkanthai;0E4E
yasmallhiragana;3083
yasmallkatakana;30E3
yasmallkatakanahalfwidth;FF6C
yatcyrillic;0463
ycircle;24E8
ycircumflex;0177
ydieresis;00FF
ydotaccent;1E8F
ydotbelow;1EF5
yeharabic;064A
yehbarreearabic;06D2
yehbarreefinalarabic;FBAF
yehfinalarabic;FEF2
yehhamzaabovearabic;0626
yehhamzaabovefinalarabic;FE8A
yehhamzaaboveinitialarabic;FE8B
yehhamzaabovemedialarabic;FE8C
yehinitialarabic;FEF3
yehmedialarabic;FEF4
yehmeeminitialarabic;FCDD
yehmeemisolatedarabic;FC58
yehnoonfinalarabic;FC94
yehthreedotsbelowarabic;06D1
yekorean;3156
yen;00A5
yenmonospace;FFE5
yeokorean;3155
yeorinhieuhkorean;3186
yerahbenyomohebrew;05AA
yerahbenyomolefthebrew;05AA
yericyrillic;044B
yerudieresiscyrillic;04F9
yesieungkorean;3181
yesieungpansioskorean;3183
yesieungsioskorean;3182
yetivhebrew;059A
ygrave;1EF3
yhook;01B4
yhookabove;1EF7
yiarmenian;0575
yicyrillic;0457
yikorean;3162
yinyang;262F
yiwnarmenian;0582
ymonospace;FF59
yod;05D9
yoddagesh;FB39
yoddageshhebrew;FB39
yodhebrew;05D9
yodyodhebrew;05F2
yodyodpatahhebrew;FB1F
yohiragana;3088
yoikorean;3189
yokatakana;30E8
yokatakanahalfwidth;FF96
yokorean;315B
yosmallhiragana;3087
yosmallkatakana;30E7
yosmallkatakanahalfwidth;FF6E
yotgreek;03F3
yoyaekorean;3188
yoyakorean;3187
yoyakthai;0E22
yoyingthai;0E0D
yparen;24B4
ypogegrammeni;037A
ypogegrammenigreekcmb;0345
yr;01A6
yring;1E99
ysuperior;02B8
ytilde;1EF9
yturned;028E
yuhiragana;3086
yuikorean;318C
yukatakana;30E6
yukatakanahalfwidth;FF95
yukorean;3160
yusbigcyrillic;046B
yusbigiotifiedcyrillic;046D
yuslittlecyrillic;0467
yuslittleiotifiedcyrillic;0469
yusmallhiragana;3085
yusmallkatakana;30E5
yusmallkatakanahalfwidth;FF6D
yuyekorean;318B
yuyeokorean;318A
yyabengali;09DF
yyadeva;095F
z;007A
zaarmenian;0566
zacute;017A
zadeva;095B
zagurmukhi;0A5B
zaharabic;0638
zahfinalarabic;FEC6
zahinitialarabic;FEC7
zahiragana;3056
zahmedialarabic;FEC8
zainarabic;0632
zainfinalarabic;FEB0
zakatakana;30B6
zaqefgadolhebrew;0595
zaqefqatanhebrew;0594
zarqahebrew;0598
zayin;05D6
zayindagesh;FB36
zayindageshhebrew;FB36
zayinhebrew;05D6
zbopomofo;3117
zcaron;017E
zcircle;24E9
zcircumflex;1E91
zcurl;0291
zdot;017C
zdotaccent;017C
zdotbelow;1E93
zecyrillic;0437
zedescendercyrillic;0499
zedieresiscyrillic;04DF
zehiragana;305C
zekatakana;30BC
zero;0030
zeroarabic;0660
zerobengali;09E6
zerodeva;0966
zerogujarati;0AE6
zerogurmukhi;0A66
zerohackarabic;0660
zeroinferior;2080
zeromonospace;FF10
zerooldstyle;F730
zeropersian;06F0
zerosuperior;2070
zerothai;0E50
zerowidthjoiner;FEFF
zerowidthnonjoiner;200C
zerowidthspace;200B
zeta;03B6
zhbopomofo;3113
zhearmenian;056A
zhebrevecyrillic;04C2
zhecyrillic;0436
zhedescendercyrillic;0497
zhedieresiscyrillic;04DD
zihiragana;3058
zikatakana;30B8
zinorhebrew;05AE
zlinebelow;1E95
zmonospace;FF5A
zohiragana;305E
zokatakana;30BE
zparen;24B5
zretroflexhook;0290
zstroke;01B6
zuhiragana;305A
zukatakana;30BA
a100;275E
a101;2761
a102;2762
a103;2763
a104;2764
a105;2710
a106;2765
a107;2766
a108;2767
a109;2660
a10;2721
a110;2665
a111;2666
a112;2663
a117;2709
a118;2708
a119;2707
a11;261B
a120;2460
a121;2461
a122;2462
a123;2463
a124;2464
a125;2465
a126;2466
a127;2467
a128;2468
a129;2469
a12;261E
a130;2776
a131;2777
a132;2778
a133;2779
a134;277A
a135;277B
a136;277C
a137;277D
a138;277E
a139;277F
a13;270C
a140;2780
a141;2781
a142;2782
a143;2783
a144;2784
a145;2785
a146;2786
a147;2787
a148;2788
a149;2789
a14;270D
a150;278A
a151;278B
a152;278C
a153;278D
a154;278E
a155;278F
a156;2790
a157;2791
a158;2792
a159;2793
a15;270E
a160;2794
a161;2192
a162;27A3
a163;2194
a164;2195
a165;2799
a166;279B
a167;279C
a168;279D
a169;279E
a16;270F
a170;279F
a171;27A0
a172;27A1
a173;27A2
a174;27A4
a175;27A5
a176;27A6
a177;27A7
a178;27A8
a179;27A9
a17;2711
a180;27AB
a181;27AD
a182;27AF
a183;27B2
a184;27B3
a185;27B5
a186;27B8
a187;27BA
a188;27BB
a189;27BC
a18;2712
a190;27BD
a191;27BE
a192;279A
a193;27AA
a194;27B6
a195;27B9
a196;2798
a197;27B4
a198;27B7
a199;27AC
a19;2713
a1;2701
a200;27AE
a201;27B1
a202;2703
a203;2750
a204;2752
a205;276E
a206;2770
a20;2714
a21;2715
a22;2716
a23;2717
a24;2718
a25;2719
a26;271A
a27;271B
a28;271C
a29;2722
a2;2702
a30;2723
a31;2724
a32;2725
a33;2726
a34;2727
a35;2605
a36;2729
a37;272A
a38;272B
a39;272C
a3;2704
a40;272D
a41;272E
a42;272F
a43;2730
a44;2731
a45;2732
a46;2733
a47;2734
a48;2735
a49;2736
a4;260E
a50;2737
a51;2738
a52;2739
a53;273A
a54;273B
a55;273C
a56;273D
a57;273E
a58;273F
a59;2740
a5;2706
a60;2741
a61;2742
a62;2743
a63;2744
a64;2745
a65;2746
a66;2747
a67;2748
a68;2749
a69;274A
a6;271D
a70;274B
a71;25CF
a72;274D
a73;25A0
a74;274F
a75;2751
a76;25B2
a77;25BC
a78;25C6
a79;2756
a7;271E
a81;25D7
a82;2758
a83;2759
a84;275A
a85;276F
a86;2771
a87;2772
a88;2773
a89;2768
a8;271F
a90;2769
a91;276C
a92;276D
a93;276A
a94;276B
a95;2774
a96;2775
a97;275B
a98;275C
a99;275D
a9;2720
"""
# string table management
#
class StringTable:
    def __init__( self, name_list, master_table_name ):
        self.names        = name_list
        self.master_table = master_table_name
        self.indices      = {}

        index = 0
        for name in name_list:
            self.indices[name] = index
            index += len( name ) + 1

        self.total = index

    def dump( self, file ):
        write = file.write
        write( " static const char " + self.master_table +
               "[" + repr( self.total ) + "] =\n" )
        write( " {\n" )

        line = ""
        for name in self.names:
            line += " '"
            line += string.join( ( re.findall( ".", name ) ), "','" )
            line += "', 0,\n"

        write( line + " };\n\n\n" )

    def dump_sublist( self, file, table_name, macro_name, sublist ):
        write = file.write
        write( "#define " + macro_name + " " + repr( len( sublist ) ) + "\n\n" )

        write( " /* Values are offsets into the `" +
               self.master_table + "' table */\n\n" )
        write( " static const short " + table_name +
               "[" + macro_name + "] =\n" )
        write( " {\n" )

        line  = " "
        comma = ""
        col   = 0

        for name in sublist:
            line += comma
            line += "%4d" % self.indices[name]
            col  += 1
            comma = ","
            if col == 14:
                col   = 0
                comma = ",\n "

        write( line + "\n };\n\n\n" )
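
# A minimal usage sketch for `StringTable' (illustration only; the
# names `demo_string_table', "demo_names", "demo_sub" and "DEMO_COUNT"
# are made up for this example).  `out' is any object with a `write'
# method, e.g. `sys.stdout'.
def demo_string_table( out ):
    st = StringTable( [ "A", "Aacute" ], "demo_names" )
    # each name is stored with a trailing NUL, so the offsets are
    # { "A": 0, "Aacute": 2 } and `st.total' is 9
    st.dump( out )                      # the char[] master table
    st.dump_sublist( out, "demo_sub", "DEMO_COUNT", [ "Aacute" ] )
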
# We now store the Adobe Glyph List in compressed form. The list is put
# into a data structure called `trie' (because it has a tree-like
# appearance). Consider, for example, that you want to store the
# following name mapping:
#
# A => 1
# Aacute => 6
# Abalon => 2
# Abstract => 4
#
# It is possible to store the entries as follows.
#
# A => 1
# |
# +-acute => 6
# |
# +-b
# |
# +-alon => 2
# |
# +-stract => 4
#
# We see that each node in the trie has:
#
# - one or more `letters'
# - an optional value
# - zero or more child nodes
#
# The first step is to call
#
# root = StringNode( "", 0 )
# for word in map.values():
# root.add( word, map[word] )
#
# which creates a large trie where each node has only one children.
#
# Executing
#
# root = root.optimize()
#
# optimizes the trie by merging the letters of successive nodes whenever
# possible.
#
# Each node of the trie is stored as follows.
#
# - First the node's letter, according to the following scheme. We
# use the fact that in the AGL no name contains character codes > 127.
#
# name bitsize description
# ----------------------------------------------------------------
# notlast 1 Set to 1 if this is not the last letter
# in the word.
# ascii 7 The letter's ASCII value.
#
# - The letter is followed by a children count and the value of the
# current key (if any). Again we can do some optimization because all
# AGL entries are from the BMP; this means that 16 bits are sufficient
# to store its Unicode values. Additionally, no node has more than
# 127 children.
#
# name bitsize description
# -----------------------------------------
# hasvalue 1 Set to 1 if a 16-bit Unicode value follows.
# num_children 7 Number of children. Can be 0 only if
# `hasvalue' is set to 1.
# value 16 Optional Unicode value.
#
# - A node is finished by a list of 16bit absolute offsets to the
# children, which must be sorted in increasing order of their first
# letter.
#
# For simplicity, all 16bit quantities are stored in big-endian order.
#
# The root node has first letter = 0, and no value.
#
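#
#   As a worked example of this layout (not part of the generated data):
#   the node holding the letters `acute' with value 6 and no children
#   from the diagram above is serialized as the 8 bytes
#
#     0xE1 0xE3 0xF5 0xF4 0x65    'a','c','u','t' with the `notlast'
#                                 bit set, then a plain 'e' (last letter)
#     0x80 0x00 0x06              hasvalue=1 and num_children=0, then
#                                 the 16-bit big-endian value 0x0006
#
#   Offsets to child nodes, if any, would follow immediately after.
#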
class StringNode:
def __init__( self, letter, value ):
self.letter = letter
self.value = value
self.children = {}
def __cmp__( self, other ):
return ord( self.letter[0] ) - ord( other.letter[0] )
def add( self, word, value ):
if len( word ) == 0:
self.value = value
return
letter = word[0]
word = word[1:]
if self.children.has_key( letter ):
child = self.children[letter]
else:
child = StringNode( letter, 0 )
self.children[letter] = child
child.add( word, value )
def optimize( self ):
# optimize all children first
children = self.children.values()
self.children = {}
for child in children:
self.children[child.letter[0]] = child.optimize()
# don't optimize if there's a value,
# if we don't have any child or if we
# have more than one child
if ( self.value != 0 ) or ( not children ) or len( children ) > 1:
return self
child = children[0]
self.letter += child.letter
self.value = child.value
self.children = child.children
return self
def dump_debug( self, write, margin ):
# this is used during debugging
line = margin + "+-"
if len( self.letter ) == 0:
line += "<NOLETTER>"
else:
line += self.letter
if self.value:
line += " => " + repr( self.value )
write( line + "\n" )
if self.children:
margin += "| "
for child in self.children.values():
child.dump_debug( write, margin )
def locate( self, index ):
self.index = index
if len( self.letter ) > 0:
index += len( self.letter ) + 1
else:
index += 2
if self.value != 0:
index += 2
children = self.children.values()
children.sort()
index += 2 * len( children )
for child in children:
index = child.locate( index )
return index
def store( self, storage ):
# write the letters
l = len( self.letter )
if l == 0:
storage += struct.pack( "B", 0 )
else:
for n in range( l ):
val = ord( self.letter[n] )
if n < l - 1:
val += 128
storage += struct.pack( "B", val )
# write the count
children = self.children.values()
children.sort()
count = len( children )
if self.value != 0:
storage += struct.pack( "!BH", count + 128, self.value )
else:
storage += struct.pack( "B", count )
for child in children:
storage += struct.pack( "!H", child.index )
for child in children:
storage = child.store( storage )
return storage
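# The classes above are combined as follows (sketch; `agl_map' stands for
# a hypothetical name -> Unicode value dictionary):
#
#   root = StringNode( "", 0 )
#   for name in agl_map.keys():
#     root.add( name, agl_map[name] )
#   root = root.optimize()
#   total = root.locate( 0 )    # assign offsets; returns total byte size
#   data  = root.store( "" )    # the serialized trie, as a byte string
#
# This mirrors what `main' does below with the AGL data.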
def adobe_glyph_values():
"""return the list of glyph names and their unicode values"""
lines = string.split( adobe_glyph_list, '\n' )
glyphs = []
values = []
for line in lines:
if line:
fields = string.split( line, ';' )
# print fields[1] + ' - ' + fields[0]
subfields = string.split( fields[1], ' ' )
if len( subfields ) == 1:
glyphs.append( fields[0] )
values.append( fields[1] )
return glyphs, values
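# For example, an input line `Aacute;00C1' contributes the glyph name
# `Aacute' with the value string `00C1', while a line whose value field
# holds several code points (say, a hypothetical `foo;0041 0301') is
# skipped by the len( subfields ) == 1 test above.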
def filter_glyph_names( alist, filter ):
"""filter `alist' by taking _out_ all glyph names that are in `filter'"""
count = 0
extras = []
for name in alist:
try:
filtered_index = filter.index( name )
except:
extras.append( name )
return extras
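# For example (illustrative), filter_glyph_names( ["A", "B", "C"], ["B"] )
# returns ["A", "C"] -- i.e. the names *not* present in `filter'.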
def dump_encoding( file, encoding_name, encoding_list ):
"""dump a given encoding"""
write = file.write
write( " /* the following are indices into the SID name table */\n" )
write( " static const unsigned short " + encoding_name +
"[" + repr( len( encoding_list ) ) + "] =\n" )
write( " {\n" )
line = " "
comma = ""
col = 0
for value in encoding_list:
line += comma
line += "%3d" % value
comma = ","
col += 1
if col == 16:
col = 0
comma = ",\n "
write( line + "\n };\n\n\n" )
def dump_array( the_array, write, array_name ):
"""dumps a given encoding"""
write( " static const unsigned char " + array_name +
"[" + repr( len( the_array ) ) + "L] =\n" )
write( " {\n" )
line = ""
comma = " "
col = 0
for value in the_array:
line += comma
line += "%3d" % ord( value )
comma = ","
col += 1
if col == 16:
col = 0
comma = ",\n "
if len( line ) > 1024:
write( line )
line = ""
write( line + "\n };\n\n\n" )
def main():
"""main program body"""
if len( sys.argv ) != 2:
print __doc__ % sys.argv[0]
sys.exit( 1 )
    file = open( sys.argv[1], "w" )
write = file.write
count_sid = len( sid_standard_names )
# `mac_extras' contains the list of glyph names in the Macintosh standard
# encoding which are not in the SID Standard Names.
#
mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names )
# `base_list' contains the names of our final glyph names table.
# It consists of the `mac_extras' glyph names, followed by the SID
# standard names.
#
mac_extras_count = len( mac_extras )
base_list = mac_extras + sid_standard_names
write( "/***************************************************************************/\n" )
write( "/* */\n" )
write( "/* %-71s*/\n" % os.path.basename( sys.argv[1] ) )
write( "/* */\n" )
write( "/* PostScript glyph names. */\n" )
write( "/* */\n" )
write( "/* Copyright 2005-2015 by */\n" )
write( "/* David Turner, Robert Wilhelm, and Werner Lemberg. */\n" )
write( "/* */\n" )
write( "/* This file is part of the FreeType project, and may only be used, */\n" )
write( "/* modified, and distributed under the terms of the FreeType project */\n" )
write( "/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\n" )
write( "/* this file you indicate that you have read the license and */\n" )
write( "/* understand and accept it fully. */\n" )
write( "/* */\n" )
write( "/***************************************************************************/\n" )
write( "\n" )
write( "\n" )
write( " /* This file has been generated automatically -- do not edit! */\n" )
write( "\n" )
write( "\n" )
# dump final glyph list (mac extras + sid standard names)
#
st = StringTable( base_list, "ft_standard_glyph_names" )
st.dump( file )
st.dump_sublist( file, "ft_mac_names",
"FT_NUM_MAC_NAMES", mac_standard_names )
st.dump_sublist( file, "ft_sid_names",
"FT_NUM_SID_NAMES", sid_standard_names )
dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
dump_encoding( file, "t1_expert_encoding", t1_expert_encoding )
# dump the AGL in its compressed form
#
agl_glyphs, agl_values = adobe_glyph_values()
dict = StringNode( "", 0 )
for g in range( len( agl_glyphs ) ):
dict.add( agl_glyphs[g], eval( "0x" + agl_values[g] ) )
dict = dict.optimize()
dict_len = dict.locate( 0 )
dict_array = dict.store( "" )
write( """\
/*
* This table is a compressed version of the Adobe Glyph List (AGL),
* optimized for efficient searching. It has been generated by the
* `glnames.py' python script located in the `src/tools' directory.
*
* The lookup function to get the Unicode value for a given string
* is defined below the table.
*/
#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST
""" )
dump_array( dict_array, write, "ft_adobe_glyph_list" )
# write the lookup routine now
#
write( """\
/*
* This function searches the compressed table efficiently.
*/
static unsigned long
ft_get_adobe_glyph_index( const char* name,
const char* limit )
{
int c = 0;
int count, min, max;
const unsigned char* p = ft_adobe_glyph_list;
if ( name == 0 || name >= limit )
goto NotFound;
c = *name++;
count = p[1];
p += 2;
min = 0;
max = count;
while ( min < max )
{
int mid = ( min + max ) >> 1;
const unsigned char* q = p + mid * 2;
int c2;
q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] );
c2 = q[0] & 127;
if ( c2 == c )
{
p = q;
goto Found;
}
if ( c2 < c )
min = mid + 1;
else
max = mid;
}
goto NotFound;
Found:
for (;;)
{
/* assert (*p & 127) == c */
if ( name >= limit )
{
if ( (p[0] & 128) == 0 &&
(p[1] & 128) != 0 )
return (unsigned long)( ( (int)p[2] << 8 ) | p[3] );
goto NotFound;
}
c = *name++;
if ( p[0] & 128 )
{
p++;
if ( c != (p[0] & 127) )
goto NotFound;
continue;
}
p++;
count = p[0] & 127;
if ( p[0] & 128 )
p += 2;
p++;
for ( ; count > 0; count--, p += 2 )
{
int offset = ( (int)p[0] << 8 ) | p[1];
const unsigned char* q = ft_adobe_glyph_list + offset;
if ( c == ( q[0] & 127 ) )
{
p = q;
goto NextIter;
}
}
goto NotFound;
NextIter:
;
}
NotFound:
return 0;
}
#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */
""" )
if 0: # generate unit test, or don't
#
# now write the unit test to check that everything works OK
#
write( "#ifdef TEST\n\n" )
write( "static const char* const the_names[] = {\n" )
for name in agl_glyphs:
write( ' "' + name + '",\n' )
write( " 0\n};\n" )
write( "static const unsigned long the_values[] = {\n" )
for val in agl_values:
write( ' 0x' + val + ',\n' )
write( " 0\n};\n" )
write( """
#include <stdlib.h>
#include <stdio.h>
int
main( void )
{
int result = 0;
const char* const* names = the_names;
const unsigned long* values = the_values;
for ( ; *names; names++, values++ )
{
const char* name = *names;
unsigned long reference = *values;
unsigned long value;
value = ft_get_adobe_glyph_index( name, name + strlen( name ) );
if ( value != reference )
{
result = 1;
fprintf( stderr, "name '%s' => %04x instead of %04x\\n",
name, value, reference );
}
}
return result;
}
""" )
write( "#endif /* TEST */\n" )
write("\n/* END */\n")
# Now run the main routine
#
main()
# END
| apache-2.0 |
openfun/edx-platform | pavelib/paver_tests/test_jshint.py | 130 | 1694 | """
Tests for paver quality tasks
"""
import unittest
from mock import patch
import pavelib.quality
from paver.easy import BuildFailure
class TestPaverJsHint(unittest.TestCase):
"""
For testing run_jshint
"""
def setUp(self):
super(TestPaverJsHint, self).setUp()
# Mock the paver @needs decorator
self._mock_paver_needs = patch.object(pavelib.quality.run_jshint, 'needs').start()
self._mock_paver_needs.return_value = 0
# Mock shell commands
patcher = patch('pavelib.quality.sh')
self._mock_paver_sh = patcher.start()
# Cleanup mocks
self.addCleanup(patcher.stop)
self.addCleanup(self._mock_paver_needs.stop)
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_count_from_last_line')
def test_jshint_violation_number_not_found(self, mock_count, mock_report_dir, mock_write_metric): # pylint: disable=unused-argument
"""
run_jshint encounters an error parsing the jshint output log
"""
mock_count.return_value = None
with self.assertRaises(BuildFailure):
pavelib.quality.run_jshint("")
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_count_from_last_line')
def test_jshint_vanilla(self, mock_count, mock_report_dir, mock_write_metric): # pylint: disable=unused-argument
"""
jshint finds violations, but a limit was not set
"""
mock_count.return_value = 1
pavelib.quality.run_jshint("")
| agpl-3.0 |
edmond-chhung/linkchecker | linkcheck/director/interrupt.py | 9 | 1819 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Status message handling"""
import time
from . import task
from .. import log, LOG_CHECK, strformat
class Interrupt (task.CheckedTask):
"""Thread that raises KeyboardInterrupt after a specified duration.
This gives us a portable SIGALRM implementation.
The duration is checked every 5 seconds.
"""
WaitSeconds = 5
def __init__ (self, duration):
"""Initialize the task.
@param duration: raise KeyboardInterrupt after given number of seconds
@ptype duration: int
"""
super(Interrupt, self).__init__()
self.duration = duration
def run_checked (self):
"""Wait and raise KeyboardInterrupt after."""
self.start_time = time.time()
self.setName("Interrupt")
while not self.stopped(self.WaitSeconds):
duration = time.time() - self.start_time
if duration > self.duration:
log.warn(LOG_CHECK, "Interrupt after %s" % strformat.strduration_long(duration))
raise KeyboardInterrupt()
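# A minimal usage sketch (assuming CheckedTask behaves like a standard
# threading.Thread subclass, as the setName() call above suggests):
#
#   interrupter = Interrupt(300)  # raise KeyboardInterrupt after ~5 minutes
#   interrupter.start()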
| gpl-2.0 |
lichuan261/wuand | XX-Net/python27/1.0/lib/pprint.py | 71 | 11777 | # Author: Fred L. Drake, Jr.
# [email protected]
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
"""Support to pretty-print lists, tuples, & dictionaries recursively.
Very simple, but useful, especially in debugging data structures.
Classes
-------
PrettyPrinter()
Handle pretty-printing operations onto a stream using a configured
set of formatting parameters.
Functions
---------
pformat()
Format a Python object into a pretty-printed representation.
pprint()
Pretty-print a Python object to a stream [default is sys.stdout].
saferepr()
Generate a 'standard' repr()-like value, but protect against recursive
data structures.
"""
import sys as _sys
import warnings
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
"PrettyPrinter"]
# cache these for faster access:
_commajoin = ", ".join
_id = id
_len = len
_type = type
def pprint(object, stream=None, indent=1, width=80, depth=None):
"""Pretty-print a Python object to a stream [default is sys.stdout]."""
printer = PrettyPrinter(
stream=stream, indent=indent, width=width, depth=depth)
printer.pprint(object)
def pformat(object, indent=1, width=80, depth=None):
"""Format a Python object into a pretty-printed representation."""
return PrettyPrinter(indent=indent, width=width, depth=depth).pformat(object)
def saferepr(object):
"""Version of repr() which can handle recursive data structures."""
return _safe_repr(object, {}, None, 0)[0]
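# For example (illustrative; the id value varies from run to run):
#
#     >>> l = []; l.append(l)
#     >>> saferepr(l)
#     '[<Recursion on list with id=...>]'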
def isreadable(object):
"""Determine if saferepr(object) is readable by eval()."""
return _safe_repr(object, {}, None, 0)[1]
def isrecursive(object):
"""Determine if object requires a recursive representation."""
return _safe_repr(object, {}, None, 0)[2]
def _sorted(iterable):
with warnings.catch_warnings():
if _sys.py3kwarning:
warnings.filterwarnings("ignore", "comparing unequal types "
"not supported", DeprecationWarning)
return sorted(iterable)
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
"""
indent = int(indent)
width = int(width)
assert indent >= 0, "indent must be >= 0"
assert depth is None or depth > 0, "depth must be > 0"
assert width, "width must be != 0"
self._depth = depth
self._indent_per_level = indent
self._width = width
if stream is not None:
self._stream = stream
else:
self._stream = _sys.stdout
def pprint(self, object):
self._format(object, self._stream, 0, 0, {}, 0)
self._stream.write("\n")
def pformat(self, object):
sio = _StringIO()
self._format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
return self.format(object, {}, 0, 0)[2]
def isreadable(self, object):
s, readable, recursive = self.format(object, {}, 0, 0)
return readable and not recursive
def _format(self, object, stream, indent, allowance, context, level):
level = level + 1
objid = _id(object)
if objid in context:
stream.write(_recursion(object))
self._recursive = True
self._readable = False
return
rep = self._repr(object, context, level - 1)
typ = _type(object)
sepLines = _len(rep) > (self._width - 1 - indent - allowance)
write = stream.write
if self._depth and level > self._depth:
write(rep)
return
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
write('{')
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = _len(object)
if length:
context[objid] = 1
indent = indent + self._indent_per_level
items = _sorted(object.items())
key, ent = items[0]
rep = self._repr(key, context, level)
write(rep)
write(': ')
self._format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
if length > 1:
for key, ent in items[1:]:
rep = self._repr(key, context, level)
if sepLines:
write(',\n%s%s: ' % (' '*indent, rep))
else:
write(', %s: ' % rep)
self._format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
write('}')
return
if ((issubclass(typ, list) and r is list.__repr__) or
(issubclass(typ, tuple) and r is tuple.__repr__) or
(issubclass(typ, set) and r is set.__repr__) or
(issubclass(typ, frozenset) and r is frozenset.__repr__)
):
length = _len(object)
if issubclass(typ, list):
write('[')
endchar = ']'
elif issubclass(typ, tuple):
write('(')
endchar = ')'
else:
if not length:
write(rep)
return
write(typ.__name__)
write('([')
endchar = '])'
indent += len(typ.__name__) + 1
object = _sorted(object)
if self._indent_per_level > 1 and sepLines:
write((self._indent_per_level - 1) * ' ')
if length:
context[objid] = 1
indent = indent + self._indent_per_level
self._format(object[0], stream, indent, allowance + 1,
context, level)
if length > 1:
for ent in object[1:]:
if sepLines:
write(',\n' + ' '*indent)
else:
write(', ')
self._format(ent, stream, indent,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
if issubclass(typ, tuple) and length == 1:
write(',')
write(endchar)
return
write(rep)
def _repr(self, object, context, level):
repr, readable, recursive = self.format(object, context.copy(),
self._depth, level)
if not readable:
self._readable = False
if recursive:
self._recursive = True
return repr
def format(self, object, context, maxlevels, level):
"""Format object for a specific context, returning a string
and flags indicating whether the representation is 'readable'
and whether the object represents a recursive construct.
"""
return _safe_repr(object, context, maxlevels, level)
# Return triple (repr_string, isreadable, isrecursive).
def _safe_repr(object, context, maxlevels, level):
typ = _type(object)
if typ is str:
if 'locale' not in _sys.modules:
return repr(object), True, False
if "'" in object and '"' not in object:
closure = '"'
quotes = {'"': '\\"'}
else:
closure = "'"
quotes = {"'": "\\'"}
qget = quotes.get
sio = _StringIO()
write = sio.write
for char in object:
if char.isalpha():
write(char)
else:
write(qget(char, repr(char)[1:-1]))
return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = _id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
for k, v in _sorted(object.items()):
krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % _commajoin(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif _len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = _id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % _commajoin(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
def _recursion(object):
return ("<Recursion on %s with id=%s>"
% (_type(object).__name__, _id(object)))
def _perfcheck(object=None):
import time
if object is None:
object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
p = PrettyPrinter()
t1 = time.time()
_safe_repr(object, {}, None, 0)
t2 = time.time()
p.pformat(object)
t3 = time.time()
print "_safe_repr:", t2 - t1
print "pformat:", t3 - t2
if __name__ == "__main__":
_perfcheck()
| gpl-2.0 |
johnnymo87/simple-db-migrate | tests/config_test.py | 1 | 11813 | import os
import unittest
from simple_db_migrate.config import Config, FileConfig
class ConfigTest(unittest.TestCase):
def test_it_should_parse_migrations_dir_with_one_relative_dir(self):
dirs = Config._parse_migrations_dir('.')
self.assertEqual(1, len(dirs))
self.assertEqual(os.path.abspath('.'), dirs[0])
def test_it_should_parse_migrations_dir_with_multiple_relative_dirs(self):
dirs = Config._parse_migrations_dir('test:migrations:./a/relative/path:another/path')
self.assertEqual(4, len(dirs))
self.assertEqual(os.path.abspath('test'), dirs[0])
self.assertEqual(os.path.abspath('migrations'), dirs[1])
self.assertEqual(os.path.abspath('./a/relative/path'), dirs[2])
self.assertEqual(os.path.abspath('another/path'), dirs[3])
def test_it_should_parse_migrations_dir_with_one_absolute_dir(self):
dirs = Config._parse_migrations_dir(os.path.abspath('.'))
self.assertEqual(1, len(dirs))
self.assertEqual(os.path.abspath('.'), dirs[0])
def test_it_should_parse_migrations_dir_with_multiple_absolute_dirs(self):
dirs = Config._parse_migrations_dir('%s:%s:%s:%s' % (
os.path.abspath('test'), os.path.abspath('migrations'),
os.path.abspath('./a/relative/path'), os.path.abspath('another/path'))
)
self.assertEqual(4, len(dirs))
self.assertEqual(os.path.abspath('test'), dirs[0])
self.assertEqual(os.path.abspath('migrations'), dirs[1])
self.assertEqual(os.path.abspath('./a/relative/path'), dirs[2])
self.assertEqual(os.path.abspath('another/path'), dirs[3])
def test_it_should_parse_migrations_dir_with_mixed_relative_and_absolute_dirs(self):
dirs = Config._parse_migrations_dir('%s:%s:%s:%s' % ('/tmp/test', '.', './a/relative/path', os.path.abspath('another/path')))
self.assertEqual(4, len(dirs))
self.assertEqual('/tmp/test', dirs[0])
self.assertEqual(os.path.abspath('.'), dirs[1])
self.assertEqual(os.path.abspath('./a/relative/path'), dirs[2])
self.assertEqual(os.path.abspath('another/path'), dirs[3])
def test_it_should_parse_migrations_dir_with_relative_dirs_using_config_dir_parameter_as_base_path(self):
dirs = Config._parse_migrations_dir(
'%s:%s:%s:%s' % ('/tmp/test', '.', './a/relative/path', os.path.abspath('another/path')),
config_dir='/base/path_to_relative_dirs'
)
self.assertEqual(4, len(dirs))
self.assertEqual('/tmp/test', dirs[0])
self.assertEqual('/base/path_to_relative_dirs', dirs[1])
self.assertEqual('/base/path_to_relative_dirs/a/relative/path', dirs[2])
self.assertEqual(os.path.abspath('another/path'), dirs[3])
def test_it_should_return_value_from_a_dict(self):
_dict = {"some_key": "some_value"}
self.assertEqual("some_value", Config._get(_dict, "some_key"))
def test_it_should_return_value_from_a_dict_even_if_a_default_value_given(self):
_dict = {"some_key": "some_value"}
self.assertEqual("some_value", Config._get(_dict, "some_key", "default_value"))
def test_it_should_return_default_value_for_an_none_dict_value(self):
_dict = {"some_key": None}
self.assertEqual("default_value", Config._get(_dict, "some_key", "default_value"))
def test_it_should_return_default_value_for_an_inexistent_dict_value(self):
_dict = {"some_key": "some_value"}
self.assertEqual("default_value", Config._get(_dict, "ANOTHER_KEY", "default_value"))
def test_it_should_raise_exception_for_an_inexistent_dict_value_without_specify_a_default_value(self):
_dict = {"some_key": "some_value"}
try:
Config._get(_dict, "ANOTHER_KEY")
except Exception as e:
self.assertEqual("invalid key ('ANOTHER_KEY')", str(e))
def test_it_should_accept_non_empty_string_and_false_as_default_value(self):
_dict = {"some_key": "some_value"}
self.assertEqual(None, Config._get(_dict,"ANOTHER_KEY", None))
self.assertEqual("", Config._get(_dict,"ANOTHER_KEY", ""))
self.assertEqual(False, Config._get(_dict,"ANOTHER_KEY", False))
def test_it_should_save_config_values(self):
config = Config()
initial = str(config)
config.put("some_key", "some_value")
self.assertNotEqual(initial, str(config))
def test_it_should_not_update_saved_config_values(self):
config = Config()
config.put("some_key", "some_value")
try:
config.put("some_key", "another_value")
except Exception as e:
self.assertEqual("the configuration key 'some_key' already exists and you cannot override any configuration", str(e))
def test_it_should_remove_saved_config_values(self):
config = Config()
config.put("some_key", "some_value")
initial = str(config)
config.remove("some_key")
self.assertNotEqual(initial, str(config))
def test_it_should_raise_exception_when_removing_an_inexistent_config_value(self):
config = Config()
config.put("some_key", "some_value")
try:
config.remove("ANOTHER_KEY")
except Exception as e:
self.assertEqual("invalid configuration key ('another_key')", str(e))
def test_it_should_return_previous_saved_config_values(self):
config = Config()
config.put("some_key", "some_value")
self.assertEqual("some_value", config.get("some_key"))
def test_it_should_accept_initial_values_as_configuration(self):
config = Config({"some_key": "some_value"})
self.assertEqual("some_value", config.get("some_key"))
def test_it_should_return_default_value_for_an_inexistent_config_value(self):
config = Config()
config.put("some_key", "some_value")
self.assertEqual("default_value", config.get("another_key", "default_value"))
def test_it_should_raise_exception_for_an_inexistent_config_value_without_specify_a_default_value(self):
config = Config()
config.put("some_key", "some_value")
try:
config.get("ANOTHER_KEY")
except Exception as e:
self.assertEqual("invalid key ('another_key')", str(e))
    def test_it_should_accept_non_empty_string_and_false_as_default_value_on_get(self):
config = Config()
config.put("some_key", "some_value")
self.assertEqual(None, config.get("ANOTHER_KEY", None))
self.assertEqual("", config.get("ANOTHER_KEY", ""))
self.assertEqual(False, config.get("ANOTHER_KEY", False))
def test_it_should_update_value_to_a_non_existing_key(self):
config = Config()
config.update("some_key", "some_value")
self.assertEqual("some_value", config.get("some_key"))
def test_it_should_update_value_to_a_existing_key(self):
config = Config()
config.put("some_key", "original_value")
config.update("some_key", "some_value")
self.assertEqual("some_value", config.get("some_key"))
def test_it_should_update_value_to_a_existing_key_keeping_original_value_if_new_value_is_none_false_or_empty_string(self):
config = Config()
config.put("some_key", "original_value")
config.update("some_key", None)
self.assertEqual("original_value", config.get("some_key"))
config.update("some_key", False)
self.assertEqual("original_value", config.get("some_key"))
config.update("some_key", "")
self.assertEqual("original_value", config.get("some_key"))
def test_it_should_transform_keys_to_lower_case(self):
config = Config()
config.put("sOmE_kEy", "original_value")
self.assertEqual("original_value", config.get("SoMe_KeY"))
config.update("sOMe_kEy", "new_value")
self.assertEqual("new_value", config.get("some_KEY"))
config.remove("SOME_KEY")
self.assertRaises(Exception, config.get, "sOMe_KEY")
def test_it_should_transform_keys_to_lower_case_on_init(self):
config = Config({"sOmE_kEy": "original_value"})
self.assertEqual(["some_key"] ,config._config.keys())
class FileConfigTest(unittest.TestCase):
def setUp(self):
config_file = '''
DATABASE_HOST = 'localhost'
DATABASE_USER = 'root'
DATABASE_PASSWORD = ''
DATABASE_NAME = 'migration_example'
ENV1_DATABASE_NAME = 'migration_example_env1'
UTC_TIMESTAMP = True
DATABASE_ANY_CUSTOM_VARIABLE = 'Some Value'
SOME_ENV_DATABASE_ANY_CUSTOM_VARIABLE = 'Other Value'
DATABASE_OTHER_CUSTOM_VARIABLE = 'Value'
'''
f = open('sample.conf', 'w')
f.write("%s\nDATABASE_MIGRATIONS_DIR = 'example'" % config_file)
f.close()
f = open('sample2.conf', 'w')
f.write("%s" % config_file)
f.close()
f = open('sample.py', 'w')
f.write('import os\n')
f.write("%s\nDATABASE_MIGRATIONS_DIR = 'example'" % config_file)
f.close()
def tearDown(self):
os.remove('sample.conf')
os.remove('sample2.conf')
os.remove('sample.py')
def test_it_should_extend_from_config_class(self):
config = FileConfig(os.path.abspath('sample.conf'))
self.assertTrue(isinstance(config, Config))
def test_it_should_read_config_file(self):
config_path = os.path.abspath('sample.conf')
config = FileConfig(config_path)
self.assertEquals(config.get('database_host'), 'localhost')
self.assertEquals(config.get('database_user'), 'root')
self.assertEquals(config.get('database_password'), '')
self.assertEquals(config.get('database_name'), 'migration_example')
self.assertEquals(config.get("database_migrations_dir"), [os.path.abspath('example')])
self.assertEquals(config.get('utc_timestamp'), True)
def test_it_should_use_configuration_by_environment(self):
config_path = os.path.abspath('sample.conf')
config = FileConfig(config_path, "env1")
self.assertEquals('migration_example_env1', config.get('database_name'))
self.assertEquals('root', config.get('database_user'))
def test_it_should_accept_environment_in_any_case(self):
config_path = os.path.abspath('sample.conf')
config = FileConfig(config_path, "EnV1")
self.assertEquals('migration_example_env1', config.get('database_name'))
self.assertEquals('root', config.get('database_user'))
def test_it_should_stop_execution_when_an_invalid_key_is_requested(self):
config_path = os.path.abspath('sample.conf')
config = FileConfig(config_path)
try:
config.get('invalid_config')
self.fail('it should not pass here')
except Exception as e:
self.assertEqual("invalid key ('invalid_config')", str(e))
def test_it_should_get_any_database_custom_variable(self):
config_path = os.path.abspath('sample.conf')
config = FileConfig(config_path)
self.assertEqual('Some Value', config.get('database_any_custom_variable'))
def test_it_should_get_any_database_custom_variable_using_environment(self):
config_path = os.path.abspath('sample.conf')
config = FileConfig(config_path, 'some_env')
self.assertEqual('Other Value', config.get('database_any_custom_variable'))
self.assertEqual('Value', config.get('database_other_custom_variable'))
def test_it_should_accept_a_configuration_file_without_migrations_dir_key(self):
config_path = os.path.abspath('sample2.conf')
config = FileConfig(config_path)
self.assertEqual("no_migrations_dir_key", config.get('migrations_dir', "no_migrations_dir_key"))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
mdeemer/XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels18.py | 8 | 1837 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_data_labels18.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [45740416, 45747584]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=Sheet1!$A$1:$A$5',
'data_labels': {'value': 1, 'category': 1},
})
chart.add_series({
'values': '=Sheet1!$B$1:$B$5',
'data_labels': {'value': 1, 'category': 1, 'separator': ';'},
})
chart.add_series({
'values': '=Sheet1!$C$1:$C$5',
'data_labels': {'value': 1, 'category': 1, 'separator': '.'},
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| bsd-2-clause |
rhiever/bokeh | bokeh/sphinxext/collapsible_code_block.py | 43 | 3128 | """ Display code blocks in collapsible sections when outputting
to HTML.
Usage
-----
This directive takes a heading to use for the collapsible code block::
.. collapsible-code-block:: python
:heading: Some Code
from __future__ import print_function
print("Hello, Bokeh!")
Options
-------
This directive is identical to the standard ``code-block`` directive
that Sphinx supplies, with the addition of one new option:
heading : string
A heading to put for the collapsible block. Clicking the heading
    expands or collapses the block
Examples
--------
The inline example code above produces the following output:
----
.. collapsible-code-block:: python
:heading: Some Code
from __future__ import print_function
print("Hello, Bokeh!")
"""
from __future__ import absolute_import
from docutils import nodes
from docutils.parsers.rst.directives import unchanged
from os.path import basename
import jinja2
from sphinx.directives.code import CodeBlock
PROLOGUE_TEMPLATE = jinja2.Template(u"""
<div class="panel-group" id="accordion" role="tablist" aria-multiselectable="true">
<div class="panel panel-default">
<div class="panel-heading" role="tab" id="heading-{{ id }}">
<h4 class="panel-title">
<a class="collapsed" data-toggle="collapse" data-parent="#accordion" href="#collapse-{{ id }}" aria-expanded="false" aria-controls="collapse-{{ id }}">
{{ heading }}
</a>
</h4>
</div>
<div id="collapse-{{ id }}" class="panel-collapse collapse" role="tabpanel" aria-labelledby="heading-{{ id }}">
<div class="panel-body">
""")
EPILOGUE_TEMPLATE = jinja2.Template(u"""
</div>
</div>
</div>
</div>
""")
class collapsible_code_block(nodes.General, nodes.Element):
pass
class CollapsibleCodeBlock(CodeBlock):
option_spec = CodeBlock.option_spec
option_spec.update(heading=unchanged)
def run(self):
env = self.state.document.settings.env
rst_source = self.state_machine.node.document['source']
rst_filename = basename(rst_source)
target_id = "%s.ccb-%d" % (rst_filename, env.new_serialno('bokeh-plot'))
target_id = target_id.replace(".", "-")
target_node = nodes.target('', '', ids=[target_id])
node = collapsible_code_block()
node['target_id'] = target_id
node['heading'] = self.options.get('heading', "Code")
cb = CodeBlock.run(self)
node.setup_child(cb[0])
node.children.append(cb[0])
return [target_node, node]
def html_visit_collapsible_code_block(self, node):
self.body.append(
PROLOGUE_TEMPLATE.render(
id=node['target_id'],
heading=node['heading']
)
)
def html_depart_collapsible_code_block(self, node):
self.body.append(EPILOGUE_TEMPLATE.render())
def setup(app):
app.add_node(
collapsible_code_block,
html=(
html_visit_collapsible_code_block,
html_depart_collapsible_code_block
)
)
app.add_directive('collapsible-code-block', CollapsibleCodeBlock)
| bsd-3-clause |
admcrae/tensorflow | tensorflow/contrib/learn/python/learn/estimators/rnn_common.py | 27 | 12727 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common operations for RNN Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import metrics
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# NOTE(jtbates): As of February 10, 2017, some of the `RNNKeys` have been
# removed and replaced with values from `prediction_key.PredictionKey`. The key
# `RNNKeys.PREDICTIONS_KEY` has been replaced by
# `prediction_key.PredictionKey.SCORES` for regression and
# `prediction_key.PredictionKey.CLASSES` for classification. The key
# `RNNKeys.PROBABILITIES_KEY` has been replaced by
# `prediction_key.PredictionKey.PROBABILITIES`.
class RNNKeys(object):
FINAL_STATE_KEY = 'final_state'
LABELS_KEY = '__labels__'
SEQUENCE_LENGTH_KEY = 'sequence_length'
STATE_PREFIX = 'rnn_cell_state'
class PredictionType(object):
"""Enum-like values for the type of prediction that the model makes.
"""
SINGLE_VALUE = 1
MULTIPLE_VALUE = 2
_CELL_TYPES = {'basic_rnn': contrib_rnn.BasicRNNCell,
'lstm': contrib_rnn.LSTMCell,
'gru': contrib_rnn.GRUCell,}
def _get_single_cell(cell_type, num_units):
"""Constructs and return a single `RNNCell`.
Args:
cell_type: Either a string identifying the `RNNCell` type or a subclass of
`RNNCell`.
num_units: The number of units in the `RNNCell`.
Returns:
An initialized `RNNCell`.
Raises:
ValueError: `cell_type` is an invalid `RNNCell` name.
TypeError: `cell_type` is not a string or a subclass of `RNNCell`.
"""
cell_type = _CELL_TYPES.get(cell_type, cell_type)
if not cell_type or not issubclass(cell_type, contrib_rnn.RNNCell):
raise ValueError('The supported cell types are {}; got {}'.format(
list(_CELL_TYPES.keys()), cell_type))
return cell_type(num_units=num_units)
def construct_rnn_cell(num_units, cell_type='basic_rnn',
dropout_keep_probabilities=None):
"""Constructs cells, applies dropout and assembles a `MultiRNNCell`.
The cell type chosen by DynamicRNNEstimator.__init__() is the same as
returned by this function when called with the same arguments.
Args:
num_units: A single `int` or a list/tuple of `int`s. The size of the
`RNNCell`s.
cell_type: A string identifying the `RNNCell` type or a subclass of
`RNNCell`.
dropout_keep_probabilities: a list of dropout probabilities or `None`. If a
list is given, it must have length `len(cell_type) + 1`.
Returns:
An initialized `RNNCell`.
"""
if not isinstance(num_units, (list, tuple)):
num_units = (num_units,)
cells = [_get_single_cell(cell_type, n) for n in num_units]
if dropout_keep_probabilities:
cells = apply_dropout(cells, dropout_keep_probabilities)
if len(cells) == 1:
return cells[0]
return contrib_rnn.MultiRNNCell(cells)
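# Illustrative usage (the numeric values here are arbitrary):
#
#   cell = construct_rnn_cell(num_units=[64, 32], cell_type='lstm',
#                             dropout_keep_probabilities=[1.0, 0.5, 1.0])
#
# builds a two-layer LSTM `MultiRNNCell` that keeps all inputs to the
# first layer, 50% of the activations flowing between the layers, and
# all final outputs (see `apply_dropout` below).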
def apply_dropout(cells, dropout_keep_probabilities, random_seed=None):
"""Applies dropout to the outputs and inputs of `cell`.
Args:
cells: A list of `RNNCell`s.
dropout_keep_probabilities: a list whose elements are either floats in
`[0.0, 1.0]` or `None`. It must have length one greater than `cells`.
random_seed: Seed for random dropout.
Returns:
A list of `RNNCell`s, the result of applying the supplied dropouts.
Raises:
ValueError: If `len(dropout_keep_probabilities) != len(cells) + 1`.
"""
if len(dropout_keep_probabilities) != len(cells) + 1:
raise ValueError(
        'The number of dropout probabilities must be one greater than the '
'number of cells. Got {} cells and {} dropout probabilities.'.format(
len(cells), len(dropout_keep_probabilities)))
wrapped_cells = [
contrib_rnn.DropoutWrapper(cell, prob, 1.0, seed=random_seed)
for cell, prob in zip(cells[:-1], dropout_keep_probabilities[:-2])
]
wrapped_cells.append(
contrib_rnn.DropoutWrapper(cells[-1], dropout_keep_probabilities[-2],
dropout_keep_probabilities[-1]))
return wrapped_cells
def get_eval_metric_ops(problem_type, prediction_type, sequence_length,
prediction_dict, labels):
"""Returns eval metric ops for given `problem_type` and `prediction_type`.
Args:
problem_type: `ProblemType.CLASSIFICATION` or
`ProblemType.LINEAR_REGRESSION`.
prediction_type: `PredictionType.SINGLE_VALUE` or
`PredictionType.MULTIPLE_VALUE`.
sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
prediction_dict: A dict of prediction tensors.
labels: The label `Tensor`.
Returns:
A `dict` mapping strings to the result of calling the metric_fn.
"""
eval_metric_ops = {}
if problem_type == constants.ProblemType.CLASSIFICATION:
# Multi value classification
if prediction_type == PredictionType.MULTIPLE_VALUE:
mask_predictions, mask_labels = mask_activations_and_labels(
prediction_dict[prediction_key.PredictionKey.CLASSES], labels,
sequence_length)
eval_metric_ops['accuracy'] = metrics.streaming_accuracy(
predictions=mask_predictions, labels=mask_labels)
# Single value classification
elif prediction_type == PredictionType.SINGLE_VALUE:
eval_metric_ops['accuracy'] = metrics.streaming_accuracy(
predictions=prediction_dict[prediction_key.PredictionKey.CLASSES],
labels=labels)
elif problem_type == constants.ProblemType.LINEAR_REGRESSION:
# Multi value regression
if prediction_type == PredictionType.MULTIPLE_VALUE:
pass
# Single value regression
elif prediction_type == PredictionType.SINGLE_VALUE:
pass
return eval_metric_ops
def select_last_activations(activations, sequence_lengths):
"""Selects the nth set of activations for each n in `sequence_length`.
Reuturns a `Tensor` of shape `[batch_size, k]`. If `sequence_length` is not
`None`, then `output[i, :] = activations[i, sequence_length[i], :]`. If
`sequence_length` is `None`, then `output[i, :] = activations[i, -1, :]`.
Args:
activations: A `Tensor` with shape `[batch_size, padded_length, k]`.
sequence_lengths: A `Tensor` with shape `[batch_size]` or `None`.
Returns:
A `Tensor` of shape `[batch_size, k]`.
"""
with ops.name_scope(
'select_last_activations', values=[activations, sequence_lengths]):
activations_shape = array_ops.shape(activations)
batch_size = activations_shape[0]
padded_length = activations_shape[1]
num_label_columns = activations_shape[2]
if sequence_lengths is None:
sequence_lengths = padded_length
reshaped_activations = array_ops.reshape(activations,
[-1, num_label_columns])
indices = math_ops.range(batch_size) * padded_length + sequence_lengths - 1
last_activations = array_ops.gather(reshaped_activations, indices)
last_activations.set_shape(
[activations.get_shape()[0], activations.get_shape()[2]])
return last_activations
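# For instance (illustrative shapes): given `activations` of shape
# [2, 5, 3] and `sequence_lengths` = [3, 5], the result has shape [2, 3]
# and consists of the rows activations[0, 2, :] and activations[1, 4, :].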
def mask_activations_and_labels(activations, labels, sequence_lengths):
"""Remove entries outside `sequence_lengths` and returned flattened results.
Args:
activations: Output of the RNN, shape `[batch_size, padded_length, k]`.
labels: Label values, shape `[batch_size, padded_length]`.
sequence_lengths: A `Tensor` of shape `[batch_size]` with the unpadded
length of each sequence. If `None`, then each sequence is unpadded.
Returns:
activations_masked: `logit` values with those beyond `sequence_lengths`
removed for each batch. Batches are then concatenated. Shape
`[tf.sum(sequence_lengths), k]` if `sequence_lengths` is not `None` and
shape `[batch_size * padded_length, k]` otherwise.
labels_masked: Label values after removing unneeded entries. Shape
`[tf.sum(sequence_lengths)]` if `sequence_lengths` is not `None` and shape
`[batch_size * padded_length]` otherwise.
"""
with ops.name_scope(
'mask_activations_and_labels',
values=[activations, labels, sequence_lengths]):
labels_shape = array_ops.shape(labels)
batch_size = labels_shape[0]
padded_length = labels_shape[1]
if sequence_lengths is None:
flattened_dimension = padded_length * batch_size
activations_masked = array_ops.reshape(activations,
[flattened_dimension, -1])
labels_masked = array_ops.reshape(labels, [flattened_dimension])
else:
mask = array_ops.sequence_mask(sequence_lengths, padded_length)
activations_masked = array_ops.boolean_mask(activations, mask)
labels_masked = array_ops.boolean_mask(labels, mask)
return activations_masked, labels_masked
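# For instance (illustrative shapes): `activations` of shape [2, 3, 4]
# with `sequence_lengths` = [2, 3] yields `activations_masked` of shape
# [5, 4] and `labels_masked` of shape [5], since 2 + 3 = 5 entries
# survive the mask.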
def multi_value_predictions(activations, target_column, problem_type,
predict_probabilities):
"""Maps `activations` from the RNN to predictions for multi value models.
If `predict_probabilities` is `False`, this function returns a `dict`
containing single entry with key `prediction_key.PredictionKey.CLASSES` for
`problem_type` `ProblemType.CLASSIFICATION` or
  `prediction_key.PredictionKey.SCORES` for `problem_type`
`ProblemType.LINEAR_REGRESSION`.
If `predict_probabilities` is `True`, it will contain a second entry with key
`prediction_key.PredictionKey.PROBABILITIES`. The
value of this entry is a `Tensor` of probabilities with shape
`[batch_size, padded_length, num_classes]`.
Note that variable length inputs will yield some predictions that don't have
meaning. For example, if `sequence_length = [3, 2]`, then prediction `[1, 2]`
has no meaningful interpretation.
Args:
activations: Output from an RNN. Should have dtype `float32` and shape
`[batch_size, padded_length, ?]`.
    target_column: An initialized `TargetColumn`, used to calculate predictions.
problem_type: Either `ProblemType.CLASSIFICATION` or
`ProblemType.LINEAR_REGRESSION`.
predict_probabilities: A Python boolean, indicating whether probabilities
should be returned. Should only be set to `True` for
classification/logistic regression problems.
Returns:
A `dict` mapping strings to `Tensors`.
"""
with ops.name_scope('MultiValuePrediction'):
activations_shape = array_ops.shape(activations)
flattened_activations = array_ops.reshape(activations,
[-1, activations_shape[2]])
prediction_dict = {}
if predict_probabilities:
flat_probabilities = target_column.logits_to_predictions(
flattened_activations, proba=True)
flat_predictions = math_ops.argmax(flat_probabilities, 1)
if target_column.num_label_columns == 1:
probability_shape = array_ops.concat([activations_shape[:2], [2]], 0)
else:
probability_shape = activations_shape
probabilities = array_ops.reshape(
flat_probabilities,
probability_shape,
name=prediction_key.PredictionKey.PROBABILITIES)
prediction_dict[
prediction_key.PredictionKey.PROBABILITIES] = probabilities
else:
flat_predictions = target_column.logits_to_predictions(
flattened_activations, proba=False)
predictions_name = (prediction_key.PredictionKey.CLASSES
if problem_type == constants.ProblemType.CLASSIFICATION
else prediction_key.PredictionKey.SCORES)
predictions = array_ops.reshape(
flat_predictions, [activations_shape[0], activations_shape[1]],
name=predictions_name)
prediction_dict[predictions_name] = predictions
return prediction_dict
| apache-2.0 |
BruceDai/crosswalk-test-suite | webapi/tct-csp-w3c-tests/csp-py/csp_sandbox_empty_inline-manual.py | 30 | 2574 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "sandbox "
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <[email protected]>
-->
<html>
<head>
<title>CSP Test: csp_sandbox_empty_inline</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#sandbox-optional"/>
<meta name="flags" content=""/>
<meta name="assert" content="sandbox"/>
<meta charset="utf-8"/>
</head>
<body>
<p>Test passes if there is <strong>no</strong> text "FAIL" below.</p>
<div id="test" style="display:red"></div>
<script>
document.getElementById("test").innerHTML = "FAIL";
</script>
</html> """
| bsd-3-clause |
avedaee/DIRAC | Core/scripts/dirac-admin-bdii-cluster.py | 1 | 1471 | #! /usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-bdii-cluster
# Author : Adria Casajus
########################################################################
"""
Check info on BDII for Cluster
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
Script.registerSwitch( "H:", "host=", "BDII host" )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... CE' % Script.scriptName,
'Arguments:',
' CE: Name of the CE(ie: ce111.cern.ch)'] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
from DIRAC import exit as DIRACExit
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
if not len( args ) == 1:
Script.showHelp()
ce = args[0]
host = None
for unprocSw in Script.getUnprocessedSwitches():
if unprocSw[0] in ( "h", "host" ):
host = unprocSw[1]
diracAdmin = DiracAdmin()
result = diracAdmin.getBDIICluster( ce, host = host )
if not result['OK']:
print result['Message']
DIRACExit( 2 )
ces = result['Value']
for ce in ces:
print "Cluster: %s {" % ce.get( 'GlueClusterName', 'Unknown' )
for item in ce.iteritems():
print "%s: %s" % item
print "}"
| gpl-3.0 |
gurneyalex/OpenUpgrade | addons/base_iban/__openerp__.py | 125 | 1768 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'IBAN Bank Accounts',
'version': '1.0',
'category': 'Hidden/Dependency',
'description': """
This module installs the base for IBAN (International Bank Account Number) bank accounts and checks for its validity.
======================================================================================================================
It also provides the ability to extract correctly represented local accounts
from IBAN accounts with a single statement.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base'],
'data': ['base_iban_data.xml' , 'base_iban_view.xml'],
'installable': True,
'auto_install': False,
'images': ['images/base_iban1.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
valentin-krasontovitsch/ansible | lib/ansible/plugins/action/bigip.py | 37 | 4448 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.common.utils import load_provider
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
from ansible.utils.display import Display
try:
from library.module_utils.network.f5.common import f5_provider_spec
except Exception:
from ansible.module_utils.network.f5.common import f5_provider_spec
display = Display()
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
self._config_module = True if self._task.action == 'bigip_imish_config' else False
socket_path = None
transport = 'rest'
if self._play_context.connection == 'network_cli':
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning("'provider' is unnecessary when using 'network_cli' and will be ignored")
elif self._play_context.connection == 'local':
provider = load_provider(f5_provider_spec, self._task.args)
transport = provider['transport'] or transport
display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)
if transport == 'cli':
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'bigip'
pc.remote_addr = provider.get('server', self._play_context.remote_addr)
pc.port = int(provider['server_port'] or self._play_context.port or 22)
pc.remote_user = provider.get('user', self._play_context.connection_user)
pc.password = provider.get('password', self._play_context.password)
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {
'failed': True,
'msg': 'Unable to open shell. Please see: '
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'
}
task_vars['ansible_socket'] = socket_path
if (self._play_context.connection == 'local' and transport == 'cli') or self._play_context.connection == 'network_cli':
# make sure we are in the right cli context which should be
# enable mode and not config module
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
out = conn.get_prompt()
while '(config' in to_text(out, errors='surrogate_then_replace').strip():
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
out = conn.get_prompt()
result = super(ActionModule, self).run(task_vars=task_vars)
return result
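# A hedged usage sketch (comments only, since this file is an action
# plugin rather than a playbook): a task that would exercise the 'cli'
# transport branch in run() above. The host and credential values are
# illustrative placeholders, not defaults of this plugin:
#
#   - name: push imish config over the CLI transport
#     bigip_imish_config:
#       lines:
#         - router bgp 65010
#       provider:
#         server: "{{ inventory_hostname }}"
#         server_port: 22
#         user: admin
#         password: secret
#         transport: cli
#     connection: local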
| gpl-3.0 |
CarlFK/steve | steve/util.py | 1 | 15762 | #######################################################################
# This file is part of steve.
#
# Copyright (C) 2012-2014 Will Kahn-Greene
# Licensed under the Simplified BSD License. See LICENSE for full
# license.
#######################################################################
import argparse
import ConfigParser
import datetime
import json
import os
import string
import sys
import textwrap
import unicodedata
from functools import wraps
from urlparse import urlparse
import html2text
def is_youtube(url):
parsed = urlparse(url)
return parsed.netloc.startswith(
('www.youtube.com', 'youtube.com', 'youtu.be'))
ALLOWED_LETTERS = string.ascii_letters + string.digits + '-_'
class SteveException(Exception):
"""Base steve exception"""
def __init__(self, *args, **kwargs):
self.__dict__.update(kwargs)
super(SteveException, self).__init__(*args)
class ConfigNotFound(SteveException):
"""Denotes the config file couldn't be found"""
pass
class BetterArgumentParser(argparse.ArgumentParser):
    def __init__(self, *args, **kw):
        self.byline = kw.pop('byline', None)
argparse.ArgumentParser.__init__(self, *args, **kw)
def print_byline(self, file=None):
if file is None:
file = sys.stdout
if self.byline:
self._print_message(self.byline + '\n', file)
def print_usage(self, file=None):
self.print_byline(file)
argparse.ArgumentParser.print_usage(self, file)
def with_config(fun):
"""Decorator that passes config as first argument
:raises ConfigNotFound: if the config file can't be found
This calls :py:func:`get_project_config`. If that returns a
configuration object, then this passes that as the first argument
to the decorated function. If :py:func:`get_project_config` doesn't
return a config object, then this raises :py:exc:`ConfigNotFound`.
Example:
>>> @with_config
... def config_printer(cfg):
... print 'Config!: {0!r}'.format(cfg)
...
    >>> config_printer()  # if it found a config
    Config!: ...
    >>> config_printer()  # if it didn't find a config
    Traceback (most recent call last):
    ...
    steve.util.ConfigNotFound: steve.ini could not be found.
"""
@wraps(fun)
def _with_config(*args, **kw):
cfg = get_project_config()
return fun(cfg, *args, **kw)
return _with_config
def get_project_config():
"""Finds and opens the config file in the current directory
:raises ConfigNotFound: if the config file can't be found
:returns: config file
"""
# TODO: Should we support parent directories, too?
projectpath = os.getcwd()
path = os.path.join(projectpath, 'steve.ini')
if not os.path.exists(path):
raise ConfigNotFound('steve.ini could not be found.')
cp = ConfigParser.ConfigParser()
cp.read(path)
# TODO: This is a little dirty since we're inserting stuff into
# the config file if it's not there, but so it goes.
try:
cp.get('project', 'projectpath')
except ConfigParser.NoOptionError:
cp.set('project', 'projectpath', projectpath)
# If STEVE_CRED_FILE is specified in the environment or there's a
# cred_file in the config file, then open the file and pull the
# API information from there:
#
# * api_url
# * username
# * api_key
#
# This allows people to have a whole bunch of steve project
# directories and store their credentials in a central location.
cred_file = None
try:
cred_file = os.environ['STEVE_CRED_FILE']
except KeyError:
try:
cred_file = cp.get('project', 'cred_file')
except ConfigParser.NoOptionError:
pass
if cred_file:
cred_file = os.path.abspath(cred_file)
if os.path.exists(cred_file):
cfp = ConfigParser.ConfigParser()
cfp.read(cred_file)
cp.set('project', 'api_url', cfp.get('default', 'api_url'))
cp.set('project', 'username', cfp.get('default', 'username'))
cp.set('project', 'api_key', cfp.get('default', 'api_key'))
return cp
def get_from_config(cfg, key, section='project',
error='"{key}" must be defined in steve.ini file.'):
"""Retrieves specified key from config or errors
:arg cfg: the configuration
:arg key: key to retrieve
:arg section: the section to retrieve the key from
:arg error: the error to print to stderr if the key is not
there or if the value is empty. ``{key}`` gets filled in
with the key
"""
try:
value = cfg.get(section, key)
if value:
return value.strip()
except ConfigParser.NoOptionError:
pass
err(error.format(key=key))
return None
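# A small usage sketch for get_from_config, assuming a steve.ini whose
# [project] section defines an "api_url" key (the key name here is only
# an example):
#
#     cfg = get_project_config()
#     api_url = get_from_config(cfg, 'api_url')
#     if api_url is None:
#         sys.exit(1)  # get_from_config already wrote the error via err()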
def load_tags_file(config):
"""Opens the tags file and loads tags
The tags file is either specified in the ``tagsfile`` config
entry or it's ``PROJECTPATH/tags.txt``.
The file consists of a list of tags---one per line. Blank lines
and lines that start with ``#`` are removed.
This will read the file and return the list of tags.
If the file doesn't exist, this returns an empty list.
:arg config: the project config file
:returns: list of strings
"""
projectpath = config.get('project', 'projectpath')
try:
tagsfile = config.get('project', 'tagsfile')
except ConfigParser.NoOptionError:
tagsfile = 'tags.txt'
tagsfile = os.path.join(projectpath, tagsfile)
if not os.path.exists(tagsfile):
return []
fp = open(tagsfile, 'r')
tags = [tag.strip() for tag in fp.readlines()]
fp.close()
# Nix blank lines and lines that start with #.
return [tag for tag in tags if tag and not tag.startswith('#')]
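# An example tags file in the format load_tags_file expects -- one tag
# per line, with blank lines and '#' comment lines skipped (the tag
# values are illustrative):
#
#     # track tags
#     web
#     testing
#     packaging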
def convert_to_json(structure):
    """Converts a Python structure to a JSON string, encoding dates as YYYY-MM-DD"""
def convert(obj):
if isinstance(obj, (datetime.datetime, datetime.date)):
return obj.strftime('%Y-%m-%d')
return obj
return json.dumps(structure, indent=2, sort_keys=True, default=convert)
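# Doctest-style sketch of the date handling above; keys come out sorted
# because sort_keys=True:
#
#     >>> convert_to_json({'recorded': datetime.date(2014, 1, 2)})
#     '{\n  "recorded": "2014-01-02"\n}'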
def generate_filename(text):
filename = text.replace(' ', '_')
filename = ''.join([c for c in filename if c in ALLOWED_LETTERS])
return filename
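# For example, generate_filename('PyCon 2014: Opening Keynote!') returns
# 'PyCon_2014_Opening_Keynote' -- spaces become underscores and any
# character outside ALLOWED_LETTERS is dropped.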
def get_video_requirements():
fn = os.path.join(os.path.dirname(__file__), 'video_reqs.json')
fp = open(fn)
data = json.load(fp)
fp.close()
return data
def _required(data):
if (data['null'] or data['has_default'] or data['empty_strings']):
return False
return True
def verify_video_data(data, category=None):
"""Verify the data in a single json file for a video.
:param data: The parsed contents of a JSON file. This should be a
Python dict.
:param category: The category as specified in the steve.ini file.
If the steve.ini has a category, then every data file either
has to have the same category or no category at all.
This is None if no category is specified in which case every
data file has to have a category.
:returns: list of error strings.
"""
# TODO: rewrite this to return a dict of fieldname -> list of
# errors
errors = []
requirements = get_video_requirements()
# First, verify the data is correct.
for req in requirements:
key = req['name']
if key == 'category':
# Category is a special case since we can specify it
# in the steve.ini file.
if not category and key not in data:
errors.append(
'"category" must be in either steve.ini or data file')
elif (key in data and (
category is not None and data[key] != category)):
errors.append(
'"{0}" field does not match steve.ini category'.format(
key))
elif key not in data:
# Required data must be there.
# TODO: We add title here because this is the client side
            # of the API and that's a special case that's different
# than the data model which is where the video_reqs.json
# are derived. That should get fixed in richard.
if _required(req) or key == 'title':
errors.append('"{0}" field is required'.format(key))
elif req['type'] == 'IntegerField':
if not isinstance(data[key], int):
                if _required(req) or data[key] is not None:
errors.append('"{0}" field must be an int'.format(key))
elif req['choices'] and data[key] not in req['choices']:
errors.append(
'"{0}" field must be one of {1}'.format(
key, req['choices']))
elif req['type'] == 'TextField':
if not req['empty_strings'] and not data[key]:
errors.append(
'"{0}" field can\'t be an empty string'.format(key))
elif not data[key]:
continue
elif req['type'] == 'TextArrayField':
for mem in data[key]:
if not mem:
errors.append(
'"{0}" field has empty strings in it'.format(key))
break
elif req['type'] == 'BooleanField':
if data[key] not in (True, False):
errors.append('"{0}" field has non-boolean value'.format(key))
required_keys = [req['name'] for req in requirements]
# Second check to make sure there aren't fields that shouldn't
# be there.
for key in data.keys():
# Ignore special cases. These will be there if the data
# was pulled via the richard API or if we did a push.
if key in ['id', 'updated']:
continue
if key not in required_keys:
errors.append('"{0}" field shouldn\'t be there.'.format(key))
return errors
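# Hedged usage sketch for verify_video_data; the metadata blob below is
# deliberately minimal and made up, so on a real project it would also
# report any other required fields as missing:
#
#     data = {'title': 'Intro talk', 'category': 'PyCon 2014'}
#     for error in verify_video_data(data, category='PyCon 2014'):
#         err(error)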
def verify_json_files(json_files, category=None):
"""Verifies the data in a bunch of json files.
Prints the output
:param json_files: list of (filename, parsed json data) tuples to
call :py:func:`verify_video_data` on
:param category: The category as specified in the steve.ini file.
If the steve.ini has a category, then every data file either
has to have the same category or no category at all.
This is None if no category is specified in which case every
data file has to have a category.
:returns: dict mapping filenames to list of error strings
"""
filename_to_errors = {}
for filename, data in json_files:
filename_to_errors[filename] = verify_video_data(data, category)
return filename_to_errors
def wrap(text, indent=''):
return (
textwrap.TextWrapper(initial_indent=indent, subsequent_indent=indent)
.wrap(text))
def wrap_paragraphs(text):
text = ['\n'.join(textwrap.wrap(mem)) for mem in text.split('\n\n')]
return '\n\n'.join(text)
def err(*output, **kw):
"""Writes output to stderr.
:arg wrap: If you set ``wrap=False``, then ``err`` won't textwrap
the output.
"""
    output = 'Error: ' + ' '.join([stringify(o) for o in output])
if kw.get('wrap') is not False:
output = '\n'.join(wrap(output, kw.get('indent', '')))
elif kw.get('indent'):
indent = kw['indent']
output = indent + ('\n' + indent).join(output.splitlines())
sys.stderr.write(output + '\n')
def stringify(blob):
if isinstance(blob, unicode):
return unicodedata.normalize('NFKD', blob).encode('ascii', 'ignore')
return str(blob)
def out(*output, **kw):
"""Writes output to stdout.
:arg wrap: If you set ``wrap=False``, then ``out`` won't textwrap
the output.
"""
output = ' '.join([stringify(o) for o in output])
if kw.get('wrap') is not False:
output = '\n'.join(wrap(output, kw.get('indent', '')))
elif kw.get('indent'):
indent = kw['indent']
output = indent + ('\n' + indent).join(output.splitlines())
sys.stdout.write(output + '\n')
def load_json_files(config):
"""Parses and returns all video files for a project
:arg config: the configuration object
:returns: list of (filename, data) tuples where filename is the
string for the json file and data is a Python dict of
metadata.
"""
projectpath = config.get('project', 'projectpath')
jsonpath = os.path.join(projectpath, 'json')
if not os.path.exists(jsonpath):
return []
files = [f for f in os.listdir(jsonpath) if f.endswith('.json')]
files = [os.path.join('json', f) for f in files]
files.sort()
data = []
for fn in files:
try:
fp = open(fn, 'r')
data.append((fn, json.load(fp)))
fp.close()
        except Exception:
            err('Problem with {0}'.format(fn), wrap=False)
            raise
return data
def save_json_files(config, data, **kw):
"""Saves a bunch of files to json format
:arg config: the configuration object
:arg data: list of (filename, data) tuples where filename is the
string for the json file and data is a Python dict of metadata
.. Note::
This is the `save` side of :py:func:`load_json_files`. The output
of that function is the `data` argument for this one.
"""
if 'indent' not in kw:
kw['indent'] = 2
if 'sort_keys' not in kw:
kw['sort_keys'] = True
projectpath = config.get('project', 'projectpath')
jsonpath = os.path.join(projectpath, 'json')
if not os.path.exists(jsonpath):
os.makedirs(jsonpath)
for fn, contents in data:
fp = open(fn, 'w')
json.dump(contents, fp, **kw)
fp.close()
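# Sketch pairing the load and save sides; assumes `cfg` came from
# get_project_config() and that the data files carry a 'whiteboard'
# field (a field name from the richard metadata format):
#
#     data = load_json_files(cfg)
#     for fn, contents in data:
#         contents['whiteboard'] = ''
#     save_json_files(cfg, data)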
def save_json_file(config, filename, contents, **kw):
"""Saves a single json file
:arg config: configuration object
:arg filename: filename
:arg contents: python dict to save
:arg kw: any keyword arguments accepted by `json.dump`
"""
if 'indent' not in kw:
kw['indent'] = 2
if 'sort_keys' not in kw:
kw['sort_keys'] = True
fp = open(filename, 'w')
json.dump(contents, fp, **kw)
fp.close()
def scrape_videos(url):
"""Scrapes a url for video data. Returns list of dicts.
:arg url: The url to fetch data from
:returns: list of dicts
>>> scrape_videos('https://www.youtube.com/user/PyConDE/videos')
[...]
"""
# FIXME: generate list of available scrapers.
# FIXME: run url through all available scrapers.
from steve.scrapers import YoutubeScraper
return YoutubeScraper().scrape(url)
def scrapevideo(video_url):
"""Scrapes the url and fixes the data
:arg video_url: Url of video to scrape.
:returns: Python dict of metadata
Example:
>>> scrapevideo('http://www.youtube.com/watch?v=ywToByBkOTc')
{'url': 'http://www.youtube.com/watch?v=ywToByBkOTc', ...}
"""
# FIXME: reimplement
raise NotImplementedError
def html_to_markdown(text):
"""Converts an HTML string to equivalent Markdown
:arg text: the HTML string to convert
:returns: Markdown string
Example:
>>> html_to_markdown('<p>this is <b>html</b>!</p>')
u'this is **html**!'
"""
return html2text.html2text(text).strip()
def get_video_id(url):
"""Returns the db ID from a full richard site URL
Example:
>>> get_video_id("http://pyvideo.org/video/2822/make-api-calls-wicked-fast-with-redis")
'2822'
"""
v_id = url.split('/video/')[1].split('/')[0]
return v_id
| bsd-2-clause |