# =============================================================================
# repo: ampax/edx-platform-backup
# file: common/lib/capa/capa/tests/__init__.py
# license: agpl-3.0
# =============================================================================
"""Tools for helping with testing capa."""
import gettext
import os
import os.path
import fs.osfs
from capa.capa_problem import LoncapaProblem, LoncapaSystem
from capa.inputtypes import Status
from mock import Mock, MagicMock
import xml.sax.saxutils as saxutils
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
def tst_render_template(template, context):
"""
A test version of render to template. Renders to the repr of the context, completely ignoring
the template name. To make the output valid xml, quotes the content, and wraps it in a <div>
"""
return '<div>{0}</div>'.format(saxutils.escape(repr(context)))
def calledback_url(dispatch='score_update'):
return dispatch
xqueue_interface = MagicMock()
xqueue_interface.send_to_queue.return_value = (0, 'Success!')
def test_capa_system():
"""
Construct a mock LoncapaSystem instance.
"""
the_system = Mock(
spec=LoncapaSystem,
ajax_url='/dummy-ajax-url',
anonymous_student_id='student',
cache=None,
can_execute_unsafe_code=lambda: False,
get_python_lib_zip=lambda: None,
DEBUG=True,
filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
i18n=gettext.NullTranslations(),
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
render_template=tst_render_template,
seed=0,
STATIC_URL='/dummy-static/',
STATUS_CLASS=Status,
xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10},
)
return the_system
def new_loncapa_problem(xml, capa_system=None, seed=723):
"""Construct a `LoncapaProblem` suitable for unit tests."""
return LoncapaProblem(xml, id='1', seed=seed, capa_system=capa_system or test_capa_system())
def load_fixture(relpath):
"""
Return a `unicode` object representing the contents
of the fixture file at the given path within a test_files directory
in the same directory as the test file.
"""
abspath = os.path.join(os.path.dirname(__file__), 'test_files', relpath)
with open(abspath) as fixture_file:
contents = fixture_file.read()
return contents.decode('utf8')
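
# A minimal usage sketch (not part of the original file), assuming the capa
# package is importable; the XML snippet and helper name are illustrative:
def _example_new_problem():
    xml = '<problem><p>What is 2 + 2?</p></problem>'
    # Builds a LoncapaProblem against the mocked LoncapaSystem above,
    # with the default fixed seed of 723.
    return new_loncapa_problem(xml)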
# =============================================================================
# repo: lmazuel/azure-sdk-for-python
# file: azure-mgmt-datafactory/azure/mgmt/datafactory/models/eloqua_source.py
# license: mit
# =============================================================================
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .copy_source import CopySource
class EloquaSource(CopySource):
"""A copy activity Eloqua server source.
    :param additional_properties: Unmatched properties from the message are
     deserialized into this collection
:type additional_properties: dict[str, object]
:param source_retry_count: Source retry count. Type: integer (or
Expression with resultType integer).
:type source_retry_count: object
:param source_retry_wait: Source retry wait. Type: string (or Expression
with resultType string), pattern:
((\\d+)\\.)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:type source_retry_wait: object
:param type: Constant filled by server.
:type type: str
:param query: A query to retrieve data from source. Type: string (or
Expression with resultType string).
:type query: object
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'source_retry_count': {'key': 'sourceRetryCount', 'type': 'object'},
'source_retry_wait': {'key': 'sourceRetryWait', 'type': 'object'},
'type': {'key': 'type', 'type': 'str'},
'query': {'key': 'query', 'type': 'object'},
}
def __init__(self, additional_properties=None, source_retry_count=None, source_retry_wait=None, query=None):
super(EloquaSource, self).__init__(additional_properties=additional_properties, source_retry_count=source_retry_count, source_retry_wait=source_retry_wait)
self.query = query
self.type = 'EloquaSource'
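
# A minimal construction sketch (not part of the original file); the query
# string below is a hypothetical example:
def _example_eloqua_source():
    source = EloquaSource(query='SELECT * FROM Contact', source_retry_count=3)
    return source.type  # 'EloquaSource'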
# =============================================================================
# repo: mrgloom/HPOlib
# file: HPOlib/config_parser/parse.py
# license: gpl-3.0
# =============================================================================
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import ConfigParser
logger = logging.getLogger("HPOlib.config_parser.parse")
def parse_config(config_files, allow_no_value=True, optimizer_version="",
cli_values=None):
if type(config_files) is str:
if not os.path.isfile(config_files):
raise Exception('%s is not a valid file\n' % os.path.join(
os.getcwd(), config_files))
else:
for config_file in config_files:
if not os.path.isfile(config_file):
raise Exception('%s is not a valid file\n' % os.path.join(
os.getcwd(), config_file))
config = ConfigParser.SafeConfigParser(allow_no_value=allow_no_value)
config.read(config_files)
if cli_values is not None:
config.readfp(cli_values)
return config
def check_config(config):
# --------------------------------------------------------------------------
# Check critical values
# --------------------------------------------------------------------------
if not config.has_option('HPOLIB', 'number_of_jobs') or \
config.get('HPOLIB', 'number_of_jobs') == '':
raise Exception('number_of_jobs not specified in .cfg')
if not config.has_option('HPOLIB', 'result_on_terminate') or \
config.get('HPOLIB', 'result_on_terminate') == '':
raise Exception('No result_on_terminate specified in .cfg')
if not config.has_option('HPOLIB', 'function') or \
config.get('HPOLIB', 'function') == '':
raise Exception('No function specified in .cfg')
if config.getint('HPOLIB', "number_cv_folds") < 1:
raise Exception("The number of crossvalidation folds must be at least one!")
return True
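
# A minimal sketch (not part of the original file): write an illustrative
# config to a temp file, parse it, and validate the critical values. The
# option values here are made up for demonstration only.
def _example_parse_and_check():
    import tempfile
    text = ("[HPOLIB]\n"
            "number_of_jobs = 10\n"
            "result_on_terminate = 1000\n"
            "function = python my_benchmark.py\n"
            "number_cv_folds = 1\n")
    with tempfile.NamedTemporaryFile(suffix='.cfg', delete=False) as fh:
        fh.write(text)
    config = parse_config(fh.name)
    return check_config(config)  # True when all critical options are present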
# =============================================================================
# repo: loandy/billy
# file: billy/web/api/tests/test_committees.py
# license: bsd-3-clause
# =============================================================================
from .base import BaseTestCase
class CommitteesSearchTestCase(BaseTestCase):
url_tmpl = '/api/v1/committees/'
data = dict(state='ex', chamber='lower')
def test_count(self):
self.assertEquals(
len(self.json),
self.db.committees.find(self.data).count())
def test_correct_keys_present(self):
expected_keys = set([
u'level', u'country', u'updated_at', u'parent_id',
u'state', u'subcommittee', u'committee', u'chamber', u'id', 'all_ids'])
self.assertEquals(set(self.json[0]), expected_keys)
def test_status(self):
self.assert_200()
class CommitteeLookupTestCase(BaseTestCase):
url_tmpl = '/api/v1/committees/{committee_id}/'
url_args = dict(committee_id='EXC000001')
    def test_state(self):
        '''Make sure the returned data has the correct
        state field value.
        '''
self.assertEquals(self.json['state'], 'ex')
def test_correct_keys_present(self):
expected_keys = set([
u'members', u'level', u'country', u'updated_at',
u'parent_id', u'state', u'subcommittee',
u'committee', u'chamber', u'id', 'all_ids'])
self.assertEquals(set(self.json), expected_keys)
def test_id(self):
self.assertEquals(self.json['id'], 'EXC000001')
def test_status(self):
self.assert_200()
# =============================================================================
# repo: lepistone/babel
# file: setup.py
# license: bsd-3-clause
# =============================================================================
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
from setuptools import setup
sys.path.append(os.path.join('doc', 'common'))
try:
from doctools import build_doc, test_doc
except ImportError:
build_doc = test_doc = None
from distutils.cmd import Command
class import_cldr(Command):
description = 'imports and converts the CLDR data'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
c = subprocess.Popen([sys.executable, 'scripts/download_import_cldr.py'])
c.wait()
setup(
name='Babel',
version='3.0-dev',
description='Internationalization utilities',
long_description=\
"""A collection of tools for internationalizing Python applications.""",
author='Armin Ronacher',
author_email='[email protected]',
license='BSD',
url='http://babel.pocoo.org/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=['babel', 'babel.messages', 'babel.localtime'],
package_data={'babel': ['global.dat', 'localedata/*.dat']},
install_requires=[
# This version identifier is currently necessary as
# pytz otherwise does not install on pip 1.4 or
# higher.
'pytz>=0a',
],
cmdclass={'build_doc': build_doc, 'test_doc': test_doc,
'import_cldr': import_cldr},
zip_safe=False,
# Note when adding extractors: builtin extractors we also want to
# work if packages are not installed to simplify testing. If you
# add an extractor here also manually add it to the "extract"
# function in babel.messages.extract.
entry_points="""
[console_scripts]
pybabel = babel.messages.frontend:main
[distutils.commands]
compile_catalog = babel.messages.frontend:compile_catalog
extract_messages = babel.messages.frontend:extract_messages
init_catalog = babel.messages.frontend:init_catalog
update_catalog = babel.messages.frontend:update_catalog
[distutils.setup_keywords]
message_extractors = babel.messages.frontend:check_message_extractors
[babel.checkers]
num_plurals = babel.messages.checkers:num_plurals
python_format = babel.messages.checkers:python_format
[babel.extractors]
ignore = babel.messages.extract:extract_nothing
python = babel.messages.extract:extract_python
javascript = babel.messages.extract:extract_javascript
"""
)
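
# Typical invocations (illustrative, not part of the original file):
#
#   python setup.py import_cldr   # runs scripts/download_import_cldr.py
#   python setup.py build_doc     # only when doc/common/doctools is importable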
# =============================================================================
# repo: mozillazg/django-cron
# file: django_cron/tests.py
# license: mit
# =============================================================================
import threading
from time import sleep
from datetime import timedelta
from django import db
from django.utils import unittest
from django.core.management import call_command
from django.test.utils import override_settings
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from freezegun import freeze_time
from django_cron.helpers import humanize_duration
from django_cron.models import CronJobLog
class OutBuffer(object):
content = []
modified = False
_str_cache = ''
def write(self, *args):
self.content.extend(args)
self.modified = True
def str_content(self):
if self.modified:
self._str_cache = ''.join((str(x) for x in self.content))
self.modified = False
return self._str_cache
class TestCase(unittest.TestCase):
success_cron = 'test_crons.TestSucessCronJob'
error_cron = 'test_crons.TestErrorCronJob'
five_mins_cron = 'test_crons.Test5minsCronJob'
run_at_times_cron = 'test_crons.TestRunAtTimesCronJob'
wait_3sec_cron = 'test_crons.Wait3secCronJob'
does_not_exist_cron = 'ThisCronObviouslyDoesntExist'
test_failed_runs_notification_cron = 'django_cron.cron.FailedRunsNotificationCronJob'
def setUp(self):
CronJobLog.objects.all().delete()
def test_success_cron(self):
logs_count = CronJobLog.objects.all().count()
call_command('runcrons', self.success_cron, force=True)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
def test_failed_cron(self):
logs_count = CronJobLog.objects.all().count()
call_command('runcrons', self.error_cron, force=True)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
def test_not_exists_cron(self):
logs_count = CronJobLog.objects.all().count()
out_buffer = OutBuffer()
call_command('runcrons', self.does_not_exist_cron, force=True, stdout=out_buffer)
self.assertIn('Make sure these are valid cron class names', out_buffer.str_content())
self.assertIn(self.does_not_exist_cron, out_buffer.str_content())
self.assertEqual(CronJobLog.objects.all().count(), logs_count)
@override_settings(DJANGO_CRON_LOCK_BACKEND='django_cron.backends.lock.file.FileLock')
def test_file_locking_backend(self):
logs_count = CronJobLog.objects.all().count()
call_command('runcrons', self.success_cron, force=True)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
def test_runs_every_mins(self):
logs_count = CronJobLog.objects.all().count()
with freeze_time("2014-01-01 00:00:00"):
call_command('runcrons', self.five_mins_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
with freeze_time("2014-01-01 00:04:59"):
call_command('runcrons', self.five_mins_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
with freeze_time("2014-01-01 00:05:01"):
call_command('runcrons', self.five_mins_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 2)
def test_runs_at_time(self):
logs_count = CronJobLog.objects.all().count()
with freeze_time("2014-01-01 00:00:01"):
call_command('runcrons', self.run_at_times_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
with freeze_time("2014-01-01 00:04:50"):
call_command('runcrons', self.run_at_times_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
with freeze_time("2014-01-01 00:05:01"):
call_command('runcrons', self.run_at_times_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 2)
def test_admin(self):
password = 'test'
user = User.objects.create_superuser(
'test',
'[email protected]',
password
)
self.client = Client()
self.client.login(username=user.username, password=password)
# edit CronJobLog object
call_command('runcrons', self.success_cron, force=True)
log = CronJobLog.objects.all()[0]
url = reverse('admin:django_cron_cronjoblog_change', args=(log.id,))
response = self.client.get(url)
self.assertIn('Cron job logs', str(response.content))
def run_cronjob_in_thread(self, logs_count):
call_command('runcrons', self.wait_3sec_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
db.close_old_connections()
def test_cache_locking_backend(self):
"""
with cache locking backend
"""
logs_count = CronJobLog.objects.all().count()
t = threading.Thread(target=self.run_cronjob_in_thread, args=(logs_count,))
t.daemon = True
t.start()
# this shouldn't get running
sleep(0.1) # to avoid race condition
call_command('runcrons', self.wait_3sec_cron)
t.join(10)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
    # TODO: this test doesn't pass - it seems the second cronjob locks the
    # file, but it should instead raise an exception that the file is locked
    # by the other cronjob
# @override_settings(
# DJANGO_CRON_LOCK_BACKEND='django_cron.backends.lock.file.FileLock',
# DJANGO_CRON_LOCKFILE_PATH=os.path.join(os.getcwd())
# )
# def test_file_locking_backend_in_thread(self):
# """
# with file locking backend
# """
# logs_count = CronJobLog.objects.all().count()
# t = threading.Thread(target=self.run_cronjob_in_thread, args=(logs_count,))
# t.daemon = True
# t.start()
# # this shouldn't get running
# sleep(1) # to avoid race condition
# call_command('runcrons', self.wait_3sec_cron)
# t.join(10)
# self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
def test_failed_runs_notification(self):
CronJobLog.objects.all().delete()
logs_count = CronJobLog.objects.all().count()
for i in range(10):
call_command('runcrons', self.error_cron, force=True)
call_command('runcrons', self.test_failed_runs_notification_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 11)
def test_humanize_duration(self):
test_subjects = (
(timedelta(days=1, hours=1, minutes=1, seconds=1), '1 day, 1 hour, 1 minute, 1 second'),
(timedelta(days=2), '2 days'),
(timedelta(days=15, minutes=4), '15 days, 4 minutes'),
(timedelta(), '< 1 second'),
)
for duration, humanized in test_subjects:
self.assertEqual(
humanize_duration(duration),
humanized
)
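
# A sketch (not part of the original file) of what a cron class such as
# 'test_crons.Test5minsCronJob' typically looks like with django-cron; the
# class body is illustrative:
#
#   from django_cron import CronJobBase, Schedule
#
#   class Test5minsCronJob(CronJobBase):
#       RUN_EVERY_MINS = 5
#       schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
#       code = 'test_crons.Test5minsCronJob'
#
#       def do(self):
#           pass  # each run is recorded as a CronJobLog row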
# =============================================================================
# repo: 7kbird/chrome
# file: third_party/cython/src/Cython/Debugger/Cygdb.py
# license: bsd-3-clause
# =============================================================================
#!/usr/bin/env python
"""
The Cython debugger
The current directory should contain a directory named 'cython_debug', or a
path to the cython project directory should be given (the parent directory of
cython_debug).
Additional gdb args can be provided only if a path to the project directory is
given.
"""
import os
import sys
import glob
import tempfile
import textwrap
import subprocess
import optparse
import logging
logger = logging.getLogger(__name__)
def make_command_file(path_to_debug_info, prefix_code='', no_import=False):
if not no_import:
pattern = os.path.join(path_to_debug_info,
'cython_debug',
'cython_debug_info_*')
debug_files = glob.glob(pattern)
if not debug_files:
sys.exit('%s.\nNo debug files were found in %s. Aborting.' % (
usage, os.path.abspath(path_to_debug_info)))
fd, tempfilename = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
try:
f.write(prefix_code)
f.write('set breakpoint pending on\n')
f.write("set print pretty on\n")
f.write('python from Cython.Debugger import libcython, libpython\n')
if no_import:
# don't do this, this overrides file command in .gdbinit
# f.write("file %s\n" % sys.executable)
pass
else:
path = os.path.join(path_to_debug_info, "cython_debug", "interpreter")
interpreter_file = open(path)
try:
interpreter = interpreter_file.read()
finally:
interpreter_file.close()
f.write("file %s\n" % interpreter)
f.write('\n'.join('cy import %s\n' % fn for fn in debug_files))
f.write(textwrap.dedent('''\
python
import sys
try:
gdb.lookup_type('PyModuleObject')
except RuntimeError:
sys.stderr.write(
'Python was not compiled with debug symbols (or it was '
'stripped). Some functionality may not work (properly).\\n')
end
source .cygdbinit
'''))
finally:
f.close()
return tempfilename
usage = "Usage: cygdb [options] [PATH [-- GDB_ARGUMENTS]]"
def main(path_to_debug_info=None, gdb_argv=None, no_import=False):
"""
Start the Cython debugger. This tells gdb to import the Cython and Python
extensions (libcython.py and libpython.py) and it enables gdb's pending
breakpoints.
path_to_debug_info is the path to the Cython build directory
gdb_argv is the list of options to gdb
no_import tells cygdb whether it should import debug information
"""
parser = optparse.OptionParser(usage=usage)
parser.add_option("--gdb-executable",
dest="gdb", default='gdb',
help="gdb executable to use [default: gdb]")
parser.add_option("--verbose", "-v",
dest="verbosity", action="count", default=0,
help="Verbose mode. Multiple -v options increase the verbosity")
(options, args) = parser.parse_args()
if path_to_debug_info is None:
if len(args) > 1:
path_to_debug_info = args[0]
else:
path_to_debug_info = os.curdir
if gdb_argv is None:
gdb_argv = args[1:]
if path_to_debug_info == '--':
no_import = True
logging_level = logging.WARN
if options.verbosity == 1:
logging_level = logging.INFO
if options.verbosity == 2:
logging_level = logging.DEBUG
logging.basicConfig(level=logging_level)
logger.info("verbosity = %r", options.verbosity)
logger.debug("options = %r; args = %r", options, args)
logger.debug("Done parsing command-line options. path_to_debug_info = %r, gdb_argv = %r",
path_to_debug_info, gdb_argv)
tempfilename = make_command_file(path_to_debug_info, no_import=no_import)
logger.info("Launching %s with command file: %s and gdb_argv: %s",
options.gdb, tempfilename, gdb_argv)
logger.debug('Command file (%s) contains: """\n%s"""', tempfilename, open(tempfilename).read())
logger.info("Spawning %s...", options.gdb)
p = subprocess.Popen([options.gdb, '-command', tempfilename] + gdb_argv)
logger.info("Spawned %s (pid %d)", options.gdb, p.pid)
while True:
try:
logger.debug("Waiting for gdb (pid %d) to exit...", p.pid)
ret = p.wait()
logger.debug("Wait for gdb (pid %d) to exit is done. Returned: %r", p.pid, ret)
except KeyboardInterrupt:
pass
else:
break
logger.debug("Removing temp command file: %s", tempfilename)
os.remove(tempfilename)
logger.debug("Removed temp command file: %s", tempfilename)
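
# A minimal sketch (not part of the original file): generate the gdb command
# file for inspection without launching gdb; the project path is hypothetical.
#
#   name = make_command_file('/path/to/project', no_import=True)
#   print(open(name).read())
#   os.remove(name)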
# =============================================================================
# repo: g-vidal/mraa
# file: tests/mock/i2c_checks_write_byte.py
# license: mit
# =============================================================================
#!/usr/bin/env python
# Author: Alex Tereschenko <[email protected]>
# Copyright (c) 2016 Alex Tereschenko.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import mraa as m
import unittest as u
from i2c_checks_shared import *
class I2cChecksWriteByte(u.TestCase):
def setUp(self):
self.i2c = m.I2c(MRAA_I2C_BUS_NUM)
def tearDown(self):
del self.i2c
def test_i2c_write_byte(self):
self.i2c.address(MRAA_MOCK_I2C_ADDR)
test_byte = 0xEE
self.assertEqual(self.i2c.writeByte(test_byte),
m.SUCCESS,
"I2C writeByte() did not return success")
self.assertEqual(self.i2c.readByte(),
test_byte,
"I2C readByte() after writeByte() returned unexpected data")
def test_i2c_write_byte_invalid_addr(self):
self.i2c.address(MRAA_MOCK_I2C_ADDR - 1)
self.assertEqual(self.i2c.writeByte(0xEE),
m.ERROR_UNSPECIFIED,
"I2C writeByte() to invalid address did not return error")
if __name__ == "__main__":
u.main()
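
# The mraa calls exercised above, as a standalone sketch (not part of the
# original file); bus number and address come from i2c_checks_shared:
#
#   i2c = m.I2c(MRAA_I2C_BUS_NUM)
#   i2c.address(MRAA_MOCK_I2C_ADDR)
#   i2c.writeByte(0xEE)   # m.SUCCESS on the mock board
#   i2c.readByte()        # 0xEE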
# =============================================================================
# repo: redreamality/learning-to-rank
# file: lerot/evaluation/LetorNdcgEval.py
# license: gpl-3.0
# =============================================================================
# This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
# KH, 2012/06/20
from numpy import log2
from .AbstractEval import AbstractEval
class LetorNdcgEval(AbstractEval):
"""Compute NDCG as implemented in the Letor toolkit."""
def get_dcg(self, labels, cutoff=-1):
if (cutoff == -1):
cutoff = len(labels)
dcg = 0
# [0:cutoff] returns the labels up to min(len(labels), cutoff)
for r, label in enumerate(labels[0:cutoff]):
# use log2(1 + r), to be consistent with the implementation in the
# letor 4 evaluation tools (and wikipedia, on 6/27/2012), even
# though this makes discounting slightly inconsistent (indices are
# zero-based, so using log2(2 + r) would be more consistent)
if r == 0:
dcg += 2 ** label - 1
else:
dcg += (2 ** label - 1) / log2(1 + r)
return dcg
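
# A worked example (not part of the original file): for labels [3, 2, 0] and
# the default cutoff of -1 (i.e. the whole list),
#   r=0: dcg += 2**3 - 1             = 7
#   r=1: dcg += (2**2 - 1) / log2(2) = 3.0
#   r=2: dcg += (2**0 - 1) / log2(3) = 0.0
# so get_dcg([3, 2, 0]) returns 10.0.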
# =============================================================================
# repo: Changaco/oh-mainline
# file: vendor/packages/whoosh/src/whoosh/lang/porter2.py
# license: agpl-3.0
# =============================================================================
"""An implementation of the Porter2 stemming algorithm.
See http://snowball.tartarus.org/algorithms/english/stemmer.html
Adapted from pyporter2 by Michael Dirolf.
This algorithm is more correct but (at least in this implementation)
several times slower than the original porter algorithm as implemented
in stemming.porter.
"""
import re
r_exp = re.compile(r"[^aeiouy]*[aeiouy]+[^aeiouy](\w*)")
ewss_exp1 = re.compile(r"^[aeiouy][^aeiouy]$")
ewss_exp2 = re.compile(r".*[^aeiouy][aeiouy][^aeiouywxY]$")
ccy_exp = re.compile(r"([aeiouy])y")
s1a_exp = re.compile(r"[aeiouy].")
s1b_exp = re.compile(r"[aeiouy]")
def get_r1(word):
# exceptional forms
if word.startswith('gener') or word.startswith('arsen'):
return 5
if word.startswith('commun'):
return 6
# normal form
match = r_exp.match(word)
if match:
return match.start(1)
return len(word)
def get_r2(word):
match = r_exp.match(word, get_r1(word))
if match:
return match.start(1)
return len(word)
def ends_with_short_syllable(word):
if len(word) == 2:
if ewss_exp1.match(word):
return True
if ewss_exp2.match(word):
return True
return False
def is_short_word(word):
if ends_with_short_syllable(word):
if get_r1(word) == len(word):
return True
return False
def remove_initial_apostrophe(word):
if word.startswith("'"):
return word[1:]
return word
def capitalize_consonant_ys(word):
if word.startswith('y'):
word = 'Y' + word[1:]
return ccy_exp.sub('\g<1>Y', word)
def step_0(word):
if word.endswith("'s'"):
return word[:-3]
if word.endswith("'s"):
return word[:-2]
if word.endswith("'"):
return word[:-1]
return word
def step_1a(word):
if word.endswith('sses'):
return word[:-4] + 'ss'
if word.endswith('ied') or word.endswith('ies'):
if len(word) > 4:
return word[:-3] + 'i'
else:
return word[:-3] + 'ie'
if word.endswith('us') or word.endswith('ss'):
return word
if word.endswith('s'):
preceding = word[:-1]
if s1a_exp.search(preceding):
return preceding
return word
return word
doubles = ('bb', 'dd', 'ff', 'gg', 'mm', 'nn', 'pp', 'rr', 'tt')
def ends_with_double(word):
for double in doubles:
if word.endswith(double):
return True
return False
def step_1b_helper(word):
if word.endswith('at') or word.endswith('bl') or word.endswith('iz'):
return word + 'e'
if ends_with_double(word):
return word[:-1]
if is_short_word(word):
return word + 'e'
return word
s1b_suffixes = ('ed', 'edly', 'ing', 'ingly')
def step_1b(word, r1):
if word.endswith('eedly'):
if len(word) - 5 >= r1:
return word[:-3]
return word
if word.endswith('eed'):
if len(word) - 3 >= r1:
return word[:-1]
return word
for suffix in s1b_suffixes:
if word.endswith(suffix):
preceding = word[:-len(suffix)]
if s1b_exp.search(preceding):
return step_1b_helper(preceding)
return word
return word
def step_1c(word):
    if (word.endswith('y') or word.endswith('Y')) and len(word) > 1:
if word[-2] not in 'aeiouy':
if len(word) > 2:
return word[:-1] + 'i'
return word
def step_2_helper(word, r1, end, repl, prev):
if word.endswith(end):
if len(word) - len(end) >= r1:
if prev == []:
return word[:-len(end)] + repl
for p in prev:
if word[:-len(end)].endswith(p):
return word[:-len(end)] + repl
return word
return None
s2_triples = (('ization', 'ize', []),
('ational', 'ate', []),
('fulness', 'ful', []),
('ousness', 'ous', []),
('iveness', 'ive', []),
('tional', 'tion', []),
('biliti', 'ble', []),
('lessli', 'less', []),
('entli', 'ent', []),
('ation', 'ate', []),
('alism', 'al', []),
('aliti', 'al', []),
('ousli', 'ous', []),
('iviti', 'ive', []),
('fulli', 'ful', []),
('enci', 'ence', []),
('anci', 'ance', []),
('abli', 'able', []),
('izer', 'ize', []),
('ator', 'ate', []),
('alli', 'al', []),
('bli', 'ble', []),
('ogi', 'og', ['l']),
('li', '', ['c', 'd', 'e', 'g', 'h', 'k', 'm', 'n', 'r', 't']))
def step_2(word, r1):
for trip in s2_triples:
attempt = step_2_helper(word, r1, trip[0], trip[1], trip[2])
if attempt:
return attempt
return word
def step_3_helper(word, r1, r2, end, repl, r2_necessary):
if word.endswith(end):
if len(word) - len(end) >= r1:
if not r2_necessary:
return word[:-len(end)] + repl
else:
if len(word) - len(end) >= r2:
return word[:-len(end)] + repl
return word
return None
s3_triples = (('ational', 'ate', False),
('tional', 'tion', False),
('alize', 'al', False),
('icate', 'ic', False),
('iciti', 'ic', False),
('ative', '', True),
('ical', 'ic', False),
('ness', '', False),
('ful', '', False))
def step_3(word, r1, r2):
for trip in s3_triples:
attempt = step_3_helper(word, r1, r2, trip[0], trip[1], trip[2])
if attempt:
return attempt
return word
s4_delete_list = ('al', 'ance', 'ence', 'er', 'ic', 'able', 'ible', 'ant', 'ement',
'ment', 'ent', 'ism', 'ate', 'iti', 'ous', 'ive', 'ize')
def step_4(word, r2):
for end in s4_delete_list:
if word.endswith(end):
if len(word) - len(end) >= r2:
return word[:-len(end)]
return word
if word.endswith('sion') or word.endswith('tion'):
if len(word) - 3 >= r2:
return word[:-3]
return word
def step_5(word, r1, r2):
if word.endswith('l'):
if len(word) - 1 >= r2 and word[-2] == 'l':
return word[:-1]
return word
if word.endswith('e'):
if len(word) - 1 >= r2:
return word[:-1]
if len(word) - 1 >= r1 and not ends_with_short_syllable(word[:-1]):
return word[:-1]
return word
def normalize_ys(word):
return word.replace('Y', 'y')
exceptional_forms = {'skis': 'ski',
'skies': 'sky',
'dying': 'die',
'lying': 'lie',
'tying': 'tie',
'idly': 'idl',
'gently': 'gentl',
'ugly': 'ugli',
'early': 'earli',
'only': 'onli',
'singly': 'singl',
'sky': 'sky',
'news': 'news',
'howe': 'howe',
'atlas': 'atlas',
'cosmos': 'cosmos',
'bias': 'bias',
'andes': 'andes'}
exceptional_early_exit_post_1a = frozenset(['inning', 'outing', 'canning', 'herring',
'earring', 'proceed', 'exceed', 'succeed'])
def stem(word):
if len(word) <= 2:
return word
word = remove_initial_apostrophe(word)
# handle some exceptional forms
if word in exceptional_forms:
return exceptional_forms[word]
word = capitalize_consonant_ys(word)
r1 = get_r1(word)
r2 = get_r2(word)
word = step_0(word)
word = step_1a(word)
# handle some more exceptional forms
if word in exceptional_early_exit_post_1a:
return word
word = step_1b(word, r1)
word = step_1c(word)
word = step_2(word, r1)
word = step_3(word, r1, r2)
word = step_4(word, r2)
word = step_5(word, r1, r2)
word = normalize_ys(word)
return word
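
# Example outputs (not part of the original file); the first two resolve via
# the exceptional-forms table, the last via the normal steps:
#
#   stem('skies')    # -> 'sky'
#   stem('dying')    # -> 'die'
#   stem('running')  # -> 'run'  (step_1b strips 'ing', then undoubles 'nn')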
# =============================================================================
# repo: lmacken/moksha
# file: moksha/tests/functional/test_csrf.py
# license: apache-2.0
# =============================================================================
# -*- coding: utf-8 -*-
# This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration tests for Moksha's CSRF protection.
These tests are meant to ensure the validity of Moksha's CSRF WSGI middleware
and repoze.who metadata provider plugin.
"""
from moksha.tests import TestController
class TestCSRFProtection(TestController):
application_under_test = 'main'
def test_csrf_protection(self):
# Test for anonymous requests
resp = self.app.get('/', status=200)
assert 'moksha_base_url = "/"' in resp
assert 'moksha_csrf_token = ""' in resp
assert 'moksha_userid = ""' in resp
# Requesting a protected area
resp = self.app.get('/moksha_admin/', status=302)
assert resp.location.startswith('http://localhost/login') or \
resp.location.startswith('/login'), resp.location
# Getting the login form:
resp = resp.follow(status=200)
form = resp.form
# Submitting the login form:
form['login'] = u'manager'
form['password'] = 'managepass'
post_login = form.submit(status=302)
# Being redirected to the initially requested page:
assert post_login.location.startswith('http://localhost/post_login') or\
post_login.location.startswith('/post_login')
initial_page = post_login.follow(status=302)
assert 'authtkt' in initial_page.request.cookies, \
"Session cookie wasn't defined: %s" % initial_page.request.cookies
assert initial_page.location.startswith('http://localhost/moksha_admin/'), \
initial_page.location
assert '_csrf_token=' in initial_page.location, "Login not redirected with CSRF token"
token = initial_page.location.split('_csrf_token=')[1]
# Now ensure that the token also also being injected in the page
resp = initial_page.follow(status=200)
assert 'moksha_csrf_token' in resp
assert token == resp.body.split('moksha_csrf_token')[1].split(';')[0].split('"')[1], \
"CSRF token not set in response body!"
# Make sure we can get to the page with the token
resp = self.app.post('/moksha_admin/', {'_csrf_token': token}, status=200)
assert 'moksha_csrf_token' in resp, resp
assert 'moksha_csrf_token = ""' not in resp, "CSRF token not set!"
assert token == resp.body.split('moksha_csrf_token')[1].split(';')[0].split('"')[1], \
"CSRF token not set in response body!"
# Make sure we can't get back to the page without the token
resp = self.app.get('/moksha_admin/', status=302)
assert 'The resource was found at /post_logout' in resp or \
'The resource was found at /login' in resp or \
'The resource was found at http://localhost/login' in resp
# Make sure that we can't get back after we got rejected once
resp = self.app.post('/moksha_admin/', {'_csrf_token': token}, status=302)
assert 'The resource was found at /login' in resp or \
'The resource was found at http://localhost/login' in resp
# Ensure the token gets removed
resp = self.app.get('/', status=200)
assert 'moksha_base_url = "/"' in resp
assert 'moksha_csrf_token = ""' in resp
assert 'moksha_userid = ""' in resp
# Ok, now log back in...
resp = self.app.get('/moksha_admin/', status=302)
resp = resp.follow(status=200)
form = resp.form
form['login'] = u'manager'
form['password'] = 'managepass'
post_login = form.submit(status=302)
initial_page = post_login.follow(status=302)
assert '_csrf_token=' in initial_page.location, "Login not redirected with CSRF token"
newtoken = initial_page.location.split('_csrf_token=')[1]
# For some reason logging out sometimes doesn't give us a new session cookie
#assert newtoken != token, "Did not receieve a new token!!"
# Now, make sure we reject invalid tokens
resp = self.app.post('/moksha_admin/', {'_csrf_token': token + ' '}, status=302)
assert 'The resource was found at /post_logout' in resp or \
'The resource was found at http://localhost/post_logout' in resp
# =============================================================================
# repo: lahosken/pants
# file: src/python/pants/backend/jvm/subsystems/jar_tool.py
# license: apache-2.0
# =============================================================================
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.subsystems.jvm_tool_mixin import JvmToolMixin
from pants.base.workunit import WorkUnitLabel
from pants.java.jar.jar_dependency import JarDependency
from pants.subsystem.subsystem import Subsystem
class JarTool(JvmToolMixin, Subsystem):
options_scope = 'jar-tool'
@classmethod
def register_options(cls, register):
super(JarTool, cls).register_options(register)
cls.register_jvm_tool(register,
'jar-tool',
classpath=[
JarDependency(org='org.pantsbuild', name='jar-tool', rev='0.0.10'),
])
def run(self, context, runjava, args):
return runjava(self.tool_classpath_from_products(context.products, 'jar-tool',
scope=self.options_scope),
'org.pantsbuild.tools.jar.Main',
jvm_options=self.get_options().jvm_options,
args=args,
workunit_name='jar-tool',
workunit_labels=[WorkUnitLabel.TOOL, WorkUnitLabel.JVM,
WorkUnitLabel.NAILGUN, WorkUnitLabel.SUPPRESS_LABEL])
# =============================================================================
# repo: ioram7/keystone-federado-pgid2013
# file: build/lib.linux-x86_64-2.7/keystone/common/sql/migrate_repo/versions/019_fixup_role.py
# license: apache-2.0
# =============================================================================
import json
import uuid
import sqlalchemy as sql
from sqlalchemy import orm
from keystone import config
from keystone import exception
CONF = config.CONF
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
role_table = sql.Table('role', meta, autoload=True)
# name should be 255 characters to match fresh database
role_table.c.name.alter(type=sql.String(length=255))
# blank 'extra' field should be "{}"
none = None
update = role_table.update().where(role_table.c.extra == none).values(
{role_table.c.extra: "{}"})
migrate_engine.execute(update)
def downgrade(migrate_engine):
# this fixes bugs in migration 001 and 007 that result in discrepancies
# between fresh databases and databases updated from 004 (folsom).
# the changes fixing 007 will be rolled back in 007's rollback if
# the user desires to return to a state before the existence of the extra
# column.
# the name length change reflects the current default and should not be
# rolled back.
pass
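
# The upgrade above is roughly equivalent to (illustrative, MySQL-flavoured
# SQL; not part of the original file):
#
#   ALTER TABLE role MODIFY name VARCHAR(255);
#   UPDATE role SET extra = '{}' WHERE extra IS NULL;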
# =============================================================================
# repo: dabiboo/youtube-dl
# file: test/test_cache.py
# license: unlicense
# =============================================================================
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import shutil
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL
from youtube_dl.cache import Cache
def _is_empty(d):
return not bool(os.listdir(d))
def _mkdir(d):
if not os.path.exists(d):
os.mkdir(d)
class TestCache(unittest.TestCase):
def setUp(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
_mkdir(TESTDATA_DIR)
self.test_dir = os.path.join(TESTDATA_DIR, 'cache_test')
self.tearDown()
def tearDown(self):
if os.path.exists(self.test_dir):
shutil.rmtree(self.test_dir)
def test_cache(self):
ydl = FakeYDL({
'cachedir': self.test_dir,
})
c = Cache(ydl)
obj = {'x': 1, 'y': ['ä', '\\a', True]}
self.assertEqual(c.load('test_cache', 'k.'), None)
c.store('test_cache', 'k.', obj)
self.assertEqual(c.load('test_cache', 'k2'), None)
self.assertFalse(_is_empty(self.test_dir))
self.assertEqual(c.load('test_cache', 'k.'), obj)
self.assertEqual(c.load('test_cache', 'y'), None)
self.assertEqual(c.load('test_cache2', 'k.'), None)
c.remove()
self.assertFalse(os.path.exists(self.test_dir))
self.assertEqual(c.load('test_cache', 'k.'), None)
if __name__ == '__main__':
unittest.main()
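
# The Cache API exercised above, as a standalone sketch (not part of the
# original file); the cache directory and key names are illustrative:
#
#   ydl = FakeYDL({'cachedir': '/tmp/ydl-cache-example'})
#   c = Cache(ydl)
#   c.store('test_cache', 'k.', {'x': 1})
#   c.load('test_cache', 'k.')   # -> {'x': 1}
#   c.remove()                   # deletes the cache directory again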
# =============================================================================
# repo: alfonsodev/ansible-modules-extras
# file: monitoring/zabbix_screen.py
# license: gpl-3.0
# =============================================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: zabbix_screen
short_description: Zabbix screen creates/updates/deletes
description:
- This module allows you to create, modify and delete Zabbix screens and associated graph data.
version_added: "2.0"
author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
server_url:
description:
- Url of Zabbix server, with protocol (http or https).
required: true
aliases: [ "url" ]
login_user:
description:
- Zabbix user name.
required: true
login_password:
description:
- Zabbix user password.
required: true
timeout:
description:
- The timeout of API request (seconds).
default: 10
zabbix_screens:
description:
- List of screens to be created/updated/deleted(see example).
            - If the screen(s) have already been added, the screen(s) name won't be updated.
            - When creating or updating screen(s), C(screen_name), C(host_group) are required.
            - When deleting screen(s), the C(screen_name) is required.
            - 'The available states are: C(present) (default) and C(absent). If the screen(s) already exist, and the state is not C(absent), the screen(s) will just be updated as needed.'
required: true
notes:
- Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed.
'''
EXAMPLES = '''
# Create/update a screen.
- name: Create a new screen or update an existing screen's items
local_action:
module: zabbix_screen
server_url: http://monitor.example.com
login_user: username
login_password: password
screens:
- screen_name: ExampleScreen1
host_group: Example group1
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
# Create/update multi-screen
- name: Create two of new screens or update the existing screens' items
local_action:
module: zabbix_screen
server_url: http://monitor.example.com
login_user: username
login_password: password
screens:
- screen_name: ExampleScreen1
host_group: Example group1
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
- screen_name: ExampleScreen2
host_group: Example group2
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
# Limit the Zabbix screen creations to one host since Zabbix can return an error when doing concurrent updates
- name: Create a new screen or update an existing screen's items
local_action:
module: zabbix_screen
server_url: http://monitor.example.com
login_user: username
login_password: password
state: present
screens:
- screen_name: ExampleScreen
host_group: Example group
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
when: inventory_hostname==groups['group_name'][0]
'''
try:
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
from zabbix_api import ZabbixAPIException
from zabbix_api import Already_Exists
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
# Extend the ZabbixAPI
# Since the zabbix-api python module too old (version 1.0, and there's no higher version so far), it doesn't support the 'screenitem' api call,
# we have to inherit the ZabbixAPI class to add 'screenitem' support.
class ZabbixAPIExtends(ZabbixAPI):
screenitem = None
def __init__(self, server, timeout, **kwargs):
ZabbixAPI.__init__(self, server, timeout=timeout)
self.screenitem = ZabbixAPISubClass(self, dict({"prefix": "screenitem"}, **kwargs))
class Screen(object):
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
# get group id by group name
def get_host_group_id(self, group_name):
if group_name == "":
self._module.fail_json(msg="group_name is required")
hostGroup_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_name}})
if len(hostGroup_list) < 1:
self._module.fail_json(msg="Host group not found: %s" % group_name)
else:
hostGroup_id = hostGroup_list[0]['groupid']
return hostGroup_id
# get monitored host_id by host_group_id
def get_host_ids_by_group_id(self, group_id):
host_list = self._zapi.host.get({'output': 'extend', 'groupids': group_id, 'monitored_hosts': 1})
if len(host_list) < 1:
self._module.fail_json(msg="No host in the group.")
else:
host_ids = []
for i in host_list:
host_id = i['hostid']
host_ids.append(host_id)
return host_ids
# get screen
def get_screen_id(self, screen_name):
if screen_name == "":
self._module.fail_json(msg="screen_name is required")
try:
screen_id_list = self._zapi.screen.get({'output': 'extend', 'search': {"name": screen_name}})
if len(screen_id_list) >= 1:
screen_id = screen_id_list[0]['screenid']
return screen_id
return None
except Exception as e:
self._module.fail_json(msg="Failed to get screen %s from Zabbix: %s" % (screen_name, e))
# create screen
def create_screen(self, screen_name, h_size, v_size):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
screen = self._zapi.screen.create({'name': screen_name, 'hsize': h_size, 'vsize': v_size})
return screen['screenids'][0]
except Exception as e:
self._module.fail_json(msg="Failed to create screen %s: %s" % (screen_name, e))
# update screen
def update_screen(self, screen_id, screen_name, h_size, v_size):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.screen.update({'screenid': screen_id, 'hsize': h_size, 'vsize': v_size})
except Exception as e:
self._module.fail_json(msg="Failed to update screen %s: %s" % (screen_name, e))
# delete screen
def delete_screen(self, screen_id, screen_name):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.screen.delete([screen_id])
except Exception as e:
self._module.fail_json(msg="Failed to delete screen %s: %s" % (screen_name, e))
# get graph ids
def get_graph_ids(self, hosts, graph_name_list):
graph_id_lists = []
vsize = 1
for host in hosts:
graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
size = len(graph_id_list)
if size > 0:
graph_id_lists.extend(graph_id_list)
if vsize < size:
vsize = size
return graph_id_lists, vsize
# getGraphs
def get_graphs_by_host_id(self, graph_name_list, host_id):
graph_ids = []
for graph_name in graph_name_list:
graphs_list = self._zapi.graph.get({'output': 'extend', 'search': {'name': graph_name}, 'hostids': host_id})
graph_id_list = []
if len(graphs_list) > 0:
for graph in graphs_list:
graph_id = graph['graphid']
graph_id_list.append(graph_id)
if len(graph_id_list) > 0:
graph_ids.extend(graph_id_list)
return graph_ids
# get screen items
def get_screen_items(self, screen_id):
screen_item_list = self._zapi.screenitem.get({'output': 'extend', 'screenids': screen_id})
return screen_item_list
# delete screen items
def delete_screen_items(self, screen_id, screen_item_id_list):
try:
if len(screen_item_id_list) == 0:
return True
screen_item_list = self.get_screen_items(screen_id)
if len(screen_item_list) > 0:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.screenitem.delete(screen_item_id_list)
return True
return False
except ZabbixAPIException:
pass
# get screen's hsize and vsize
def get_hsize_vsize(self, hosts, v_size):
h_size = len(hosts)
if h_size == 1:
if v_size == 1:
h_size = 1
elif v_size in range(2, 9):
h_size = 2
else:
h_size = 3
v_size = (v_size - 1) / h_size + 1
return h_size, v_size
# create screen_items
def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, h_size):
if len(hosts) < 4:
if width is None or width < 0:
width = 500
else:
if width is None or width < 0:
width = 200
if height is None or height < 0:
height = 100
try:
            # when there is only one host, putting everything on one row is not ideal.
if len(hosts) == 1:
graph_id_list = self.get_graphs_by_host_id(graph_name_list, hosts[0])
for i, graph_id in enumerate(graph_id_list):
if graph_id is not None:
self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
'width': width, 'height': height,
'x': i % h_size, 'y': i / h_size, 'colspan': 1, 'rowspan': 1,
'elements': 0, 'valign': 0, 'halign': 0,
'style': 0, 'dynamic': 0, 'sort_triggers': 0})
else:
for i, host in enumerate(hosts):
graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
for j, graph_id in enumerate(graph_id_list):
if graph_id is not None:
self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
'width': width, 'height': height,
'x': i, 'y': j, 'colspan': 1, 'rowspan': 1,
'elements': 0, 'valign': 0, 'halign': 0,
'style': 0, 'dynamic': 0, 'sort_triggers': 0})
except Already_Exists:
pass
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(required=True, aliases=['url']),
login_user=dict(required=True),
login_password=dict(required=True, no_log=True),
timeout=dict(type='int', default=10),
screens=dict(type='dict', required=True)
),
supports_check_mode=True
)
if not HAS_ZABBIX_API:
module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
timeout = module.params['timeout']
screens = module.params['screens']
zbx = None
# login to zabbix
try:
zbx = ZabbixAPIExtends(server_url, timeout=timeout)
zbx.login(login_user, login_password)
except Exception, e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
screen = Screen(module, zbx)
created_screens = []
changed_screens = []
deleted_screens = []
for zabbix_screen in screens:
screen_name = zabbix_screen['screen_name']
screen_id = screen.get_screen_id(screen_name)
state = "absent" if "state" in zabbix_screen and zabbix_screen['state'] == "absent" else "present"
if state == "absent":
if screen_id:
screen_item_list = screen.get_screen_items(screen_id)
screen_item_id_list = []
for screen_item in screen_item_list:
screen_item_id = screen_item['screenitemid']
screen_item_id_list.append(screen_item_id)
screen.delete_screen_items(screen_id, screen_item_id_list)
screen.delete_screen(screen_id, screen_name)
deleted_screens.append(screen_name)
else:
host_group = zabbix_screen['host_group']
graph_names = zabbix_screen['graph_names']
graph_width = None
if 'graph_width' in zabbix_screen:
graph_width = zabbix_screen['graph_width']
graph_height = None
if 'graph_height' in zabbix_screen:
graph_height = zabbix_screen['graph_height']
host_group_id = screen.get_host_group_id(host_group)
hosts = screen.get_host_ids_by_group_id(host_group_id)
screen_item_id_list = []
resource_id_list = []
graph_ids, v_size = screen.get_graph_ids(hosts, graph_names)
h_size, v_size = screen.get_hsize_vsize(hosts, v_size)
if not screen_id:
# create screen
screen_id = screen.create_screen(screen_name, h_size, v_size)
screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
created_screens.append(screen_name)
else:
screen_item_list = screen.get_screen_items(screen_id)
for screen_item in screen_item_list:
screen_item_id = screen_item['screenitemid']
resource_id = screen_item['resourceid']
screen_item_id_list.append(screen_item_id)
resource_id_list.append(resource_id)
# when the screen items changed, then update
if graph_ids != resource_id_list:
deleted = screen.delete_screen_items(screen_id, screen_item_id_list)
if deleted:
screen.update_screen(screen_id, screen_name, h_size, v_size)
screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
changed_screens.append(screen_name)
if created_screens and changed_screens:
module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens), ",".join(changed_screens)))
elif created_screens:
module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens))
elif changed_screens:
module.exit_json(changed=True, result="Successfully updated screen(s): %s" % ",".join(changed_screens))
elif deleted_screens:
module.exit_json(changed=True, result="Successfully deleted screen(s): %s" % ",".join(deleted_screens))
else:
module.exit_json(changed=False)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
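
# A worked example of Screen.get_hsize_vsize (not part of the original file):
# with one host and six graphs, h_size starts at len(hosts) == 1 and v_size
# arrives as 6, so h_size becomes 2 and v_size becomes (6 - 1) / 2 + 1 == 3
# under Python 2 integer division; the items are laid out on a 2 x 3 grid.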
# =============================================================================
# repo: proxysh/Safejumper-for-Mac
# file: buildmac/Resources/env/lib/python2.7/site-packages/pyparsing.py
# license: mit (per the embedded license header)
# =============================================================================
# module pyparsing.py
#
# Copyright (c) 2003-2016 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "2.1.10"
__versionTime__ = "07 Oct 2016 01:31 UTC"
__author__ = "Paul McGuire <[email protected]>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime
try:
from _thread import RLock
except ImportError:
from threading import RLock
try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict as _OrderedDict
except ImportError:
_OrderedDict = None
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
'CloseMatch', 'tokenMap', 'pyparsing_common',
]
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
_MAX_INT = sys.maxint
range = xrange
    def _ustr(obj):
        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
           str(obj). If that fails with a UnicodeEncodeError, it then tries unicode(obj); the
           result is encoded with the default encoding, with any unencodable characters
           replaced by escaped character references.
        """
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# Else encode it
ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
xmlcharref = Regex('&#\d+;')
xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
return xmlcharref.transformString(ret)
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__,fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
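# Illustrative behavior of _xml_escape (kept as a comment so nothing executes at import):
#   _xml_escape('a < b & "c"')  ->  'a &lt; b &amp; &quot;c&quot;'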
class _Constants(object):
pass
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
self.args = (pstr, loc, msg)
@classmethod
def _from_exception(cls, pe):
"""
internal factory method to simplify creating one type of ParseException
from another - avoids having __init__ signature conflicts among subclasses
"""
return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError(aname)
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
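        Example (an illustrative sketch, showing the marked line for a simple failed match)::
            try:
                Word(nums).setName("integer").parseString("ABC")
            except ParseException as pe:
                print(pe.markInputline())   # -> >!<ABC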
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join((line_str[:line_column],
markerString, line_str[line_column:]))
return line_str.strip()
def __dir__(self):
return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
"""
    Exception thrown when a parse expression doesn't match the input string;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
Example::
try:
Word(nums).setName("integer").parseString("ABC")
except ParseException as pe:
print(pe)
print("column: {}".format(pe.col))
prints::
Expected integer (at char 0), (line:1, col:1)
column: 1
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like L{ParseFatalException}, but thrown internally when an
L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
immediately because an unbacktrackable syntax error has been found"""
pass
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup[0])
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""
Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
Example::
integer = Word(nums)
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
# date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
# parseString returns a ParseResults object
result = date_str.parseString("1999/12/31")
def test(s, fn=repr):
print("%s -> %s" % (s, fn(eval(s))))
test("list(result)")
test("result[0]")
test("result['month']")
test("result.day")
test("'month' in result")
test("'minutes' in result")
test("result.dump()", str)
prints::
list(result) -> ['1999', '/', '12', '/', '31']
result[0] -> '1999'
result['month'] -> '12'
result.day -> '31'
'month' in result -> True
'minutes' in result -> False
result.dump() -> ['1999', '/', '12', '/', '31']
- day: 31
- month: 12
- year: 1999
"""
def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
self.__asList = asList
self.__modal = modal
if toklist is None:
toklist = []
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
if isinstance(toklist,basestring):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError,IndexError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v, isinstance=isinstance ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,(int,slice)):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return ( not not self.__toklist )
__nonzero__ = __bool__
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( self.__toklist[::-1] )
def _iterkeys( self ):
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def _itervalues( self ):
return (self[k] for k in self._iterkeys())
def _iteritems( self ):
return ((k, self[k]) for k in self._iterkeys())
if PY_3:
keys = _iterkeys
"""Returns an iterator of all named result keys (Python 3.x only)."""
values = _itervalues
"""Returns an iterator of all named result values (Python 3.x only)."""
items = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 3.x only)."""
else:
iterkeys = _iterkeys
"""Returns an iterator of all named result keys (Python 2.x only)."""
itervalues = _itervalues
"""Returns an iterator of all named result values (Python 2.x only)."""
iteritems = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 2.x only)."""
def keys( self ):
"""Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iterkeys())
def values( self ):
"""Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.itervalues())
def items( self ):
"""Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iteritems())
def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop( self, *args, **kwargs):
"""
Removes and returns item at specified index (default=C{last}).
Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
argument or an integer argument, it will use C{list} semantics
and pop tokens from the list of parsed tokens. If passed a
non-integer argument (most likely a string), it will use C{dict}
semantics and pop the corresponding value from any defined
results names. A second default return value argument is
supported, just as in C{dict.pop()}.
Example::
def remove_first(tokens):
tokens.pop(0)
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
label = Word(alphas)
patt = label("LABEL") + OneOrMore(Word(nums))
print(patt.parseString("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
# removed from list form of results)
def remove_LABEL(tokens):
tokens.pop("LABEL")
return tokens
patt.addParseAction(remove_LABEL)
print(patt.parseString("AAB 123 321").dump())
prints::
['AAB', '123', '321']
- LABEL: AAB
['AAB', '123', '321']
"""
if not args:
args = [-1]
for k,v in kwargs.items():
if k == 'default':
args = (args[0], v)
else:
raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
if (isinstance(args[0], int) or
len(args) == 1 or
args[0] in self):
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""
Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified.
Similar to C{dict.get()}.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString("1999/12/31")
print(result.get("year")) # -> '1999'
print(result.get("hour", "not specified")) # -> 'not specified'
print(result.get("hour")) # -> None
"""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
"""
Inserts new element at location index in the list of parsed tokens.
Similar to C{list.insert()}.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to insert the parse location in the front of the parsed results
def insert_locn(locn, tokens):
tokens.insert(0, locn)
print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
"""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def append( self, item ):
"""
Add single element to end of ParseResults list of elements.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
"""
self.__toklist.append(item)
def extend( self, itemseq ):
"""
Add sequence of elements to end of ParseResults list of elements.
Example::
patt = OneOrMore(Word(alphas))
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
"""
if isinstance(itemseq, ParseResults):
self += itemseq
else:
self.__toklist.extend(itemseq)
def clear( self ):
"""
Clear all elements and results names.
"""
del self.__toklist[:]
self.__tokdict.clear()
def __getattr__( self, name ):
try:
return self[name]
except KeyError:
return ""
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = lambda a: offset if a<0 else a+offset
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
return self
def __radd__(self, other):
if isinstance(other,int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
def asDict( self ):
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
return dict((k,toItem(v)) for k,v in item_fn())
def copy( self ):
"""
Returns a new copy of a C{ParseResults} object.
"""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""
(Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
"""
nl = "\n"
out = []
namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
for i,res in enumerate(self.__toklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = OneOrMore(user_data)
result = user_info.parseString("22 111-22-3333 #221B")
for item in result:
print(item.getName(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
return next(iter(self.__tokdict.keys()))
else:
return None
def dump(self, indent='', depth=0, full=True):
"""
Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(result.dump())
prints::
['12', '/', '31', '/', '1999']
- day: 1999
- month: 31
- year: 12
"""
out = []
NL = '\n'
out.append( indent+_ustr(self.asList()) )
if full:
if self.haskeys():
items = sorted((str(k), v) for k,v in self.items())
for k,v in items:
if out:
out.append(NL)
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v:
out.append( v.dump(indent,depth+1) )
else:
out.append(_ustr(v))
else:
out.append(repr(v))
elif any(isinstance(vv,ParseResults) for vv in self):
v = self
for i,vv in enumerate(v):
if isinstance(vv,ParseResults):
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
else:
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
return "".join(out)
def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
(self.__tokdict,
par,
inAccumNames,
self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __getnewargs__(self):
return self.__toklist, self.__name, self.__asList, self.__modal
def __dir__(self):
return (dir(type(self)) + list(self.keys()))
collections.MutableMapping.register(ParseResults)
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
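    Example (illustrative)::
        col(0, "abc")   # -> 1
        col(2, "abc")   # -> 3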
"""
s = strg
return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
def _defaultStartDebugAction( instring, loc, expr ):
print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
# decorator to trim function calls to match the arity of the target
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s,l,t: func(t)
limit = [0]
foundArity = [False]
# traceback return data structure changed in Py3.5 - normalize back to plain tuples
if system_version[:2] >= (3,5):
def extract_stack(limit=0):
# special handling for Python 3.5.0 - extra deep call stack by 1
offset = -3 if system_version == (3,5,0) else -2
frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
return [(frame_summary.filename, frame_summary.lineno)]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
return [(frame_summary.filename, frame_summary.lineno)]
else:
extract_stack = traceback.extract_stack
extract_tb = traceback.extract_tb
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
LINE_DIFF = 6
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
this_line = extract_stack(limit=2)[-1]
pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0]:])
foundArity[0] = True
return ret
except TypeError:
# re-raise TypeErrors if they did not come from our arity testing
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[-1]
if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
raise
finally:
del tb
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
# copy func name to wrapper for sensible debug output
func_name = "<parse action>"
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
wrapper.__name__ = func_name
return wrapper
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
@staticmethod
def setDefaultWhitespaceChars( chars ):
r"""
Overrides the default whitespace chars
Example::
# default whitespace chars are space, <TAB> and newline
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.setDefaultWhitespaceChars(" \t")
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
@staticmethod
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
Example::
# default literal class used is Literal
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# change to Suppress
ParserElement.inlineLiteralsUsing(Suppress)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
"""
ParserElement._literalStringClass = cls
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""
Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of C{expr.copy()} is just C{expr()}::
integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
"""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
Example::
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches=True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
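        Example (illustrative; running this drops into the pdb prompt at parse time)::
            integer = Word(nums).setBreak()
            integer.parseString("123")   # pdb session opens before matching "123"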
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def setParseAction( self, *fns, **kwargs ):
"""
Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Optional keyword arguments:
- callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
Example::
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# use parse action to convert to ints at parse time
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
date_str = integer + '/' + integer + '/' + integer
# note that integer fields are now ints, not strings
date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get("callDuringTry", False)
return self
def addParseAction( self, *fns, **kwargs ):
"""
Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
See examples in L{I{copy}<copy>}.
"""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
functions passed to C{addCondition} need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
"""
msg = kwargs.get("message", "failed user-defined condition")
exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
for fn in fns:
def pa(s,l,t):
if not bool(_trim_arity(fn)(s,l,t)):
raise exc_type(s,l,msg)
self.parseAction.append(pa)
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
        Fail action fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{L{ParseFatalException}}
        if it is desired to stop parsing immediately.
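        Example (an illustrative sketch; the handler merely reports the failure)::
            def report_failure(s, loc, expr, err):
                print("match failed at loc %d" % loc)
            integer = Word(nums).setName("integer").setFailAction(report_failure)
        """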
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseBaseException as err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseBaseException as err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
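        """Quick lookahead check - returns True if this expression would match at C{loc},
        False otherwise; no exception is raised and no input is consumed."""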
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
class _UnboundedCache(object):
def __init__(self):
cache = {}
self.not_in_cache = not_in_cache = object()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
def clear(self):
cache.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
if _OrderedDict is not None:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = _OrderedDict()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
if len(cache) > size:
cache.popitem(False)
def clear(self):
cache.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
else:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = {}
key_fifo = collections.deque([], size)
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
if len(cache) > size:
cache.pop(key_fifo.popleft(), None)
key_fifo.append(key)
def clear(self):
cache.clear()
key_fifo.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
# argument cache for optimizing repeated calls when backtracking through recursive expressions
    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
HIT, MISS = 0, 1
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
except ParseBaseException as pe:
# cache a copy of the exception, without the traceback
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy()))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if isinstance(value, Exception):
raise value
return (value[0], value[1].copy())
_parse = _parseNoCache
@staticmethod
def resetCache():
ParserElement.packrat_cache.clear()
ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
_packratEnabled = False
@staticmethod
def enablePackrat(cache_size_limit=128):
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
        instead of re-executing parsing/validating code. Both valid results
        and parsing exceptions are memoized.
Parameters:
- cache_size_limit - (default=C{128}) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
Example::
import pyparsing
pyparsing.ParserElement.enablePackrat()
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = ParserElement._UnboundedCache()
else:
ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache
def parseString( self, instring, parseAll=False ):
"""
Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{L{StringEnd()}}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
         - explicitly expand the tabs in your input string before calling
C{parseString}
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse( instring, 0 )
if parseAll:
loc = self.preParse( instring, loc )
se = Empty() + StringEnd()
se._parse( instring, loc )
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
else:
return tokens
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn( instring, loc )
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc+1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def transformString( self, instring ):
"""
Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
Prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr,_flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
"""
Another extension to C{L{scanString}}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
prints::
['More', 'Iron', 'Lead', 'Gold', 'I']
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
"""
Generator method to split a string using the given expression as a separator.
May be called with optional C{maxsplit} argument, to limit the number of splits;
        and the optional C{includeSeparators} argument (default=C{False}), to indicate whether
        the matched separator text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
splits = 0
last = 0
for t,s,e in self.scanString(instring, maxMatches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other ):
"""
Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
converts them to L{Literal}s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
Prints::
Hello, World! -> ['Hello', ',', 'World', '!']
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""
Implementation of + operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""
Implementation of - operator, returns C{L{And}} with error stop
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
def __rsub__(self, other ):
"""
Implementation of - operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""
Implementation of * operator, allows use of C{expr * 3} in place of
        C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + L{ZeroOrMore}(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
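        Example (a minimal sketch)::
            ab = Literal("ab")
            (ab*(2,3)).parseString("ababab")   # -> ['ab', 'ab', 'ab']
            (ab*2).parseString("abab")         # -> ['ab', 'ab']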
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects" % (type(other[0]), type(other[1])))
else:
            raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
            raise ValueError("second tuple value must be greater than or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""
Implementation of | operator - returns C{L{MatchFirst}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""
Implementation of | operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""
Implementation of ^ operator - returns C{L{Or}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""
Implementation of ^ operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""
Implementation of & operator - returns C{L{Each}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""
Implementation of & operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""
Implementation of ~ operator - returns C{L{NotAny}}
"""
return NotAny( self )
def __call__(self, name=None):
"""
Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
If C{name} is omitted, same as calling C{L{copy}}.
Example::
# these are equivalent
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
def suppress( self ):
"""
Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
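Example (an illustrative sketch)::
source = "a, b, c, d"
wd = Word(alphas)
comma = Literal(',').suppress()
print((wd + ZeroOrMore(comma + wd)).parseString(source))
# -> ['a', 'b', 'c', 'd'] (without suppress, the commas would appear too)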
"""
return Suppress( self )
def leaveWhitespace( self ):
"""
Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""
Overrides the default whitespace chars
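Example (an illustrative sketch)::
# keep newlines significant by only skipping spaces and tabs
name = Word(alphas).setWhitespaceChars(" \\t")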
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""
Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match C{<TAB>} characters.
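Example (an illustrative sketch)::
field = Word(alphas)
row = (field + White("\\t").suppress() + field).parseWithTabs()
print(row.parseString("alpha\\tbeta"))
# -> ['alpha', 'beta']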
"""
self.keepTabs = True
return self
def ignore( self, other ):
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = OneOrMore(Word(alphas))
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
patt.ignore(cStyleComment)
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
"""
if isinstance(other, basestring):
other = Suppress(other)
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""
Enable display of debugging messages while doing pattern matching.
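Example (an illustrative sketch; the custom actions receive the same
arguments as the default debug actions)::
def show_try(instring, loc, expr):
    print("Trying %s at loc %d" % (expr, loc))
def show_match(instring, startloc, endloc, expr, toks):
    print("Matched %s -> %s" % (expr, toks))
def show_fail(instring, loc, expr, exc):
    print("Failed %s: %s" % (expr, exc))
Word(nums).setName("integer").setDebugActions(show_try, show_match, show_fail)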
"""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""
Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable.
Example::
wd = Word(alphas).setName("alphaword")
integer = Word(nums).setName("numword")
term = wd | integer
# turn on debugging for wd
wd.setDebug()
OneOrMore(term).parseString("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using L{setDebugActions}. Prior to attempting
to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
"""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
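Example (an illustrative sketch; 'words.txt' is a hypothetical file)::
word_list = OneOrMore(Word(alphas))
results = word_list.parseFile("words.txt")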
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r") as f:
file_contents = f.read()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or vars(self) == vars(other)
elif isinstance(other, basestring):
return self.matches(other)
else:
return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
def matches(self, testString, parseAll=True):
"""
Method for quick testing of a parser against a test string. Good for simple
inline microtests of sub expressions while building up larger parser.
Parameters:
- testString - to test against this expression for a match
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
try:
self.parseString(_ustr(testString), parseAll=parseAll)
return True
except ParseBaseException:
return False
def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
- comment - (default=C{'#'}) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- printResults - (default=C{True}) prints test output to stdout
- failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if C{failureTests} is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.runTests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.runTests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failureTests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal; you must include the leading 'r'.)
"""
if isinstance(tests, basestring):
tests = list(map(str.strip, tests.rstrip().splitlines()))
if isinstance(comment, basestring):
comment = Literal(comment)
allResults = []
comments = []
success = True
for t in tests:
if (comment is not None and comment.matches(t, False)) or (comments and not t):
comments.append(t)
continue
if not t:
continue
out = ['\n'.join(comments), t]
comments = []
try:
t = t.replace(r'\n','\n')
result = self.parseString(t, parseAll=parseAll)
out.append(result.dump(full=fullDump))
success = success and not failureTests
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
if '\n' in t:
out.append(line(pe.loc, t))
out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
else:
out.append(' '*pe.loc + '^' + fatal)
out.append("FAIL: " + str(pe))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: " + str(exc))
success = success and failureTests
result = exc
if printResults:
if fullDump:
out.append('')
print('\n'.join(out))
allResults.append((t, result))
return success, allResults
class Token(ParserElement):
"""
Abstract C{ParserElement} subclass, for defining atomic matching patterns.
"""
def __init__( self ):
super(Token,self).__init__( savelist=False )
class Empty(Token):
"""
An empty token, will always match.
"""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""
A token that will never match.
"""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl( self, instring, loc, doActions=True ):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""
Token to exactly match a specified string.
Example::
Literal('blah').parseString('blah') # -> ['blah']
Literal('blah').parseString('blahfooblah') # -> ['blah']
Literal('blah').parseString('bla') # -> Exception: Expected "blah"
For case-insensitive matching, use L{CaselessLiteral}.
For keyword matching (force word break before and after the matched string),
use L{Keyword} or L{CaselessKeyword}.
"""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement._literalStringClass = Literal
class Keyword(Token):
"""
Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with C{L{Literal}}:
- C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
- C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
Accepts two optional constructor arguments in addition to the keyword string:
- C{identChars} is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"
- C{caseless} allows case-insensitive matching, default is C{False}.
Example::
Keyword("start").parseString("start") # -> ['start']
Keyword("start").parseString("starting") # -> Exception
For case-insensitive matching, use L{CaselessKeyword}.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=None, caseless=False ):
super(Keyword,self).__init__()
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
@staticmethod
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
class CaselessLiteral(Literal):
"""
Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example::
OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
(Contrast with example for L{CaselessKeyword}.)
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
"""
Caseless version of L{Keyword}.
Example::
OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
(Contrast with example for L{CaselessLiteral}.)
"""
def __init__( self, matchString, identChars=None ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
def parseImpl( self, instring, loc, doActions=True ):
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class CloseMatch(Token):
"""
A variation on L{Literal} which matches "close" matches, that is,
strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
- C{match_string} - string to be matched
- C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
The results from a successful parse will contain the matched text from the input string and the following named results:
- C{mismatches} - a list of the positions within the match_string where mismatches were found
- C{original} - the original match_string used to compare against the input string
If C{mismatches} is an empty list, then the match was an exact match.
Example::
patt = CloseMatch("ATCATCGAATGGA")
patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
# exact match
patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
# close match allowing up to 2 mismatches
patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
"""
def __init__(self, match_string, maxMismatches=1):
super(CloseMatch,self).__init__()
self.name = match_string
self.match_string = match_string
self.maxMismatches = maxMismatches
self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
self.mayIndexError = False
self.mayReturnEmpty = False
def parseImpl( self, instring, loc, doActions=True ):
start = loc
instrlen = len(instring)
maxloc = start + len(self.match_string)
if maxloc <= instrlen:
match_string = self.match_string
match_stringloc = 0
mismatches = []
maxMismatches = self.maxMismatches
for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
src,mat = s_m
if src != mat:
mismatches.append(match_stringloc)
if len(mismatches) > maxMismatches:
break
else:
loc = match_stringloc + 1
results = ParseResults([instring[start:loc]])
results['original'] = self.match_string
results['mismatches'] = mismatches
return loc, results
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""
Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction. An optional
C{excludeChars} parameter can list characters that might be found in
the input C{bodyChars} string; useful to define a word of all printables
except for one or two characters, for instance.
L{srange} is useful for defining custom character set strings for defining
C{Word} expressions, using range notation from regular expression character sets.
A common mistake is to use C{Word} to match a specific literal string, as in
C{Word("Address")}. Remember that C{Word} uses the string argument to define
I{sets} of matchable characters. This expression would match "Add", "AAA",
"dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
To match an exact literal string, use L{Literal} or L{Keyword}.
pyparsing includes helper strings for building Words:
- L{alphas}
- L{nums}
- L{alphanums}
- L{hexnums}
- L{alphas8bit} (alphabetic characters in the Latin-1 range 128-255 - accented, tilded, umlauted, etc.)
- L{punc8bit} (non-alphabetic characters in the Latin-1 range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
- L{printables} (any non-whitespace character)
Example::
# a word composed of digits
integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
# a word with a leading capital, and zero or more lowercase
capital_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums+'-')
# roman numeral (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, excludeChars=",")
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
super(Word,self).__init__()
if excludeChars:
initChars = ''.join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.initCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except Exception:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
if instring[loc] not in self.initChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except Exception:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
class Regex(Token):
"""
Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the built-in Python re module.
If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
named parse results.
Example::
realnum = Regex(r"[+-]?\d+\.\d*")
date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
# ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
"""
compiledREtype = type(re.compile("[A-Z]"))
def __init__( self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if isinstance(pattern, basestring):
if not pattern:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = \
self.reString = str(pattern)
self.flags = flags
else:
raise ValueError("Regex may only be constructed with a string or a compiled RE object")
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d:
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
class QuotedString(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=C{None})
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
- multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
- convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
Example::
qs = QuotedString('"')
print(qs.searchString('lsjdf "This is the quote" sldjf'))
complex_qs = QuotedString('{{', endQuoteChar='}}')
print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
sql_qs = QuotedString('"', escQuote='""')
print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
prints::
[['This is the quote']]
[['This is the "quote"']]
[['This is the quote with "embedded" quotes']]
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
super(QuotedString,self).__init__()
# remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if not quoteChar:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc) if instring[loc] == self.firstQuoteChar else None
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,basestring):
# replace escaped whitespace
if '\\' in ret and self.convertWhitespaceEscapes:
ws_map = {
r'\t' : '\t',
r'\n' : '\n',
r'\f' : '\f',
r'\r' : '\r',
}
for wslit,wschar in ws_map.items():
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
class CharsNotIn(Token):
"""
Token for matching words composed of characters I{not} in a given set (will
include whitespace in matched characters if not listed in the provided exclusion set - see example).
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction.
Example::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
prints::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except Exception:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""
Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
as defined for the C{L{Word}} class.
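Example (an illustrative sketch)::
# make runs of spaces a significant, matchable token
spaces = White(" ")
two_columns = Word(alphas) + spaces.suppress() + Word(alphas)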
"""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
#~ self.leaveWhitespace()
self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] not in self.matchWhite:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""
Token to advance to a specific column of input text; useful for tabular report scraping.
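Example (an illustrative sketch; assumes fixed-column report text, and uses
C{restOfLine}, defined later in this module)::
# skip to column 20 and take the rest of the line as the 'value' field
value = GoToColumn(20).suppress() + restOfLine("value")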
"""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
"""
Matches if current position is at the beginning of a line within the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
print(t)
Prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__( self ):
super(LineStart,self).__init__()
self.errmsg = "Expected start of line"
def parseImpl( self, instring, loc, doActions=True ):
if col(loc, instring) == 1:
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(_PositionToken):
"""
Matches if current position is at the end of a line within the parse string
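Example (an illustrative sketch)::
# make end-of-line significant when scanning single-line records
record = Word(alphas)("key") + Word(nums)("value") + LineEnd().suppress()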
"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected end of line"
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
"""
Matches if current position is at the beginning of the parse string
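Example (an illustrative sketch)::
# match 'BEGIN' only at the very start of the input
header = StringStart() + Keyword("BEGIN")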
"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignorables
if loc != self.preParse( instring, 0 ):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""
Matches if current position is at the end of the parse string
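Example (an illustrative sketch)::
word = Word(alphas) + StringEnd()
word.parseString("hello")    # -> ['hello']
word.parseString("hello!")   # raises ParseException at the '!'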
"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class WordStart(_PositionToken):
"""
Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
the string being parsed, or at the beginning of a line.
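Example (an illustrative sketch)::
# match an integer only where it begins a word, so the '123' in
# 'abc123' is not matched
leading_int = WordStart(alphanums) + Word(nums)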
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(_PositionToken):
"""
Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
the string being parsed, or at the end of a line.
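Example (an illustrative sketch)::
# match an integer only where it ends a word, so the '123' in
# '123abc' is not matched
trailing_int = Word(nums) + WordEnd(alphanums)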
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class ParseExpression(ParserElement):
"""
Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
"""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, _generatorType ):
exprs = list(exprs)
if isinstance( exprs, basestring ):
self.exprs = [ ParserElement._literalStringClass( exprs ) ]
elif isinstance( exprs, collections.Iterable ):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if all(isinstance(expr, basestring) for expr in exprs):
exprs = map(ParserElement._literalStringClass, exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list( exprs )
except TypeError:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + _ustr(self)
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
def copy(self):
ret = super(ParseExpression,self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
class And(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the C{'+'} operator.
May also be constructed using the C{'-'} operator, which will suppress backtracking.
Example::
integer = Word(nums)
name_expr = OneOrMore(Word(alphas))
expr = And([integer("id"),name_expr("name"),integer("age")])
# more easily written as:
expr = integer("id") + name_expr("name") + integer("age")
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop,self).__init__(*args, **kwargs)
self.name = '-'
self.leaveWhitespace()
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars( self.exprs[0].whiteChars )
self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse( instring, loc, doActions )
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException._from_exception(pe)
except IndexError:
raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
else:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
class Or(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the C{'^'} operator.
Example::
# construct Or using '^' operator
number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789"))
prints::
[['123'], ['3.1416'], ['789']]
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
matches = []
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
matches.sort(key=lambda x: -x[0])
for _,e in matches:
try:
return e._parse( instring, loc, doActions )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ixor__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the C{'|'} operator.
Example::
# construct MatchFirst using '|' operator
# watch the order of expressions to match
number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
# put more selective expression first
number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the C{'&'} operator.
Example::
color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
integer = Word(nums)
shape_attr = "shape:" + shape_type("shape")
posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
color_attr = "color:" + color("color")
size_attr = "size:" + integer("size")
# use Each (using operator '&') to accept attributes in any order
# (shape and posn are required, color and size are optional)
shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
shape_spec.runTests('''
shape: SQUARE color: BLACK posn: 100, 120
shape: CIRCLE size: 50 color: BLUE posn: 50,80
color:GREEN size:20 shape:TRIANGLE posn:20,40
'''
)
prints::
shape: SQUARE color: BLACK posn: 100, 120
['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- color: BLACK
- posn: ['100', ',', '120']
- x: 100
- y: 120
- shape: SQUARE
shape: CIRCLE size: 50 color: BLUE posn: 50,80
['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- color: BLUE
- posn: ['50', ',', '80']
- x: 50
- y: 80
- shape: CIRCLE
- size: 50
color: GREEN size: 20 shape: TRIANGLE posn: 20,40
['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- color: GREEN
- posn: ['20', ',', '40']
- x: 20
- y: 40
- shape: TRIANGLE
- size: 20
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl( self, instring, loc, doActions=True ):
if self.initExprGroups:
self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
self.optionals = opt1 + opt2
self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e),e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
# add any unmatched Optionals, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = sum(resultlist, ParseResults([]))
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
"""
Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
"""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, basestring ):
if issubclass(ParserElement._literalStringClass, Token):
expr = ParserElement._literalStringClass(expr)
else:
expr = ParserElement._literalStringClass(Literal(expr))
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except Exception:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""
Lookahead matching of the given parse expression. C{FollowedBy}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. C{FollowedBy} always returns a null token list.
Example::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""
Lookahead to disallow matching with the given parse expression. C{NotAny}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression does I{not} match at the current
position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
always returns a null token list. May be constructed using the '~' operator.
Example::
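# an illustrative sketch - use '~' to avoid reading keywords as
# ordinary identifiers
AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
ident = ~(AND | OR | NOT) + Word(alphas)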
"""
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr.canParseNext(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class _MultipleMatch(ParseElementEnhance):
def __init__( self, expr, stopOn=None):
super(_MultipleMatch, self).__init__(expr)
self.saveAsList = True
ender = stopOn
if isinstance(ender, basestring):
ender = ParserElement._literalStringClass(ender)
self.not_ender = ~ender if ender is not None else None
def parseImpl( self, instring, loc, doActions=True ):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = bool(self.ignoreExprs)
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self_expr_parse( instring, preloc, doActions )
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
class OneOrMore(_MultipleMatch):
"""
Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
# use stopOn attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
# could also be written as
(attr_expr * (1,)).parseString(text).pprint()
"""
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
class ZeroOrMore(_MultipleMatch):
"""
Optional repetition of zero or more of the given expression.
Parameters:
- expr - expression that must match zero or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example: similar to L{OneOrMore}
"""
def __init__( self, expr, stopOn=None):
super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
except (ParseException,IndexError):
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""
Optional matching of the given expression.
Parameters:
- expr - expression that must match zero or more times
- default (optional) - value to be returned if the optional expression is not found.
Example::
# US postal code can be a 5-digit zip, plus optional 4-digit qualifier
zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
zip.runTests('''
# traditional ZIP code
12345
# ZIP+4 form
12101-0001
# invalid ZIP
98765-
''')
prints::
# traditional ZIP code
12345
['12345']
# ZIP+4 form
12101-0001
['12101-0001']
# invalid ZIP
98765-
^
FAIL: Expected end of text (at char 5), (line:1, col:6)
"""
def __init__( self, expr, default=_optionalNotMatched ):
super(Optional,self).__init__( expr, savelist=False )
self.saveAsList = self.expr.saveAsList
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([ self.defaultValue ])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
class SkipTo(ParseElementEnhance):
"""
Token for skipping over all undefined text until the matched expression is found.
Parameters:
- expr - target expression marking the end of the data to be skipped
- include - (default=C{False}) if True, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element list).
- ignore - (default=C{None}) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- failOn - (default=C{None}) define expressions that are not allowed to be
       included in the skipped text; if found before the target expression is found,
the SkipTo is not a match
Example::
report = '''
Outstanding Issues Report - 1 Jan 2000
# | Severity | Description | Days Open
-----+----------+-------------------------------------------+-----------
101 | Critical | Intermittent system crash | 6
94 | Cosmetic | Spelling error on Login ('log|n') | 14
79 | Minor | System slow when running too many reports | 47
'''
integer = Word(nums)
SEP = Suppress('|')
# use SkipTo to simply match everything up until the next SEP
# - ignore quoted strings, so that a '|' character inside a quoted string does not match
# - parse action will call token.strip() for each matched token, i.e., the description body
string_data = SkipTo(SEP, ignore=quotedString)
string_data.setParseAction(tokenMap(str.strip))
ticket_expr = (integer("issue_num") + SEP
+ string_data("sev") + SEP
+ string_data("desc") + SEP
+ integer("days_open"))
for tkt in ticket_expr.searchString(report):
print tkt.dump()
prints::
['101', 'Critical', 'Intermittent system crash', '6']
- days_open: 6
- desc: Intermittent system crash
- issue_num: 101
- sev: Critical
['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- days_open: 14
- desc: Spelling error on Login ('log|n')
- issue_num: 94
- sev: Cosmetic
['79', 'Minor', 'System slow when running too many reports', '47']
- days_open: 47
- desc: System slow when running too many reports
- issue_num: 79
- sev: Minor
"""
def __init__( self, other, include=False, ignore=None, failOn=None ):
super( SkipTo, self ).__init__( other )
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if isinstance(failOn, basestring):
self.failOn = ParserElement._literalStringClass(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
startloc = loc
instrlen = len(instring)
expr = self.expr
expr_parse = self.expr._parse
self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""
Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
Note: take care when assigning to C{Forward} not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the C{Forward}::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
See L{ParseResults.pprint} for an example of a recursive parser created using
C{Forward}.
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
return self.__class__.__name__ + ": ..."
# stubbed out for now - creates awful memory and perf issues
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret <<= self
return ret
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""
    Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
"""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Combine(TokenConverter):
"""
Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying C{'adjacent=False'} in the constructor.
Example::
real = Word(nums) + '.' + Word(nums)
print(real.parseString('3.1416')) # -> ['3', '.', '1416']
# will also erroneously match the following
print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
real = Combine(Word(nums) + '.' + Word(nums))
print(real.parseString('3.1416')) # -> ['3.1416']
# no match when there are internal spaces
print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and retToks.haskeys():
return [ retToks ]
else:
return retToks
class Group(TokenConverter):
"""
Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
Example::
ident = Word(alphas)
num = Word(nums)
term = ident | num
func = ident + Optional(delimitedList(term))
print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
func = ident + Group(Optional(delimitedList(term)))
print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
"""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""
Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as an item key.
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
# print attributes as plain groups
print(OneOrMore(attr_expr).parseString(text).dump())
# instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
print(result.dump())
# access named fields as dict entries, or output as dict
print(result['shape'])
print(result.asDict())
prints::
['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
{'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
See more examples at L{ParseResults} of accessing fields by results name.
"""
def __init__( self, expr ):
super(Dict,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
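        # use the first token of each sub-list as the dict key; single trailing
        # values are stored as scalars, longer groups as sub-lists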
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
class Suppress(TokenConverter):
"""
Converter for ignoring the results of a parsed expression.
Example::
source = "a, b, c,d"
wd = Word(alphas)
wd_list1 = wd + ZeroOrMore(',' + wd)
print(wd_list1.parseString(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
print(wd_list2.parseString(source))
prints::
['a', ',', 'b', ',', 'c', ',', 'd']
['a', 'b', 'c', 'd']
(See also L{delimitedList}.)
"""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""
Wrapper for parse actions, to ensure they are only called once.
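    Example (an illustrative sketch)::
        wd = Word(alphas).setParseAction(OnlyOnce(lambda t: t[0].upper()))
        # the wrapped action fires on the first match only; a later match
        # raises ParseException until reset() is called on the OnlyOnce instance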
"""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
def traceParseAction(f):
"""
Decorator for debugging parse actions.
    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})"}.
When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@traceParseAction
def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""
Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace and
comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
def countedArray( expr, intExpr=None ):
"""
Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
Example::
countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
# in this parser, the leading integer value is given in binary,
# '10' indicating that 2 values are in the array
binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
"""
arrayExpr = Forward()
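    # arrayExpr is intentionally left undefined here; countFieldParseAction
    # re-binds it below, once the leading count has actually been parsed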
def countFieldParseAction(s,l,t):
n = t[0]
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
def _flatten(L):
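    """Internal helper to recursively flatten nested lists into a single flat list."""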
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
    previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And(Literal(tt) for tt in tflat)
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def matchPreviousExpr(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches by
    expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
the expressions are evaluated first, and then compared, so
C{"1"} is compared with C{"10"}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,_bslash+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""
Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{L{MatchFirst}} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a collection of string literals
- caseless - (default=C{False}) - treat all literals as caseless
- useRegex - (default=C{True}) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
Example::
comp_oper = oneOf("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
symbols = []
if isinstance(strs,basestring):
symbols = strs.split()
elif isinstance(strs, collections.Iterable):
symbols = list(strs)
else:
warnings.warn("Invalid argument to oneOf, expected string or iterable",
SyntaxWarning, stacklevel=2)
if not symbols:
return NoMatch()
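    # scan the symbols, dropping duplicates and moving any symbol ahead of a
    # shorter symbol that is its prefix, so that matching is longest-first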
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
else:
return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
except Exception:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
def dictOf( key, value ):
"""
Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
print(OneOrMore(attr_expr).parseString(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
# similar to Dict, but simpler call format
result = dictOf(attr_label, attr_value).parseString(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.asDict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
def originalTextFor(expr, asString=True):
"""
Helper to return the original, untokenized text for a given expression. Useful to
restore the parsed fields of an HTML start tag into the raw tag text itself, or to
revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns a string containing the original parsed text.
If the optional C{asString} argument is passed as C{False}, then the return value is a
C{L{ParseResults}} containing any results names that were originally matched, and a
single token containing the original matched text from the input string. So if
the expression passed to C{L{originalTextFor}} contains expressions with defined
results names, you must set C{asString} to C{False} if you want to preserve those
results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr
def ungroup(expr):
"""
Helper to undo pyparsing's default grouping of And expressions, even
if all but one are non-empty.
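    Example (an illustrative sketch)::
        grouped = Group(Word(alphas))
        print(grouped.parseString("abc"))          # -> [['abc']]
        print(ungroup(grouped).parseString("abc")) # -> ['abc']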
"""
return TokenConverter(expr).setParseAction(lambda t:t[0])
def locatedExpr(expr):
"""
Helper to decorate a returned token with its starting and ending locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
    Be careful if the input text contains C{<TAB>} characters; you may want to call
C{L{ParserElement.parseWithTabs}}
Example::
wd = Word(alphas)
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[[0, 'ljsdf', 5]]
[[8, 'lksdjjf', 15]]
[[18, 'lkkjj', 23]]
"""
locator = Empty().setParseAction(lambda s,l,t: l)
return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
def srange(s):
r"""
Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be:
- a single character
- an escaped character with a leading backslash (such as C{\-} or C{\]})
- an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
(C{\0x##} is also supported for backwards compatibility)
- an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
- a range of any of the above, separated by a dash (C{'a-z'}, etc.)
- any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
"""
_expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
except Exception:
return ""
def matchOnlyAtCol(n):
"""
Helper method for defining parse actions that require matching at a specific
column in the input text.
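    Example (an illustrative sketch)::
        # only accept a word if it starts in column 1 of its line
        first_col_word = Word(alphas).setParseAction(matchOnlyAtCol(1))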
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
def replaceWith(replStr):
"""
Helper method for common parse actions that simply return a literal value. Especially
useful when used with C{L{transformString<ParserElement.transformString>}()}.
Example::
num = Word(nums).setParseAction(lambda toks: int(toks[0]))
na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
term = na | num
OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
"""
return lambda s,l,t: [replStr]
def removeQuotes(s,l,t):
"""
Helper parse action for removing quotation marks from parsed quoted strings.
Example::
# by default, quotation marks are included in parsed results
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use removeQuotes to strip quotation marks from parsed results
quotedString.setParseAction(removeQuotes)
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
"""
return t[0][1:-1]
def tokenMap(func, *args):
"""
    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
args are passed, they are forwarded to the given function as additional arguments after
the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
parsed data to an integer using base 16.
    Example (compare the last example to the one in L{ParserElement.transformString})::
hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
hex_ints.runTests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).setParseAction(tokenMap(str.upper))
OneOrMore(upperword).runTests('''
my kingdom for a horse
''')
wd = Word(alphas).setParseAction(tokenMap(str.title))
OneOrMore(wd).setParseAction(' '.join).runTests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
prints::
00 11 22 aa FF 0a 0d 1a
[0, 17, 34, 170, 255, 10, 13, 26]
my kingdom for a horse
['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
now is the winter of our discontent made glorious summer by this sun of york
['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
"""
def pa(s,l,t):
return [func(tokn, *args) for tokn in t]
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
pa.__name__ = func_name
return pa
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join(c for c in printables if c not in ">")
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag
def makeHTMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
Example::
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
# makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
a,a_end = makeHTMLTags("A")
link_expr = a + SkipTo(a_end)("link_text") + a_end
for link in link_expr.searchString(text):
# attributes in the <A> tag (like "href" shown here) are also accessible as named results
print(link.link_text, '->', link.href)
prints::
pyparsing -> http://pyparsing.wikispaces.com
"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
tags only in the given upper/lower case.
Example: similar to L{makeHTMLTags}
"""
return _makeTags( tagStr, True )
def withAttribute(*args,**attrDict):
"""
Helper to create a validating parse action to be used with start tags created
with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
C{<TD>} or C{<DIV>}.
Call C{withAttribute} with a series of attribute names and values. Specify the list
    of filter attribute names and values as:
- keyword arguments, as in C{(align="right")}, or
- as an explicit dict with C{**} operator, when an attribute name is also a Python
reserved word, as in C{**{"class":"Customer", "align":"right"}}
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched insensitive to upper/lower case.
If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
To verify that the attribute exists, but without specifying a value, pass
C{withAttribute.ANY_VALUE} as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=''):
"""
Simplified version of C{L{withAttribute}} when matching on a div class - made
difficult because C{class} is a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this <div> has no class</div>
</div>
'''
div,div_end = makeHTMLTags("div")
div_grid = div().setParseAction(withClass("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
classattr = "%s:class" % namespace if namespace else "class"
return withAttribute(**{classattr : classname})
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
"""
Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions. The generated parser will also recognize the use
of parentheses to override operator precedences (see example below).
Note: if you define a deep operator list, you may see performance issues
when using infixNotation. See L{ParserElement.enablePackrat} for a
mechanism to potentially improve your parser performance.
Parameters:
    - baseExpr - expression representing the most basic element of the nested expression
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal;
if numTerms is 3, opExpr is a tuple of two expressions, for the
two operators separating the 3 terms
- numTerms is the number of terms for this operator (must
be 1, 2, or 3)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
- lpar - expression for matching left-parentheses (default=C{Suppress('(')})
- rpar - expression for matching right-parentheses (default=C{Suppress(')')})
Example::
# simple example of four-function arithmetic with ints and variable names
integer = pyparsing_common.signed_integer
varname = pyparsing_common.identifier
arith_expr = infixNotation(integer | varname,
[
('-', 1, opAssoc.RIGHT),
(oneOf('* /'), 2, opAssoc.LEFT),
(oneOf('+ -'), 2, opAssoc.LEFT),
])
arith_expr.runTests('''
5+3*6
(5+3)*6
-2--11
''', fullDump=False)
prints::
5+3*6
[[5, '+', [3, '*', 6]]]
(5+3)*6
[[[5, '+', 3], '*', 6]]
-2--11
[[['-', 2], '-', ['-', 11]]]
"""
ret = Forward()
lastExpr = baseExpr | ( lpar + ret + rpar )
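    # opList is ordered from highest precedence (binds tightest) to lowest;
    # each level is built on top of the previous level, held in lastExpr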
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
opExpr1, opExpr2 = opExpr
thisExpr = Forward().setName(termName)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
matchExpr.setParseAction( pa )
thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""
Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
- closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
- content - expression for items within the nested lists (default=C{None})
- ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the C{ignoreExpr} argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
The default is L{quotedString}, but if no expressions are to be ignored,
then pass C{None} for this argument.
Example::
data_type = oneOf("void int short long char float double")
decl_data_type = Combine(data_type + Optional(Word('*')))
ident = Word(alphas+'_', alphanums+'_')
number = pyparsing_common.number
arg = Group(decl_data_type + ident)
LPAR,RPAR = map(Suppress, "()")
code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
c_function = (decl_data_type("type")
+ ident("name")
+ LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ code_body("body"))
c_function.ignore(cStyleComment)
source_code = '''
int is_odd(int x) {
return (x%2);
}
int dec_to_hex(char hchar) {
if (hchar >= '0' && hchar <= '9') {
return (ord(hchar)-ord('0'));
} else {
return (10+ord(hchar)-ord('A'));
}
}
'''
for func in c_function.searchString(source_code):
print("%(name)s (%(type)s) args: %(args)s" % func)
prints::
is_odd (int) args: [['int', 'x']]
dec_to_hex (int) args: [['char', 'hchar']]
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
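        # no content expression was given - build a default one that matches any
        # run of characters that is not whitespace, an opener/closer, or ignored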
if isinstance(opener,basestring) and isinstance(closer,basestring):
if len(opener) == 1 and len(closer)==1:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t:t[0].strip()))
else:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
ret.setName('nested %s%s expression' % (opener,closer))
return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""
Helper method for defining space-delimited indentation blocks, such as
those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single grammar
should share a common indentStack)
- indent - boolean indicating whether block must be indented beyond the
       current level; set to False for a block of left-most statements
(default=C{True})
A valid block must contain at least one C{blockStatement}.
Example::
data = '''
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
'''
indentStack = [1]
stmt = Forward()
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
func_body = indentedBlock(stmt, indentStack)
funcDef = Group( funcDecl + func_body )
rvalue = Forward()
funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << ( funcDef | assignment | identifier )
module_body = OneOrMore(stmt)
parseTree = module_body.parseString(data)
parseTree.pprint()
prints::
[['def',
'A',
['(', 'z', ')'],
':',
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
'B',
['def',
'BB',
['(', 'a', 'b', 'c', ')'],
':',
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
'C',
'D',
['def',
'spam',
['(', 'x', 'y', ')'],
':',
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
"""
def checkPeerIndent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseFatalException(s,l,"illegal nesting")
raise ParseException(s,l,"not a peer entry")
def checkSubIndent(s,l,t):
curCol = col(l,s)
if curCol > indentStack[-1]:
indentStack.append( curCol )
else:
raise ParseException(s,l,"not a subentry")
def checkUnindent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s,l,"not an unindent")
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
PEER = Empty().setParseAction(checkPeerIndent).setName('')
UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
if indent:
smExpr = Group( Optional(NL) +
#~ FollowedBy(blockStatementExpr) +
INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
else:
smExpr = Group( Optional(NL) +
(OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.setName('indented block')
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
"""Helper parser action to replace common HTML entities with their special characters"""
return _htmlEntityMap.get(t.entity)
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form C{/* ... */}"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form C{<!-- ... -->}"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form C{// ... (to end of line)}"
cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
javaStyleComment = cppStyleComment
"Same as C{L{cppStyleComment}}"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form C{# ... (to end of line)}"
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
"""
Here are some common low-level expressions that may be useful in jump-starting parser development:
- numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
- common L{programming identifiers<identifier>}
- network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
- ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
- L{UUID<uuid>}
- L{comma-separated list<comma_separated_list>}
Parse actions:
- C{L{convertToInteger}}
- C{L{convertToFloat}}
- C{L{convertToDate}}
- C{L{convertToDatetime}}
- C{L{stripHTMLTags}}
- C{L{upcaseTokens}}
- C{L{downcaseTokens}}
Example::
pyparsing_common.number.runTests('''
# any int or real number, returned as the appropriate type
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.fnumber.runTests('''
# any int or real number, returned as float
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.hex_integer.runTests('''
# hex numbers
100
FF
''')
pyparsing_common.fraction.runTests('''
# fractions
1/2
-3/4
''')
pyparsing_common.mixed_integer.runTests('''
# mixed fractions
1
1/2
-3/4
1-3/4
''')
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests('''
# uuid
12345678-1234-5678-1234-567812345678
''')
prints::
# any int or real number, returned as the appropriate type
100
[100]
-100
[-100]
+100
[100]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# any int or real number, returned as float
100
[100.0]
-100
[-100.0]
+100
[100.0]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# hex numbers
100
[256]
FF
[255]
# fractions
1/2
[0.5]
-3/4
[-0.75]
# mixed fractions
1
[1]
1/2
[0.5]
-3/4
[-0.75]
1-3/4
[1.75]
# uuid
12345678-1234-5678-1234-567812345678
[UUID('12345678-1234-5678-1234-567812345678')]
"""
convertToInteger = tokenMap(int)
"""
Parse action for converting parsed integers to Python int
"""
convertToFloat = tokenMap(float)
"""
Parse action for converting parsed numbers to Python float
"""
integer = Word(nums).setName("integer").setParseAction(convertToInteger)
"""expression that parses an unsigned integer, returns an int"""
hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
"""expression that parses a hexadecimal integer, returns an int"""
signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
"""expression that parses an integer with optional leading sign, returns an int"""
fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
"""fractional expression of an integer divided by an integer, returns a float"""
fraction.addParseAction(lambda t: t[0]/t[-1])
mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
mixed_integer.addParseAction(sum)
real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
"""expression that parses a floating point number and returns a float"""
sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
"""expression that parses a floating point number with optional scientific notation and returns a float"""
# streamlining this expression makes the docs nicer-looking
number = (sci_real | real | signed_integer).streamline()
"""any numeric expression, returns the corresponding Python type"""
fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
"""any int or real number, returned as float"""
identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
"IPv4 address (C{0.0.0.0 - 255.255.255.255})"
_ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
_full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
_short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
_short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
_mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
"IPv6 address (long, short, or mixed form)"
mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
@staticmethod
def convertToDate(fmt="%Y-%m-%d"):
"""
Helper to create a parse action for converting parsed date string to Python datetime.date
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
Example::
date_expr = pyparsing_common.iso8601_date.copy()
date_expr.setParseAction(pyparsing_common.convertToDate())
print(date_expr.parseString("1999-12-31"))
prints::
[datetime.date(1999, 12, 31)]
"""
def cvt_fn(s,l,t):
try:
return datetime.strptime(t[0], fmt).date()
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
@staticmethod
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
"""
Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
Example::
dt_expr = pyparsing_common.iso8601_datetime.copy()
dt_expr.setParseAction(pyparsing_common.convertToDatetime())
print(dt_expr.parseString("1999-12-31T23:59:59.999"))
prints::
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
"""
def cvt_fn(s,l,t):
try:
return datetime.strptime(t[0], fmt)
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
"ISO8601 date (C{yyyy-mm-dd})"
iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
"ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
"UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
_html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
@staticmethod
def stripHTMLTags(s, l, tokens):
"""
Parse action to remove HTML tags from web page HTML source
Example::
# strip HTML links from normal text
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
td,td_end = makeHTMLTags("TD")
table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
"""
return pyparsing_common._html_stripper.transformString(tokens[0])
_commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
+ Optional( White(" \t") ) ) ).streamline().setName("commaItem")
comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
"""Parse action to convert tokens to upper case."""
downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
"""Parse action to convert tokens to lower case."""
if __name__ == "__main__":
selectToken = CaselessLiteral("select")
fromToken = CaselessLiteral("from")
ident = Word(alphas, alphanums + "_$")
columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
columnNameList = Group(delimitedList(columnName)).setName("columns")
columnSpec = ('*' | columnNameList)
tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
tableNameList = Group(delimitedList(tableName)).setName("tables")
simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
# demo runTests method, including embedded comments in test string
simpleSQL.runTests("""
# '*' as column list and dotted table name
select * from SYS.XYZZY
# caseless match on "SELECT", and casts back to "select"
SELECT * from XYZZY, ABC
# list of column names, and mixed case SELECT keyword
Select AA,BB,CC from Sys.dual
# multiple tables
Select A, B, C from Sys.dual, Table2
# invalid SELECT keyword - should fail
Xelect A, B, C from Sys.dual
# incomplete command - should fail
Select
# invalid column name - should fail
Select ^^^ frox Sys.dual
""")
pyparsing_common.number.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
# any int or real number, returned as float
pyparsing_common.fnumber.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
pyparsing_common.hex_integer.runTests("""
100
FF
""")
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests("""
12345678-1234-5678-1234-567812345678
""")
| gpl-2.0 | -7,066,982,590,310,306,000 | 38.355864 | 195 | 0.559963 | false |
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/contrib/layers/python/layers/embedding_ops.py | 2 | 41512 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Embedding functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
__all__ = [
"safe_embedding_lookup_sparse", "scattered_embedding_lookup",
"scattered_embedding_lookup_sparse", "embedding_lookup_unique",
"embedding_lookup_sparse_with_distributed_aggregation"
]
def safe_embedding_lookup_sparse(embedding_weights,
sparse_ids,
sparse_weights=None,
combiner=None,
default_id=None,
name=None,
partition_strategy="div",
max_norm=None):
"""Lookup embedding results, accounting for invalid IDs and empty features.
The partitioned embedding in `embedding_weights` must all be the same shape
except for the first dimension. The first dimension is allowed to vary as the
vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a
partitioner.
Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
with non-positive weight. For an entry with no features, the embedding vector
for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
The ids and weights may be multi-dimensional. Embeddings are always aggregated
along the last dimension.
Args:
embedding_weights: A list of `P` float tensors or values representing
partitioned embedding tensors. Alternatively, a `PartitionedVariable`,
created by partitioning along dimension 0. The total unpartitioned
shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
vocab size and `e_1, ..., e_m` are the embedding dimensions.
sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
ids. `d_0` is typically batch size.
sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
float weights corresponding to `sparse_ids`, or `None` if all weights
      are assumed to be 1.0.
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
the default.
default_id: The id to use for an entry with no features.
name: A name for this operation (optional).
partition_strategy: A string specifying the partitioning strategy.
Currently `"div"` and `"mod"` are supported. Default is `"div"`.
max_norm: If not None, all embeddings are l2-normalized to max_norm before
combining.
Returns:
Dense tensor of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
Raises:
ValueError: if `embedding_weights` is empty.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if embedding_weights is None:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
if isinstance(embedding_weights, variables.PartitionedVariable):
embedding_weights = list(embedding_weights) # get underlying Variables.
if not isinstance(embedding_weights, list):
embedding_weights = [embedding_weights]
if len(embedding_weights) < 1:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
dtype = sparse_weights.dtype if sparse_weights is not None else None
embedding_weights = [
ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
]
contrib_tensor_util.assert_same_float_dtype(embedding_weights +
[sparse_weights])
with ops.name_scope(name, "embedding_lookup",
embedding_weights + [sparse_ids,
sparse_weights]) as scope:
# Reshape higher-rank sparse ids and weights to linear segment ids.
original_shape = sparse_ids.dense_shape
original_rank_dim = sparse_ids.dense_shape.get_shape()[0]
original_rank = (
array_ops.size(original_shape)
if original_rank_dim.value is None
else original_rank_dim.value)
sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
math_ops.reduce_prod(
array_ops.slice(original_shape, [0], [original_rank - 1])),
array_ops.gather(original_shape, original_rank - 1)])
if sparse_weights is not None:
sparse_weights = sparse_tensor.SparseTensor(
sparse_ids.indices,
sparse_weights.values, sparse_ids.dense_shape)
# Prune invalid ids and weights.
sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
# Fill in dummy values for empty features, if necessary.
sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
default_id or
0)
if sparse_weights is not None:
sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
result = embedding_ops.embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights,
combiner=combiner,
partition_strategy=partition_strategy,
name=None if default_id is None else scope,
max_norm=max_norm)
if default_id is None:
# Broadcast is_row_empty to the same shape as embedding_lookup_result,
# for use in Select.
is_row_empty = array_ops.tile(
array_ops.reshape(is_row_empty, [-1, 1]),
array_ops.stack([1, array_ops.shape(result)[1]]))
result = array_ops.where(is_row_empty,
array_ops.zeros_like(result),
result,
name=scope)
# Reshape back from linear ids back into higher-dimensional dense result.
final_result = array_ops.reshape(
result,
array_ops.concat([
array_ops.slice(
math_ops.cast(original_shape, dtypes.int32), [0],
[original_rank - 1]),
array_ops.slice(array_ops.shape(result), [1], [-1])
], 0))
final_result.set_shape(tensor_shape.unknown_shape(
(original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
return final_result
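# Hedged usage sketch (added for illustration; the weights, ids and shapes
# below are assumptions chosen only to demonstrate the call signature and the
# pruning/empty-row behavior described above).
def _safe_embedding_lookup_sparse_example():
  """Builds a tiny graph exercising `safe_embedding_lookup_sparse`."""
  embedding_weights = [constant_op.constant(
      [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])]  # vocab size 3, dimension 2
  sparse_ids = sparse_tensor.SparseTensor(
      indices=[[0, 0], [1, 0], [1, 1]],
      values=constant_op.constant([2, -1, 0], dtype=dtypes.int64),
      dense_shape=[3, 2])  # row 2 has no features; id -1 is invalid
  # id -1 is pruned and the empty row 2 comes back as the 0-vector.
  return safe_embedding_lookup_sparse(
      embedding_weights, sparse_ids, combiner="mean")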
def _prune_invalid_ids(sparse_ids, sparse_weights):
"""Prune invalid IDs (< 0) from the input ids and weights."""
is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
if sparse_weights is not None:
is_id_valid = math_ops.logical_and(
is_id_valid, math_ops.greater(sparse_weights.values, 0))
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
if sparse_weights is not None:
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
return sparse_ids, sparse_weights
def scattered_embedding_lookup(params,
values,
dimension,
name=None,
hash_key=None):
"""Looks up embeddings using parameter hashing for each value in `values`.
The i-th embedding component of a value v in `values` is found by retrieving
the weight whose index is a fingerprint of the pair (v,i).
The concept is explored as "feature hashing" for model compression in this
paper: http://arxiv.org/pdf/1504.04788.pdf
Feature hashing has the pleasant effect of allowing us to compute an embedding
without needing a pre-determined vocabulary, relieving some amount of process
complexity. It also allows for us to maintain embeddings for possibly
trillions of features with a fixed amount of memory.
Note that this is superior to out-of-vocabulary shared "hash buckets" in that
the embedding is extremely likely to be unique for each token as opposed to
being shared across probably-colliding tokens. The price is that we must
compute a hash once for each scalar in the token's embedding as opposed to
once per token.
If `params` is a list, it represents a partition of the embedding parameters.
Each tensor in the list should have the same length, except for the first ones
which may have an additional element. For instance 10 parameters can be
partitioned in 4 tensors with length `[3, 3, 2, 2]`.
Args:
params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
Each tensor must be of rank 1 with fully-defined shape.
values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
dimension: Embedding dimension.
name: An optional name for this op.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
Returns:
A `Tensor` with shape `[d0, ..., dn, dimension]`.
Raises:
ValueError: if dimension is not positive or the partition size is invalid.
"""
if dimension is None:
raise ValueError("You must specify dimension.")
return _sampled_scattered_embedding_lookup(
params, values, dimension=dimension, sampled_candidates=None,
hash_key=hash_key, name=name)
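# Hedged usage sketch (added for illustration; the parameter size and the
# tokens are assumptions). Hashing lets arbitrary strings be embedded without
# a pre-built vocabulary.
def _scattered_embedding_lookup_example():
  """Embeds two string tokens into 4 hashed dimensions."""
  params = array_ops.zeros([100])  # rank-1 parameters with a defined shape
  values = constant_op.constant(["foo", "bar"])
  # Component (v, i) of the result indexes params by a fingerprint of (v, i),
  # so the output has shape [2, 4].
  return scattered_embedding_lookup(params, values, dimension=4)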
def _sampled_scattered_embedding_lookup(
params, values, dimension=None, sampled_candidates=None, hash_key=None,
name=None):
"""Looks up embeddings using parameter hashing for each value in `values`.
This method looks up selected embedding dimensions if `sampled_candidates` is
given, otherwise looks up all dimensions.
The i-th embedding component of a value v in `values` is found by retrieving
the weight whose index is a fingerprint of the pair (v,i).
The concept is explored as "feature hashing" for model compression in this
paper: http://arxiv.org/pdf/1504.04788.pdf
Feature hashing has the pleasant effect of allowing us to compute an embedding
without needing a pre-determined vocabulary, relieving some amount of process
complexity. It also allows for us to maintain embeddings for possibly
trillions of features with a fixed amount of memory.
Note that this is superior to out-of-vocabulary shared "hash buckets" in that
the embedding is extremely likely to be unique for each token as opposed to
being shared across probably-colliding tokens. The price is that we must
compute a hash once for each scalar in the token's embedding as opposed to
once per token.
If `params` is a list, it represents a partition of the embedding parameters.
Each tensor in the list should have the same length, except for the first ones
which may have an additional element. For instance 10 parameters can be
partitioned in 4 tensors with length `[3, 3, 2, 2]`.
Args:
params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
Each tensor must be of rank 1 with fully-defined shape.
values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
dimension: Embedding dimension. The user must specify either `dimension` or
`sampled_candidates`.
sampled_candidates: An optional `Tensor` of slice indices to keep along the
final dimension with shape `[d0, ..., dn, N]`. If given, `dimension` is
ignored. If `None`, looks up all candidates.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
name: An optional name for this op.
Returns:
A `Tensor` with shape `[d0, ..., dn, dimension]`.
If `sampled_candidates` is given, the output shape is `[d0, ..., dn, N]`
Raises:
ValueError: if dimension is not positive or the partition size is invalid.
"""
if isinstance(params, variables.PartitionedVariable):
params = list(params)
if not isinstance(params, list):
params = [params]
with ops.name_scope(name, "scattered_embedding_lookup",
params + [dimension, values]):
# Flatten the values
values_shape = array_ops.shape(values)
values = array_ops.reshape(values, [-1, 1])
if sampled_candidates is None:
if dimension is None:
raise ValueError(
"You must specify either dimension or sampled_candidates.")
if dimension <= 0:
raise ValueError("Dimension must be >0. Given is %d" % dimension)
sampled_candidates = array_ops.tile(array_ops.expand_dims(
math_ops.range(0, dimension), 0), array_ops.shape(values))
else:
dimension = array_ops.shape(sampled_candidates)[
math_ops.subtract(array_ops.rank(sampled_candidates), 1)]
sampled_candidates_shape = array_ops.shape(sampled_candidates)
dimension_tensor = array_ops.reshape(dimension, shape=[1,])
expected_shape = array_ops.concat([values_shape, dimension_tensor], 0)
with ops.control_dependencies([control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(sampled_candidates_shape,
expected_shape)),
["The shape of sampled_candidates: ", sampled_candidates_shape,
" does not match the shape of values: ", values_shape])]):
# Flatten sampled_candidates, same way as values are flattened.
sampled_candidates = array_ops.reshape(sampled_candidates,
[-1, dimension])
num_partitions = len(params)
partition_sizes = []
for p in range(num_partitions):
shape = params[p].get_shape()
shape.assert_has_rank(1)
shape.assert_is_fully_defined()
partition_sizes.append(shape[0].value)
num_params = sum(partition_sizes) # Total number of parameters.
# Assert the size of each partition.
for p in range(num_partitions):
expected_size = (num_params - p - 1) // num_partitions + 1
if partition_sizes[p] != expected_size:
raise ValueError("Tensor %d in params has size %d, expected %d." %
(p, partition_sizes[p], expected_size))
# With two values v1 and v2 and 3 dimensions, we will cross
# [[0, 1, 2], [0, 1, 2]] with [[v1], [v2]].
tensors_to_cross = [sampled_candidates, values]
ids = sparse_feature_cross_op.sparse_feature_cross(
tensors_to_cross, hashed_output=True, num_buckets=num_params,
hash_key=hash_key)
ids = sparse_ops.sparse_tensor_to_dense(ids)
# No need to validate the indices since we have checked the params
# dimensions and we know the largest id.
result = embedding_ops.embedding_lookup(
params, ids, partition_strategy="div")
return array_ops.reshape(result,
array_ops.concat([values_shape, [dimension]], 0))
def scattered_embedding_lookup_sparse(params,
sparse_values,
dimension,
combiner=None,
default_value=None,
name=None,
hash_key=None):
"""Looks up embeddings of a sparse feature using parameter hashing.
See `tf.contrib.layers.scattered_embedding_lookup` for embedding with hashing.
Args:
params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
Each tensor must be of rank 1 with fully-defined shape.
sparse_values: A 2-D `SparseTensor` containing the values to be embedded.
Some rows may be empty.
dimension: Embedding dimension
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
the default.
default_value: The value to use for an entry with no features.
name: An optional name for this op.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
Returns:
Dense tensor with shape [N, dimension] with N the number of rows in
sparse_values.
Raises:
TypeError: If sparse_values is not a SparseTensor.
ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if isinstance(params, variables.PartitionedVariable):
params = list(params)
if not isinstance(params, list):
params = [params]
if not isinstance(sparse_values, sparse_tensor.SparseTensor):
raise TypeError("sparse_values must be SparseTensor")
with ops.name_scope(name, "scattered_embedding_lookup_sparse",
params + [sparse_values]) as scope:
# Fill in the empty rows.
if default_value is None:
# Random default values to reduce the risk of collision.
if sparse_values.dtype == dtypes.string:
default_value = "6ZxWzWOHxZ"
else:
default_value = 1288896567
sparse_values, _ = sparse_ops.sparse_fill_empty_rows(
sparse_values, default_value)
segment_ids = sparse_values.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
values = sparse_values.values
values, idx = array_ops.unique(values)
embeddings = scattered_embedding_lookup(
params, values, dimension, hash_key=hash_key)
if combiner == "sum":
embeddings = math_ops.sparse_segment_sum(embeddings, idx, segment_ids,
name=scope)
elif combiner == "mean":
embeddings = math_ops.sparse_segment_mean(embeddings, idx, segment_ids,
name=scope)
elif combiner == "sqrtn":
embeddings = math_ops.sparse_segment_sqrt_n(embeddings, idx, segment_ids,
name=scope)
else:
raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'.")
return embeddings
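# Hedged usage sketch (added for illustration; all values are assumptions).
def _scattered_embedding_lookup_sparse_example():
  """Embeds a sparse batch of string features with hashed parameters."""
  params = array_ops.zeros([100])
  sp_values = sparse_tensor.SparseTensor(
      indices=[[0, 0], [0, 1], [2, 0]],
      values=constant_op.constant(["a", "b", "c"]),
      dense_shape=[3, 2])  # row 1 is empty and receives a default value
  # Returns a dense [3, 8] tensor; each row combines its feature embeddings.
  return scattered_embedding_lookup_sparse(params, sp_values, dimension=8,
                                           combiner="mean")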
def embedding_lookup_unique(params, ids, name=None):
"""Version of embedding_lookup that avoids duplicate lookups.
This can save communication in the case of repeated ids.
Same interface as embedding_lookup. Except it supports multi-dimensional `ids`
which allows to not reshape input/output to fit gather.
Args:
params: A list of tensors with the same shape and type, or a
`PartitionedVariable`. Shape `[index, d1, d2, ...]`.
ids: A one-dimensional `Tensor` with type `int32` or `int64` containing
the ids to be looked up in `params`. Shape `[ids1, ids2, ...]`.
name: A name for this operation (optional).
Returns:
A `Tensor` with the same type as the tensors in `params` and dimension of
`[ids1, ids2, d1, d2, ...]`.
Raises:
ValueError: If `params` is empty.
"""
with ops.name_scope(name, "EmbeddingLookupUnique", [params, ids]):
ids = ops.convert_to_tensor(ids)
shape = array_ops.shape(ids)
ids_flat = array_ops.reshape(
ids, math_ops.reduce_prod(shape, keepdims=True))
unique_ids, idx = array_ops.unique(ids_flat)
unique_embeddings = embedding_ops.embedding_lookup(params, unique_ids)
embeds_flat = array_ops.gather(unique_embeddings, idx)
embed_shape = array_ops.concat(
[shape, array_ops.shape(unique_embeddings)[1:]], 0)
embeds = array_ops.reshape(embeds_flat, embed_shape)
embeds.set_shape(ids.get_shape().concatenate(
unique_embeddings.get_shape()[1:]))
return embeds
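# Hedged usage sketch (added for illustration; ids and params are arbitrary).
def _embedding_lookup_unique_example():
  """Looks up a 2-D id tensor containing duplicate ids."""
  params = array_ops.reshape(
      math_ops.cast(math_ops.range(0, 12), dtypes.float32), [4, 3])
  ids = constant_op.constant([[0, 1], [1, 0]])  # ids 0 and 1 each repeat
  # Only the two unique ids are looked up once; output shape is [2, 2, 3].
  return embedding_lookup_unique(params, ids)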
def _sampled_scattered_embedding_lookup_sparse(params,
sp_values,
dimension=None,
sampled_candidates=None,
hash_key=None,
with_sign_hash=False,
name=None):
"""Looks up embeddings using parameter hashing for sparse values.
This method looks up selected embedding dimensions if `sampled_candidates` is
given, otherwise looks up all dimensions.
The i-th embedding component of a value v in `values` is found by retrieving
the weight whose index is a fingerprint of the pair (v,i).
The concept is explored as "feature hashing" for model compression in this
paper: http://arxiv.org/pdf/1504.04788.pdf
This is logically equivalent to:
* Transforming `sp_values` (which has shape `[d0, d1]`) into a one-hot
`Tensor` of shape `[d0, N]`.
* Multiplying with a `Tensor` `h` of shape `[N, dimension]`, where
`h(i, j) = params[hash(i, j)]`.
Args:
params: A float `Tensor` with rank 1 and fully-defined shape.
sp_values: A 2D `SparseTensor` to be embedded with shape `[d0, d1]`.
dimension: An int `Tensor` of the final dimension. The user needs to provide
either `dimension` or `sampled_candidates`.
sampled_candidates: An optional `Tensor` of column indices to keep along
the final dimension with shape `[d0, N]`. If given, `dimension` is
ignored. If `None`, looks up all candidates.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
with_sign_hash: A `bool` indicating whether `h(i, j)` should be multiplied
by `+1` or `-1`, where the value selected is determined by hashing
`(i, j)`. This is often necessary to remove bias resulting from hash
collisions.
name: An optional name for this op.
Returns:
A `Tensor` of shape `[d0, dimension]`.
If `sampled_candidates` is given, the output shape is `[d0, N]`.
Raises:
TypeError: If sp_values is not `SparseTensor`.
ValueError: If both `dimension` and `sampled_candidates` are `None`.
"""
if not isinstance(sp_values, sparse_tensor.SparseTensor):
raise TypeError("sp_values must be SparseTensor")
with ops.name_scope(
name=name,
default_name="sampled_scattered_embedding_lookup_sparse",
values=[sp_values, params, dimension, sampled_candidates]) as name_scope:
segment_ids = sp_values.indices[:, 0]
if sampled_candidates is not None:
# Tile sampled_candidates so there is one line corresponding to each
# element in sp_values.values
sampled_candidates = array_ops.gather(sampled_candidates, segment_ids)
embeddings = _sampled_scattered_embedding_lookup(
params, sp_values.values, dimension=dimension,
sampled_candidates=sampled_candidates,
hash_key=hash_key, name="values_lookup")
if with_sign_hash:
signs = _sampled_scattered_embedding_lookup(
array_ops.constant([-1., 1.]), sp_values.values, dimension=dimension,
sampled_candidates=sampled_candidates, hash_key=hash_key,
name="signs_lookup")
embeddings = math_ops.multiply(signs, embeddings, name="signs_hash")
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
num_segments = array_ops.shape(sp_values)[0]
return math_ops.unsorted_segment_sum(embeddings, segment_ids,
num_segments=num_segments,
name=name_scope)
def embedding_lookup_sparse_with_distributed_aggregation(
params,
sp_ids,
sp_weights,
partition_strategy="mod",
name=None,
combiner=None,
max_norm=None):
"""Computes embeddings for the given ids and weights.
Embeddings belonging to same param are aggregated on that device first. This
op is intended to decrease data transmission and improve parallelism. See
`tf.nn.embedding_lookup_sparse` for the functionality and example of this op.
Args:
params: A single tensor representing the complete embedding tensor,
or a list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
sp_ids: N x M SparseTensor of int64 ids (typically from FeatureValueToId),
where N is typically batch size and M is arbitrary.
sp_weights: either a SparseTensor of float / double weights, or None to
indicate all weights should be taken to be 1. If specified, sp_weights
must have exactly the same shape and indices as sp_ids.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: Optional name for the op.
combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
and "sum" are supported.
"sum" computes the weighted sum of the embedding results for each row.
"mean" is the weighted sum divided by the total weight.
"sqrtn" is the weighted sum divided by the square root of the sum of the
squares of the weights.
max_norm: If not None, each embedding is normalized to have l2 norm equal
to max_norm before combining.
Returns:
A dense tensor representing the combined embeddings for the
sparse ids. For each row in the dense tensor represented by sp_ids, the op
looks up the embeddings for all ids in that row, multiplies them by the
corresponding weight, and combines these embeddings as specified.
Raises:
TypeError: If sp_ids is not a SparseTensor, or if sp_weights is neither
None nor SparseTensor.
ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
if not isinstance(sp_ids, sparse_tensor.SparseTensor):
raise TypeError("sp_ids must be SparseTensor")
ignore_weights = sp_weights is None
if not ignore_weights:
if not isinstance(sp_weights, sparse_tensor.SparseTensor):
raise TypeError("sp_weights must be either None or SparseTensor")
sp_ids.values.get_shape().assert_is_compatible_with(
sp_weights.values.get_shape())
sp_ids.indices.get_shape().assert_is_compatible_with(
sp_weights.indices.get_shape())
sp_ids.dense_shape.get_shape().assert_is_compatible_with(
sp_weights.dense_shape.get_shape())
# TODO(yleon): Add enhanced node assertions to verify that sp_ids and
# sp_weights have equal indices and shapes.
with ops.name_scope(name, "embedding_lookup_sparse",
params + [sp_ids]) as name:
segment_ids = sp_ids.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
ids = sp_ids.values
if ignore_weights:
ids, idx = array_ops.unique(ids)
else:
idx = None
weights = None if ignore_weights else sp_weights.values
embeddings = _embedding_lookup_with_distributed_aggregation(
params,
ids,
partition_strategy=partition_strategy,
max_norm=max_norm,
weights=weights,
idx=idx,
segment_ids=segment_ids)
    # Set all weights to one if weights are being ignored.
if ignore_weights:
weights = array_ops.fill([array_ops.shape(segment_ids)[0]], 1)
if weights.dtype != embeddings.dtype:
weights = math_ops.cast(weights, embeddings.dtype)
# Reshape weights.
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones], 0)
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
if embeddings.get_shape().ndims is not None:
weights.set_shape(
orig_weights_shape.concatenate(
[1 for _ in range(embeddings.get_shape().ndims - 1)]))
if combiner == "mean":
weight_sum = math_ops.segment_sum(weights, segment_ids)
embeddings = math_ops.div(embeddings, weight_sum)
elif combiner == "sqrtn":
weights_squared = math_ops.pow(weights, 2)
weight_sum = math_ops.segment_sum(weights_squared, segment_ids)
weight_sum_sqrt = math_ops.sqrt(weight_sum)
embeddings = math_ops.div(embeddings, weight_sum_sqrt)
elif combiner != "sum":
assert False, "Unrecognized combiner"
return embeddings
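# Hedged usage sketch (added for illustration; the shard sizes and ids are
# assumptions). Partial sums are computed next to each parameter shard before
# the final combine.
def _embedding_lookup_sparse_with_distributed_aggregation_example():
  """Combines embeddings for a sparse id batch over two parameter shards."""
  shards = [array_ops.zeros([5, 3]), array_ops.zeros([5, 3])]  # 10 ids total
  sp_ids = sparse_tensor.SparseTensor(
      indices=[[0, 0], [0, 1], [1, 0]],
      values=constant_op.constant([1, 9, 4], dtype=dtypes.int64),
      dense_shape=[2, 2])
  return embedding_lookup_sparse_with_distributed_aggregation(
      shards, sp_ids, None, combiner="sum")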
def _do_gather(params, ids, name=None):
"""Deals with doing gather differently for resource variables."""
if isinstance(params, resource_variable_ops.ResourceVariable):
return params.sparse_read(ids, name=name)
return array_ops.gather(params, ids, name=name)
def _embedding_lookup_with_distributed_aggregation(params,
ids,
partition_strategy="mod",
name=None,
max_norm=None,
weights=None,
idx=None,
segment_ids=None):
"""Lookup helper for embedding_lookup_sparse_with_distributed_aggregation."""
if params is None or params == []: # pylint: disable=g-explicit-bool-comparison
raise ValueError("Need at least one param")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
def maybe_normalize(x):
if max_norm is not None:
if x.get_shape().ndims is not None:
ndims = x.get_shape().ndims
else:
ndims = array_ops.size(array_ops.shape(x))
return clip_ops.clip_by_norm(x, max_norm, axes=list(range(1, ndims)))
return x
with ops.name_scope(name, "embedding_lookup_with_distributed_aggregation",
params + [ids]) as name:
np = len(params) # Number of partitions
# Preserve the resource variable status to avoid accidental dense reads.
if not any(
isinstance(p, resource_variable_ops.ResourceVariable) for p in params):
params = ops.convert_n_to_tensor_or_indexed_slices(params, name="params")
if np == 1:
with ops.colocate_with(params[0]):
ret = maybe_normalize(_do_gather(params[0], ids))
ignore_weights = weights is None
if not ignore_weights:
if weights.dtype != ret.dtype:
weights = math_ops.cast(weights, ret.dtype)
# Reshape to allow broadcast
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(ret) - 1, 0), 1)
bcast_weights_shape = array_ops.concat(
[array_ops.shape(weights), ones], 0)
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
# Set weights shape after reshape
if ret.get_shape().ndims is not None:
weights.set_shape(
orig_weights_shape.concatenate(
[1 for _ in range(ret.get_shape().ndims - 1)]))
ret *= weights
return math_ops.segment_sum(ret, segment_ids, name=name)
else:
return math_ops.sparse_segment_sum(ret, idx, segment_ids, name=name)
else:
ids = ops.convert_to_tensor(ids, name="ids")
flat_ids = array_ops.reshape(ids, [-1])
original_indices = math_ops.range(array_ops.size(flat_ids))
# Create p_assignments and set new_ids depending on the strategy.
if partition_strategy == "mod":
p_assignments = flat_ids % np
new_ids = flat_ids // np
elif partition_strategy == "div":
# Compute num_total_ids as the sum of dim-0 of params, then assign to
# partitions based on a constant number of ids per partition. Optimize
# if we already know the full shape statically.
dim_0_size = params[0].get_shape()[0]
for p in xrange(1, np):
dim_0_size += params[p].get_shape()[0]
if dim_0_size.value:
num_total_ids = constant_op.constant(dim_0_size.value, flat_ids.dtype)
else:
dim_0_sizes = []
for p in xrange(np):
if params[p].get_shape()[0].value is not None:
dim_0_sizes.append(params[p].get_shape()[0].value)
else:
with ops.colocate_with(params[p]):
dim_0_sizes.append(array_ops.shape(params[p])[0])
num_total_ids = math_ops.reduce_sum(
math_ops.cast(array_ops.stack(dim_0_sizes), flat_ids.dtype))
ids_per_partition = num_total_ids // np
extras = num_total_ids % np
p_assignments = math_ops.maximum(flat_ids // (ids_per_partition + 1), (
flat_ids - extras) // ids_per_partition)
# Emulate a conditional using a boolean indicator tensor
is_in_first_extras_partitions = math_ops.cast(p_assignments < extras,
flat_ids.dtype)
new_ids = (is_in_first_extras_partitions * (flat_ids %
(ids_per_partition + 1)) +
(1 - is_in_first_extras_partitions) * (
(flat_ids - extras) % ids_per_partition))
else:
raise ValueError("Unrecognized partition strategy: " +
partition_strategy)
# Cast partition assignments to int32 for use in dynamic_partition.
# There really should not be more than 2^32 partitions.
p_assignments = math_ops.cast(p_assignments, dtypes.int32)
# Partition list of ids based on assignments into np separate lists
gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np)
# Similarly, partition the original indices.
pindices = data_flow_ops.dynamic_partition(original_indices,
p_assignments, np)
# Do np separate lookups, finding embeddings for plist[p] in params[p]
partitioned_result = []
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result.append(_do_gather(params[p], gather_ids[p]))
ignore_weights = weights is None
if not ignore_weights:
# Partition weights according to pindices.
partitioned_weight = []
for p in xrange(np):
partitioned_weight.append(array_ops.gather(weights, pindices[p]))
# Reshape each partition result.
element_shape = params[0].get_shape()[1:]
for p in params[1:]:
element_shape = element_shape.merge_with(p.get_shape()[1:])
if element_shape.is_fully_defined():
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result[p] = array_ops.reshape(
partitioned_result[p],
array_ops.concat([array_ops.shape(pindices[p]), element_shape],
0))
else:
with ops.colocate_with(params[0]):
params_shape = array_ops.shape(params[0])
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result[p] = array_ops.reshape(
partitioned_result[p],
array_ops.concat([
array_ops.shape(pindices[p]), array_ops.slice(
params_shape, [1], [-1])
], 0))
# Normalize each partition result.
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result[p] = maybe_normalize(partitioned_result[p])
if not ignore_weights:
# Multiply each partition result with partition weights.
for p in xrange(np):
with ops.colocate_with(params[p]):
if partitioned_weight[p].dtype != partitioned_result[p].dtype:
partitioned_weight[p] = math_ops.cast(partitioned_weight[p],
partitioned_result[p].dtype)
# Reshape partition weights.
ones = array_ops.fill(
array_ops.expand_dims(
array_ops.rank(partitioned_result[p]) - 1, 0), 1)
bcast_weights_shape = array_ops.concat(
[array_ops.shape(partitioned_weight[p]), ones], 0)
orig_weights_shape = partitioned_weight[p].get_shape()
partitioned_weight[p] = array_ops.reshape(partitioned_weight[p],
bcast_weights_shape)
if partitioned_result[p].get_shape().ndims is not None:
partitioned_weight[p].set_shape(
orig_weights_shape.concatenate([
1
for _ in range(partitioned_result[p].get_shape().ndims -
1)
]))
partitioned_result[p] *= partitioned_weight[p]
partitioned_segment_ids = []
for p in xrange(np):
if not ignore_weights:
# Partition segment_ids according to pindices.
p_segment_ids = array_ops.gather(segment_ids, pindices[p])
# Number the p_segment_ids to meet segment_sum's requirements. Note
# that unique_p_segment_ids contains unique segment ids of this
# partition and these ids' order is unchanged.
unique_p_segment_ids, unique_p_segment_idx = array_ops.unique(
p_segment_ids)
partitioned_segment_ids.append(unique_p_segment_ids)
# segment_sum this partition's result.
with ops.colocate_with(params[p]):
partitioned_result[p] = math_ops.segment_sum(
partitioned_result[p], unique_p_segment_idx)
else:
          # When ignoring weights, we need to get the indices of elements in
          # idx and segment_ids.
_, exclude_idx = array_ops.setdiff1d(idx, pindices[p])
all_idx = math_ops.range(array_ops.shape(idx)[0])
_, include_idx = array_ops.setdiff1d(all_idx, exclude_idx)
# Gather segment_ids and idx according to indexs.
p_segment_ids = array_ops.gather(segment_ids, include_idx)
p_idx = array_ops.gather(idx, include_idx)
          # Number the p_segment_ids, just as in the weighted case above.
unique_p_segment_ids, unique_p_segment_idx = array_ops.unique(
p_segment_ids)
_, unique_p_idx_idx = array_ops.unique(p_idx)
partitioned_segment_ids.append(unique_p_segment_ids)
with ops.colocate_with(params[p]):
partitioned_result[p] = math_ops.sparse_segment_sum(
partitioned_result[p], unique_p_idx_idx, unique_p_segment_idx)
# Concat each partition's segment_ids and result for final segment_sum.
concat_segment_ids = array_ops.concat(partitioned_segment_ids, 0)
concat_partitioned_result = array_ops.concat(partitioned_result, 0)
return math_ops.unsorted_segment_sum(
concat_partitioned_result,
concat_segment_ids,
math_ops.reduce_max(concat_segment_ids) + 1,
name=name)
| mit | -709,370,804,553,335,700 | 44.818985 | 92 | 0.639333 | false |
nlproc/splunkml | bin/multiclassify.py | 1 | 2142 | import sys, os, itertools
try:
import cStringIO as StringIO
except:
import StringIO
import numpy as np
import scipy.sparse as sp
from gensim.corpora import TextCorpus
from gensim.models import LsiModel, TfidfModel
from gensim.matutils import corpus2csc
from sklearn.feature_extraction import FeatureHasher
from sklearn.feature_extraction.text import CountVectorizer
def is_number(str):
try:
n = float(str)
return True
except (ValueError, TypeError):
return False
def process_records(records, fields, target, textmodel=None):
tokenize = CountVectorizer().build_analyzer()
input = None
X = None
y_labels = []
for i, record in enumerate(records):
nums = []
strs = []
y_labels.append(record.get(target))
for field in fields:
if is_number(record.get(field)):
nums.append(record[field])
else:
strs.append(str(record.get(field) or "").lower())
if strs:
if input is None:
input = StringIO.StringIO()
print >> input, " ".join(tokenize(" ".join(strs)))
if nums:
if X is None:
X = sp.lil_matrix((len(records),len(nums)))
X[i] = np.array(nums, dtype=np.float64)
if input is not None:
if X is not None:
X_2 = X.tocsr()
else:
X_2 = None
if isinstance(textmodel,basestring):
if textmodel == 'lsi':
corpus = TextCorpus(input)
textmodel = LsiModel(corpus, chunksize=1000)
elif textmodel == 'tfidf':
corpus = TextCorpus(input)
textmodel = TfidfModel(corpus)
			elif textmodel == 'hashing':
				textmodel = None
				hasher = FeatureHasher(n_features=2 ** 18, input_type="string")
				input.seek(0)
				X = hasher.transform(tokenize(line.strip()) for line in input)
				if X_2 is not None:
					# Keep the numeric features alongside the hashed text
					# features; this branch previously dropped X_2.
					X = sp.hstack([X, X_2], format='csr')
if textmodel:
num_terms = len(textmodel.id2word or getattr(textmodel, 'dfs',[]))
X = corpus2csc(textmodel[corpus], num_terms).transpose()
if X_2 is not None:
# print >> sys.stderr, "X SHAPE:", X.shape
# print >> sys.stderr, "X_2 SHAPE:", X_2.shape
X = sp.hstack([X, X_2], format='csr')
elif X is not None:
textmodel = None
X = X.tocsr()
print >> sys.stderr, "X SHAPE:", X.shape
return X, y_labels, textmodel
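# Hedged usage sketch (added for illustration; the record layout, field names
# and target below are assumptions, not part of splunkml):
#
#   records = [{"bytes": "120", "uri": "/index", "status": "200"},
#              {"bytes": "80", "uri": "/login", "status": "404"}]
#   X, labels, model = process_records(records, fields=["bytes", "uri"],
#                                      target="status", textmodel="tfidf")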
| apache-2.0 | 2,015,773,664,446,975,200 | 23.340909 | 69 | 0.679272 | false |
stefanw/django-cms | cms/tests/admin.py | 8 | 76720 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import json
import datetime
from cms import api
from cms.utils.urlutils import admin_reverse
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from djangocms_text_ckeditor.models import Text
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.sites import site
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission, AnonymousUser
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.http import (Http404, HttpResponseBadRequest, HttpResponseForbidden, HttpResponse,
QueryDict, HttpResponseNotFound)
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.encoding import force_text, smart_str
from django.utils import timezone
from django.utils.six.moves.urllib.parse import urlparse
from cms.admin.change_list import CMSChangeList
from cms.admin.forms import PageForm, AdvancedSettingsForm
from cms.admin.pageadmin import PageAdmin
from cms.admin.permissionadmin import PagePermissionInlineAdmin
from cms.api import create_page, create_title, add_plugin, assign_user_to_page, publish_page
from cms.constants import PLUGIN_MOVE_ACTION
from cms.models import UserSettings, StaticPlaceholder
from cms.models.pagemodel import Page
from cms.models.permissionmodels import GlobalPagePermission, PagePermission
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.models.titlemodels import Title
from cms.test_utils import testcases as base
from cms.test_utils.testcases import CMSTestCase, URL_CMS_PAGE_DELETE, URL_CMS_PAGE, URL_CMS_TRANSLATION_DELETE
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils import get_cms_setting
from cms.utils.compat import DJANGO_1_6
class AdminTestsBase(CMSTestCase):
@property
def admin_class(self):
return site._registry[Page]
def _get_guys(self, admin_only=False, use_global_permissions=True):
        admin_user = self.get_superuser()
        if admin_only:
            return admin_user
USERNAME = 'test'
if get_user_model().USERNAME_FIELD == 'email':
normal_guy = get_user_model().objects.create_user(USERNAME, '[email protected]', '[email protected]')
else:
normal_guy = get_user_model().objects.create_user(USERNAME, '[email protected]', USERNAME)
normal_guy.is_staff = True
normal_guy.is_active = True
normal_guy.save()
normal_guy.user_permissions = Permission.objects.filter(
codename__in=['change_page', 'change_title', 'add_page', 'add_title', 'delete_page', 'delete_title']
)
if use_global_permissions:
gpp = GlobalPagePermission.objects.create(
user=normal_guy,
can_change=True,
can_delete=True,
can_change_advanced_settings=False,
can_publish=True,
can_change_permissions=False,
can_move_page=True,
)
gpp.sites = Site.objects.all()
        return admin_user, normal_guy
class AdminTestCase(AdminTestsBase):
def test_extension_not_in_admin(self):
admin_user, staff = self._get_guys()
with self.login_user_context(admin_user):
request = self.get_request('/admin/cms/page/1/', 'en',)
response = site.index(request)
self.assertNotContains(response, '/mytitleextension/')
self.assertNotContains(response, '/mypageextension/')
def test_permissioned_page_list(self):
"""
Makes sure that a user with restricted page permissions can view
the page list.
"""
admin_user, normal_guy = self._get_guys(use_global_permissions=False)
current_site = Site.objects.get(pk=1)
page = create_page("Test page", "nav_playground.html", "en",
site=current_site, created_by=admin_user)
PagePermission.objects.create(page=page, user=normal_guy)
with self.login_user_context(normal_guy):
resp = self.client.get(URL_CMS_PAGE)
self.assertEqual(resp.status_code, 200)
def test_edit_does_not_reset_page_adv_fields(self):
"""
Makes sure that if a non-superuser with no rights to edit advanced page
fields edits a page, those advanced fields are not touched.
"""
OLD_PAGE_NAME = 'Test Page'
NEW_PAGE_NAME = 'Test page 2'
REVERSE_ID = 'Test'
OVERRIDE_URL = 'my/override/url'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(OLD_PAGE_NAME, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.reverse_id = REVERSE_ID
page.save()
title = page.get_title_obj()
title.has_url_overwrite = True
title.path = OVERRIDE_URL
title.save()
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
# The user edits the page (change the page name for ex.)
page_data = {
'title': NEW_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'pagepermission_set-TOTAL_FORMS': 0,
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0
}
# required only if user haves can_change_permission
with self.login_user_context(normal_guy):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), NEW_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
title = page.get_title_obj()
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
# The admin edits the page (change the page name for ex.)
page_data = {
'title': OLD_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'reverse_id': page.reverse_id,
'pagepermission_set-TOTAL_FORMS': 0, # required only if user haves can_change_permission
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
title = page.get_title_obj()
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
def test_edit_does_not_reset_apphook(self):
"""
        Makes sure that if a non-superuser with no rights to edit advanced
        page fields edits a page, the apphook settings are not reset.
"""
OLD_PAGE_NAME = 'Test Page'
NEW_PAGE_NAME = 'Test page 2'
REVERSE_ID = 'Test'
APPLICATION_URLS = 'project.sampleapp.urls'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(OLD_PAGE_NAME, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.reverse_id = REVERSE_ID
page.save()
title = page.get_title_obj()
title.has_url_overwrite = True
title.save()
page.application_urls = APPLICATION_URLS
page.save()
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, APPLICATION_URLS)
# The user edits the page (change the page name for ex.)
page_data = {
'title': NEW_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'pagepermission_set-TOTAL_FORMS': 0,
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0,
}
with self.login_user_context(normal_guy):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), NEW_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, APPLICATION_URLS)
title = page.get_title_obj()
# The admin edits the page (change the page name for ex.)
page_data = {
'title': OLD_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'reverse_id': page.reverse_id,
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, '')
def test_2apphooks_with_same_namespace(self):
PAGE1 = 'Test Page'
PAGE2 = 'Test page 2'
APPLICATION_URLS = 'project.sampleapp.urls'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(PAGE1, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page2 = create_page(PAGE2, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.application_urls = APPLICATION_URLS
page.application_namespace = "space1"
page.save()
page2.application_urls = APPLICATION_URLS
page2.save()
# The admin edits the page (change the page name for ex.)
page_data = {
'title': PAGE2,
'slug': page2.get_slug(),
'language': 'en',
'site': page.site.pk,
'template': page2.template,
'application_urls': 'SampleApp',
'application_namespace': 'space1',
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page.pk, page_data)
self.assertEqual(resp.status_code, 302)
self.assertEqual(Page.objects.filter(application_namespace="space1").count(), 1)
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page_data)
self.assertEqual(resp.status_code, 200)
page_data['application_namespace'] = 'space2'
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page_data)
self.assertEqual(resp.status_code, 302)
def test_delete(self):
admin_user = self.get_superuser()
create_page("home", "nav_playground.html", "en",
created_by=admin_user, published=True)
page = create_page("delete-page", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_page('child-page', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=page)
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
page.publish('en')
with self.login_user_context(admin_user):
data = {'post': 'yes'}
with self.assertNumQueries(FuzzyInt(300, 407)):
response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data)
self.assertRedirects(response, URL_CMS_PAGE)
def test_delete_diff_language(self):
admin_user = self.get_superuser()
create_page("home", "nav_playground.html", "en",
created_by=admin_user, published=True)
page = create_page("delete-page", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_page('child-page', "nav_playground.html", "de",
created_by=admin_user, published=True, parent=page)
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
page.publish('en')
with self.login_user_context(admin_user):
data = {'post': 'yes'}
with self.assertNumQueries(FuzzyInt(300, 394)):
response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data)
self.assertRedirects(response, URL_CMS_PAGE)
def test_search_fields(self):
superuser = self.get_superuser()
from django.contrib.admin import site
with self.login_user_context(superuser):
for model, admin_instance in site._registry.items():
if model._meta.app_label != 'cms':
continue
if not admin_instance.search_fields:
continue
url = admin_reverse('cms_%s_changelist' % model._meta.model_name)
response = self.client.get('%s?q=1' % url)
errmsg = response.content
self.assertEqual(response.status_code, 200, errmsg)
def test_delete_translation(self):
admin_user = self.get_superuser()
page = create_page("delete-page-translation", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_title("de", "delete-page-translation-2", page, slug="delete-page-translation-2")
create_title("es-mx", "delete-page-translation-es", page, slug="delete-page-translation-es")
with self.login_user_context(admin_user):
response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'})
self.assertEqual(response.status_code, 200)
response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'})
self.assertRedirects(response, URL_CMS_PAGE)
response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'es-mx'})
self.assertEqual(response.status_code, 200)
response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'es-mx'})
self.assertRedirects(response, URL_CMS_PAGE)
def test_change_dates(self):
admin_user, staff = self._get_guys()
page = create_page('test-page', 'nav_playground.html', 'en')
page.publish('en')
draft = page.get_draft_object()
with self.settings(USE_TZ=False):
original_date = draft.publication_date
original_end_date = draft.publication_end_date
new_date = timezone.now() - datetime.timedelta(days=1)
new_end_date = timezone.now() + datetime.timedelta(days=1)
url = admin_reverse('cms_page_dates', args=(draft.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {
'language': 'en',
'site': draft.site.pk,
'publication_date_0': new_date.date(),
'publication_date_1': new_date.strftime("%H:%M:%S"),
'publication_end_date_0': new_end_date.date(),
'publication_end_date_1': new_end_date.strftime("%H:%M:%S"),
})
self.assertEqual(response.status_code, 302)
draft = Page.objects.get(pk=draft.pk)
self.assertNotEqual(draft.publication_date.timetuple(), original_date.timetuple())
self.assertEqual(draft.publication_date.timetuple(), new_date.timetuple())
self.assertEqual(draft.publication_end_date.timetuple(), new_end_date.timetuple())
if original_end_date:
self.assertNotEqual(draft.publication_end_date.timetuple(), original_end_date.timetuple())
with self.settings(USE_TZ=True):
original_date = draft.publication_date
original_end_date = draft.publication_end_date
new_date = timezone.localtime(timezone.now()) - datetime.timedelta(days=1)
new_end_date = timezone.localtime(timezone.now()) + datetime.timedelta(days=1)
url = admin_reverse('cms_page_dates', args=(draft.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {
'language': 'en',
'site': draft.site.pk,
'publication_date_0': new_date.date(),
'publication_date_1': new_date.strftime("%H:%M:%S"),
'publication_end_date_0': new_end_date.date(),
'publication_end_date_1': new_end_date.strftime("%H:%M:%S"),
})
self.assertEqual(response.status_code, 302)
draft = Page.objects.get(pk=draft.pk)
self.assertNotEqual(draft.publication_date.timetuple(), original_date.timetuple())
self.assertEqual(timezone.localtime(draft.publication_date).timetuple(), new_date.timetuple())
self.assertEqual(timezone.localtime(draft.publication_end_date).timetuple(), new_end_date.timetuple())
if original_end_date:
self.assertNotEqual(draft.publication_end_date.timetuple(), original_end_date.timetuple())
def test_change_template(self):
admin_user, staff = self._get_guys()
request = self.get_request('/admin/cms/page/1/', 'en')
request.method = "POST"
pageadmin = site._registry[Page]
with self.login_user_context(staff):
self.assertRaises(Http404, pageadmin.change_template, request, 1)
page = create_page('test-page', 'nav_playground.html', 'en')
response = pageadmin.change_template(request, page.pk)
self.assertEqual(response.status_code, 403)
url = admin_reverse('cms_page_change_template', args=(page.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {'template': 'doesntexist'})
self.assertEqual(response.status_code, 400)
response = self.client.post(url, {'template': get_cms_setting('TEMPLATES')[0][0]})
self.assertEqual(response.status_code, 200)
def test_get_permissions(self):
page = create_page('test-page', 'nav_playground.html', 'en')
url = admin_reverse('cms_page_get_permissions', args=(page.pk,))
response = self.client.get(url)
if DJANGO_1_6:
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/login.html')
else:
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/en/admin/login/?next=/en/admin/cms/page/%s/permissions/' % page.pk)
admin_user = self.get_superuser()
with self.login_user_context(admin_user):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateNotUsed(response, 'admin/login.html')
def test_changelist_items(self):
admin_user = self.get_superuser()
first_level_page = create_page('level1', 'nav_playground.html', 'en')
second_level_page_top = create_page('level21', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=first_level_page)
second_level_page_bottom = create_page('level22', "nav_playground.html", "en",
created_by=admin_user, published=True,
parent=self.reload(first_level_page))
third_level_page = create_page('level3', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=second_level_page_top)
self.assertEqual(Page.objects.all().count(), 4)
url = admin_reverse('cms_%s_changelist' % Page._meta.model_name)
request = self.get_request(url)
request.session = {}
request.user = admin_user
page_admin = site._registry[Page]
cl_params = [request, page_admin.model, page_admin.list_display,
page_admin.list_display_links, page_admin.list_filter,
page_admin.date_hierarchy, page_admin.search_fields,
page_admin.list_select_related, page_admin.list_per_page]
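# Django has changed the ChangeList constructor signature across versions,
# so the argument list is assembled dynamically before instantiation.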
if hasattr(page_admin, 'list_max_show_all'): # django 1.4
cl_params.append(page_admin.list_max_show_all)
cl_params.extend([page_admin.list_editable, page_admin])
cl = CMSChangeList(*tuple(cl_params))
cl.set_items(request)
root_page = cl.get_items()[0]
self.assertEqual(root_page, first_level_page)
self.assertEqual(root_page.get_children()[0], second_level_page_top)
self.assertEqual(root_page.get_children()[1], second_level_page_bottom)
self.assertEqual(root_page.get_children()[0].get_children()[0], third_level_page)
def test_changelist_tree(self):
""" This test checks for proper jstree cookie unquoting.
It should be converted to a selenium test to actually test the jstree behaviour.
The cookie set below is just a forged example (taken from a live session)
"""
admin_user = self.get_superuser()
first_level_page = create_page('level1', 'nav_playground.html', 'en')
second_level_page_top = create_page('level21', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=first_level_page)
second_level_page_bottom = create_page('level22', "nav_playground.html", "en",
created_by=admin_user, published=True,
parent=self.reload(first_level_page))
third_level_page = create_page('level3', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=second_level_page_top)
url = admin_reverse('cms_%s_changelist' % Page._meta.model_name)
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='admin', password='admin')
self.client.cookies['djangocms_nodes_open'] = 'page_1%2Cpage_2'
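# '%2C' is the URL-encoded comma, so the cookie decodes to 'page_1,page_2',
# i.e. tree nodes 1 and 2 are open (hence open_menu_trees == [1, 2] below).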
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["open_menu_trees"], [1, 2])
# tests descendants method for the lazy load ajax call
url = "%s%d/en/descendants/" % (url, first_level_page.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# should include both direct descendant pages
self.assertContains(response, 'id="page_%s"' % second_level_page_top.pk)
self.assertContains(response, 'id="page_%s"' % second_level_page_bottom.pk)
# but not any further down the tree
self.assertNotContains(response, 'id="page_%s"' % third_level_page.pk)
self.assertNotContains(response, 'None')
def test_unihandecode_doesnt_break_404_in_admin(self):
self.get_superuser()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='admin', password='admin')
response = self.client.get('/en/admin/cms/page/1/?language=en')
self.assertEqual(response.status_code, 404)
def test_tree_displays_in_correct_language(self):
'''
Test to prove and protect that the page titles in the tree are
displayed in the currently set language.
'''
admin_guy, normal_guy = self._get_guys(use_global_permissions=False)
site = Site.objects.get(pk=1)
en_title = "EN Page"
es_title = "ES Pagina"
# Create a page in en
page = create_page(en_title, "nav_playground.html", "en", site=site, created_by=admin_guy)
# Add a es-mx translation for this page
create_title("es-mx", es_title, page, slug="es_pagina")
url = admin_reverse('cms_%s_changelist' % Page._meta.model_name)
url_pat = '<a href="{0}/{1}/preview/"[^>]*>{2}</a>'
with self.login_user_context(admin_guy):
# Check the EN version of the tree...
response = self.client.get(url, {'language': 'en'})
self.assertRegexpMatches(str(response.content), url_pat.format(page.pk, 'en', en_title, ))
# Check the ES version of the tree...
response = self.client.get(url, {'language': 'es-mx'})
self.assertRegexpMatches(str(response.content), url_pat.format(page.pk, 'es-mx', es_title, ))
def test_empty_placeholder_in_correct_language(self):
"""
Test that cleaning a placeholder only affects the current language's contents
"""
# create some objects
page_en = create_page("EmptyPlaceholderTestPage (EN)", "nav_playground.html", "en")
ph = page_en.placeholders.get(slot="body")
# add the text plugin to the en version of the page
add_plugin(ph, "TextPlugin", "en", body="Hello World EN 1")
add_plugin(ph, "TextPlugin", "en", body="Hello World EN 2")
# creating a de title of the page and adding plugins to it
create_title("de", page_en.get_title(), page_en, slug=page_en.get_slug())
add_plugin(ph, "TextPlugin", "de", body="Hello World DE")
add_plugin(ph, "TextPlugin", "de", body="Hello World DE 2")
add_plugin(ph, "TextPlugin", "de", body="Hello World DE 3")
# before cleaning the de placeholder
self.assertEqual(ph.get_plugins('en').count(), 2)
self.assertEqual(ph.get_plugins('de').count(), 3)
admin_user, staff = self._get_guys()
with self.login_user_context(admin_user):
url = '%s?language=de' % admin_reverse('cms_page_clear_placeholder', args=[ph.pk])
response = self.client.post(url, {'test': 0})
self.assertEqual(response.status_code, 302)
# After cleaning the de placeholder, en placeholder must still have all the plugins
self.assertEqual(ph.get_plugins('en').count(), 2)
self.assertEqual(ph.get_plugins('de').count(), 0)
class AdminTests(AdminTestsBase):
# TODO: needs tests for actual permissions, not only superuser/normaluser
def setUp(self):
self.page = create_page("testpage", "nav_playground.html", "en")
def get_admin(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True, is_superuser=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "admin"
usr = User(**fields)
usr.set_password(getattr(usr, User.USERNAME_FIELD))
usr.save()
return usr
def get_permless(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "permless"
usr = User(**fields)
usr.set_password(getattr(usr, User.USERNAME_FIELD))
usr.save()
return usr
def get_page(self):
return self.page
def test_change_publish_unpublish(self):
page = self.get_page()
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 403)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.publish_page(request, page.pk, "en")
# Forbidden
self.assertEqual(response.status_code, 403)
self.assertFalse(page.is_published('en'))
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
page = self.reload(page)
self.assertTrue(page.is_published('en'))
response = self.admin_class.unpublish(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
def test_change_status_adds_log_entry(self):
page = self.get_page()
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
self.assertFalse(LogEntry.objects.count())
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
self.assertEqual(1, LogEntry.objects.count())
self.assertEqual(page.pk, int(LogEntry.objects.all()[0].object_id))
def test_change_innavigation(self):
page = self.get_page()
permless = self.get_permless()
admin_user = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
self.assertRaises(Http404, self.admin_class.change_innavigation,
request, page.pk + 100)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
old = page.in_navigation
response = self.admin_class.change_innavigation(request, page.pk)
# These asserts are for #3589
self.assertContains(response, 'lang="en"')
self.assertContains(response, './%s/en/preview/' % page.pk)
self.assertEqual(response.status_code, 200)
page = self.reload(page)
self.assertEqual(old, not page.in_navigation)
def test_publish_page_requires_perms(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
request.method = "POST"
response = self.admin_class.publish_page(request, Page.objects.all()[0].pk, "en")
self.assertEqual(response.status_code, 403)
def test_revert_page(self):
self.page.publish('en')
title = self.page.title_set.get(language='en')
title.title = 'new'
title.save()
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(Page.objects.all().count(), 2)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
request.method = "POST"
response = self.admin_class.revert_page(request, Page.objects.all()[0].pk, "en")
self.assertEqual(response.status_code, 302)
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(Page.objects.all().count(), 2)
new_title = Title.objects.get(pk=title.pk)
self.assertNotEqual(title.title, new_title.title)
self.assertTrue(title.publisher_is_draft)
self.assertTrue(new_title.publisher_is_draft)
def test_revert_page_requires_perms(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
request.method = "POST"
response = self.admin_class.revert_page(request, Page.objects.all()[0].pk, 'en')
self.assertEqual(response.status_code, 403)
def test_revert_page_redirects(self):
admin_user = self.get_admin()
self.page.publish("en") # Ensure public copy exists before reverting
with self.login_user_context(admin_user):
response = self.client.get(admin_reverse('cms_page_revert_page', args=(self.page.pk, 'en')))
self.assertEqual(response.status_code, 302)
url = response['Location']
self.assertTrue(url.endswith('?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')))
def test_remove_plugin_requires_post(self):
ph = Placeholder.objects.create(slot='test')
plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request()
response = self.admin_class.delete_plugin(request, plugin.pk)
self.assertEqual(response.status_code, 200)
def test_move_plugin(self):
ph = Placeholder.objects.create(slot='test')
plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
page = self.get_page()
source, target = list(page.placeholders.all())[:2]
pageplugin = add_plugin(source, 'TextPlugin', 'en', body='test')
plugin_class = pageplugin.get_plugin_class_instance()
expected = {'reload': plugin_class.requires_reload(PLUGIN_MOVE_ACTION)}
placeholder = Placeholder.objects.all()[0]
permless = self.get_permless()
admin_user = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 405)
request = self.get_request(post_data={'not_usable': '1'})
self.assertRaises(MultiValueDictKeyError, self.admin_class.move_plugin, request)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'ids': plugin.pk})
self.assertRaises(MultiValueDictKeyError, self.admin_class.move_plugin, request)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': 'invalid-placeholder', 'plugin_language': 'en'})
self.assertRaises(ValueError, self.admin_class.move_plugin, request)
with self.login_user_context(permless):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.pk, 'plugin_parent': '', 'plugin_language': 'en'})
self.assertEqual(self.admin_class.move_plugin(request).status_code, HttpResponseForbidden.status_code)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.pk, 'plugin_parent': '', 'plugin_language': 'en'})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
with self.login_user_context(permless):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.id, 'plugin_parent': '', 'plugin_language': 'en'})
self.assertEqual(self.admin_class.move_plugin(request).status_code, HttpResponseForbidden.status_code)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.id, 'plugin_parent': '', 'plugin_language': 'en'})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
def test_move_language(self):
page = self.get_page()
source, target = list(page.placeholders.all())[:2]
col = add_plugin(source, 'MultiColumnPlugin', 'en')
sub_col = add_plugin(source, 'ColumnPlugin', 'en', target=col)
col2 = add_plugin(source, 'MultiColumnPlugin', 'de')
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': sub_col.pk,
'placeholder_id': source.id, 'plugin_parent': col2.pk, 'plugin_language': 'de'})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
sub_col = CMSPlugin.objects.get(pk=sub_col.pk)
self.assertEqual(sub_col.language, "de")
self.assertEqual(sub_col.parent_id, col2.pk)
def test_preview_page(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
self.assertRaises(Http404, self.admin_class.preview_page, request, 404, "en")
page = self.get_page()
page.publish("en")
base_url = page.get_absolute_url()
with self.login_user_context(permless):
request = self.get_request('/?public=true')
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
request = self.get_request()
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
current_site = Site.objects.create(domain='django-cms.org', name='django-cms')
page.site = current_site
page.save()
page.publish("en")
self.assertTrue(page.is_home)
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'],
'http://django-cms.org%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
def test_too_many_plugins_global(self):
conf = {
'body': {
'limits': {
'global': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with self.settings(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_too_many_plugins_type(self):
conf = {
'body': {
'limits': {
'TextPlugin': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with self.settings(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_edit_title_dirty_bit(self):
language = "en"
admin_user = self.get_admin()
page = create_page('A', 'nav_playground.html', language)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
page.publish("en")
draft_page = page.get_draft_object()
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
draft_page.pk, language
))
post_data = {
'title': "A Title"
}
with self.login_user_context(admin_user):
self.client.post(admin_url, post_data)
draft_page = Page.objects.get(pk=page.pk).get_draft_object()
self.assertTrue(draft_page.is_dirty('en'))
def test_edit_title_languages(self):
language = "en"
admin_user = self.get_admin()
page = create_page('A', 'nav_playground.html', language)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
page.publish("en")
draft_page = page.get_draft_object()
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
draft_page.pk, language
))
post_data = {
'title': "A Title"
}
with self.login_user_context(admin_user):
self.client.post(admin_url, post_data)
draft_page = Page.objects.get(pk=page.pk).get_draft_object()
self.assertTrue(draft_page.is_dirty('en'))
def test_page_form_leak(self):
language = "en"
admin_user = self.get_admin()
request = self.get_request('/', 'en')
request.user = admin_user
page = create_page('A', 'nav_playground.html', language, menu_title='menu title')
page_admin = PageAdmin(Page, site)
page_admin._current_page = page
edit_form = page_admin.get_form(request, page)
add_form = page_admin.get_form(request, None)
self.assertEqual(edit_form.base_fields['menu_title'].initial, 'menu title')
self.assertEqual(add_form.base_fields['menu_title'].initial, None)
class NoDBAdminTests(CMSTestCase):
@property
def admin_class(self):
return site._registry[Page]
def test_lookup_allowed_site__exact(self):
self.assertTrue(self.admin_class.lookup_allowed('site__exact', '1'))
def test_lookup_allowed_published(self):
self.assertTrue(self.admin_class.lookup_allowed('published', value='1'))
class PluginPermissionTests(AdminTestsBase):
def setUp(self):
self._page = create_page('test page', 'nav_playground.html', 'en')
self._placeholder = self._page.placeholders.all()[0]
def _get_admin(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True, is_active=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "admin"
admin_user = User(**fields)
admin_user.set_password('admin')
admin_user.save()
return admin_user
def _get_page_admin(self):
return admin.site._registry[Page]
def _give_permission(self, user, model, permission_type, save=True):
codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
user.user_permissions.add(Permission.objects.get(codename=codename))
def _give_page_permission_rights(self, user):
self._give_permission(user, PagePermission, 'add')
self._give_permission(user, PagePermission, 'change')
self._give_permission(user, PagePermission, 'delete')
def _get_change_page_request(self, user, page):
return type('Request', (object,), {
'user': user,
'path': base.URL_CMS_PAGE_CHANGE % page.pk
})
def _give_cms_permissions(self, user, save=True):
for perm_type in ['add', 'change', 'delete']:
for model in [Page, Title]:
self._give_permission(user, model, perm_type, False)
gpp = GlobalPagePermission.objects.create(
user=user,
can_change=True,
can_delete=True,
can_change_advanced_settings=False,
can_publish=True,
can_change_permissions=False,
can_move_page=True,
)
gpp.sites = Site.objects.all()
if save:
user.save()
def _create_plugin(self):
plugin = add_plugin(self._placeholder, 'TextPlugin', 'en')
return plugin
def test_plugin_add_requires_permissions(self):
"""User tries to add a plugin but has no permissions. He can add the plugin after he got the permissions"""
admin = self._get_admin()
self._give_cms_permissions(admin)
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='admin')
else:
self.client.login(username='admin', password='admin')
url = admin_reverse('cms_page_add_plugin')
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': self._placeholder.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
self._give_permission(admin, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_edit_requires_permissions(self):
"""User tries to edit a plugin but has no permissions. He can edit the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_edit_plugin', args=[plugin.id])
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# Once granted the permission, the plugin can be edited
self._give_permission(normal_guy, Text, 'change')
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_edit_wrong_url(self):
"""User tries to edit a plugin using a random url. 404 response returned"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
self._give_permission(normal_guy, Text, 'change')
url = '%s/edit-plugin/%s/' % (admin_reverse('cms_page_edit_plugin', args=[plugin.id]), plugin.id)
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponseNotFound.status_code)
self.assertTrue("Plugin not found" in force_text(response.content))
def test_plugin_remove_requires_permissions(self):
"""User tries to remove a plugin but has no permissions. He can remove the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_delete_plugin', args=[plugin.pk])
data = dict(plugin_id=plugin.id)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# Once granted the permission, the plugin can be removed
self._give_permission(normal_guy, Text, 'delete')
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
def test_plugin_move_requires_permissions(self):
"""User tries to move a plugin but has no permissions. He can move the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_move_plugin')
data = dict(plugin_id=plugin.id,
placeholder_id=self._placeholder.pk,
plugin_parent='',
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# Once granted the permission, the plugin can be moved
self._give_permission(normal_guy, Text, 'change')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugins_copy_requires_permissions(self):
"""User tries to copy plugin but has no permissions. He can copy plugins after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_copy_plugins')
data = dict(source_plugin_id=plugin.id,
source_placeholder_id=self._placeholder.pk,
source_language='en',
target_language='fr',
target_placeholder_id=self._placeholder.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# Once granted the permission, the plugins can be copied
self._give_permission(normal_guy, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugins_copy_placeholder_ref(self):
"""User copies a placeholder into a clipboard. A PlaceholderReferencePlugin is created. Afterwards he copies this
into a placeholder and the PlaceholderReferencePlugin unpacks its content. After that he clear the clipboard"""
self.assertEqual(Placeholder.objects.count(), 2)
self._create_plugin()
self._create_plugin()
admin_user = self.get_superuser()
clipboard = Placeholder()
clipboard.save()
self.assertEqual(CMSPlugin.objects.count(), 2)
settings = UserSettings(language="fr", clipboard=clipboard, user=admin_user)
settings.save()
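# UserSettings ties the clipboard placeholder to the admin user; the CMS
# toolbar normally sets this up on first use.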
self.assertEqual(Placeholder.objects.count(), 3)
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='admin', password='admin')
url = admin_reverse('cms_page_copy_plugins')
data = dict(source_plugin_id='',
source_placeholder_id=self._placeholder.pk,
source_language='en',
target_language='en',
target_placeholder_id=clipboard.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
clipboard_plugins = clipboard.get_plugins()
self.assertEqual(CMSPlugin.objects.count(), 5)
self.assertEqual(clipboard_plugins.count(), 1)
self.assertEqual(clipboard_plugins[0].plugin_type, "PlaceholderPlugin")
placeholder_plugin, _ = clipboard_plugins[0].get_plugin_instance()
ref_placeholder = placeholder_plugin.placeholder_ref
copied_plugins = ref_placeholder.get_plugins()
self.assertEqual(copied_plugins.count(), 2)
data = dict(source_plugin_id=placeholder_plugin.pk,
source_placeholder_id=clipboard.pk,
source_language='en',
target_language='fr',
target_placeholder_id=self._placeholder.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
plugins = self._placeholder.get_plugins()
self.assertEqual(plugins.count(), 4)
self.assertEqual(CMSPlugin.objects.count(), 7)
self.assertEqual(Placeholder.objects.count(), 4)
url = admin_reverse('cms_page_clear_placeholder', args=[clipboard.pk])
with self.assertNumQueries(FuzzyInt(70, 80)):
response = self.client.post(url, {'test': 0})
self.assertEqual(response.status_code, 302)
self.assertEqual(CMSPlugin.objects.count(), 4)
self.assertEqual(Placeholder.objects.count(), 3)
def test_plugins_copy_language(self):
"""User tries to copy plugin but has no permissions. He can copy plugins after he got the permissions"""
self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD != 'email':
self.client.login(username='test', password='test')
else:
self.client.login(username='[email protected]', password='[email protected]')
self.assertEqual(1, CMSPlugin.objects.all().count())
url = admin_reverse('cms_page_copy_language', args=[self._page.pk])
data = dict(
source_language='en',
target_language='fr',
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# Once granted the permission, the plugins can be copied
self._give_permission(normal_guy, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
self.assertEqual(2, CMSPlugin.objects.all().count())
def test_page_permission_inline_visibility(self):
User = get_user_model()
fields = dict(email='[email protected]', password='user', is_staff=True)
if get_user_model().USERNAME_FIELD != 'email':
fields[get_user_model().USERNAME_FIELD] = 'user'
user = User(**fields)
user.save()
self._give_page_permission_rights(user)
page = create_page('A', 'nav_playground.html', 'en')
page_permission = PagePermission.objects.create(
can_change_permissions=True, user=user, page=page)
request = self._get_change_page_request(user, page)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
# user has can_change_permission
# => must see the PagePermissionInline
self.assertTrue(
any(type(inline) is PagePermissionInlineAdmin
for inline in page_admin.get_inline_instances(request, page)))
page = Page.objects.get(pk=page.pk)
# remove can_change_permission
page_permission.can_change_permissions = False
page_permission.save()
request = self._get_change_page_request(user, page)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
# => PagePermissionInline is no longer visible
self.assertFalse(
any(type(inline) is PagePermissionInlineAdmin
for inline in page_admin.get_inline_instances(request, page)))
def test_edit_title_is_allowed_for_staff_user(self):
"""
We check here both the permission on a single page, and the global permissions
"""
user = self._create_user('user', is_staff=True)
another_user = self._create_user('another_user', is_staff=True)
page = create_page('A', 'nav_playground.html', 'en')
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
page.pk, 'en'
))
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
username = getattr(user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
assign_user_to_page(page, user, grant_all=True)
username = getattr(user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponse.status_code)
self._give_cms_permissions(another_user)
username = getattr(another_user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_add_returns_valid_pk_for_plugin(self):
admin_user = self._get_admin()
self._give_cms_permissions(admin_user)
self._give_permission(admin_user, Text, 'add')
username = getattr(admin_user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password='admin')
url = admin_reverse('cms_page_add_plugin')
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': self._placeholder.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
self.assertEqual(response['content-type'], 'application/json')
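# The JSON response embeds an "edit-plugin/<pk>/" URL; pull the pk out of
# it to verify that the plugin row really exists.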
pk = response.content.decode('utf8').split("edit-plugin/")[1].split("/")[0]
self.assertTrue(CMSPlugin.objects.filter(pk=int(pk)).exists())
class AdminFormsTests(AdminTestsBase):
def test_clean_overwrite_url(self):
user = AnonymousUser()
user.is_superuser = True
user.pk = 1
request = type('Request', (object,), {'user': user})
with self.settings():
data = {
'title': 'TestPage',
'slug': 'test-page',
'language': 'en',
'overwrite_url': '/overwrite/url/',
'site': Site.objects.get_current().pk,
'template': get_cms_setting('TEMPLATES')[0][0],
'published': True
}
form = PageForm(data)
self.assertTrue(form.is_valid(), form.errors.as_text())
instance = form.save()
instance.permission_user_cache = user
instance.permission_advanced_settings_cache = True
Title.objects.set_or_create(request, instance, form, 'en')
form = PageForm(data, instance=instance)
self.assertTrue(form.is_valid(), form.errors.as_text())
def test_mismatching_site_parent_dotsite(self):
site0 = Site.objects.create(domain='foo.com', name='foo.com')
site1 = Site.objects.create(domain='foo.com', name='foo.com')
parent_page = Page.objects.create(
template='nav_playground.html',
site=site0)
new_page_data = {
'title': 'Title',
'slug': 'slug',
'language': 'en',
'site': site1.pk,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent': parent_page.pk,
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
self.assertIn(u"Site doesn't match the parent's page site",
form.errors['__all__'])
def test_form_errors(self):
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'en',
'site': 10,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
site0 = Site.objects.create(domain='foo.com', name='foo.com')
page1 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "fr", site=site0)
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent': page1.pk,
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
new_page_data = {
'title': 'Title',
'slug': '#',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'pp',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
page2 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "en")
new_page_data = {
'title': 'Title',
'slug': 'test',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
page3 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "en", parent=page2)
page3.title_set.update(path="hello/")
page3 = page3.reload()
new_page_data = {
'title': 'Title',
'slug': 'test',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None, instance=page3)
self.assertFalse(form.is_valid())
def test_reverse_id_error_location(self):
''' Test moving the reverse_id validation error to a field specific one '''
# this is the Reverse ID we'll re-use to break things.
dupe_id = 'p1'
current_site = Site.objects.get_current()
create_page('Page 1', 'nav_playground.html', 'en', reverse_id=dupe_id)
page2 = create_page('Page 2', 'nav_playground.html', 'en')
# Assemble a bunch of data to test the page form
page2_data = {
'language': 'en',
'site': current_site.pk,
'reverse_id': dupe_id,
'template': 'col_two.html',
}
form = AdvancedSettingsForm(data=page2_data, files=None)
self.assertFalse(form.is_valid())
# reverse_id is the only item that is in __all__ as every other field
# has its own clean method. Moving it to be a field error means
# __all__ is now not available.
self.assertNotIn('__all__', form.errors)
# In moving it to its own field, it should be in form.errors, and
# the values contained therein should match these.
self.assertIn('reverse_id', form.errors)
self.assertEqual(1, len(form.errors['reverse_id']))
self.assertEqual([u'A page with this reverse URL id exists already.'],
form.errors['reverse_id'])
page2_data['reverse_id'] = ""
form = AdvancedSettingsForm(data=page2_data, files=None)
self.assertTrue(form.is_valid())
admin_user = self._get_guys(admin_only=True)
# reset some of page2_data so we can use cms.api.create_page
page2 = page2.reload()
page2.site = current_site
page2.save()
with self.login_user_context(admin_user):
# re-reset the page2_data for the admin form instance.
page2_data['reverse_id'] = dupe_id
page2_data['site'] = current_site.pk
# post to the admin change form for page 2, and test that the
# reverse_id form row has an errors class. Django's admin avoids
# collapsing these, so that the error is visible.
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page2_data)
self.assertContains(resp, '<div class="form-row errors reverse_id">')
def test_create_page_type(self):
page = create_page('Test', 'static.html', 'en', published=True, reverse_id="home")
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
page.publish('en')
self.assertEqual(Page.objects.count(), 2)
self.assertEqual(CMSPlugin.objects.count(), 4)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get(
"%s?copy_target=%s&language=%s" % (admin_reverse("cms_page_add_page_type"), page.pk, 'en'))
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 3)
self.assertEqual(Page.objects.filter(reverse_id="page_types").count(), 1)
page_types = Page.objects.get(reverse_id='page_types')
url = response.url if hasattr(response, 'url') else response['Location']
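# Compare query strings as QueryDicts so the assertion is independent of
# parameter ordering in the redirect URL.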
expected_url_params = QueryDict(
'target=%s&position=first-child&add_page_type=1©_target=%s&language=en' % (page_types.pk, page.pk))
response_url_params = QueryDict(urlparse(url).query)
self.assertDictEqual(expected_url_params, response_url_params)
response = self.client.get("%s?copy_target=%s&language=%s" % (
admin_reverse("cms_page_add_page_type"), page.pk, 'en'), follow=True)
self.assertEqual(response.status_code, 200)
# no page types are offered when none exist yet
response = self.client.get(admin_reverse('cms_page_add'))
self.assertNotContains(response, "page_type")
# create our first page type
page_data = {
'title': 'type1', 'slug': 'type1', '_save': 1, 'template': 'static.html', 'site': 1,
'language': 'en'
}
response = self.client.post(
"/en/admin/cms/page/add/?target=%s&position=first-child&add_page_type=1©_target=%s&language=en" % (
page_types.pk, page.pk), data=page_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 4)
self.assertEqual(CMSPlugin.objects.count(), 6)
response = self.client.get(admin_reverse('cms_page_add'))
self.assertContains(response, "page_type")
# no page types available if you use the copy_target
response = self.client.get("%s?copy_target=%s&language=en" % (admin_reverse('cms_page_add'), page.pk))
self.assertNotContains(response, "page_type")
def test_render_edit_mode(self):
from django.core.cache import cache
cache.clear()
create_page('Test', 'static.html', 'en', published=True)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
user = self.get_superuser()
self.assertEqual(Placeholder.objects.all().count(), 4)
with self.login_user_context(user):
with self.assertNumQueries(FuzzyInt(40, 66)):
output = force_text(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
self.assertIn('<b>Test</b>', output)
self.assertEqual(Placeholder.objects.all().count(), 9)
self.assertEqual(StaticPlaceholder.objects.count(), 2)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
with self.assertNumQueries(FuzzyInt(40, 72)):
output = force_text(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
self.assertIn('<b>Test</b>', output)
with self.assertNumQueries(FuzzyInt(18, 45)):
force_text(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
with self.assertNumQueries(FuzzyInt(11, 29)):
force_text(self.client.get('/en/').content)
def test_tree_view_queries(self):
from django.core.cache import cache
cache.clear()
for i in range(10):
create_page('Test%s' % i, 'col_two.html', 'en', published=True)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
user = self.get_superuser()
with self.login_user_context(user):
with self.assertNumQueries(FuzzyInt(18, 33)):
force_text(self.client.get('/en/admin/cms/page/'))
def test_smart_link_published_pages(self):
admin, staff_guy = self._get_guys()
page_url = '/en/admin/cms/page/published-pages/' # Not sure how to achieve this with reverse...
with self.login_user_context(staff_guy):
multi_title_page = create_page('main_title', 'col_two.html', 'en', published=True,
overwrite_url='overwritten_url',
menu_title='menu_title')
title = multi_title_page.get_title_obj()
title.page_title = 'page_title'
title.save()
multi_title_page.save()
publish_page(multi_title_page, admin, 'en')
# A non-AJAX call should return a 403, since this view should only be accessed via AJAX requests
self.assertEqual(403, self.client.get(page_url).status_code)
self.assertEqual(200,
self.client.get(page_url, HTTP_X_REQUESTED_WITH='XMLHttpRequest').status_code
)
# Test that the query param is working as expected.
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'main_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'menu_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'overwritten_url'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'page_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
class AdminPageEditContentSizeTests(AdminTestsBase):
"""
The number of system users influences the size of the page edit view,
but each username appears only twice on the page.
The test relates to extra=0 on PagePermissionInlineAdminForm and
ViewRestrictionInlineAdmin.
"""
def test_editpage_contentsize(self):
"""
Expect each username to appear only twice in the content, while the
overall page size grows with the number of users
"""
with self.settings(CMS_PERMISSION=True):
admin_user = self.get_superuser()
PAGE_NAME = 'TestPage'
USER_NAME = 'test_size_user_0'
current_site = Site.objects.get(pk=1)
page = create_page(PAGE_NAME, "nav_playground.html", "en", site=current_site, created_by=admin_user)
page.save()
self._page = page
with self.login_user_context(admin_user):
url = base.URL_CMS_PAGE_PERMISSION_CHANGE % self._page.pk
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
old_response_size = len(response.content)
old_user_count = get_user_model().objects.count()
# create an additional user and reload the page
get_user_model().objects.create_user(username=USER_NAME, email=USER_NAME + '@django-cms.org',
password=USER_NAME)
user_count = get_user_model().objects.count()
more_users_in_db = old_user_count < user_count
# we have more users
self.assertTrue(more_users_in_db, "New user was not created")
response = self.client.get(url)
new_response_size = len(response.content)
page_size_grown = old_response_size < new_response_size
# expect the page size to grow with the number of users in the system
self.assertTrue(page_size_grown, "Page size has not grown after user creation")
# the username appears only twice in the content
text = smart_str(response.content, response._charset)
foundcount = text.count(USER_NAME)
# 2 forms contain usernames as options
self.assertEqual(foundcount, 2,
"Username %s appeared %s times in response.content, expected 2 times" % (
USER_NAME, foundcount))
| bsd-3-clause | -2,511,381,197,464,639,000 | 45.244726 | 129 | 0.597041 | false |
delighted/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/handlers/svnrevision.py | 143 | 1923 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp
import model
class SVNRevision(webapp.RequestHandler):
def get(self, svn_revision_number):
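# Fetch the most recent record for this revision number; more than one
# row can exist if the same revision was reported multiple times.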
svn_revisions = model.SVNRevision.all().filter('number =', int(svn_revision_number)).order('-date').fetch(1)
if not svn_revisions:
self.error(404)
return
self.response.out.write(svn_revisions[0].to_xml())
| bsd-3-clause | -8,009,478,829,172,260,000 | 47.075 | 116 | 0.75715 | false |
Hybrid-Cloud/badam | patches_tool/aws_patch/aws_deps/libcloud/test/dns/test_linode.py | 1 | 14072 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.common.linode import LinodeException
from libcloud.dns.types import RecordType, ZoneDoesNotExistError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.drivers.linode import LinodeDNSDriver
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_LINODE
class LinodeTests(unittest.TestCase):
def setUp(self):
LinodeDNSDriver.connectionCls.conn_classes = (
None, LinodeMockHttp)
LinodeMockHttp.use_param = 'api_action'
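# Linode's legacy API exposes a single endpoint and dispatches on the
# 'api_action' query parameter, so the mock routes requests by it as well.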
LinodeMockHttp.type = None
self.driver = LinodeDNSDriver(*DNS_PARAMS_LINODE)
def assertHasKeys(self, dictionary, keys):
for key in keys:
self.assertTrue(key in dictionary, 'key "%s" not in dictionary' %
(key))
def test_list_record_types(self):
record_types = self.driver.list_record_types()
self.assertEqual(len(record_types), 7)
self.assertTrue(RecordType.A in record_types)
def test_list_zones_success(self):
zones = self.driver.list_zones()
self.assertEqual(len(zones), 2)
zone = zones[0]
self.assertEqual(zone.id, '5093')
self.assertEqual(zone.type, 'master')
self.assertEqual(zone.domain, 'linode.com')
self.assertEqual(zone.ttl, None)
self.assertHasKeys(zone.extra, ['description', 'SOA_Email', 'status'])
def test_list_records_success(self):
zone = self.driver.list_zones()[0]
records = self.driver.list_records(zone=zone)
self.assertEqual(len(records), 2)
arecord = records[0]
self.assertEqual(arecord.id, '3585100')
self.assertEqual(arecord.name, 'mc')
self.assertEqual(arecord.type, RecordType.A)
self.assertEqual(arecord.data, '127.0.0.1')
self.assertHasKeys(arecord.extra, ['protocol', 'ttl_sec', 'port',
'weight'])
def test_list_records_zone_does_not_exist(self):
zone = self.driver.list_zones()[0]
LinodeMockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.list_records(zone=zone)
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, zone.id)
else:
self.fail('Exception was not thrown')
def test_get_zone_success(self):
LinodeMockHttp.type = 'GET_ZONE'
zone = self.driver.get_zone(zone_id='5093')
self.assertEqual(zone.id, '5093')
self.assertEqual(zone.type, 'master')
self.assertEqual(zone.domain, 'linode.com')
self.assertEqual(zone.ttl, None)
self.assertHasKeys(zone.extra, ['description', 'SOA_Email', 'status'])
def test_get_zone_does_not_exist(self):
LinodeMockHttp.type = 'GET_ZONE_DOES_NOT_EXIST'
try:
self.driver.get_zone(zone_id='4444')
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, '4444')
else:
self.fail('Exception was not thrown')
def test_get_record_success(self):
LinodeMockHttp.type = 'GET_RECORD'
record = self.driver.get_record(zone_id='1234', record_id='3585100')
self.assertEqual(record.id, '3585100')
self.assertEqual(record.name, 'www')
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '127.0.0.1')
self.assertHasKeys(record.extra, ['protocol', 'ttl_sec', 'port',
'weight'])
def test_get_record_zone_does_not_exist(self):
LinodeMockHttp.type = 'GET_RECORD_ZONE_DOES_NOT_EXIST'
try:
self.driver.get_record(zone_id='444', record_id='3585100')
except ZoneDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_record_record_does_not_exist(self):
LinodeMockHttp.type = 'GET_RECORD_RECORD_DOES_NOT_EXIST'
try:
self.driver.get_record(zone_id='4441', record_id='3585100')
except RecordDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_create_zone_success(self):
zone = self.driver.create_zone(domain='foo.bar.com', type='master',
ttl=None, extra=None)
self.assertEqual(zone.id, '5094')
self.assertEqual(zone.domain, 'foo.bar.com')
def test_create_zone_validaton_error(self):
LinodeMockHttp.type = 'VALIDATION_ERROR'
try:
self.driver.create_zone(domain='foo.bar.com', type='master',
ttl=None, extra=None)
except LinodeException:
pass
else:
self.fail('Exception was not thrown')
def test_update_zone_success(self):
zone = self.driver.list_zones()[0]
updated_zone = self.driver.update_zone(zone=zone,
domain='libcloud.org',
ttl=10,
extra={'SOA_Email':
'[email protected]'})
self.assertEqual(zone.extra['SOA_Email'], '[email protected]')
self.assertEqual(updated_zone.id, zone.id)
self.assertEqual(updated_zone.domain, 'libcloud.org')
self.assertEqual(updated_zone.type, zone.type)
self.assertEqual(updated_zone.ttl, 10)
self.assertEqual(updated_zone.extra['SOA_Email'], '[email protected]')
self.assertEqual(updated_zone.extra['status'], zone.extra['status'])
self.assertEqual(updated_zone.extra['description'],
zone.extra['description'])
def test_create_record_success(self):
zone = self.driver.list_zones()[0]
record = self.driver.create_record(name='www', zone=zone,
type=RecordType.A, data='127.0.0.1')
self.assertEqual(record.id, '3585100')
self.assertEqual(record.name, 'www')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '127.0.0.1')
def test_update_record_success(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[0]
updated_record = self.driver.update_record(record=record, name='www',
type=RecordType.AAAA,
data='::1')
self.assertEqual(record.data, '127.0.0.1')
self.assertEqual(updated_record.id, record.id)
self.assertEqual(updated_record.name, 'www')
self.assertEqual(updated_record.zone, record.zone)
self.assertEqual(updated_record.type, RecordType.AAAA)
self.assertEqual(updated_record.data, '::1')
def test_delete_zone_success(self):
zone = self.driver.list_zones()[0]
status = self.driver.delete_zone(zone=zone)
self.assertTrue(status)
def test_delete_zone_does_not_exist(self):
zone = self.driver.list_zones()[0]
LinodeMockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.delete_zone(zone=zone)
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, zone.id)
else:
self.fail('Exception was not thrown')
def test_delete_record_success(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[0]
status = self.driver.delete_record(record=record)
self.assertTrue(status)
def test_delete_record_does_not_exist(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[0]
LinodeMockHttp.type = 'RECORD_DOES_NOT_EXIST'
try:
self.driver.delete_record(record=record)
except RecordDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.record_id, record.id)
else:
self.fail('Exception was not thrown')
class LinodeMockHttp(MockHttp):
fixtures = DNSFileFixtures('linode')
def _domain_list(self, method, url, body, headers):
body = self.fixtures.load('domain_list.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _domain_resource_list(self, method, url, body, headers):
body = self.fixtures.load('resource_list.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ZONE_DOES_NOT_EXIST_domain_resource_list(self, method, url, body,
headers):
body = self.fixtures.load('resource_list_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_ZONE_domain_list(self, method, url, body, headers):
body = self.fixtures.load('get_zone.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_ZONE_DOES_NOT_EXIST_domain_list(self, method, url, body,
headers):
body = self.fixtures.load('get_zone_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_RECORD_domain_list(self, method, url, body, headers):
body = self.fixtures.load('get_zone.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_RECORD_domain_resource_list(self, method, url, body, headers):
body = self.fixtures.load('get_record.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_RECORD_ZONE_DOES_NOT_EXIST_domain_list(self, method, url, body,
headers):
body = self.fixtures.load('get_zone_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_RECORD_ZONE_DOES_NOT_EXIST_domain_resource_list(self, method, url,
body, headers):
body = self.fixtures.load('get_record_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_RECORD_RECORD_DOES_NOT_EXIST_domain_list(self, method, url, body,
headers):
body = self.fixtures.load('get_zone.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_RECORD_RECORD_DOES_NOT_EXIST_domain_resource_list(self, method,
url, body,
headers):
body = self.fixtures.load('get_record_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _domain_create(self, method, url, body, headers):
body = self.fixtures.load('create_domain.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _VALIDATION_ERROR_domain_create(self, method, url, body, headers):
body = self.fixtures.load('create_domain_validation_error.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _domain_update(self, method, url, body, headers):
body = self.fixtures.load('update_domain.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _domain_resource_create(self, method, url, body, headers):
body = self.fixtures.load('create_resource.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _domain_resource_update(self, method, url, body, headers):
body = self.fixtures.load('update_resource.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _domain_delete(self, method, url, body, headers):
body = self.fixtures.load('delete_domain.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ZONE_DOES_NOT_EXIST_domain_delete(self, method, url, body, headers):
body = self.fixtures.load('delete_domain_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _domain_resource_delete(self, method, url, body, headers):
body = self.fixtures.load('delete_resource.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _RECORD_DOES_NOT_EXIST_domain_resource_delete(self, method, url, body,
headers):
body = self.fixtures.load('delete_resource_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 | 7,821,658,460,038,013,000 | 40.772036 | 79 | 0.592382 | false |
mlperf/training_results_v0.7 | Google/benchmarks/maskrcnn/implementations/maskrcnn-research-TF-tpu-v4-128/object_detection/balanced_positive_negative_sampler.py | 2 | 11991 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to subsample minibatches by balancing positives and negatives.
Subsamples minibatches based on a pre-specified positive fraction in range
[0,1]. The class presumes there are many more negatives than positive examples:
if the desired batch_size cannot be achieved with the pre-specified positive
fraction, it fills the rest with negative examples. If this is not sufficient
for obtaining the desired batch_size, it returns fewer examples.
The main function to call is Subsample(self, indicator, labels). For convenience
one can also call SubsampleWeights(self, weights, labels) which is defined in
the minibatch_sampler base class.
When is_static is True, it implements a method that guarantees static shapes.
It also ensures the length of output of the subsample is always batch_size, even
when number of examples set to True in indicator is less than batch_size.
This is originally implemented in TensorFlow Object Detection API.
"""
import tensorflow.compat.v1 as tf
from REDACTED.tensorflow_models.mlperf.models.rough.mask_rcnn.object_detection import minibatch_sampler
from REDACTED.tensorflow_models.mlperf.models.rough.mask_rcnn.object_detection import ops
class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler):
"""Subsamples minibatches to a desired balance of positives and negatives."""
def __init__(self, positive_fraction=0.5, is_static=False):
"""Constructs a minibatch sampler.
Args:
positive_fraction: desired fraction of positive examples (scalar in [0,1])
in the batch.
is_static: If True, uses an implementation with static shape guarantees.
Raises:
ValueError: if positive_fraction < 0, or positive_fraction > 1
"""
if positive_fraction < 0 or positive_fraction > 1:
raise ValueError('positive_fraction should be in range [0,1]. '
'Received: %s.' % positive_fraction)
self._positive_fraction = positive_fraction
self._is_static = is_static
def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size):
"""Counts the number of positives and negatives numbers to be sampled.
Args:
sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains
the signed indices of the examples where the sign is based on the label
        value. The examples that cannot be sampled are set to 0. It samples
        at most sample_size*positive_fraction positive examples and fills the
        remainder with negative examples.
sample_size: Size of subsamples.
Returns:
A tuple containing the number of positive and negative labels in the
subsample.
"""
input_length = tf.shape(sorted_indices_tensor)[0]
valid_positive_index = tf.greater(sorted_indices_tensor,
tf.zeros(input_length, tf.int32))
num_sampled_pos = tf.reduce_sum(tf.cast(valid_positive_index, tf.int32))
max_num_positive_samples = tf.constant(
int(sample_size * self._positive_fraction), tf.int32)
num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos)
num_negative_samples = tf.constant(sample_size,
tf.int32) - num_positive_samples
return num_positive_samples, num_negative_samples
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
num_end_samples, total_num_samples):
"""slices num_start_samples and last num_end_samples from input_tensor.
Args:
input_tensor: An int32 tensor of shape [N] to be sliced.
num_start_samples: Number of examples to be sliced from the beginning
of the input tensor.
num_end_samples: Number of examples to be sliced from the end of the
input tensor.
      total_num_samples: Sum of num_start_samples and num_end_samples. This
should be a scalar.
Returns:
A tensor containing the first num_start_samples and last num_end_samples
from input_tensor.
"""
input_length = tf.shape(input_tensor)[0]
start_positions = tf.less(tf.range(input_length), num_start_samples)
end_positions = tf.greater_equal(
tf.range(input_length), input_length - num_end_samples)
selected_positions = tf.logical_or(start_positions, end_positions)
selected_positions = tf.cast(selected_positions, tf.float32)
indexed_positions = tf.multiply(tf.cumsum(selected_positions),
selected_positions)
one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
total_num_samples,
dtype=tf.float32)
return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
one_hot_selector, axes=[0, 0]), tf.int32)
def _static_subsample(self, indicator, batch_size, labels):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
        N should be a compile-time constant.
batch_size: desired batch size. This scalar cannot be None.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
        (=False) examples. N should be a compile-time constant.
Returns:
sampled_idx_indicator: boolean tensor of shape [N], True for entries which
are sampled. It ensures the length of output of the subsample is always
batch_size, even when number of examples set to True in indicator is
less than batch_size.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
# Check if indicator and labels have a static size.
if not indicator.shape.is_fully_defined():
      raise ValueError('indicator must be static in shape when is_static is'
                       ' True')
    if not labels.shape.is_fully_defined():
      raise ValueError('labels must be static in shape when is_static is'
                       ' True')
    if not isinstance(batch_size, int):
      raise ValueError('batch_size has to be an integer when is_static is'
                       ' True.')
input_length = tf.shape(indicator)[0]
# Set the number of examples set True in indicator to be at least
# batch_size.
num_true_sampled = tf.reduce_sum(tf.cast(indicator, tf.float32))
additional_false_sample = tf.less_equal(
tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)),
batch_size - num_true_sampled)
indicator = tf.logical_or(indicator, additional_false_sample)
# Shuffle indicator and label. Need to store the permutation to restore the
# order post sampling.
permutation = tf.random_shuffle(tf.range(input_length))
indicator = ops.matmul_gather_on_zeroth_axis(
tf.cast(indicator, tf.float32), permutation)
labels = ops.matmul_gather_on_zeroth_axis(
tf.cast(labels, tf.float32), permutation)
# index (starting from 1) when indicator is True, 0 when False
indicator_idx = tf.where(
tf.cast(indicator, tf.bool), tf.range(1, input_length + 1),
tf.zeros(input_length, tf.int32))
# Replace -1 for negative, +1 for positive labels
signed_label = tf.where(
tf.cast(labels, tf.bool), tf.ones(input_length, tf.int32),
tf.scalar_mul(-1, tf.ones(input_length, tf.int32)))
# negative of index for negative label, positive index for positive label,
# 0 when indicator is False.
signed_indicator_idx = tf.multiply(indicator_idx, signed_label)
sorted_signed_indicator_idx = tf.nn.top_k(
signed_indicator_idx, input_length, sorted=True).values
[num_positive_samples,
num_negative_samples] = self._get_num_pos_neg_samples(
sorted_signed_indicator_idx, batch_size)
sampled_idx = self._get_values_from_start_and_end(
sorted_signed_indicator_idx, num_positive_samples,
num_negative_samples, batch_size)
# Shift the indices to start from 0 and remove any samples that are set as
# False.
sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32)
sampled_idx = tf.multiply(
tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32),
sampled_idx)
sampled_idx_indicator = tf.cast(tf.reduce_sum(
tf.one_hot(sampled_idx, depth=input_length),
axis=0), tf.bool)
# project back the order based on stored permutations
reprojections = tf.one_hot(permutation, depth=input_length,
dtype=tf.float32)
return tf.cast(tf.tensordot(
tf.cast(sampled_idx_indicator, tf.float32),
reprojections, axes=[0, 0]), tf.bool)
def subsample(self, indicator, batch_size, labels, scope=None):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size. If None, keeps all positive samples and
randomly selects negative samples so that the positive sample fraction
        matches self._positive_fraction. It cannot be None if is_static is True.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
(=False) examples.
scope: name scope.
Returns:
sampled_idx_indicator: boolean tensor of shape [N], True for entries which
are sampled.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
if len(indicator.get_shape().as_list()) != 1:
raise ValueError('indicator must be 1 dimensional, got a tensor of '
'shape %s' % indicator.get_shape())
if len(labels.get_shape().as_list()) != 1:
raise ValueError('labels must be 1 dimensional, got a tensor of '
'shape %s' % labels.get_shape())
if labels.dtype != tf.bool:
raise ValueError('labels should be of type bool. Received: %s' %
labels.dtype)
if indicator.dtype != tf.bool:
raise ValueError('indicator should be of type bool. Received: %s' %
indicator.dtype)
with tf.name_scope(scope, 'BalancedPositiveNegativeSampler'):
if self._is_static:
return self._static_subsample(indicator, batch_size, labels)
else:
# Only sample from indicated samples
negative_idx = tf.logical_not(labels)
positive_idx = tf.logical_and(labels, indicator)
negative_idx = tf.logical_and(negative_idx, indicator)
# Sample positive and negative samples separately
if batch_size is None:
max_num_pos = tf.reduce_sum(tf.to_int32(positive_idx))
else:
max_num_pos = int(self._positive_fraction * batch_size)
sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos)
num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32))
if batch_size is None:
negative_positive_ratio = (
1 - self._positive_fraction) / self._positive_fraction
max_num_neg = tf.to_int32(
negative_positive_ratio * tf.to_float(num_sampled_pos))
else:
max_num_neg = batch_size - num_sampled_pos
sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg)
return tf.logical_or(sampled_pos_idx, sampled_neg_idx)
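# Example usage (sketch; tensor contents are illustrative, not taken from any
# benchmark config):
#
#   sampler = BalancedPositiveNegativeSampler(positive_fraction=0.25)
#   # `indicator` marks which of the N anchors may be sampled at all;
#   # `labels` marks which anchors matched a ground-truth box.
#   sampled = sampler.subsample(indicator, 256, labels)
#   # `sampled` is a length-N bool tensor selecting at most 64 positives
#   # (0.25 * 256) and filling the remaining slots with negatives.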
| apache-2.0 | -770,015,242,663,909,200 | 44.078947 | 103 | 0.669169 | false |
radiasoft/radtrack | radtrack/rt_params.py | 1 | 9448 | # -*- coding: utf-8 -*-
"""Parameter declaration parser
:copyright: Copyright (c) 2015 Bivio Software, Inc. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import UserDict
import __builtin__
import copy
import enum
import importlib
import re
import sys
import yaml
from pykern.pkdebug import pkdc, pkdp
from pykern import pkcompat
from pykern import pkio
from pykern import pkcollections
from pykern import pkyaml
#: Valid attributes for a declaration
DECLARATION_ATTRS = ('name', 'required', 'children', 'label', 'units', 'py_type')
class DeclarationException(ValueError):
pass
class Declaration(UserDict.DictMixin):
"""Describe a parameter and its children (if any)
Attributes:
children (ordered): OrderedDict of subparameters
label (str): displayed to the user (default: generated from name)
name (str): programmatic name
py_type (type): how to render value (None implies has children)
required (list or dict): components need this parameter (may be inherited)
units (str): expected units (default: None)
"""
def __init__(self, decl, qualifier=None):
try:
#TODO(robnagler) more type checking: especially required and children
self.name = decl['name']
self.qualified_name = qualifier + '.' + self.name if qualifier else self.name
unknown = [x for x in decl if x not in DECLARATION_ATTRS]
if unknown:
raise ValueError('{}: unknown attribute(s)'.format(unknown))
self.label = self._label(decl)
self.py_type = self._py_type(decl)
self.units = self._units(decl)
self.required = self._required(decl)
self.children = self._children(decl)
assert self.children or self.py_type, \
'{}: declaration must be one type or the other'
except DeclarationException:
raise
except Exception as e:
n = None
for a in ('qualified_name', 'name'):
if hasattr(self, a):
n = getattr(self, a)
break
if n is None:
n = decl
raise DeclarationException('{}: declaration error for {}'.format(e, n)), None, sys.exc_info()[2]
def __repr__(self):
return 'Declaration({})'.format(self.name or self.qualified_name)
def __getitem__(self, key):
if not (self.children and key in self.children):
raise KeyError(key)
return self.children[key]
def keys(self):
if not self.children:
return []
return pkcollections.map_keys(self.children)
def _children(self, decl):
if 'children' not in decl:
return None
res = pkcollections.OrderedMapping()
for c in decl['children']:
if pkcompat.isinstance_str(c):
d = c
n = c
else:
d = Declaration(c, self.qualified_name)
n = d.name
assert n not in res, \
'{}: duplicate key in {}'.format(n, self.name)
res[n] = d
return res
def _label(self, decl):
if 'label' in decl:
return decl['label']
res = self.name
res = re.sub(r'(^|_)([a-z])', lambda x: x.group(1) + x.group(2).upper(), res)
res = re.sub(r'_', ' ', res)
res = re.sub(r'\bLen\b', 'Length', res)
res = re.sub(r'\bNum\b', 'Number of', res)
res = re.sub(r'\bCoord\b', 'Coordinate', res)
res = re.sub(r'\bAvg\b', 'Average', res)
return res
def _py_type(self, decl):
"""Parse py_type to Python type instance"""
if 'py_type' not in decl:
return None
t = decl['py_type']
try:
t = getattr(__builtin__, t)
if isinstance(t, type):
return t
except AttributeError:
pass
s = re.search(r'^(\w+)\.(\w+)$', decl['py_type'])
assert s, \
'{py_type}: py_type for {name} not found'.format(*decl)
m = importlib.import_module('radtrack.' + s.group(1))
t = getattr(m, s.group(2))
assert isinstance(t, type), \
'{py_type}: py_type for {name} not a type'.format(*decl)
return t
def _required(self, decl):
return decl['required']
def _units(self, decl):
return decl['units'] if 'units' in decl else None
class Default(UserDict.DictMixin):
def __init__(self, value, component, decl, parent_type=None, qualifier=None):
self.decl = decl
self.qualified_name = qualifier + '.' + decl.qualified_name if qualifier else decl.qualified_name
self.children = self._children(value, decl, component)
if decl.py_type:
if decl.children:
self.value = self.children[next(iter(self.children))].value
else:
self.value = _parse_value(value, decl.py_type)
elif parent_type:
self.value = _parse_value(decl.name, parent_type)
def iter_leaves(self):
if not self.children:
yield self
else:
for c in pkcollections.map_values(self.children):
for l in c.iter_leaves():
yield l
def iter_nodes(self):
yield self
if not self.children:
return
for c in pkcollections.map_values(self.children):
for l in c.iter_nodes():
yield l
def __repr__(self):
return 'Default({})'.format(self.decl.qualified_name)
def __getitem__(self, key):
if not (self.children and key in self.children):
raise KeyError('{}: no key in {}'.format(key, self))
return self.children[key]
def keys(self):
if not self.children:
return []
return pkcollections.map_keys(self.children)
def _children(self, values, decl, component):
if not decl.children:
return None
res = pkcollections.OrderedMapping()
for child_decl in decl.values():
d = Default(
values[child_decl.name],
component,
child_decl,
decl.py_type,
self.qualified_name,
)
res[child_decl.name] = d
return res
def declarations(file_prefix):
"""Parsed parameter declarations from ``<file_prefix>_declarations.yml``
Args:
file_prefix (str): which file to parse
Returns:
OrderedMapping: mapping of declarations
"""
return _get(file_prefix, 'declarations', _parse_declarations)
def defaults(file_prefix, decl):
"""Parsed parameter defaults from ``<file_prefix>_defaults.yml``
Args:
file_prefix (str): which file to parse
decl (OrderedMapping): how to parse data
Returns:
OrderedMapping: mapping of default values
"""
return _get(
file_prefix,
'defaults',
lambda v, fp: defaults_from_dict(v, fp, decl),
)
def defaults_from_dict(values, component, decl):
"""Parsed parameter values from already parsed YAML.
Args:
values (dict): read from YAML
component (str): which component in decl, e.g. 'srw_multi'
decl (OrderedMapping): how to parse the data
Returns:
OrderedMapping: mapping of values
"""
return Default(values, component, decl)
def init_params(defaults):
"""Create a tree of default params excluding headings and computed params
Args:
defaults (dict): used for initializations
Returns:
OrderedMapping: nested dictionary of params
"""
res = pkcollections.OrderedMapping()
for k in defaults:
v = defaults[k]
res[k] = init_params(v.children) if v.children else v.value
return res
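# Example usage (sketch; ``mybeamline`` is a hypothetical file prefix whose
# ``mybeamline_declarations.yml`` and ``mybeamline_defaults.yml`` would ship
# as package resources):
#
#   decl = declarations('mybeamline')
#   dflt = defaults('mybeamline', decl)
#   params = init_params(dflt)   # plain nested mapping of default values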
def _get(file_name_or_prefix, which, how):
"""Parse and validate YAML file.
Args:
file_name_or_prefix (str): which file to parse
which (str): "declarations" or "defaults" or None
how (callable): parser
Returns:
OrderedMapping: parsed YAML file
"""
fn = '{}_{}'.format(file_name_or_prefix, which) if which else file_name_or_prefix
values = pkyaml.load_resource(fn)
return how(values, file_name_or_prefix)
def _parse_declarations(values, file_prefix):
"""Recurse the parsed YAML declarations; convert values and types
Order preserving so can use for layout.
Args:
values (list): raw YAML as a list
file_prefix (str): which file to parse
"""
root = Declaration({
'name': '',
'children': values,
'required': None,
})
_parse_declarations_link(root, root)
return root
def _parse_declarations_link(decl, root):
for k, v in decl.items():
if not isinstance(v, Declaration):
decl.children[k] = root[k]
else:
_parse_declarations_link(v, root)
def _parse_value(v, t):
if hasattr(t, 'from_anything'):
return t.from_anything(v)
return t(v)
| apache-2.0 | 2,926,831,937,173,079,000 | 29.379421 | 108 | 0.575783 | false |
cgstudiomap/cgstudiomap | main/local_modules/res_partner_url_validation_missing_details/__openerp__.py | 1 | 1456 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) cgstudiomap <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Res Partner url validation: missing details',
'version': '0.2',
'author': 'cgstudiomap',
'maintainer': 'cgstudiomap',
'license': 'AGPL-3',
'category': 'Sales',
'summary': 'Set up for urls for missing details bot',
'depends': [
'res_partner_missing_details',
'res_partner_url_validation',
],
'external_dependencies': {},
'data': [
'missing_details.xml',
],
'installable': True,
}
| agpl-3.0 | -6,023,934,606,815,032,000 | 36.333333 | 78 | 0.59272 | false |
FEniCS/fiat | FIAT/discontinuous_taylor.py | 1 | 2114 | # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
# Modified by Colin Cotter (Imperial College London)
# David Ham (Imperial College London)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT import finite_element, polynomial_set, dual_set, functional, P0, quadrature
from FIAT.polynomial_set import mis
import numpy
class DiscontinuousTaylorDualSet(dual_set.DualSet):
"""The dual basis for Taylor elements. This class works for
intervals. Nodes are function and derivative evaluation
at the midpoint."""
def __init__(self, ref_el, degree):
nodes = []
dim = ref_el.get_spatial_dimension()
Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1))
f_at_qpts = numpy.ones(len(Q.wts))
nodes.append(functional.IntegralMoment(ref_el, Q, f_at_qpts))
vertices = ref_el.get_vertices()
midpoint = tuple(sum(numpy.array(vertices)) / len(vertices))
for k in range(1, degree + 1):
# Loop over all multi-indices of degree k.
for alpha in mis(dim, k):
nodes.append(functional.PointDerivative(ref_el, midpoint, alpha))
entity_ids = {d: {e: [] for e in ref_el.sub_entities[d]}
for d in range(dim + 1)}
entity_ids[dim][0] = list(range(len(nodes)))
super(DiscontinuousTaylorDualSet, self).__init__(nodes, ref_el, entity_ids)
class HigherOrderDiscontinuousTaylor(finite_element.CiarletElement):
"""The discontinuous Taylor finite element. Use a Taylor basis for DG."""
def __init__(self, ref_el, degree):
poly_set = polynomial_set.ONPolynomialSet(ref_el, degree)
dual = DiscontinuousTaylorDualSet(ref_el, degree)
formdegree = ref_el.get_spatial_dimension() # n-form
super(HigherOrderDiscontinuousTaylor, self).__init__(poly_set, dual, degree, formdegree)
def DiscontinuousTaylor(ref_el, degree):
if degree == 0:
return P0.P0(ref_el)
else:
return HigherOrderDiscontinuousTaylor(ref_el, degree)
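# Example usage (sketch; assumes FIAT's UFC reference cells are available):
#
#   from FIAT.reference_element import ufc_simplex
#   interval = ufc_simplex(1)
#   element = DiscontinuousTaylor(interval, 2)
#   # Degree 2 on an interval: one cell-average functional plus first- and
#   # second-derivative point evaluations at the midpoint (3 dofs total).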
| lgpl-3.0 | -4,400,522,402,054,451,700 | 36.75 | 96 | 0.657521 | false |
lsp84ch83/PyText | UItestframework/testcase/test_baidu.py | 1 | 1073 | #coding=utf-8
from time import sleep
from public.common import mytest
from public.pages import baiduIndexPage
from public.common import datainfo
class TestBaiduIndex(mytest.MyTest):
"""百度搜索测试"""
def _search(self,searchKey):
"""封装百度搜索的函数"""
baidupage = baiduIndexPage.BaiduIndexPage(self.dr)
baidupage.into_baidu_page()
baidupage.input_search_key(searchKey)
baidupage.click_search_button()
sleep(2)
self.assertIn(searchKey, baidupage.return_title())
def test_search(self):
"""直接搜索"""
baidupage = baiduIndexPage.BaiduIndexPage(self.dr)
baidupage.into_baidu_page()
        baidupage.input_search_key('小石头tester')  # fixed test keyword ("little-stone tester")
baidupage.click_search_button()
sleep(2)
self.assertIn('小石头',baidupage.return_title())
def test_search_excel(self):
"""使用数据驱动,进行测试"""
datas = datainfo.get_xls_to_list('searKey.xlsx','Sheet1')
for data in datas:
self._search(data)
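# Note: ``searKey.xlsx`` (sheet ``Sheet1``) is expected to hold one search
# keyword per row; ``datainfo.get_xls_to_list`` is a project helper (assumed
# API) that returns those keywords as a flat list.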
| gpl-3.0 | 4,786,702,535,316,635,000 | 27.657143 | 65 | 0.645065 | false |
Yukarumya/Yukarum-Redfoxes | dom/canvas/test/webgl-conf/checkout/deqp/functional/gles3/fborender/fborender_test_generator.py | 51 | 3682 | #!/usr/bin/env python
# Copyright (c) 2016 The Khronos Group Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and/or associated documentation files (the
# "Materials"), to deal in the Materials without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Materials, and to
# permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
#
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
"""
Generator for fborender* tests.
This file needs to be run in its folder.
"""
import sys
_DO_NOT_EDIT_WARNING = """<!--
This file is auto-generated from fborender_test_generator.py
DO NOT EDIT!
-->
"""
_HTML_TEMPLATE = """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>WebGL Framebuffer Render Tests</title>
<link rel="stylesheet" href="../../../../resources/js-test-style.css"/>
<script src="../../../../js/js-test-pre.js"></script>
<script src="../../../../js/webgl-test-utils.js"></script>
<script src="../../../../closure-library/closure/goog/base.js"></script>
<script src="../../../deqp-deps.js"></script>
<script>goog.require('functional.gles3.es3fFboRenderTest');</script>
</head>
<body>
<div id="description"></div>
<div id="console"></div>
<canvas id="canvas" width="200" height="128"> </canvas>
<script>
var wtu = WebGLTestUtils;
var gl = wtu.create3DContext('canvas', null, 2);
functional.gles3.es3fFboRenderTest.run(gl, [%(start)s, %(end)s]);
</script>
</body>
</html>
"""
_GROUPS = [
'stencil_clear',
'shared_colorbuffer_clear',
'shared_colorbuffer',
'shared_depth_stencil',
'resize',
'recreate_color',
'recreate_depth_stencil'
]
_GROUP_TEST_COUNTS = [
1,
1,
3,
1,
4,
7,
1
]
def GenerateFilename(group, count, index):
"""Generate test filename."""
filename = group
assert index >= 0 and index < count
if count > 1:
index_str = str(index)
if index < 10:
index_str = "0" + index_str
filename += "_" + index_str
filename += ".html"
return filename
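# For example, GenerateFilename('resize', 4, 2) returns 'resize_02.html',
# while GenerateFilename('stencil_clear', 1, 0) returns 'stencil_clear.html'.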
def WriteTest(filename, start, end):
"""Write one test."""
file = open(filename, "wb")
file.write(_DO_NOT_EDIT_WARNING)
file.write(_HTML_TEMPLATE % {
'start': start,
'end': end
})
    file.close()
def GenerateTests():
"""Generate all tests."""
assert len(_GROUPS) == len(_GROUP_TEST_COUNTS)
test_index = 0
filelist = []
for ii in range(len(_GROUPS)):
group = _GROUPS[ii]
count = _GROUP_TEST_COUNTS[ii]
for index in range(count):
filename = GenerateFilename(group, count, index)
filelist.append(filename)
WriteTest(filename, test_index, test_index + 1)
test_index += 1
return filelist
def GenerateTestList(filelist):
file = open("00_test_list.txt", "wb")
file.write('\n'.join(filelist))
    file.close()
def main(argv):
"""This is the main function."""
filelist = GenerateTests()
GenerateTestList(filelist)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mpl-2.0 | -7,141,904,113,386,431,000 | 26.477612 | 73 | 0.673819 | false |
tiagoantao/igrat | igrat/bio/tasks/load_genomeposition.py | 1 | 1502 | """
Copyright 2013 Tiago Antao
This file is part of igrat.
igrat is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
igrat is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with igrat. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import vcf
from igrat.cunit import FileImportCompute
class GPImportCompute(FileImportCompute):
def __init__(self, fname, outputs, key):
FileImportCompute.__init__(self, fname, outputs)
self.out_db = self.outputs[0]
self.key = key
def compute(self, start, end):
cstart, pstart = start
cend, pend = end
#cstart and cend are expected to be the same
v = vcf.Reader(filename=self.fname)
recs = v.fetch(cstart, pstart, pend)
        for rec in recs:
            # Placeholder: persisting each fetched VCF record into
            # ``self.out_db`` under ``self.key`` is left unimplemented here.
            pass
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("fname", help="Indexed VCF to load")
parser.add_argument("db", help="DB")
parser.add_argument("key", help="key")
    parser.parse_args()  # validates the CLI arguments; results not yet consumed
#TODO: include filter
| agpl-3.0 | -9,087,709,297,147,429,000 | 24.033333 | 75 | 0.689081 | false |
angr/angr | angr/analyses/girlscout.py | 3 | 32197 | raise ImportError("Don't import me! I don't work!")
import logging
import math
import os
import pickle
import re
import string
from collections import defaultdict
from datetime import datetime
import cle
import networkx
import progressbar
import pyvex
from . import Analysis
from angr.analyses.cfg.cfg_fast import SegmentList
from .. import options as o
from ..annocfg import AnnotatedCFG
from ..errors import SimMemoryError, SimEngineError, AngrError, SimValueError, SimIRSBError, SimSolverModeError, \
SimError
from ..state_plugins.sim_action import SimActionData
from ..surveyors import Explorer, Slicecutor
l = logging.getLogger(name=__name__)
class GirlScout(Analysis):
"""
We find functions inside the given binary, try to decide the base address if needed, and build a control-flow
graph on top of that to see if there is an entry or not. Obviously if the binary is not loaded as a blob (not
using Blob as its backend), GirlScout will not try to determine the base address.
    Optionally, a full code scan of the binary can be performed to show where all code is. By default we don't scan
    the entire binary, since that is time-consuming.
    You probably need a BoyScout to determine the possible architecture and endianness of your binary blob.
"""
def __init__(self, binary=None, start=None, end=None, pickle_intermediate_results=False, perform_full_code_scan=False):
self._binary = binary if binary is not None else self.project.loader.main_object
self._start = start if start is not None else self._binary.min_addr
self._end = end if end is not None else self._binary.max_addr
self._pickle_intermediate_results = pickle_intermediate_results
self._perform_full_code_scan = perform_full_code_scan
l.debug("Starts at 0x%08x and ends at 0x%08x.", self._start, self._end)
# Valid memory regions
self._valid_memory_regions = sorted(
(start, start+len(backer)) for start, backer in self.project.loader.memory.backers())
self._valid_memory_region_size = sum([ (end - start) for start, end in self._valid_memory_regions ])
# Size of each basic block
self._block_size = { }
self._next_addr = self._start - 1
# Starting point of functions
self.functions = None
# Calls between functions
self.call_map = networkx.DiGraph()
# A CFG - this is not what you get from project.analyses.CFG() !
self.cfg = networkx.DiGraph()
# Create the segment list
self._seg_list = SegmentList()
self._read_addr_to_run = defaultdict(list)
self._write_addr_to_run = defaultdict(list)
# All IRSBs with an indirect exit target
self._indirect_jumps = set()
self._unassured_functions = set()
self.base_address = None
# Start working!
self._reconnoiter()
    # ``call_map`` is a plain instance attribute assigned in __init__ rather
    # than a property: a same-named property here would shadow the attribute
    # and recurse into itself on access.
def _get_next_addr_to_search(self, alignment=None):
# TODO: Take care of those functions that are already generated
curr_addr = self._next_addr
if self._seg_list.has_blocks:
curr_addr = self._seg_list.next_free_pos(curr_addr)
if alignment is not None:
if curr_addr % alignment > 0:
curr_addr = curr_addr - curr_addr % alignment + alignment
# Make sure curr_addr exists in binary
accepted = False
for start, end in self._valid_memory_regions:
if curr_addr >= start and curr_addr < end:
# accept
accepted = True
break
if curr_addr < start:
# accept, but we are skipping the gap
accepted = True
curr_addr = start
if not accepted:
# No memory available!
return None
self._next_addr = curr_addr
if self._end is None or curr_addr < self._end:
l.debug("Returning new recon address: 0x%08x", curr_addr)
return curr_addr
else:
l.debug("0x%08x is beyond the ending point.", curr_addr)
return None
def _get_next_code_addr(self, initial_state):
"""
        Besides calling _get_next_addr, we check whether the data located at that address looks like code. If not,
        we move on and request the next valid address.
"""
next_addr = self._get_next_addr_to_search()
if next_addr is None:
return None
start_addr = next_addr
sz = ""
is_sz = True
while is_sz:
# Get data until we meet a 0
while next_addr in initial_state.memory:
try:
l.debug("Searching address %x", next_addr)
val = initial_state.mem_concrete(next_addr, 1)
if val == 0:
if len(sz) < 4:
is_sz = False
else:
reach_end = True
break
if chr(val) not in string.printable:
is_sz = False
break
sz += chr(val)
next_addr += 1
except SimValueError:
# Not concretizable
l.debug("Address 0x%08x is not concretizable!", next_addr)
break
if len(sz) > 0 and is_sz:
l.debug("Got a string of %d chars: [%s]", len(sz), sz)
# l.debug("Occpuy %x - %x", start_addr, start_addr + len(sz) + 1)
self._seg_list.occupy(start_addr, len(sz) + 1)
sz = ""
next_addr = self._get_next_addr_to_search()
if next_addr is None:
return None
# l.debug("next addr = %x", next_addr)
start_addr = next_addr
if is_sz:
next_addr += 1
instr_alignment = initial_state.arch.instruction_alignment
if start_addr % instr_alignment > 0:
start_addr = start_addr - start_addr % instr_alignment + \
instr_alignment
l.debug('_get_next_code_addr() returns 0x%x', start_addr)
return start_addr
def _symbolic_reconnoiter(self, addr, target_addr, max_depth=10):
"""
When an IRSB has more than two exits (for example, a jumptable), we
cannot concretize their exits in concrete mode. Hence we statically
execute the function from beginning in this method, and then switch to
symbolic mode for the final IRSB to get all possible exits of that
IRSB.
"""
state = self.project.factory.blank_state(addr=addr,
mode="symbolic",
add_options={o.CALLLESS}
)
initial_exit = self.project.factory.path(state)
explorer = Explorer(self.project,
start=initial_exit,
max_depth=max_depth,
find=(target_addr), num_find=1).run()
if len(explorer.found) > 0:
path = explorer.found[0]
last_run = path.last_run
return last_run.flat_exits()
else:
return []
def _static_memory_slice(self, run):
if isinstance(run, SimIRSB):
for stmt in run.statements:
refs = stmt.actions
if len(refs) > 0:
real_ref = refs[-1]
if type(real_ref) == SimActionData:
if real_ref.action == 'write':
addr = real_ref.addr
if not run.initial_state.solver.symbolic(addr):
concrete_addr = run.initial_state.solver.eval(addr)
                                self._write_addr_to_run[concrete_addr].append(run.addr)
elif real_ref.action == 'read':
addr = real_ref.addr
if not run.initial_state.solver.symbolic(addr):
concrete_addr = run.initial_state.solver.eval(addr)
                                self._read_addr_to_run[concrete_addr].append(run.addr)
def _scan_code(self, traced_addresses, function_exits, initial_state, starting_address):
# Saving tuples like (current_function_addr, next_exit_addr)
# Current_function_addr == -1 for exits not inside any function
remaining_exits = set()
next_addr = starting_address
# Initialize the remaining_exits set
remaining_exits.add((next_addr,
next_addr,
next_addr,
initial_state.copy()))
while len(remaining_exits):
current_function_addr, previous_addr, parent_addr, state = \
remaining_exits.pop()
if previous_addr in traced_addresses:
continue
# Add this node to the CFG first, in case this is a dangling node
self.cfg.add_node(previous_addr)
if current_function_addr != -1:
l.debug("Tracing new exit 0x%08x in function 0x%08x",
previous_addr, current_function_addr)
else:
l.debug("Tracing new exit 0x%08x", previous_addr)
traced_addresses.add(previous_addr)
self._scan_block(previous_addr, state, current_function_addr, function_exits, remaining_exits, traced_addresses)
def _scan_block(self, addr, state, current_function_addr, function_exits, remaining_exits, traced_addresses):
# Let's try to create the pyvex IRSB directly, since it's much faster
try:
irsb = self.project.factory.block(addr).vex
# Log the size of this basic block
self._block_size[addr] = irsb.size
# Occupy the block
self._seg_list.occupy(addr, irsb.size)
except (SimEngineError, SimMemoryError):
return
# Get all possible successors
next, jumpkind = irsb.next, irsb.jumpkind
successors = [ (i.dst, i.jumpkind) for i in irsb.statements if type(i) is pyvex.IRStmt.Exit]
successors.append((next, jumpkind))
# Process each successor
for suc in successors:
target, jumpkind = suc
if type(target) is pyvex.IRExpr.Const:
next_addr = target.con.value
else:
next_addr = None
            if jumpkind == 'Ijk_Boring' and next_addr is not None:
                # Record this exit so post-processing can add any missing call
                # edges (mirrors the bookkeeping in _scan_block_ below).
                if current_function_addr != -1:
                    function_exits[current_function_addr].add(next_addr)
                remaining_exits.add((current_function_addr, next_addr,
                                     addr, None))
            elif jumpkind == 'Ijk_Call' and next_addr is not None:
                # Log the call edge before we cut the tracing :)
                if current_function_addr != -1:
                    self.functions.add(current_function_addr)
                    self.functions.add(next_addr)
                    self.call_map.add_edge(current_function_addr, next_addr)
                else:
                    self.functions.add(next_addr)
                    self.call_map.add_node(next_addr)
                # If we have traced it before, don't trace it again -- but keep
                # processing this block's remaining successors.
                if next_addr in traced_addresses:
                    continue
                remaining_exits.add((next_addr, next_addr, addr, None))
l.debug("Function calls: %d", len(self.call_map.nodes()))
def _scan_block_(self, addr, state, current_function_addr, function_exits, remaining_exits, traced_addresses):
# Get a basic block
state.ip = addr
s_path = self.project.factory.path(state)
try:
s_run = s_path.next_run
except SimIRSBError as ex:
l.debug(ex)
return
except AngrError as ex:
# "No memory at xxx"
l.debug(ex)
return
except (SimValueError, SimSolverModeError) as ex:
# Cannot concretize something when executing the SimRun
l.debug(ex)
return
except SimError as ex:
# Catch all simuvex errors
l.debug(ex)
return
if type(s_run) is SimIRSB:
# Calculate its entropy to avoid jumping into uninitialized/all-zero space
bytes = s_run.irsb._state[1]['bytes']
size = s_run.irsb.size
ent = self._calc_entropy(bytes, size=size)
if ent < 1.0 and size > 40:
# Skipping basic blocks that have a very low entropy
return
# self._static_memory_slice(s_run)
# Mark that part as occupied
if isinstance(s_run, SimIRSB):
self._seg_list.occupy(addr, s_run.irsb.size)
successors = s_run.flat_successors + s_run.unsat_successors
has_call_exit = False
tmp_exit_set = set()
for suc in successors:
if suc.history.jumpkind == "Ijk_Call":
has_call_exit = True
for suc in successors:
jumpkind = suc.history.jumpkind
if has_call_exit and jumpkind == "Ijk_Ret":
jumpkind = "Ijk_FakeRet"
if jumpkind == "Ijk_Ret":
continue
try:
# Try to concretize the target. If we can't, just move on
# to the next target
next_addr = suc.solver.eval_one(suc.ip)
except (SimValueError, SimSolverModeError) as ex:
# Undecidable jumps (might be a function return, or a conditional branch, etc.)
# We log it
self._indirect_jumps.add((suc.history.jumpkind, addr))
l.info("IRSB 0x%x has an indirect exit %s.", addr, suc.history.jumpkind)
continue
self.cfg.add_edge(addr, next_addr, jumpkind=jumpkind)
# Log it before we cut the tracing :)
if jumpkind == "Ijk_Call":
if current_function_addr != -1:
self.call_map.add_edge(current_function_addr, next_addr)
else:
self.call_map.add_node(next_addr)
elif jumpkind == "Ijk_Boring" or \
jumpkind == "Ijk_Ret":
if current_function_addr != -1:
function_exits[current_function_addr].add(next_addr)
# If we have traced it before, don't trace it anymore
if next_addr in traced_addresses:
continue
# If we have traced it in current loop, don't tract it either
if next_addr in tmp_exit_set:
continue
tmp_exit_set.add(next_addr)
if jumpkind == "Ijk_Call":
# This is a call. Let's record it
new_state = suc.copy()
# Unconstrain those parameters
# TODO: Support other archs as well
# if 12 + 16 in new_state.registers.mem:
# del new_state.registers.mem[12 + 16]
#if 16 + 16 in new_state.registers.mem:
# del new_state.registers.mem[16 + 16]
#if 20 + 16 in new_state.registers.mem:
# del new_state.registers.mem[20 + 16]
# 0x8000000: call 0x8000045
remaining_exits.add((next_addr, next_addr, addr, new_state))
l.debug("Function calls: %d", len(self.call_map.nodes()))
elif jumpkind == "Ijk_Boring" or \
jumpkind == "Ijk_Ret" or \
jumpkind == "Ijk_FakeRet":
new_state = suc.copy()
l.debug("New exit with jumpkind %s", jumpkind)
# FIXME: should not use current_function_addr if jumpkind is "Ijk_Ret"
remaining_exits.add((current_function_addr, next_addr,
addr, new_state))
elif jumpkind == "Ijk_NoDecode":
# That's something VEX cannot decode!
# We assume we ran into a deadend
pass
elif jumpkind.startswith("Ijk_Sig"):
# Should not go into that exit
pass
elif jumpkind == "Ijk_TInval":
# ppc32: isync
# FIXME: It is the same as Ijk_Boring! Process it later
pass
elif jumpkind == 'Ijk_Sys_syscall':
# Let's not jump into syscalls
pass
elif jumpkind == 'Ijk_InvalICache':
pass
elif jumpkind == 'Ijk_MapFail':
pass
elif jumpkind == 'Ijk_EmWarn':
pass
else:
raise Exception("NotImplemented")
def _scan_function_prologues(self, traced_address, function_exits, initial_state):
"""
Scan the entire program space for prologues, and start code scanning at those positions
:param traced_address:
:param function_exits:
:param initial_state:
:param next_addr:
:returns:
"""
# Precompile all regexes
regexes = set()
for ins_regex in self.project.arch.function_prologs:
r = re.compile(ins_regex)
regexes.add(r)
# TODO: Make sure self._start is aligned
# Construct the binary blob first
for start_, bytes_ in self.project.loader.main_object.memory.backers():
for regex in regexes:
# Match them!
                for mo in regex.finditer(bytes_):
position = mo.start() + start_
if position % self.project.arch.instruction_alignment == 0:
if position not in traced_address:
percentage = self._seg_list.occupied_size * 100.0 / (self._valid_memory_region_size)
l.info("Scanning %xh, progress %0.04f%%", position, percentage)
self._unassured_functions.add(position)
self._scan_code(traced_address, function_exits, initial_state, position)
else:
l.info("Skipping %xh", position)
def _process_indirect_jumps(self):
"""
        Execute each basic block with an indeterminable exit target.
:returns:
"""
function_starts = set()
l.info("We have %d indirect jumps", len(self._indirect_jumps))
for jumpkind, irsb_addr in self._indirect_jumps:
# First execute the current IRSB in concrete mode
if len(function_starts) > 20:
break
if jumpkind == "Ijk_Call":
state = self.project.factory.blank_state(addr=irsb_addr, mode="concrete",
add_options={o.SYMBOLIC_INITIAL_VALUES}
)
path = self.project.factory.path(state)
l.debug(hex(irsb_addr))
try:
r = (path.next_run.successors + path.next_run.unsat_successors)[0]
ip = r.solver.eval_one(r.ip)
function_starts.add(ip)
continue
except SimSolverModeError as ex:
pass
# Not resolved
# Do a backward slicing from the call
irsb = self.project.factory.block(irsb_addr).vex
stmts = irsb.statements
# Start slicing from the "next"
b = Blade(self.cfg, irsb.addr, -1, project=self.project)
# Debugging output
for addr, stmt_idx in sorted(list(b.slice.nodes())):
irsb = self.project.factory.block(addr).vex
stmts = irsb.statements
l.debug("%x: %d | %s %d", (addr, stmt_idx), stmts[stmt_idx], b.slice.in_degree((addr, stmt_idx)))
# Get all sources
sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0]
# Create the annotated CFG
annotatedcfg = AnnotatedCFG(self.project, None, target_irsb_addr=irsb_addr, detect_loops=False)
annotatedcfg.from_digraph(b.slice)
for src_irsb, src_stmt_idx in sources:
# Use slicecutor to execute each one, and get the address
# We simply give up if any exception occurs on the way
start_state = self.project.factory.blank_state(addr=src_irsb,
add_options=
{o.DO_RET_EMULATION,
o.TRUE_RET_EMULATION_GUARD}
)
start_path = self.project.factory.path(start_state)
# Create the slicecutor
slicecutor = Slicecutor(self.project, annotatedcfg, start=start_path, targets=(irsb_addr,))
# Run it!
try:
slicecutor.run()
except KeyError as ex:
# This is because the program slice is incomplete.
# Blade will support more IRExprs and IRStmts
l.debug("KeyError occurred due to incomplete program slice.", exc_info=ex)
continue
# Get the jumping targets
for r in slicecutor.reached_targets:
if r.next_run.successors:
target_ip = r.next_run.successors[0].ip
se = r.next_run.successors[0].se
if not se.symbolic(target_ip):
concrete_ip = se.eval_one(target_ip)
function_starts.add(concrete_ip)
l.info("Found a function address %x", concrete_ip)
return function_starts
def _solve_forbase_address(self, function_starts, functions):
"""
        Vote for the most likely base address.
:param function_starts:
:param functions:
:returns:
"""
pseudo_base_addr = self.project.loader.main_object.min_addr
base_addr_ctr = { }
for s in function_starts:
for f in functions:
base_addr = s - f + pseudo_base_addr
ctr = 1
for k in function_starts:
if k - base_addr + pseudo_base_addr in functions:
ctr += 1
if ctr > 5:
base_addr_ctr[base_addr] = ctr
if len(base_addr_ctr):
base_addr, hits = sorted([(k, v) for k, v in base_addr_ctr.items()], key=lambda x: x[1], reverse=True)[0]
return base_addr
else:
return None
def _reconnoiter(self):
if type(self._binary) is cle.blob.Blob:
self._determinebase_address()
if self._perform_full_code_scan:
self._full_code_scan()
def _determinebase_address(self):
"""
        The basic idea is simple: start from a specific point, construct as
        many functions as we can, and maintain a function distribution graph
        and a call graph simultaneously. Repeat searching until no new
        function can be found.
A function should start with:
# some addresses that a call exit leads to, or
        # certain instructions. They are recorded in SimArch.
For a better performance, instead of blindly scanning the entire process
space, we first try to search for instruction patterns that a function
may start with, and start scanning at those positions. Then we try to
decode anything that is left.
"""
traced_address = set()
self.functions = set()
self.call_map = networkx.DiGraph()
self.cfg = networkx.DiGraph()
initial_state = self.project.factory.blank_state(mode="fastpath")
initial_options = initial_state.options - { o.TRACK_CONSTRAINTS } - o.refs
initial_options |= { o.SUPER_FASTPATH }
# initial_options.remove(o.COW_STATES)
initial_state.options = initial_options
# Sadly, not all calls to functions are explicitly made by call
# instruction - they could be a jmp or b, or something else. So we
# should record all exits from a single function, and then add
# necessary calling edges in our call map during the post-processing
# phase.
function_exits = defaultdict(set)
dump_file_prefix = self.project.filename
if self._pickle_intermediate_results and \
os.path.exists(dump_file_prefix + "_indirect_jumps.angr"):
l.debug("Loading existing intermediate results.")
self._indirect_jumps = pickle.load(open(dump_file_prefix + "_indirect_jumps.angr", "rb"))
self.cfg = pickle.load(open(dump_file_prefix + "_coercecfg.angr", "rb"))
self._unassured_functions = pickle.load(open(dump_file_prefix + "_unassured_functions.angr", "rb"))
else:
# Performance boost :-)
# Scan for existing function prologues
self._scan_function_prologues(traced_address, function_exits, initial_state)
if self._pickle_intermediate_results:
l.debug("Dumping intermediate results.")
pickle.dump(self._indirect_jumps, open(dump_file_prefix + "_indirect_jumps.angr", "wb"), -1)
pickle.dump(self.cfg, open(dump_file_prefix + "_coercecfg.angr", "wb"), -1)
pickle.dump(self._unassured_functions, open(dump_file_prefix + "_unassured_functions.angr", "wb"), -1)
if len(self._indirect_jumps):
# We got some indirect jumps!
# Gotta execute each basic block and see where it wants to jump to
function_starts = self._process_indirect_jumps()
self.base_address = self._solve_forbase_address(function_starts, self._unassured_functions)
l.info("Base address should be 0x%x", self.base_address)
else:
l.debug("No indirect jumps are found. We switch to the slowpath mode.")
# TODO: Slowpath mode...
while True:
next_addr = self._get_next_code_addr(initial_state)
percentage = self._seg_list.occupied_size * 100.0 / (self._valid_memory_region_size)
l.info("Analyzing %xh, progress %0.04f%%", next_addr, percentage)
if next_addr is None:
break
self.call_map.add_node(next_addr)
self._scan_code(traced_address, function_exits, initial_state, next_addr)
# Post-processing: Map those calls that are not made by call/blr
# instructions to their targets in our map
for src, s in function_exits.items():
if src in self.call_map:
for target in s:
if target in self.call_map:
self.call_map.add_edge(src, target)
nodes = sorted(self.call_map.nodes())
for i in range(len(nodes) - 1):
if nodes[i] >= nodes[i + 1] - 4:
for dst in self.call_map.successors(nodes[i + 1]):
self.call_map.add_edge(nodes[i], dst)
for src in self.call_map.predecessors(nodes[i + 1]):
self.call_map.add_edge(src, nodes[i])
self.call_map.remove_node(nodes[i + 1])
l.debug("Construction finished.")
def _full_code_scan(self):
"""
Perform a full code scan on the target binary.
"""
# We gotta time this function
start_time = datetime.now()
traced_address = set()
self.functions = set()
self.call_map = networkx.DiGraph()
self.cfg = networkx.DiGraph()
initial_state = self.project.factory.blank_state(mode="fastpath")
initial_options = initial_state.options - {o.TRACK_CONSTRAINTS} - o.refs
initial_options |= {o.SUPER_FASTPATH}
# initial_options.remove(o.COW_STATES)
initial_state.options = initial_options
# Sadly, not all calls to functions are explicitly made by call
# instruction - they could be a jmp or b, or something else. So we
# should record all exits from a single function, and then add
# necessary calling edges in our call map during the post-processing
# phase.
function_exits = defaultdict(set)
widgets = [progressbar.Percentage(),
' ',
progressbar.Bar(marker=progressbar.RotatingMarker()),
' ',
progressbar.Timer(),
' ',
progressbar.ETA()
]
pb = progressbar.ProgressBar(widgets=widgets, maxval=10000 * 100).start()
while True:
next_addr = self._get_next_code_addr(initial_state)
percentage = self._seg_list.occupied_size * 100.0 / (self._valid_memory_region_size)
if percentage > 100.0: percentage = 100.0
pb.update(percentage * 10000)
if next_addr is not None:
l.info("Analyzing %xh, progress %0.04f%%", next_addr, percentage)
else:
l.info('No more addr to analyze. Progress %0.04f%%', percentage)
break
self.call_map.add_node(next_addr)
self._scan_code(traced_address, function_exits, initial_state, next_addr)
pb.finish()
end_time = datetime.now()
l.info("A full code scan takes %d seconds.", (end_time - start_time).seconds)
def _calc_entropy(self, data, size=None):
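        # Shannon entropy over byte values: H = -sum_x p(x) * log2(p(x)),
        # measured in bits per byte (0.0 for constant data, at most 8.0).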
if not data:
return 0
entropy = 0
if size is None: size = len(data)
data = str(pyvex.ffi.buffer(data, size))
for x in range(0, 256):
p_x = float(data.count(chr(x)))/size
if p_x > 0:
entropy += - p_x * math.log(p_x, 2)
return entropy
def _dbg_output(self):
ret = ""
ret += "Functions:\n"
function_list = list(self.functions)
# Sort it
function_list = sorted(function_list)
for f in function_list:
ret += "0x%08x" % f
return ret
def genenare_callmap_sif(self, filepath):
"""
Generate a sif file from the call map
"""
graph = self.call_map
if graph is None:
raise AngrGirlScoutError('Please generate the call graph first.')
f = open(filepath, "wb")
for src, dst in graph.edges():
f.write("0x%x\tDirectEdge\t0x%x\n" % (src, dst))
f.close()
def generate_code_cover(self):
"""
Generate a list of all recovered basic blocks.
"""
lst = [ ]
for irsb_addr in self.cfg.nodes():
if irsb_addr not in self._block_size:
continue
irsb_size = self._block_size[irsb_addr]
lst.append((irsb_addr, irsb_size))
lst = sorted(lst, key=lambda x: x[0])
return lst
from angr.analyses import AnalysesHub
AnalysesHub.register_default('GirlScout', GirlScout)
from ..blade import Blade
from ..errors import AngrGirlScoutError
| bsd-2-clause | -1,838,273,232,323,139,800 | 38.89715 | 124 | 0.53915 | false |
nitin-cherian/LifeLongLearning | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/pickleshare.py | 12 | 9849 | #!/usr/bin/env python
""" PickleShare - a small 'shelve' like datastore with concurrency support
Like shelve, a PickleShareDB object acts like a normal dictionary. Unlike
shelve, many processes can access the database simultaneously. Changing a
value in database is immediately visible to other processes accessing the
same database.
Concurrency is possible because the values are stored in separate files. Hence
the "database" is a directory where *all* files are governed by PickleShare.
Example usage::
from pickleshare import *
db = PickleShareDB('~/testpickleshare')
db.clear()
print "Should be empty:",db.items()
db['hello'] = 15
db['aku ankka'] = [1,2,313]
db['paths/are/ok/key'] = [1,(5,46)]
print db.keys()
del db['aku ankka']
This module is certainly not ZODB, but can be used for low-load
(non-mission-critical) situations where tiny code size trumps the
advanced features of a "real" object database.
Installation guide: pip install path pickleshare
Author: Ville Vainio <[email protected]>
License: MIT open source license.
"""
from __future__ import print_function
__version__ = "0.7.4"
try:
from pathlib import Path
except ImportError:
# Python 2 backport
from pathlib2 import Path
import os,stat,time
import collections
try:
import cPickle as pickle
except ImportError:
import pickle
import errno
import sys
if sys.version_info[0] >= 3:
string_types = (str,)
else:
string_types = (str, unicode)
def gethashfile(key):
return ("%02x" % abs(hash(key) % 256))[-2:]
_sentinel = object()
class PickleShareDB(collections.MutableMapping):
""" The main 'connection' object for PickleShare database """
def __init__(self,root):
""" Return a db object that will manage the specied directory"""
if not isinstance(root, string_types):
root = str(root)
root = os.path.abspath(os.path.expanduser(root))
self.root = Path(root)
if not self.root.is_dir():
# catching the exception is necessary if multiple processes are concurrently trying to create a folder
# exists_ok keyword argument of mkdir does the same but only from Python 3.5
try:
self.root.mkdir(parents=True)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# cache has { 'key' : (obj, orig_mod_time) }
self.cache = {}
def __getitem__(self,key):
""" db['key'] reading """
fil = self.root / key
try:
mtime = (fil.stat()[stat.ST_MTIME])
except OSError:
raise KeyError(key)
if fil in self.cache and mtime == self.cache[fil][1]:
return self.cache[fil][0]
try:
# The cached item has expired, need to read
with fil.open("rb") as f:
obj = pickle.loads(f.read())
except:
raise KeyError(key)
self.cache[fil] = (obj,mtime)
return obj
def __setitem__(self,key,value):
""" db['key'] = 5 """
fil = self.root / key
parent = fil.parent
if parent and not parent.is_dir():
parent.mkdir(parents=True)
# We specify protocol 2, so that we can mostly go between Python 2
# and Python 3. We can upgrade to protocol 3 when Python 2 is obsolete.
with fil.open('wb') as f:
pickle.dump(value, f, protocol=2)
try:
self.cache[fil] = (value, fil.stat().st_mtime)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def hset(self, hashroot, key, value):
""" hashed set """
hroot = self.root / hashroot
if not hroot.is_dir():
hroot.mkdir()
hfile = hroot / gethashfile(key)
d = self.get(hfile, {})
d.update( {key : value})
self[hfile] = d
def hget(self, hashroot, key, default = _sentinel, fast_only = True):
""" hashed get """
hroot = self.root / hashroot
hfile = hroot / gethashfile(key)
d = self.get(hfile, _sentinel )
#print "got dict",d,"from",hfile
if d is _sentinel:
if fast_only:
if default is _sentinel:
raise KeyError(key)
return default
# slow mode ok, works even after hcompress()
d = self.hdict(hashroot)
return d.get(key, default)
def hdict(self, hashroot):
""" Get all data contained in hashed category 'hashroot' as dict """
hfiles = self.keys(hashroot + "/*")
hfiles.sort()
last = len(hfiles) and hfiles[-1] or ''
if last.endswith('xx'):
# print "using xx"
hfiles = [last] + hfiles[:-1]
all = {}
for f in hfiles:
# print "using",f
try:
all.update(self[f])
except KeyError:
print("Corrupt",f,"deleted - hset is not threadsafe!")
del self[f]
self.uncache(f)
return all
def hcompress(self, hashroot):
""" Compress category 'hashroot', so hset is fast again
hget will fail if fast_only is True for compressed items (that were
hset before hcompress).
"""
hfiles = self.keys(hashroot + "/*")
all = {}
for f in hfiles:
# print "using",f
all.update(self[f])
self.uncache(f)
self[hashroot + '/xx'] = all
for f in hfiles:
p = self.root / f
if p.name == 'xx':
continue
p.unlink()
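    # Example (sketch): spread small values across per-hash bucket files,
    # then compact them so later hset() calls stay fast:
    #
    #   db.hset('scores', 'alice', 3)
    #   db.hget('scores', 'alice')                    # -> 3 (fast path)
    #   db.hcompress('scores')
    #   db.hget('scores', 'alice', fast_only=False)   # -> 3 (slow path)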
def __delitem__(self,key):
""" del db["key"] """
fil = self.root / key
self.cache.pop(fil,None)
try:
fil.unlink()
except OSError:
# notfound and permission denied are ok - we
# lost, the other process wins the conflict
pass
def _normalized(self, p):
""" Make a key suitable for user's eyes """
return str(p.relative_to(self.root)).replace('\\','/')
def keys(self, globpat = None):
""" All keys in DB, or all keys matching a glob"""
if globpat is None:
files = self.root.rglob('*')
else:
files = self.root.glob(globpat)
return [self._normalized(p) for p in files if p.is_file()]
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(self.keys())
def uncache(self,*items):
""" Removes all, or specified items from cache
Use this after reading a large amount of large objects
to free up memory, when you won't be needing the objects
for a while.
"""
if not items:
self.cache = {}
for it in items:
self.cache.pop(it,None)
def waitget(self,key, maxwaittime = 60 ):
""" Wait (poll) for a key to get a value
Will wait for `maxwaittime` seconds before raising a KeyError.
The call exits normally if the `key` field in db gets a value
within the timeout period.
Use this for synchronizing different processes or for ensuring
that an unfortunately timed "db['key'] = newvalue" operation
in another process (which causes all 'get' operation to cause a
KeyError for the duration of pickling) won't screw up your program
logic.
"""
wtimes = [0.2] * 3 + [0.5] * 2 + [1]
tries = 0
waited = 0
while 1:
try:
val = self[key]
return val
except KeyError:
pass
if waited > maxwaittime:
raise KeyError(key)
time.sleep(wtimes[tries])
waited+=wtimes[tries]
if tries < len(wtimes) -1:
tries+=1
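    # Synchronization sketch (hypothetical key and processes): process A calls
    # db.waitget('token', maxwaittime=30) and polls; once process B executes
    # db['token'] = 'ready', A returns 'ready'. If nothing is written within
    # the timeout, A raises KeyError.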
def getlink(self,folder):
""" Get a convenient link for accessing items """
return PickleShareLink(self, folder)
def __repr__(self):
return "PickleShareDB('%s')" % self.root
class PickleShareLink:
""" A shortdand for accessing nested PickleShare data conveniently.
Created through PickleShareDB.getlink(), example::
lnk = db.getlink('myobjects/test')
lnk.foo = 2
lnk.bar = lnk.foo + 5
"""
def __init__(self, db, keydir ):
self.__dict__.update(locals())
def __getattr__(self,key):
return self.__dict__['db'][self.__dict__['keydir']+'/' + key]
def __setattr__(self,key,val):
self.db[self.keydir+'/' + key] = val
def __repr__(self):
db = self.__dict__['db']
keys = db.keys( self.__dict__['keydir'] +"/*")
return "<PickleShareLink '%s': %s>" % (
self.__dict__['keydir'],
";".join([Path(k).basename() for k in keys]))
def main():
import textwrap
usage = textwrap.dedent("""\
pickleshare - manage PickleShare databases
Usage:
pickleshare dump /path/to/db > dump.txt
pickleshare load /path/to/db < dump.txt
pickleshare test /path/to/db
""")
DB = PickleShareDB
import sys
if len(sys.argv) < 2:
print(usage)
return
cmd = sys.argv[1]
args = sys.argv[2:]
if cmd == 'dump':
if not args: args= ['.']
db = DB(args[0])
import pprint
pprint.pprint(db.items())
elif cmd == 'load':
cont = sys.stdin.read()
db = DB(args[0])
data = eval(cont)
db.clear()
        for k,v in data.items():
db[k] = v
elif cmd == 'testwait':
db = DB(args[0])
db.clear()
print(db.waitget('250'))
elif cmd == 'test':
test()
stress()
if __name__== "__main__":
main()
| mit | -1,124,421,575,492,411,000 | 27.22063 | 114 | 0.552137 | false |
UCRoboticsLab/BaxterTictactoe | src/baxter_tools/src/baxter_tools/__init__.py | 3 | 1588 | # Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .smoketests import SmokeTest
| apache-2.0 | 2,671,328,835,609,972,700 | 55.714286 | 77 | 0.785894 | false |
geodynamics/gale | boost/libs/python/test/virtual_functions.py | 46 | 1803 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from virtual_functions_ext import *
>>> class C1(concrete):
... def f(self, y):
... return concrete.f(self, Y(-y.value()))
>>> class C2(concrete):
... pass
>>> class A1(abstract):
... def f(self, y):
... return y.value() * 2
... def g(self, y):
... return self
>>> class A2(abstract):
... pass
>>> y1 = Y(16)
>>> y2 = Y(17)
#
# Test abstract with f,g overridden
#
>>> a1 = A1(42)
>>> a1.value()
42
# Call f,g indirectly from C++
>>> a1.call_f(y1)
32
>>> assert type(a1.call_g(y1)) is abstract
# Call f directly from Python
>>> a1.f(y2)
34
#
# Test abstract with f not overridden
#
>>> a2 = A2(42)
>>> a2.value()
42
# Call f indirectly from C++
>>> try: a2.call_f(y1)
... except AttributeError: pass
... else: print 'no exception'
# Call f directly from Python
>>> try: a2.call_f(y2)
... except AttributeError: pass
... else: print 'no exception'
############# Concrete Tests ############
#
# Test concrete with f overridden
#
>>> c1 = C1(42)
>>> c1.value()
42
# Call f indirectly from C++
>>> c1.call_f(y1)
-16
# Call f directly from Python
>>> c1.f(y2)
-17
#
# Test concrete with f not overridden
#
>>> c2 = C2(42)
>>> c2.value()
42
# Call f indirectly from C++
>>> c2.call_f(y1)
16
# Call f directly from Python
>>> c2.f(y2)
17
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
| gpl-2.0 | 5,288,668,452,777,348,000 | 15.390909 | 71 | 0.582363 | false |
mozvip/CouchPotatoServer | libs/suds/sax/date.py | 160 | 10456 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Nathan Van Gheem ([email protected])
"""
The I{xdate} module provides classes for converstion
between XML dates and python objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
import time
import datetime as dt
import re
log = getLogger(__name__)
class Date:
"""
An XML date object.
Supported formats:
- YYYY-MM-DD
- YYYY-MM-DD(z|Z)
- YYYY-MM-DD+06:00
- YYYY-MM-DD-06:00
@ivar date: The object value.
@type date: B{datetime}.I{date}
"""
def __init__(self, date):
"""
@param date: The value of the object.
@type date: (date|str)
@raise ValueError: When I{date} is invalid.
"""
if isinstance(date, dt.date):
self.date = date
return
if isinstance(date, basestring):
self.date = self.__parse(date)
return
raise ValueError, type(date)
def year(self):
"""
Get the I{year} component.
@return: The year.
@rtype: int
"""
return self.date.year
def month(self):
"""
Get the I{month} component.
@return: The month.
@rtype: int
"""
return self.date.month
def day(self):
"""
Get the I{day} component.
@return: The day.
@rtype: int
"""
return self.date.day
def __parse(self, s):
"""
Parse the string date.
Supported formats:
- YYYY-MM-DD
- YYYY-MM-DD(z|Z)
- YYYY-MM-DD+06:00
- YYYY-MM-DD-06:00
        The TZ, if present, is ignored because it is meaningless
        without an accompanying time component.
@param s: A date string.
@type s: str
@return: A date object.
@rtype: I{date}
"""
try:
year, month, day = s[:10].split('-', 2)
year = int(year)
month = int(month)
day = int(day)
return dt.date(year, month, day)
except:
            log.debug(s, exc_info=True)
raise ValueError, 'Invalid format "%s"' % s
def __str__(self):
return unicode(self)
def __unicode__(self):
return self.date.isoformat()
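# Example (illustrative only): parsing an XML date string; a TZ suffix, if
# present, is dropped:
#   d = Date('2013-06-17+06:00')
#   d.year(), d.month(), d.day()    # -> (2013, 6, 17)
#   str(d)                          # -> '2013-06-17'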
class Time:
"""
An XML time object.
Supported formats:
- HH:MI:SS
- HH:MI:SS(z|Z)
- HH:MI:SS.ms
- HH:MI:SS.ms(z|Z)
- HH:MI:SS(+|-)06:00
- HH:MI:SS.ms(+|-)06:00
@ivar tz: The timezone
@type tz: L{Timezone}
@ivar date: The object value.
@type date: B{datetime}.I{time}
"""
def __init__(self, time, adjusted=True):
"""
@param time: The value of the object.
@type time: (time|str)
@param adjusted: Adjust for I{local} Timezone.
@type adjusted: boolean
@raise ValueError: When I{time} is invalid.
"""
self.tz = Timezone()
if isinstance(time, dt.time):
self.time = time
return
if isinstance(time, basestring):
self.time = self.__parse(time)
if adjusted:
self.__adjust()
return
raise ValueError, type(time)
def hour(self):
"""
Get the I{hour} component.
@return: The hour.
@rtype: int
"""
return self.time.hour
def minute(self):
"""
Get the I{minute} component.
@return: The minute.
@rtype: int
"""
return self.time.minute
def second(self):
"""
Get the I{seconds} component.
@return: The seconds.
@rtype: int
"""
return self.time.second
def microsecond(self):
"""
Get the I{microsecond} component.
@return: The microsecond.
@rtype: int
"""
return self.time.microsecond
def __adjust(self):
"""
Adjust for TZ offset.
"""
if hasattr(self, 'offset'):
today = dt.date.today()
delta = self.tz.adjustment(self.offset)
d = dt.datetime.combine(today, self.time)
d = ( d + delta )
self.time = d.time()
def __parse(self, s):
"""
Parse the string date.
Patterns:
- HH:MI:SS
- HH:MI:SS(z|Z)
- HH:MI:SS.ms
- HH:MI:SS.ms(z|Z)
- HH:MI:SS(+|-)06:00
- HH:MI:SS.ms(+|-)06:00
@param s: A time string.
@type s: str
@return: A time object.
@rtype: B{datetime}.I{time}
"""
try:
offset = None
part = Timezone.split(s)
hour, minute, second = part[0].split(':', 2)
hour = int(hour)
minute = int(minute)
second, ms = self.__second(second)
if len(part) == 2:
self.offset = self.__offset(part[1])
if ms is None:
return dt.time(hour, minute, second)
else:
return dt.time(hour, minute, second, ms)
except:
            log.debug(s, exc_info=True)
raise ValueError, 'Invalid format "%s"' % s
def __second(self, s):
"""
Parse the seconds and microseconds.
The microseconds are truncated to 999999 due to a restriction in
the python datetime.datetime object.
@param s: A string representation of the seconds.
@type s: str
@return: Tuple of (sec,ms)
@rtype: tuple.
"""
part = s.split('.')
if len(part) > 1:
return (int(part[0]), int(part[1][:6]))
else:
return (int(part[0]), None)
def __offset(self, s):
"""
Parse the TZ offset.
@param s: A string representation of the TZ offset.
@type s: str
@return: The signed offset in hours.
@rtype: str
"""
if len(s) == len('-00:00'):
return int(s[:3])
if len(s) == 0:
return self.tz.local
if len(s) == 1:
return 0
raise Exception()
def __str__(self):
return unicode(self)
def __unicode__(self):
time = self.time.isoformat()
if self.tz.local:
return '%s%+.2d:00' % (time, self.tz.local)
else:
return '%sZ' % time
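# Example (illustrative): parsing an XML time without local-TZ adjustment.
# Note the fractional digits are taken literally as microseconds by __second:
#   t = Time('10:30:22.445+02:00', adjusted=False)
#   t.hour(), t.minute(), t.second(), t.microsecond()   # -> (10, 30, 22, 445)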
class DateTime(Date,Time):
"""
An XML time object.
Supported formats:
- YYYY-MM-DDB{T}HH:MI:SS
- YYYY-MM-DDB{T}HH:MI:SS(z|Z)
- YYYY-MM-DDB{T}HH:MI:SS.ms
- YYYY-MM-DDB{T}HH:MI:SS.ms(z|Z)
- YYYY-MM-DDB{T}HH:MI:SS(+|-)06:00
- YYYY-MM-DDB{T}HH:MI:SS.ms(+|-)06:00
@ivar datetime: The object value.
    @type datetime: B{datetime}.I{datetime}
"""
def __init__(self, date):
"""
@param date: The value of the object.
@type date: (datetime|str)
@raise ValueError: When I{tm} is invalid.
"""
if isinstance(date, dt.datetime):
Date.__init__(self, date.date())
Time.__init__(self, date.time())
self.datetime = \
dt.datetime.combine(self.date, self.time)
return
if isinstance(date, basestring):
part = date.split('T')
Date.__init__(self, part[0])
Time.__init__(self, part[1], 0)
self.datetime = \
dt.datetime.combine(self.date, self.time)
self.__adjust()
return
raise ValueError, type(date)
def __adjust(self):
"""
Adjust for TZ offset.
"""
if not hasattr(self, 'offset'):
return
delta = self.tz.adjustment(self.offset)
try:
d = ( self.datetime + delta )
self.datetime = d
self.date = d.date()
self.time = d.time()
except OverflowError:
log.warn('"%s" caused overflow, not-adjusted', self.datetime)
def __str__(self):
return unicode(self)
def __unicode__(self):
s = []
s.append(Date.__unicode__(self))
s.append(Time.__unicode__(self))
return 'T'.join(s)
class UTC(DateTime):
"""
Represents current UTC time.
"""
def __init__(self, date=None):
if date is None:
date = dt.datetime.utcnow()
DateTime.__init__(self, date)
self.tz.local = 0
class Timezone:
"""
Timezone object used to do TZ conversions
@cvar local: The (A) local TZ offset.
@type local: int
    @cvar pattern: The regex pattern to match TZ.
    @type pattern: re.Pattern
"""
pattern = re.compile('([zZ])|([\-\+][0-9]{2}:[0-9]{2})')
LOCAL = ( 0-time.timezone/60/60 )
def __init__(self, offset=None):
if offset is None:
offset = self.LOCAL
self.local = offset
@classmethod
def split(cls, s):
"""
Split the TZ from string.
@param s: A string containing a timezone
@type s: basestring
@return: The split parts.
@rtype: tuple
"""
m = cls.pattern.search(s)
if m is None:
return (s,)
x = m.start(0)
return (s[:x], s[x:])
def adjustment(self, offset):
"""
Get the adjustment to the I{local} TZ.
@return: The delta between I{offset} and local TZ.
@rtype: B{datetime}.I{timedelta}
"""
delta = ( self.local - offset )
return dt.timedelta(hours=delta)
| gpl-3.0 | -1,263,599,776,242,215,200 | 26.661376 | 76 | 0.509564 | false |
Jun1113/MapReduce-Example | contrib/hod/hodlib/RingMaster/ringMaster.py | 182 | 35563 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#!/usr/bin/env python
"""manages services and nodepool"""
# -*- python -*-
import os, sys, random, time, sets, shutil, threading
import urllib, urlparse, re, getpass, pprint, signal
from pprint import pformat
from HTMLParser import HTMLParser
binfile = sys.path[0]
libdir = os.path.dirname(binfile)
sys.path.append(libdir)
import hodlib.Common.logger
from hodlib.RingMaster.idleJobTracker import JobTrackerMonitor, HadoopJobStatus
from hodlib.Common.threads import func
from hodlib.Hod.nodePool import *
from hodlib.Common.util import *
from hodlib.Common.nodepoolutil import NodePoolUtil
from hodlib.Common.socketServers import hodXMLRPCServer
from hodlib.Common.socketServers import threadedHTTPServer
from hodlib.NodePools import *
from hodlib.NodePools.torque import *
from hodlib.GridServices import *
from hodlib.Common.descGenerator import *
from hodlib.Common.xmlrpc import hodXRClient
from hodlib.Common.miniHTMLParser import miniHTMLParser
from hodlib.Common.threads import simpleCommand
class ringMasterServer:
"""The RPC server that exposes all the master config
changes. Also, one of these RPC servers runs as a proxy
and all the hodring instances register with this proxy"""
instance = None
xmlrpc = None
def __init__(self, cfg, log, logMasterSources, retry=5):
try:
from hodlib.Common.socketServers import twistedXMLRPCServer
ringMasterServer.xmlrpc = twistedXMLRPCServer("",
cfg['ringmaster']['xrs-port-range'])
except ImportError:
log.info("Twisted interface not found. Using hodXMLRPCServer.")
ringMasterServer.xmlrpc = hodXMLRPCServer("",
cfg['ringmaster']['xrs-port-range'])
ringMasterServer.xmlrpc.register_instance(logMasterSources)
self.logMasterSources = logMasterSources
ringMasterServer.xmlrpc.serve_forever()
while not ringMasterServer.xmlrpc.is_alive():
time.sleep(.5)
log.debug('Ringmaster RPC Server at %d' %
ringMasterServer.xmlrpc.server_address[1])
def startService(ss, cfg, np, log, rm):
logMasterSources = _LogMasterSources(ss, cfg, np, log, rm)
ringMasterServer.instance = ringMasterServer(cfg, log, logMasterSources)
def stopService():
ringMasterServer.xmlrpc.stop()
def getPort():
return ringMasterServer.instance.port
def getAddress():
return 'http://%s:%d/' % (socket.gethostname(),
ringMasterServer.xmlrpc.server_address[1])
startService = staticmethod(startService)
stopService = staticmethod(stopService)
getPort = staticmethod(getPort)
getAddress = staticmethod(getAddress)
class _LogMasterSources:
"""All the methods that are run by the RPC server are
added into this class """
def __init__(self, serviceDict, cfg, np, log, rm):
self.serviceDict = serviceDict
self.tarSource = []
self.tarSourceLock = threading.Lock()
self.dict = {}
self.count = {}
self.logsourceList = []
self.logsourceListLock = threading.Lock()
self.masterParam = []
self.masterParamLock = threading.Lock()
self.verify = 'none'
self.cmdLock = threading.Lock()
self.cfg = cfg
self.log = log
self.np = np
self.rm = rm
self.hdfsHost = None
self.mapredHost = None
self.maxconnect = self.cfg['ringmaster']['max-connect']
self.log.debug("Using max-connect value %s"%self.maxconnect)
def registerTarSource(self, hostname, url, addr=None):
self.log.debug("registering: " + url)
lock = self.tarSourceLock
lock.acquire()
self.dict[url] = url
self.count[url] = 0
# addr is None when ringMaster himself invokes this method
if addr:
c = self.count[addr]
self.count[addr] = c - 1
lock.release()
if addr:
str = "%s is done" % (addr)
self.log.debug(str)
return url
  def getTarList(self, hodring):
    """Return the least-loaded registered tarball URL for the given hodring,
    respecting the per-source max-connect limit."""
lock = self.tarSourceLock
lock.acquire()
leastkey = None
leastval = -1
for k, v in self.count.iteritems():
if (leastval == -1):
leastval = v
pass
if (v <= leastval and v < self.maxconnect):
leastkey = k
leastval = v
if (leastkey == None):
url = 'none'
else:
url = self.dict[leastkey]
self.count[leastkey] = leastval + 1
self.log.debug("%s %d" % (leastkey, self.count[leastkey]))
lock.release()
    self.log.debug('sending url ' + url + " to " + hodring)
return url
def tarDone(self, uri):
str = "%s is done" % (uri)
self.log.debug(str)
lock = self.tarSourceLock
lock.acquire()
c = self.count[uri]
self.count[uri] = c - 1
lock.release()
return uri
def status(self):
return True
# FIXME: this code is broken, it relies on a central service registry
#
# def clusterStart(self, changedClusterParams=[]):
# self.log.debug("clusterStart method invoked.")
# self.dict = {}
# self.count = {}
# try:
# if (len(changedClusterParams) > 0):
# self.log.debug("Updating config.")
# for param in changedClusterParams:
# (key, sep1, val) = param.partition('=')
# (i1, sep2, i2) = key.partition('.')
# try:
# prev = self.cfg[i1][i2]
# self.rm.cfg[i1][i2] = val
# self.cfg[i1][i2] = val
# self.log.debug("\nModified [%s][%s]=%s to [%s][%s]=%s" % (i1, i2, prev, i1, i2, val))
# except KeyError, e:
# self.log.info("Skipping %s as no such config parameter found in ringmaster" % param)
# self.log.debug("Regenerating Service Description.")
# dGen = DescGenerator(self.rm.cfg)
# self.rm.cfg['servicedesc'] = dGen.createServiceDescDict()
# self.cfg['servicedesc'] = self.rm.cfg['servicedesc']
#
# self.rm.tar = None
# if self.rm.cfg['ringmaster'].has_key('hadoop-tar-ball'):
# self.rm.download = True
# self.rm.tar = self.rm.cfg['ringmaster']['hadoop-tar-ball']
# self.log.debug("self.rm.tar=%s" % self.rm.tar)
#
# self.rm.cd_to_tempdir()
#
# self.rm.tarAddress = None
# hostname = socket.gethostname()
# if (self.rm.download):
# self.rm.basename = os.path.basename(self.rm.tar)
# dest = os.path.join(os.getcwd(), self.rm.basename)
# src = self.rm.tar
# self.log.debug("cp %s -> %s" % (src, dest))
# shutil.copy(src, dest)
# self.rm.tarAddress = "%s%s" % (self.rm.httpAddress, self.rm.basename)
# self.registerTarSource(hostname, self.rm.tarAddress)
# self.log.debug("Registered new tarAddress %s" % self.rm.tarAddress)
# else:
# self.log.debug("Download not set.")
#
# if (self.rm.tar != None):
# self.cfg['hodring']['download-addr'] = self.rm.tarAddress
# self.rm.cfg['hodring']['download-addr'] = self.rm.tarAddress
#
# sdl = self.rm.cfg['servicedesc']
# workDirs = self.rm.getWorkDirs(self.rm.cfg, True)
# hdfsDesc = sdl['hdfs']
# hdfs = None
# if hdfsDesc.isExternal():
# hdfs = HdfsExternal(hdfsDesc, workDirs)
# else:
# hdfs = Hdfs(hdfsDesc, workDirs, 0, False, True)
#
# self.rm.serviceDict[hdfs.getName()] = hdfs
# mrDesc = sdl['mapred']
# mr = None
# if mrDesc.isExternal():
# mr = MapReduceExternal(mrDesc, workDirs)
# else:
# mr = MapReduce(mrDesc, workDirs, 1)
# self.rm.serviceDict[mr.getName()] = mr
#
# ringList = self.rm.serviceClient.getServiceInfo(self.cfg['hodring']['userid'],
# self.np.getServiceId(), 'hodring', 'hod')
#
# slaveList = ringList
# hdfsringXRAddress = None
# # Start HDFS Master - Step 1
# if not hdfsDesc.isExternal():
# masterFound = False
# for ring in ringList:
# ringXRAddress = ring['xrs']
# if ringXRAddress == None:
# raise Exception("Could not get hodring XML-RPC server address.")
# if (ringXRAddress.find(self.hdfsHost) != -1):
# ringClient = hodXRClient(ringXRAddress, None, None, 0, 0, 0, False, 0)
# hdfsringXRAddress = ringXRAddress
# self.log.debug("Invoking clusterStart on " + ringXRAddress + " (HDFS Master)")
# ringClient.clusterStart()
# masterFound = True
# slaveList.remove(ring)
# break
# if not masterFound:
# raise Exception("HDFS Master host not found")
# while hdfs.getInfoAddrs() == None:
# self.log.debug("Waiting for HDFS Master (Name Node) to register dfs.info.port")
# time.sleep(1)
#
# # Start MAPRED Master - Step 2
# if not mrDesc.isExternal():
# masterFound = False
# for ring in ringList:
# ringXRAddress = ring['xrs']
# if ringXRAddress == None:
# raise Exception("Could not get hodring XML-RPC server address.")
# if (not mrDesc.isExternal() and ringXRAddress.find(self.mapredHost) != -1):
# ringClient = hodXRClient(ringXRAddress, None, None, 0, 0, 0, False, 0)
# self.log.debug("Invoking clusterStart on " + ringXRAddress + " (MAPRED Master)")
# ringClient.clusterStart()
# masterFound = True
# slaveList.remove(ring)
# break
# if not masterFound:
  #         raise Exception("MAPRED Master host not found")
# while mr.getInfoAddrs() == None:
# self.log.debug("Waiting for MAPRED Master (Job Tracker) to register \
# mapred.job.tracker.info.port")
# time.sleep(1)
#
# # Start Slaves - Step 3
# for ring in slaveList:
# ringXRAddress = ring['xrs']
# if ringXRAddress == None:
# raise Exception("Could not get hodring XML-RPC server address.")
# ringClient = hodXRClient(ringXRAddress, None, None, 0, 0, 0, False, 0)
# self.log.debug("Invoking clusterStart on " + ringXRAddress + " (Slaves)")
# ringThread = func(name='hodring_slaves_start', functionRef=ringClient.clusterStart())
# ring['thread'] = ringThread
# ringThread.start()
#
# for ring in slaveList:
# ringThread = ring['thread']
# if ringThread == None:
# raise Exception("Could not get hodring thread (Slave).")
# ringThread.join()
# self.log.debug("Completed clusterStart on " + ring['xrs'] + " (Slave)")
#
# # Run Admin Commands on HDFS Master - Step 4
# if not hdfsDesc.isExternal():
# if hdfsringXRAddress == None:
# raise Exception("HDFS Master host not found (to Run Admin Commands)")
# ringClient = hodXRClient(hdfsringXRAddress, None, None, 0, 0, 0, False, 0)
# self.log.debug("Invoking clusterStart(False) - Admin on "
# + hdfsringXRAddress + " (HDFS Master)")
# ringClient.clusterStart(False)
#
# except:
# self.log.debug(get_exception_string())
# return False
#
# self.log.debug("Successfully started cluster.")
# return True
#
# def clusterStop(self):
# self.log.debug("clusterStop method invoked.")
# try:
# hdfsAddr = self.getServiceAddr('hdfs')
# if hdfsAddr.find(':') != -1:
# h, p = hdfsAddr.split(':', 1)
# self.hdfsHost = h
# self.log.debug("hdfsHost: " + self.hdfsHost)
# mapredAddr = self.getServiceAddr('mapred')
# if mapredAddr.find(':') != -1:
# h, p = mapredAddr.split(':', 1)
# self.mapredHost = h
# self.log.debug("mapredHost: " + self.mapredHost)
# ringList = self.rm.serviceClient.getServiceInfo(self.cfg['hodring']['userid'],
# self.np.getServiceId(),
# 'hodring', 'hod')
# for ring in ringList:
# ringXRAddress = ring['xrs']
# if ringXRAddress == None:
# raise Exception("Could not get hodring XML-RPC server address.")
# ringClient = hodXRClient(ringXRAddress, None, None, 0, 0, 0, False)
# self.log.debug("Invoking clusterStop on " + ringXRAddress)
# ringThread = func(name='hodring_stop', functionRef=ringClient.clusterStop())
# ring['thread'] = ringThread
# ringThread.start()
#
# for ring in ringList:
# ringThread = ring['thread']
# if ringThread == None:
# raise Exception("Could not get hodring thread.")
# ringThread.join()
# self.log.debug("Completed clusterStop on " + ring['xrs'])
#
# except:
# self.log.debug(get_exception_string())
# return False
#
# self.log.debug("Successfully stopped cluster.")
#
# return True
def getCommand(self, addr):
"""This method is called by the
hodrings to get commands from
the ringmaster"""
lock = self.cmdLock
cmdList = []
lock.acquire()
try:
try:
for v in self.serviceDict.itervalues():
if (not v.isExternal()):
if v.isLaunchable(self.serviceDict):
# If a master is still not launched, or the number of
# retries for launching master is not reached,
# launch master
if not v.isMasterLaunched() and \
(v.getMasterFailureCount() <= \
self.cfg['ringmaster']['max-master-failures']):
cmdList = v.getMasterCommands(self.serviceDict)
v.setlaunchedMaster()
v.setMasterAddress(addr)
break
if cmdList == []:
for s in self.serviceDict.itervalues():
            if (not s.isExternal()):
if s.isMasterInitialized():
cl = s.getWorkerCommands(self.serviceDict)
cmdList.extend(cl)
else:
cmdList = []
break
except:
self.log.debug(get_exception_string())
finally:
lock.release()
pass
cmd = addr + pformat(cmdList)
self.log.debug("getCommand returning " + cmd)
return cmdList
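  # Dispatch order implemented by getCommand() above: each internal service's
  # master command set is handed out first (retries bounded by
  # ringmaster.max-master-failures); worker commands are released only after
  # that master reports itself initialized via addMasterParams().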
def getAdminCommand(self, addr):
"""This method is called by the
hodrings to get admin commands from
the ringmaster"""
lock = self.cmdLock
cmdList = []
lock.acquire()
try:
try:
for v in self.serviceDict.itervalues():
cmdList = v.getAdminCommands(self.serviceDict)
if cmdList != []:
break
except Exception, e:
self.log.debug(get_exception_string())
finally:
lock.release()
pass
cmd = addr + pformat(cmdList)
self.log.debug("getAdminCommand returning " + cmd)
return cmdList
def addMasterParams(self, addr, vals):
"""This method is called by
hodring to update any parameters
its changed for the commands it was
running"""
self.log.debug('Comment: adding master params from %s' % addr)
self.log.debug(pformat(vals))
lock = self.masterParamLock
lock.acquire()
try:
for v in self.serviceDict.itervalues():
if v.isMasterLaunched():
if (v.getMasterAddress() == addr):
v.setMasterParams(vals)
v.setMasterInitialized()
except:
self.log.debug(get_exception_string())
pass
lock.release()
return addr
def setHodRingErrors(self, addr, errors):
"""This method is called by the hodrings to update errors
it encountered while starting up"""
self.log.critical("Hodring at %s failed with following errors:\n%s" \
% (addr, errors))
lock = self.masterParamLock
lock.acquire()
try:
for v in self.serviceDict.itervalues():
if v.isMasterLaunched():
if (v.getMasterAddress() == addr):
# strip the PID part.
idx = addr.rfind('_')
            if idx != -1:
addr = addr[:idx]
v.setMasterFailed("Hodring at %s failed with following" \
" errors:\n%s" % (addr, errors))
except:
self.log.debug(get_exception_string())
pass
lock.release()
return True
def getKeys(self):
lock= self.masterParamLock
lock.acquire()
keys = self.serviceDict.keys()
lock.release()
return keys
def getServiceAddr(self, name):
addr = 'not found'
self.log.debug("getServiceAddr name: %s" % name)
lock= self.masterParamLock
lock.acquire()
try:
service = self.serviceDict[name]
except KeyError:
pass
else:
self.log.debug("getServiceAddr service: %s" % service)
# Check if we should give up ! If the limit on max failures is hit,
# give up.
err = service.getMasterFailed()
if (err is not None) and \
(service.getMasterFailureCount() > \
self.cfg['ringmaster']['max-master-failures']):
self.log.critical("Detected errors (%s) beyond allowed number"\
" of failures (%s). Flagging error to client" \
% (service.getMasterFailureCount(), \
self.cfg['ringmaster']['max-master-failures']))
addr = "Error: " + err
elif (service.isMasterInitialized()):
addr = service.getMasterAddrs()[0]
else:
addr = 'not found'
lock.release()
self.log.debug("getServiceAddr addr %s: %s" % (name, addr))
return addr
def getURLs(self, name):
addr = 'none'
lock = self.masterParamLock
lock.acquire()
try:
service = self.serviceDict[name]
except KeyError:
pass
else:
if (service.isMasterInitialized()):
addr = service.getInfoAddrs()[0]
lock.release()
return addr
def stopRM(self):
"""An XMLRPC call which will spawn a thread to stop the Ringmaster program."""
# We spawn a thread here because we want the XMLRPC call to return. Calling
# stop directly from here will also stop the XMLRPC server.
try:
self.log.debug("inside xml-rpc call to stop ringmaster")
rmStopperThread = func('RMStopper', self.rm.stop)
rmStopperThread.start()
self.log.debug("returning from xml-rpc call to stop ringmaster")
return True
except:
self.log.debug("Exception in stop: %s" % get_exception_string())
return False
class RingMaster:
def __init__(self, cfg, log, **kwds):
"""starts nodepool and services"""
self.download = False
self.httpServer = None
self.cfg = cfg
self.log = log
self.__hostname = local_fqdn()
self.workDirs = None
# ref to the idle job tracker object.
self.__jtMonitor = None
self.__idlenessDetected = False
self.__stopInProgress = False
self.__isStopped = False # to let main exit
self.__exitCode = 0 # exit code with which the ringmaster main method should return
self.workers_per_ring = self.cfg['ringmaster']['workers_per_ring']
self.__initialize_signal_handlers()
sdd = self.cfg['servicedesc']
gsvc = None
for key in sdd:
gsvc = sdd[key]
break
npd = self.cfg['nodepooldesc']
self.np = NodePoolUtil.getNodePool(npd, cfg, log)
self.log.debug("Getting service ID.")
self.serviceId = self.np.getServiceId()
self.log.debug("Got service ID: %s" % self.serviceId)
self.tarSrcLoc = None
if self.cfg['ringmaster'].has_key('hadoop-tar-ball'):
self.download = True
self.tarSrcLoc = self.cfg['ringmaster']['hadoop-tar-ball']
self.cd_to_tempdir()
if (self.download):
self.__copy_tarball(os.getcwd())
self.basename = self.__find_tarball_in_dir(os.getcwd())
if self.basename is None:
raise Exception('Did not find tarball copied from %s in %s.'
% (self.tarSrcLoc, os.getcwd()))
self.serviceAddr = to_http_url(self.cfg['ringmaster']['svcrgy-addr'])
self.log.debug("Service registry @ %s" % self.serviceAddr)
self.serviceClient = hodXRClient(self.serviceAddr)
self.serviceDict = {}
try:
sdl = self.cfg['servicedesc']
workDirs = self.getWorkDirs(cfg)
hdfsDesc = sdl['hdfs']
hdfs = None
# Determine hadoop Version
hadoopVers = hadoopVersion(self.__getHadoopDir(), \
self.cfg['hodring']['java-home'], self.log)
if (hadoopVers['major']==None) or (hadoopVers['minor']==None):
        raise Exception('Could not retrieve the version of Hadoop.'
+ ' Check the Hadoop installation or the value of the hodring.java-home variable.')
if hdfsDesc.isExternal():
hdfs = HdfsExternal(hdfsDesc, workDirs, version=int(hadoopVers['minor']))
hdfs.setMasterParams( self.cfg['gridservice-hdfs'] )
else:
hdfs = Hdfs(hdfsDesc, workDirs, 0, version=int(hadoopVers['minor']),
workers_per_ring = self.workers_per_ring)
self.serviceDict[hdfs.getName()] = hdfs
mrDesc = sdl['mapred']
mr = None
if mrDesc.isExternal():
mr = MapReduceExternal(mrDesc, workDirs, version=int(hadoopVers['minor']))
mr.setMasterParams( self.cfg['gridservice-mapred'] )
else:
mr = MapReduce(mrDesc, workDirs,1, version=int(hadoopVers['minor']),
workers_per_ring = self.workers_per_ring)
self.serviceDict[mr.getName()] = mr
except:
self.log.critical("Exception in creating Hdfs and Map/Reduce descriptor objects: \
%s." % get_exception_error_string())
self.log.debug(get_exception_string())
raise
# should not be starting these in a constructor
ringMasterServer.startService(self.serviceDict, cfg, self.np, log, self)
self.rpcserver = ringMasterServer.getAddress()
self.httpAddress = None
self.tarAddress = None
hostname = socket.gethostname()
if (self.download):
self.httpServer = threadedHTTPServer(hostname,
self.cfg['ringmaster']['http-port-range'])
self.httpServer.serve_forever()
self.httpAddress = "http://%s:%d/" % (self.httpServer.server_address[0],
self.httpServer.server_address[1])
self.tarAddress = "%s%s" % (self.httpAddress, self.basename)
ringMasterServer.instance.logMasterSources.registerTarSource(hostname,
self.tarAddress)
else:
self.log.debug("Download not set.")
self.log.debug("%s %s %s %s %s" % (self.cfg['ringmaster']['userid'],
self.serviceId, self.__hostname, 'ringmaster', 'hod'))
if self.cfg['ringmaster']['register']:
if self.httpAddress:
self.serviceClient.registerService(self.cfg['ringmaster']['userid'],
self.serviceId, self.__hostname, 'ringmaster', 'hod', {
'xrs' : self.rpcserver, 'http' : self.httpAddress })
else:
self.serviceClient.registerService(self.cfg['ringmaster']['userid'],
self.serviceId, self.__hostname, 'ringmaster', 'hod', {
'xrs' : self.rpcserver, })
self.log.debug("Registered with serivce registry: %s." % self.serviceAddr)
hodRingPath = os.path.join(cfg['ringmaster']['base-dir'], 'bin', 'hodring')
hodRingWorkDir = os.path.join(cfg['hodring']['temp-dir'], 'hodring' + '_'
+ getpass.getuser())
self.cfg['hodring']['hodring'] = [hodRingWorkDir,]
self.cfg['hodring']['svcrgy-addr'] = self.cfg['ringmaster']['svcrgy-addr']
self.cfg['hodring']['service-id'] = self.np.getServiceId()
self.cfg['hodring']['ringmaster-xrs-addr'] = self.__url_to_addr(self.rpcserver)
if (self.tarSrcLoc != None):
cfg['hodring']['download-addr'] = self.tarAddress
self.__init_job_tracker_monitor(ringMasterServer.instance.logMasterSources)
def __init_job_tracker_monitor(self, logMasterSources):
hadoopDir = self.__getHadoopDir()
self.log.debug('hadoopdir=%s, java-home=%s' % \
(hadoopDir, self.cfg['hodring']['java-home']))
try:
self.__jtMonitor = JobTrackerMonitor(self.log, self,
self.cfg['ringmaster']['jt-poll-interval'],
self.cfg['ringmaster']['idleness-limit'],
hadoopDir, self.cfg['hodring']['java-home'],
logMasterSources)
self.log.debug('starting jt monitor')
self.__jtMonitor.start()
except:
self.log.critical('Exception in running idle job tracker. This cluster cannot be deallocated if idle.\
Exception message: %s' % get_exception_error_string())
self.log.debug('Exception details: %s' % get_exception_string())
def __getHadoopDir(self):
hadoopDir = None
if self.cfg['ringmaster'].has_key('hadoop-tar-ball'):
tarFile = os.path.join(os.getcwd(), self.basename)
ret = untar(tarFile, os.getcwd())
if not ret:
raise Exception('Untarring tarfile %s to directory %s failed. Cannot find hadoop directory.' \
% (tarFile, os.getcwd()))
hadoopDir = os.path.join(os.getcwd(), self.__get_dir(tarFile))
else:
hadoopDir = self.cfg['gridservice-mapred']['pkgs']
self.log.debug('Returning Hadoop directory as: %s' % hadoopDir)
return hadoopDir
def __get_dir(self, name):
"""Return the root directory inside the tarball
specified by name. Assumes that the tarball begins
with a root directory."""
import tarfile
myTarFile = tarfile.open(name)
hadoopPackage = myTarFile.getnames()[0]
self.log.debug("tarball name : %s hadoop package name : %s" %(name,hadoopPackage))
return hadoopPackage
def __find_tarball_in_dir(self, dir):
"""Find the tarball among files specified in the given
directory. We need this method because how the tarball
source URI is given depends on the method of copy and
we can't get the tarball name from that.
This method will fail if there are multiple tarballs
in the directory with the same suffix."""
files = os.listdir(dir)
for file in files:
if self.tarSrcLoc.endswith(file):
return file
return None
def __copy_tarball(self, destDir):
"""Copy the hadoop tar ball from a remote location to the
specified destination directory. Based on the URL it executes
an appropriate copy command. Throws an exception if the command
returns a non-zero exit code."""
# for backwards compatibility, treat the default case as file://
url = ''
if self.tarSrcLoc.startswith('/'):
url = 'file:/'
src = '%s%s' % (url, self.tarSrcLoc)
if src.startswith('file://'):
src = src[len('file://')-1:]
cpCmd = '/bin/cp'
cmd = '%s %s %s' % (cpCmd, src, destDir)
self.log.debug('Command to execute: %s' % cmd)
copyProc = simpleCommand('remote copy', cmd)
copyProc.start()
copyProc.wait()
copyProc.join()
ret = copyProc.exit_code()
self.log.debug('Completed command execution. Exit Code: %s.' % ret)
if ret != 0:
output = copyProc.output()
raise Exception('Could not copy tarball using command %s. Exit code: %s. Output: %s'
% (cmd, ret, output))
else:
raise Exception('Unsupported URL for file: %s' % src)
# input: http://hostname:port/. output: [hostname,port]
def __url_to_addr(self, url):
addr = url.rstrip('/')
if addr.startswith('http://'):
addr = addr.replace('http://', '', 1)
addr_parts = addr.split(':')
return [addr_parts[0], int(addr_parts[1])]
def __initialize_signal_handlers(self):
def sigStop(sigNum, handler):
sig_wrapper(sigNum, self.stop)
signal.signal(signal.SIGTERM, sigStop)
signal.signal(signal.SIGINT, sigStop)
signal.signal(signal.SIGQUIT, sigStop)
def __clean_up(self):
tempDir = self.__get_tempdir()
os.chdir(os.path.split(tempDir)[0])
if os.path.exists(tempDir):
shutil.rmtree(tempDir, True)
self.log.debug("Cleaned up temporary dir: %s" % tempDir)
def __get_tempdir(self):
dir = os.path.join(self.cfg['ringmaster']['temp-dir'],
"%s.%s.ringmaster" % (self.cfg['ringmaster']['userid'],
self.np.getServiceId()))
return dir
def getWorkDirs(self, cfg, reUse=False):
if (not reUse) or (self.workDirs == None):
import math
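      # The loop below scales a random float in [0, 1) by 100 until its
      # fractional part vanishes, yielding an integer used to keep the
      # per-allocation work-dir name unique.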
frand = random.random()
while math.ceil(frand) != math.floor(frand):
frand = frand * 100
irand = int(frand)
uniq = '%s-%d-%s' % (socket.gethostname(), os.getpid(), irand)
dirs = []
parentDirs = cfg['ringmaster']['work-dirs']
for p in parentDirs:
dir = os.path.join(p, uniq)
dirs.append(dir)
self.workDirs = dirs
return self.workDirs
def _fetchLink(self, link, parentDir):
parser = miniHTMLParser()
self.log.debug("Checking link %s" %link)
while link:
# Get the file from the site and link
input = urllib.urlopen(link)
out = None
contentType = input.info().gettype()
isHtml = contentType == 'text/html'
#print contentType
if isHtml:
parser.setBaseUrl(input.geturl())
else:
parsed = urlparse.urlparse(link)
hp = parsed[1]
h = hp
p = None
if hp.find(':') != -1:
h, p = hp.split(':', 1)
path = parsed[2]
path = path.split('/')
file = os.path.join(parentDir, h, p)
for c in path:
if c == '':
continue
file = os.path.join(file, c)
try:
self.log.debug('Creating %s' % file)
dir, tail = os.path.split(file)
if not os.path.exists(dir):
os.makedirs(dir)
except:
self.log.debug(get_exception_string())
out = open(file, 'w')
bufSz = 8192
buf = input.read(bufSz)
while len(buf) > 0:
if isHtml:
# Feed the file into the HTML parser
parser.feed(buf)
if out:
out.write(buf)
buf = input.read(bufSz)
input.close()
if out:
out.close()
# Search the retfile here
# Get the next link in level traversal order
link = parser.getNextLink()
parser.close()
def _finalize(self):
try:
# FIXME: get dir from config
dir = 'HOD-log-P%d' % (os.getpid())
dir = os.path.join('.', dir)
except:
self.log.debug(get_exception_string())
self.np.finalize()
def handleIdleJobTracker(self):
self.log.critical("Detected idle job tracker for %s seconds. The allocation will be cleaned up." \
% self.cfg['ringmaster']['idleness-limit'])
self.__idlenessDetected = True
def cd_to_tempdir(self):
dir = self.__get_tempdir()
if not os.path.exists(dir):
os.makedirs(dir)
os.chdir(dir)
return dir
def getWorkload(self):
return self.workload
def getHostName(self):
return self.__hostname
def start(self):
"""run the thread main loop"""
self.log.debug("Entered start method.")
hodring = os.path.join(self.cfg['ringmaster']['base-dir'],
'bin', 'hodring')
largs = [hodring]
targs = self.cfg.get_args(section='hodring')
largs.extend(targs)
hodringCmd = ""
for item in largs:
hodringCmd = "%s%s " % (hodringCmd, item)
self.log.debug(hodringCmd)
if self.np.runWorkers(largs) > 0:
self.log.critical("Failed to start worker.")
self.log.debug("Returned from runWorkers.")
self._finalize()
def __findExitCode(self):
"""Determine the exit code based on the status of the cluster or jobs run on them"""
xmlrpcServer = ringMasterServer.instance.logMasterSources
if xmlrpcServer.getServiceAddr('hdfs') == 'not found' or \
xmlrpcServer.getServiceAddr('hdfs').startswith("Error: "):
self.__exitCode = 7
elif xmlrpcServer.getServiceAddr('mapred') == 'not found' or \
xmlrpcServer.getServiceAddr('mapred').startswith("Error: "):
self.__exitCode = 8
else:
clusterStatus = get_cluster_status(xmlrpcServer.getServiceAddr('hdfs'),
xmlrpcServer.getServiceAddr('mapred'))
if clusterStatus != 0:
self.__exitCode = clusterStatus
else:
self.__exitCode = self.__findHadoopJobsExitCode()
self.log.debug('exit code %s' % self.__exitCode)
def __findHadoopJobsExitCode(self):
"""Determine the consolidate exit code of hadoop jobs run on this cluster, provided
this information is available. Return 0 otherwise"""
ret = 0
failureStatus = 3
failureCount = 0
if self.__jtMonitor:
jobStatusList = self.__jtMonitor.getJobsStatus()
try:
if len(jobStatusList) > 0:
for jobStatus in jobStatusList:
self.log.debug('job status for %s: %s' % (jobStatus.getJobId(),
jobStatus.getStatus()))
if jobStatus.getStatus() == failureStatus:
failureCount = failureCount+1
if failureCount > 0:
if failureCount == len(jobStatusList): # all jobs failed
ret = 16
else:
ret = 17
except:
        self.log.debug('exception in finding hadoop jobs exit code: %s' % get_exception_string())
return ret
def stop(self):
self.log.debug("RingMaster stop method invoked.")
if self.__stopInProgress or self.__isStopped:
return
self.__stopInProgress = True
if ringMasterServer.instance is not None:
self.log.debug('finding exit code')
self.__findExitCode()
self.log.debug('stopping ringmaster instance')
ringMasterServer.stopService()
else:
self.__exitCode = 6
if self.__jtMonitor is not None:
self.__jtMonitor.stop()
if self.httpServer:
self.httpServer.stop()
self.__clean_up()
self.__isStopped = True
def shouldStop(self):
"""Indicates whether the main loop should exit, either due to idleness condition,
or a stop signal was received"""
return self.__idlenessDetected or self.__isStopped
def getExitCode(self):
"""return the exit code of the program"""
return self.__exitCode
def main(cfg,log):
try:
rm = None
dGen = DescGenerator(cfg)
cfg = dGen.initializeDesc()
rm = RingMaster(cfg, log)
rm.start()
while not rm.shouldStop():
time.sleep(1)
rm.stop()
log.debug('returning from main')
return rm.getExitCode()
except Exception, e:
if log:
log.critical(get_exception_string())
raise Exception(e)
| apache-2.0 | -1,769,660,916,769,620,200 | 33.899902 | 108 | 0.606192 | false |
syndbg/ubuntu-make | umake/interactions/__init__.py | 1 | 6238 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# module gather different types of interactions with the UI
from gettext import gettext as _
import logging
from umake.tools import InputError
logger = logging.getLogger(__name__)
class Choice:
def __init__(self, id, label, callback_fn, txt_shorcut=None, is_default=False):
"""Choice element containing label and callback function"""
self.id = id
self.label = label
self.txt_shorcut = txt_shorcut
self.callback_fn = callback_fn
self.is_default = is_default
class TextWithChoices:
def __init__(self, content, choices=[], newline_before_option=False):
"""Content text with a list of multiple Choice elements"""
current_ids = []
default_found = False
for choice in choices:
if choice.id in current_ids:
message = "{} choice id is already in registered ids. Can't instantiate this " \
"interaction".format(choice.id)
logger.error(message)
raise BaseException(message)
current_ids.append(choice.id)
if choice.is_default:
if default_found:
message = "One default was already registered, can't register a second one in that choices set: {}"\
.format([choice.label for choice in choices])
logger.error(message)
raise BaseException(message)
default_found = True
self.content = content
self.choices = choices
self.newline_before_option = newline_before_option
def choose(self, choice_id=None, answer=None):
"""Return associated callback for choice"""
for choice in self.choices:
if (choice_id is not None and choice.id == choice_id) or\
(answer is not None and (choice.label.lower() == answer.lower() or
(choice.txt_shorcut is not None and
choice.txt_shorcut.lower() == answer.lower()))):
return choice.callback_fn()
msg = _("No suitable answer provided")
if choice_id is not None:
msg = _("Your entry '{}' isn't an acceptable choice. choices are: {}")\
.format(choice_id, [choice.id for choice in self.choices])
if answer is not None:
msg = _("Your entry '{}' isn't an acceptable choice. choices are: {} and {}")\
.format(answer, [choice.txt_shorcut for choice in self.choices if choice.txt_shorcut is not None],
[choice.label for choice in self.choices])
if not choice_id and not answer:
for choice in self.choices:
if choice.is_default:
return choice.callback_fn()
logger.error(msg)
raise InputError(msg)
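    # Resolution sketch (callbacks and labels here are hypothetical):
    #   q = TextWithChoices("Install?", [
    #           Choice(0, "Yes", on_yes, txt_shorcut="y"),
    #           Choice(1, "No", on_no, is_default=True)])
    #   q.choose(answer="y")    # runs on_yes (case-insensitive shortcut match)
    #   q.choose()              # runs on_no (falls back to the default choice)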
@property
def prompt(self):
"""Text prompt handling if we do have some shortcuts"""
possible_answers = []
for choice in self.choices:
answer = choice.label
if choice.txt_shorcut:
# NOTE: sum of answers
answer += _(" ({})").format((choice.txt_shorcut))
possible_answers.append(answer)
if self.newline_before_option:
# NOTE: first is prompt, newline and then set of answers
prompt = _("{}\n[{}] ").format(self.content, '/'.join(possible_answers))
else:
# NOTE: first is prompt, then set of answers:
prompt = _("{} [{}] ").format(self.content, '/'.join(possible_answers))
return prompt
class LicenseAgreement(TextWithChoices):
def __init__(self, content, callback_yes, callback_no):
"""License agreement text with accept/decline"""
choices = [Choice(0, _("I Accept"), callback_yes, txt_shorcut=_("a")),
Choice(1, _("I don't accept"), callback_no, txt_shorcut=_("N"), is_default=True)]
super().__init__(content, choices=choices, newline_before_option=True)
@property
def input(self):
"""Text input prompt handling if we do have some shortcuts"""
answers = []
for choice in self.choices:
# NOTE: first element is choice, and then shortcut
_("{} ({})")
answer = _("{} ({})").format(choice.label, choice.txt_shorcut)
answers.append(answer)
# append different possible choices
return _("[{}] ").format('/'.join(answers))
class InputText:
def __init__(self, content, callback_fn, default_input=""):
"""Content text with an line input"""
self.content = content
self._callback_fn = callback_fn
self.default_input = default_input
def run_callback(self, result):
self._callback_fn(result)
class YesNo(TextWithChoices):
def __init__(self, content, callback_yes, callback_no, default_is_yes=False):
"""Return a basic Yes No question, default being false or overriden"""
super().__init__(content, [Choice(0, _("Yes"), callback_yes, txt_shorcut=_('y'), is_default=default_is_yes),
Choice(1, _("No"), callback_no, txt_shorcut=_("N"),
is_default=(not default_is_yes))])
class DisplayMessage:
def __init__(self, text):
self.text = text
class UnknownProgress:
def __init__(self, iterator):
self.bar = None
self._iterator = iterator
| gpl-3.0 | -1,425,082,578,962,650,400 | 38.732484 | 120 | 0.588169 | false |
Yuriy-Leonov/nova | nova/api/openstack/compute/contrib/server_diagnostics.py | 15 | 2561 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
authorize = extensions.extension_authorizer('compute', 'server_diagnostics')
sd_nsmap = {None: wsgi.XMLNS_V11}
class ServerDiagnosticsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('diagnostics')
elem = xmlutil.SubTemplateElement(root, xmlutil.Selector(0),
selector=xmlutil.get_items)
elem.text = 1
return xmlutil.MasterTemplate(root, 1, nsmap=sd_nsmap)
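# Illustrative serialization shape (an assumption, not taken from this module):
# a diagnostics dict such as {'cpu0_time': 17300000000} rendered through the
# template above yields XML along the lines of
#   <diagnostics><cpu0_time>17300000000</cpu0_time></diagnostics>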
class ServerDiagnosticsController(object):
@wsgi.serializers(xml=ServerDiagnosticsTemplate)
def index(self, req, server_id):
context = req.environ["nova.context"]
authorize(context)
compute_api = compute.API()
try:
instance = compute_api.get(context, server_id)
        except exception.NotFound:
raise webob.exc.HTTPNotFound(_("Instance not found"))
return compute_api.get_diagnostics(context, instance)
class Server_diagnostics(extensions.ExtensionDescriptor):
"""Allow Admins to view server diagnostics through server action."""
name = "ServerDiagnostics"
alias = "os-server-diagnostics"
namespace = ("http://docs.openstack.org/compute/ext/"
"server-diagnostics/api/v1.1")
updated = "2011-12-21T00:00:00+00:00"
def get_resources(self):
parent_def = {'member_name': 'server', 'collection_name': 'servers'}
#NOTE(bcwaldon): This should be prefixed with 'os-'
ext = extensions.ResourceExtension('diagnostics',
ServerDiagnosticsController(),
parent=parent_def)
return [ext]
| apache-2.0 | -694,403,430,750,275,800 | 36.661765 | 78 | 0.673955 | false |
ity/pants | tests/python/pants_test/util/test_osutil.py | 24 | 1839 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import unittest
from contextlib import contextmanager
from pants.util.osutil import OS_ALIASES, known_os_names, normalize_os_name
class OsutilTest(unittest.TestCase):
class WarningRecorder(object):
"""Simple logging handler to record warnings."""
def __init__(self):
self.warning_list = []
self.level = logging.WARNING
def handle(self, record):
self.warning_list.append('{}: {}'.format(record.name, record.getMessage()))
@contextmanager
def warnings(self):
handler = self.WarningRecorder()
logging.getLogger('').addHandler(handler)
yield handler.warning_list
def test_alias_normalization(self):
for normal_os, aliases in OS_ALIASES.items():
for alias in aliases:
self.assertEqual(normal_os, normalize_os_name(alias))
def test_keys_in_aliases(self):
for key in OS_ALIASES.keys():
self.assertIn(key, known_os_names())
def test_no_warnings_on_known_names(self):
for name in known_os_names():
with self.warnings() as warning_list:
normalize_os_name(name)
self.assertEqual(0, len(warning_list),
                       'Received unexpected warnings: {}'.format(warning_list))
def test_warnings_on_unknown_names(self):
name = 'I really hope no one ever names an operating system with this string.'
with self.warnings() as warning_list:
normalize_os_name(name)
self.assertEqual(1, len(warning_list),
'Expected exactly one warning, but got: {}'.format(warning_list))
| apache-2.0 | -6,263,407,353,687,622,000 | 33.055556 | 93 | 0.679173 | false |
bukzor/sympy | sympy/__init__.py | 42 | 2442 | """SymPy is a Python library for symbolic mathematics. It aims to become a
full-featured computer algebra system (CAS) while keeping the code as
simple as possible in order to be comprehensible and easily extensible.
SymPy is written entirely in Python and does not require any external
libraries, except optionally for plotting support.
See the webpage for more information and documentation:
http://sympy.org
"""
from __future__ import absolute_import, print_function
del absolute_import, print_function
try:
import mpmath
except ImportError:
raise ImportError("SymPy now depends on mpmath as an external library. "
"See http://docs.sympy.org/latest/install.html#mpmath for more information.")
from sympy.release import __version__
import sys
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
raise ImportError("Python Version 2.6 or above is required for SymPy.")
else: # Python 3
pass
# Here we can also check for specific Python 3 versions, if needed
del sys
def __sympy_debug():
# helper function so we don't import os globally
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
SYMPY_DEBUG = __sympy_debug()
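# For example, running `SYMPY_DEBUG=True python -c "import sympy"` enables the
# flag; any value other than 'True' or 'False' raises RuntimeError.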
from .core import *
from .logic import *
from .assumptions import *
from .polys import *
from .series import *
from .functions import *
from .ntheory import *
from .concrete import *
from .simplify import *
from .sets import *
from .solvers import *
from .matrices import *
from .geometry import *
from .utilities import *
from .integrals import *
from .tensor import *
from .parsing import *
from .calculus import *
# Adds about .04-.05 seconds of import time
# from combinatorics import *
# This module is slow to import:
#from physics import units
from .plotting import plot, textplot, plot_backends, plot_implicit
from .printing import pretty, pretty_print, pprint, pprint_use_unicode, \
pprint_try_use_unicode, print_gtk, print_tree, pager_print, TableForm
from .printing import ccode, fcode, jscode, mathematica_code, octave_code, \
latex, preview
from .printing import python, print_python, srepr, sstr, sstrrepr
from .interactive import init_session, init_printing
evalf._create_evalf_table()
# This is slow to import:
#import abc
from .deprecated import *
| bsd-3-clause | 3,648,404,883,689,580,500 | 29.911392 | 81 | 0.72973 | false |
tillahoffmann/tensorflow | tensorflow/python/eager/core.py | 14 | 2317 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for TensorFlow's "Eager" mode of execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import memory_trace
from tensorflow.python.framework import errors
# Trace of execution and memory usage.
_active_trace = None
def _status_to_exception(code, message):
try:
error_class = errors.exception_type_from_error_code(code)
return error_class(None, None, message)
except KeyError:
return errors.UnknownError(None, None, message, code)
class _NotOkStatusException(Exception):
"""Exception class to handle not ok Status."""
def __init__(self, message, code):
super(_NotOkStatusException, self).__init__()
self.message = message
self.code = code
def __str__(self):
e = _status_to_exception(self.code, self.message)
return "%s: %s" % (e.__class__.__name__, e)
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(_NotOkStatusException)
def enable_tracing():
"""Enables tracing of execution and memory usage.
WARNING: tracing is not thread-safe.
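
  Example (a minimal sketch; assumes eager execution is active):

    from tensorflow.python.eager import core
    core.enable_tracing()
    # ... run eager ops whose execution and memory use should be traced ...
    core.flush_trace()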
"""
# TODO(alive): Add code example in doc string.
global _active_trace
_active_trace = memory_trace.MemoryTrace()
def flush_trace():
"""Flushes the active trace, if it exists.
WARNING: tracing is not thread-safe.
"""
# TODO(alive): Add code example in doc string.
if _active_trace is not None:
_active_trace.flush_trace()
def active_trace():
"""Returns the current global active trace of execution and memory usage."""
return _active_trace
| apache-2.0 | -4,248,505,585,574,437,400 | 29.893333 | 80 | 0.701338 | false |
paulthulstrup/moose | framework/contrib/nsiqcppstyle/rules/RULE_3_2_F_use_representitive_classname_for_cpp_filename.py | 43 | 3744 | """
The file name should contain the representative class/struct name.
If the file contains class/struct decls or defs, the file name should be
one of the classes.
If the class/struct name starts with "C", the "C" can be omitted in the file name.
== Violation ==
= a.h = <== Violation. It should contain class name 'TestClass'
class TestClass() {
}
= a.cpp = <== Violation. It should contain class name 'Test'
void Test::Method1() {
}
== Good ==
= TestClass.h = <== OK
class TestClass {
}
= Class1.h = <== OK.
class CClass1 {
}
= TestClass.cpp = <== OK
void TestClass::Method1() {
}
"""
from nsiqcppstyle_rulemanager import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
try :
set()
except NameError:
from sets import Set as set
classname = None
def RunFunctionNameRule(lexer, fullName, decl, contextStack, context) :
names = fullName.split("::")
if len(names) > 1 :
if len(names[0]) != 0 :
classname.add(names[0])
def RunTypeNameRule(lexer, currentType, fullName, decl, contextStack, context) :
if currentType in ["CLASS", "STRUCT"] :
names = fullName.split("::")
if len(names[-1]) != 0 :
classname.add(names[-1])
def RunFileStartRule(lexer, filename, dirname) :
global classname
classname = set()
def RunFileEndRule(lexer, filename, dirname):
goodFileName = False
    filename = filename.lower()
if len(classname) == 0 : return
for t in classname :
if t.startswith("C") :
t = t[1:]
if filename.find(t.lower()) != -1 :
goodFileName = True
break
if not goodFileName :
nsiqcppstyle_reporter.Error(DummyToken(lexer.filename, "", 0, 0), __name__,
"The filename does not represent the classnames (%s)" %(classname))
ruleManager.AddFileStartRule(RunFileStartRule)
ruleManager.AddTypeNameRule(RunTypeNameRule)
ruleManager.AddFunctionNameRule(RunFunctionNameRule)
ruleManager.AddFileEndRule(RunFileEndRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
def setUpRule(self):
ruleManager.AddFileStartRule(RunFileStartRule)
ruleManager.AddTypeNameRule(RunTypeNameRule)
ruleManager.AddFunctionNameRule(RunFunctionNameRule)
ruleManager.AddFileEndRule(RunFileEndRule)
def test1(self):
self.Analyze("test/aa.c",
"""
void AA::DSD() {
}
""")
assert not CheckErrorContent(__name__)
def test2(self):
self.Analyze("test/ab.c",
"""
void AA::DSD() {
}
""")
assert CheckErrorContent(__name__)
def test3(self):
self.Analyze("test/aa.c",
"""
void CAA::DSD() {
}
""")
assert not CheckErrorContent(__name__)
def test4(self):
self.Analyze("test/aa.c",
"""
void DSD() {
}
""")
assert not CheckErrorContent(__name__)
def test5(self):
self.Analyze("test/aa.cpp",
"""
struct AA {
}
class BB {
}
""")
assert not CheckErrorContent(__name__)
def test6(self):
self.Analyze("test/aa.cpp",
"""
struct AA1 {
}
class BB {
}
""")
assert CheckErrorContent(__name__)
def test7(self):
self.Analyze("test/CamRecorderFactory.cpp",
"""
class __declspec(dllexport) CCamRecorderFactory
{
};
""")
assert not CheckErrorContent(__name__)
def test8(self):
self.Analyze("test/CamRecorderFactory.cpp",
"""
class DLLEXPORT CCamRecorderFactory
{
};
""")
assert not CheckErrorContent(__name__)
| lgpl-2.1 | 1,117,866,397,743,444,500 | 22.254658 | 94 | 0.598558 | false |
jhayworth/config | .emacs.d/elpy/rpc-venv/lib/python2.7/site-packages/pip/_vendor/urllib3/connectionpool.py | 10 | 36488 | from __future__ import absolute_import
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HeaderParsingError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
NewConnectionError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .packages.six.moves import queue
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection,
HTTPSConnection,
VerifiedHTTPSConnection,
HTTPException,
BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.request import set_file_position
from .util.response import assert_header_parsing
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import (
get_host,
parse_url,
Url,
_normalize_host as normalize_host,
_encode_target,
)
from .util.queue import LifoQueue
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
# Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
if not host:
raise LocationValueError("No host specified.")
self.host = _normalize_host(host, scheme=self.scheme)
self._proxy_host = host.lower()
self.port = port
def __str__(self):
return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# Return False to re-raise any potential exceptions
return False
def close(self):
"""
Close all pooled connections and disable the pool.
"""
pass
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
        :class:`urllib3.connectionpool.ProxyManager`
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
        instead, see :class:`urllib3.connectionpool.ProxyManager`
:param \\**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
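
    Example (an illustrative sketch; the host name is hypothetical)::

        >>> from urllib3 import HTTPConnectionPool
        >>> from urllib3.util import Timeout
        >>> pool = HTTPConnectionPool('example.org', maxsize=2,
        ...                           timeout=Timeout(connect=2.0, read=7.0))
        >>> r = pool.request('GET', '/')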
"""
scheme = "http"
ConnectionCls = HTTPConnection
ResponseCls = HTTPResponse
def __init__(
self,
host,
port=None,
strict=False,
timeout=Timeout.DEFAULT_TIMEOUT,
maxsize=1,
block=False,
headers=None,
retries=None,
_proxy=None,
_proxy_headers=None,
**conn_kw
):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault("socket_options", [])
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.debug(
"Starting new HTTP connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "80",
)
conn = self.ConnectionCls(
host=self.host,
port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict,
**self.conn_kw
)
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except queue.Empty:
if self.block:
raise EmptyPoolError(
self,
"Pool reached maximum size and no more connections are allowed.",
)
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.debug("Resetting dropped connection: %s", self.host)
conn.close()
if getattr(conn, "auto_open", 1) == 0:
# This is a proxied connection that has been mutated by
# httplib._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except queue.Full:
# This should never happen if self.block == True
log.warning("Connection pool is full, discarding connection: %s", self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _prepare_proxy(self, conn):
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, "errno") and err.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if "timed out" in str(err) or "did not complete (read)" in str(
err
): # Python < 2.7.4
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
def _make_request(
self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
if chunked:
conn.request_chunked(method, url, **httplib_request_kw)
else:
conn.request(method, url, **httplib_request_kw)
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, "sock", None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout
)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try:
# Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError:
# Python 3
try:
httplib_response = conn.getresponse()
except BaseException as e:
# Remove the TypeError from the exception chain in
# Python 3 (including for exceptions like SystemExit).
# Otherwise it looks like a bug in the code.
six.raise_from(e, None)
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
log.debug(
'%s://%s:%s "%s %s %s" %s %s',
self.scheme,
self.host,
self.port,
method,
url,
http_version,
httplib_response.status,
httplib_response.length,
)
try:
assert_header_parsing(httplib_response.msg)
except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3
log.warning(
"Failed to parse headers (url=%s): %s",
self._absolute_url(url),
hpe,
exc_info=True,
)
return httplib_response
def _absolute_url(self, path):
return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
def close(self):
"""
Close all pooled connections and disable the pool.
"""
if self.pool is None:
return
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except queue.Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith("/"):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
if host is not None:
host = _normalize_host(host, scheme=scheme)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(
self,
method,
url,
body=None,
headers=None,
retries=None,
redirect=True,
assert_same_host=True,
timeout=_Default,
pool_timeout=None,
release_conn=None,
chunked=False,
body_pos=None,
**response_kw
):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param \\**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
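
        Example (a hedged sketch; assumes ``pool`` is an existing
        :class:`HTTPConnectionPool`)::

            >>> from urllib3.util.retry import Retry
            >>> r = pool.urlopen('GET', '/', retries=Retry(3, redirect=2),
            ...                  preload_content=False)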
"""
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get("preload_content", True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
# Ensure that the URL we're connecting to is properly encoded
if url.startswith("/"):
url = six.ensure_str(_encode_target(url))
else:
url = six.ensure_str(parse_url(url).url)
conn = None
# Track whether `conn` needs to be released before
# returning/raising/recursing. Update this variable if necessary, and
# leave `release_conn` constant throughout the function. That way, if
# the function recurses, the original value of `release_conn` will be
# passed down into the recursive call, and its value will be respected.
#
# See issue #651 [1] for details.
#
# [1] <https://github.com/urllib3/urllib3/issues/651>
release_this_conn = release_conn
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == "http":
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
# Keep track of whether we cleanly exited the except block. This
# ensures we do proper cleanup in finally.
clean_exit = False
# Rewind body position, if needed. Record current position
# for future rewinds in the event of a redirect/retry.
body_pos = set_file_position(body, body_pos)
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
conn.timeout = timeout_obj.connect_timeout
is_new_proxy_conn = self.proxy is not None and not getattr(
conn, "sock", None
)
if is_new_proxy_conn:
self._prepare_proxy(conn)
# Make the request on the httplib connection object.
httplib_response = self._make_request(
conn,
method,
url,
timeout=timeout_obj,
body=body,
headers=headers,
chunked=chunked,
)
# If we're going to release the connection in ``finally:``, then
# the response doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = conn if not release_conn else None
# Pass method to Response for length checking
response_kw["request_method"] = method
# Import httplib's response into our own wrapper object
response = self.ResponseCls.from_httplib(
httplib_response,
pool=self,
connection=response_conn,
retries=retries,
**response_kw
)
# Everything went great!
clean_exit = True
except queue.Empty:
# Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.")
except (
TimeoutError,
HTTPException,
SocketError,
ProtocolError,
BaseSSLError,
SSLError,
CertificateError,
) as e:
# Discard the connection for these exceptions. It will be
# replaced during the next _get_conn() call.
clean_exit = False
if isinstance(e, (BaseSSLError, CertificateError)):
e = SSLError(e)
elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
e = ProxyError("Cannot connect to proxy.", e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError("Connection aborted.", e)
retries = retries.increment(
method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
)
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if not clean_exit:
# We hit some kind of exception, handled or otherwise. We need
# to throw the connection away unless explicitly told not to.
# Close the connection, set the variable to None, and make sure
# we put the None back in the pool to avoid leaking it.
conn = conn and conn.close()
release_this_conn = True
if release_this_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning(
"Retrying (%r) after connection broken by '%r': %s", retries, err, url
)
return self.urlopen(
method,
url,
body,
headers,
retries,
redirect,
assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
def drain_and_release_conn(response):
try:
# discard any remaining response body, the connection will be
# released back to the pool once the entire response is read
response.read()
except (
TimeoutError,
HTTPException,
SocketError,
ProtocolError,
BaseSSLError,
SSLError,
):
pass
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = "GET"
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
# Drain and release the connection for this response, since
# we're not returning it to be released manually.
drain_and_release_conn(response)
raise
return response
# drain and return the connection to the pool before recursing
drain_and_release_conn(response)
retries.sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
return self.urlopen(
method,
redirect_location,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.getheader("Retry-After"))
if retries.is_retry(method, response.status, has_retry_after):
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
# Drain and release the connection for this response, since
# we're not returning it to be released manually.
drain_and_release_conn(response)
raise
return response
# drain and return the connection to the pool before recursing
drain_and_release_conn(response)
retries.sleep(response)
log.debug("Retry: %s", url)
return self.urlopen(
method,
url,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
return response
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
the connection socket into an SSL socket.
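
    Example (illustrative only; the host and CA bundle path are hypothetical)::

        >>> from urllib3 import HTTPSConnectionPool
        >>> pool = HTTPSConnectionPool('example.org', port=443,
        ...                            cert_reqs='CERT_REQUIRED',
        ...                            ca_certs='/path/to/ca_bundle.pem')
        >>> r = pool.request('GET', '/')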
"""
scheme = "https"
ConnectionCls = HTTPSConnection
def __init__(
self,
host,
port=None,
strict=False,
timeout=Timeout.DEFAULT_TIMEOUT,
maxsize=1,
block=False,
headers=None,
retries=None,
_proxy=None,
_proxy_headers=None,
key_file=None,
cert_file=None,
cert_reqs=None,
key_password=None,
ca_certs=None,
ssl_version=None,
assert_hostname=None,
assert_fingerprint=None,
ca_cert_dir=None,
**conn_kw
):
HTTPConnectionPool.__init__(
self,
host,
port,
strict,
timeout,
maxsize,
block,
headers,
retries,
_proxy,
_proxy_headers,
**conn_kw
)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.key_password = key_password
self.ca_certs = ca_certs
self.ca_cert_dir = ca_cert_dir
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(
key_file=self.key_file,
key_password=self.key_password,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
)
conn.ssl_version = self.ssl_version
return conn
def _prepare_proxy(self, conn):
"""
Establish tunnel connection early, because otherwise httplib
would improperly set Host: header to proxy's IP:port.
"""
conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
conn.connect()
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.debug(
"Starting new HTTPS connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "443",
)
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
raise SSLError(
"Can't connect to HTTPS URL because the SSL module is not available."
)
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
conn = self.ConnectionCls(
host=actual_host,
port=actual_port,
timeout=self.timeout.connect_timeout,
strict=self.strict,
cert_file=self.cert_file,
key_file=self.key_file,
key_password=self.key_password,
**self.conn_kw
)
return self._prepare_conn(conn)
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
conn.connect()
if not conn.is_verified:
warnings.warn(
(
"Unverified HTTPS request is being made. "
"Adding certificate verification is strongly advised. See: "
"https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
"#ssl-warnings"
),
InsecureRequestWarning,
)
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \\**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
port = port or port_by_scheme.get(scheme, 80)
if scheme == "https":
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
def _normalize_host(host, scheme):
"""
Normalize hosts for comparisons and use with sockets.
"""
host = normalize_host(host, scheme)
# httplib doesn't like it when we include brackets in IPv6 addresses
# Specifically, if we include brackets but also pass the port then
# httplib crazily doubles up the square brackets on the Host header.
# Instead, we need to make sure we never pass ``None`` as the port.
# However, for backward compatibility reasons we can't actually
# *assert* that. See http://bugs.python.org/issue28539
if host.startswith("[") and host.endswith("]"):
host = host[1:-1]
return host
| gpl-3.0 | 5,188,614,900,558,072,000 | 33.717412 | 99 | 0.580081 | false |
MiltosD/CEF-ELRC | lib/python2.7/site-packages/haystack/fields.py | 14 | 12973 | import re
from django.utils import datetime_safe
from django.template import loader, Context
from haystack.exceptions import SearchFieldError
class NOT_PROVIDED:
pass
DATETIME_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
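
# An illustrative sketch of what the pattern matches:
#   DATETIME_REGEX.search('2009-02-13T23:31:30').groupdict()
#   -> {'year': '2009', 'month': '02', 'day': '13',
#      'hour': '23', 'minute': '31', 'second': '30'}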
# All the SearchField variants.
class SearchField(object):
"""The base implementation of a search field."""
field_type = None
def __init__(self, model_attr=None, use_template=False, template_name=None,
document=False, indexed=True, stored=True, faceted=False,
default=NOT_PROVIDED, null=False, index_fieldname=None,
facet_class=None, boost=1.0, weight=None):
# Track what the index thinks this field is called.
self.instance_name = None
self.model_attr = model_attr
self.use_template = use_template
self.template_name = template_name
self.document = document
self.indexed = indexed
self.stored = stored
self.faceted = faceted
self._default = default
self.null = null
self.index_fieldname = index_fieldname
self.boost = weight or boost
self.is_multivalued = False
# We supply the facet_class for making it easy to create a faceted
# field based off of this field.
self.facet_class = facet_class
if self.facet_class is None:
self.facet_class = FacetCharField
self.set_instance_name(None)
def set_instance_name(self, instance_name):
self.instance_name = instance_name
if self.index_fieldname is None:
self.index_fieldname = self.instance_name
def has_default(self):
"""Returns a boolean of whether this field has a default value."""
return self._default is not NOT_PROVIDED
@property
def default(self):
"""Returns the default value for the field."""
if callable(self._default):
return self._default()
return self._default
def prepare(self, obj):
"""
Takes data from the provided object and prepares it for storage in the
index.
"""
# Give priority to a template.
if self.use_template:
return self.prepare_template(obj)
elif self.model_attr is not None:
# Check for `__` in the field for looking through the relation.
attrs = self.model_attr.split('__')
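            # e.g. a hypothetical model_attr='author__name' yields
            # ['author', 'name'] and walks obj.author.name below.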
current_object = obj
for attr in attrs:
if not hasattr(current_object, attr):
raise SearchFieldError("The model '%s' does not have a model_attr '%s'." % (repr(obj), attr))
current_object = getattr(current_object, attr, None)
if current_object is None:
if self.has_default():
current_object = self._default
                        # Fall out of the loop, since any further attempts at
                        # access will fail miserably.
break
elif self.null:
current_object = None
                        # Fall out of the loop, since any further attempts at
                        # access will fail miserably.
break
else:
raise SearchFieldError("The model '%s' has an empty model_attr '%s' and doesn't allow a default or null value." % (repr(obj), attr))
if callable(current_object):
return current_object()
return current_object
if self.has_default():
return self.default
else:
return None
def prepare_template(self, obj):
"""
Flattens an object for indexing.
This loads a template
(``search/indexes/{app_label}/{model_name}_{field_name}.txt``) and
returns the result of rendering that template. ``object`` will be in
its context.
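
        For example (illustrative app/model/field names), a ``Note`` model in
        an app ``notes`` with a field named ``text`` would render
        ``search/indexes/notes/note_text.txt``.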
"""
if self.instance_name is None and self.template_name is None:
raise SearchFieldError("This field requires either its instance_name variable to be populated or an explicit template_name in order to load the correct template.")
if self.template_name is not None:
template_names = self.template_name
if not isinstance(template_names, (list, tuple)):
template_names = [template_names]
else:
template_names = ['search/indexes/%s/%s_%s.txt' % (obj._meta.app_label, obj._meta.module_name, self.instance_name)]
t = loader.select_template(template_names)
return t.render(Context({'object': obj}))
def convert(self, value):
"""
Handles conversion between the data found and the type of the field.
Extending classes should override this method and provide correct
data coercion.
"""
return value
class CharField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetCharField
super(CharField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(CharField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return unicode(value)
class LocationField(SearchField):
field_type = 'location'
def prepare(self, obj):
from haystack.utils.geo import ensure_point
value = super(LocationField, self).prepare(obj)
if value is None:
return None
pnt = ensure_point(value)
pnt_lng, pnt_lat = pnt.get_coords()
return "%s,%s" % (pnt_lat, pnt_lng)
def convert(self, value):
from haystack.utils.geo import ensure_point, Point
if value is None:
return None
if hasattr(value, 'geom_type'):
value = ensure_point(value)
return value
if isinstance(value, basestring):
lat, lng = value.split(',')
elif isinstance(value, (list, tuple)):
# GeoJSON-alike
lat, lng = value[1], value[0]
elif isinstance(value, dict):
lat = value.get('lat', 0)
lng = value.get('lon', 0)
value = Point(float(lng), float(lat))
return value
class NgramField(CharField):
field_type = 'ngram'
def __init__(self, **kwargs):
if kwargs.get('faceted') is True:
raise SearchFieldError("%s can not be faceted." % self.__class__.__name__)
super(NgramField, self).__init__(**kwargs)
class EdgeNgramField(NgramField):
field_type = 'edge_ngram'
class IntegerField(SearchField):
field_type = 'integer'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetIntegerField
super(IntegerField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(IntegerField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return int(value)
class FloatField(SearchField):
field_type = 'float'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetFloatField
super(FloatField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(FloatField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return float(value)
class DecimalField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDecimalField
super(DecimalField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(DecimalField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return unicode(value)
class BooleanField(SearchField):
field_type = 'boolean'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetBooleanField
super(BooleanField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(BooleanField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return bool(value)
class DateField(SearchField):
field_type = 'date'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDateField
super(DateField, self).__init__(**kwargs)
def convert(self, value):
if value is None:
return None
if isinstance(value, basestring):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.date(int(data['year']), int(data['month']), int(data['day']))
else:
raise SearchFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value))
return value
class DateTimeField(SearchField):
field_type = 'datetime'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDateTimeField
super(DateTimeField, self).__init__(**kwargs)
def convert(self, value):
if value is None:
return None
if isinstance(value, basestring):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second']))
else:
raise SearchFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
return value
class MultiValueField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetMultiValueField
if kwargs.get('use_template') is True:
raise SearchFieldError("'%s' fields can not use templates to prepare their data." % self.__class__.__name__)
super(MultiValueField, self).__init__(**kwargs)
self.is_multivalued = True
def prepare(self, obj):
return self.convert(super(MultiValueField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return list(value)
class FacetField(SearchField):
"""
``FacetField`` is slightly different than the other fields because it can
work in conjunction with other fields as its data source.
Accepts an optional ``facet_for`` kwarg, which should be the field name
(not ``index_fieldname``) of the field it should pull data from.
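
    A minimal sketch (field names are illustrative)::

        author = CharField(model_attr='user')
        author_exact = FacetCharField(facet_for='author')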
"""
instance_name = None
def __init__(self, **kwargs):
handled_kwargs = self.handle_facet_parameters(kwargs)
super(FacetField, self).__init__(**handled_kwargs)
def handle_facet_parameters(self, kwargs):
if kwargs.get('faceted', False):
raise SearchFieldError("FacetField (%s) does not accept the 'faceted' argument." % self.instance_name)
if not kwargs.get('null', True):
raise SearchFieldError("FacetField (%s) does not accept False for the 'null' argument." % self.instance_name)
if not kwargs.get('indexed', True):
raise SearchFieldError("FacetField (%s) does not accept False for the 'indexed' argument." % self.instance_name)
if kwargs.get('facet_class'):
raise SearchFieldError("FacetField (%s) does not accept the 'facet_class' argument." % self.instance_name)
self.facet_for = None
self.facet_class = None
# Make sure the field is nullable.
kwargs['null'] = True
if 'facet_for' in kwargs:
self.facet_for = kwargs['facet_for']
del(kwargs['facet_for'])
return kwargs
def get_facet_for_name(self):
return self.facet_for or self.instance_name
class FacetCharField(FacetField, CharField):
pass
class FacetIntegerField(FacetField, IntegerField):
pass
class FacetFloatField(FacetField, FloatField):
pass
class FacetDecimalField(FacetField, DecimalField):
pass
class FacetBooleanField(FacetField, BooleanField):
pass
class FacetDateField(FacetField, DateField):
pass
class FacetDateTimeField(FacetField, DateTimeField):
pass
class FacetMultiValueField(FacetField, MultiValueField):
pass
| bsd-3-clause | 6,164,028,066,813,941,000 | 28.891705 | 175 | 0.597934 | false |
akashsinghal/Speech-Memorization-App | Python_Backend/lib/requests/_internal_utils.py | 414 | 1096 | # -*- coding: utf-8 -*-
"""
requests._internal_utils
~~~~~~~~~~~~~~~~~~~~~~~~
Provides utility functions that are consumed internally by Requests
which depend on extremely few external helpers (such as compat)
"""
from .compat import is_py2, builtin_str, str
def to_native_string(string, encoding='ascii'):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
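
    For example (illustrative): to_native_string(u'abc') returns the
    native ``str`` 'abc' on Python 3, and the byte string 'abc' on Python 2.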
"""
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
def unicode_is_ascii(u_string):
"""Determine if unicode string only contains ASCII characters.
:param str u_string: unicode string to check. Must be unicode
and not Python 2 `str`.
:rtype: bool
"""
assert isinstance(u_string, str)
try:
u_string.encode('ascii')
return True
except UnicodeEncodeError:
return False
| apache-2.0 | -538,853,146,613,893,000 | 25.095238 | 77 | 0.653285 | false |
Distrotech/bzr | bzrlib/gpg.py | 2 | 21179 | # Copyright (C) 2005, 2011 Canonical Ltd
# Authors: Robert Collins <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""GPG signing and checking logic."""
from __future__ import absolute_import
import os
import sys
from StringIO import StringIO
from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import errno
import subprocess
from bzrlib import (
config,
errors,
trace,
ui,
)
from bzrlib.i18n import (
gettext,
ngettext,
)
""")
from bzrlib.symbol_versioning import (
deprecated_in,
deprecated_method,
)
#verification results
SIGNATURE_VALID = 0
SIGNATURE_KEY_MISSING = 1
SIGNATURE_NOT_VALID = 2
SIGNATURE_NOT_SIGNED = 3
SIGNATURE_EXPIRED = 4
def bulk_verify_signatures(repository, revids, strategy,
process_events_callback=None):
"""Do verifications on a set of revisions
:param repository: repository object
:param revids: list of revision ids to verify
:param strategy: GPG strategy to use
:param process_events_callback: method to call for GUI frontends that
want to keep their UI refreshed
:return: count dictionary of results of each type,
result list for each revision,
boolean True if all results are verified successfully
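
    A minimal sketch (assumes an existing ``branch`` object; the config-stack
    accessor name may vary between bzr versions):

        strategy = GPGStrategy(branch.get_config_stack())
        count, result, all_ok = bulk_verify_signatures(
            branch.repository, [rev_id], strategy)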
"""
count = {SIGNATURE_VALID: 0,
SIGNATURE_KEY_MISSING: 0,
SIGNATURE_NOT_VALID: 0,
SIGNATURE_NOT_SIGNED: 0,
SIGNATURE_EXPIRED: 0}
result = []
all_verifiable = True
total = len(revids)
pb = ui.ui_factory.nested_progress_bar()
try:
for i, (rev_id, verification_result, uid) in enumerate(
repository.verify_revision_signatures(
revids, strategy)):
pb.update("verifying signatures", i, total)
result.append([rev_id, verification_result, uid])
count[verification_result] += 1
if verification_result != SIGNATURE_VALID:
all_verifiable = False
if process_events_callback is not None:
process_events_callback()
finally:
pb.finished()
return (count, result, all_verifiable)
class DisabledGPGStrategy(object):
"""A GPG Strategy that makes everything fail."""
@staticmethod
def verify_signatures_available():
return True
def __init__(self, ignored):
"""Real strategies take a configuration."""
def sign(self, content):
raise errors.SigningFailed('Signing is disabled.')
def verify(self, content, testament):
raise errors.SignatureVerificationFailed('Signature verification is \
disabled.')
def set_acceptable_keys(self, command_line_input):
pass
class LoopbackGPGStrategy(object):
"""A GPG Strategy that acts like 'cat' - data is just passed through.
Used in tests.
"""
@staticmethod
def verify_signatures_available():
return True
def __init__(self, ignored):
"""Real strategies take a configuration."""
def sign(self, content):
return ("-----BEGIN PSEUDO-SIGNED CONTENT-----\n" + content +
"-----END PSEUDO-SIGNED CONTENT-----\n")
def verify(self, content, testament):
return SIGNATURE_VALID, None
def set_acceptable_keys(self, command_line_input):
if command_line_input is not None:
patterns = command_line_input.split(",")
self.acceptable_keys = []
for pattern in patterns:
if pattern == "unknown":
pass
else:
self.acceptable_keys.append(pattern)
@deprecated_method(deprecated_in((2, 6, 0)))
def do_verifications(self, revisions, repository):
return bulk_verify_signatures(repository, revisions, self)
@deprecated_method(deprecated_in((2, 6, 0)))
def valid_commits_message(self, count):
return valid_commits_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def unknown_key_message(self, count):
return unknown_key_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def commit_not_valid_message(self, count):
return commit_not_valid_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def commit_not_signed_message(self, count):
return commit_not_signed_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def expired_commit_message(self, count):
return expired_commit_message(count)
def _set_gpg_tty():
tty = os.environ.get('TTY')
if tty is not None:
os.environ['GPG_TTY'] = tty
trace.mutter('setting GPG_TTY=%s', tty)
else:
# This is not quite worthy of a warning, because some people
# don't need GPG_TTY to be set. But it is worthy of a big mark
# in ~/.bzr.log, so that people can debug it if it happens to them
trace.mutter('** Env var TTY empty, cannot set GPG_TTY.'
' Is TTY exported?')
class GPGStrategy(object):
"""GPG Signing and checking facilities."""
acceptable_keys = None
def __init__(self, config_stack):
self._config_stack = config_stack
try:
import gpgme
self.context = gpgme.Context()
except ImportError, error:
pass # can't use verify()
@staticmethod
def verify_signatures_available():
"""
check if this strategy can verify signatures
:return: boolean if this strategy can verify signatures
"""
try:
import gpgme
return True
except ImportError, error:
return False
def _command_line(self):
key = self._config_stack.get('gpg_signing_key')
if key is None or key == 'default':
# 'default' or not setting gpg_signing_key at all means we should
# use the user email address
key = config.extract_email_address(self._config_stack.get('email'))
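        # e.g. (an illustrative result, assuming the default command and an
        # email-derived key): ['gpg', '--clearsign', '-u', 'user@example.com']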
return [self._config_stack.get('gpg_signing_command'), '--clearsign',
'-u', key]
def sign(self, content):
if isinstance(content, unicode):
raise errors.BzrBadParameterUnicode('content')
ui.ui_factory.clear_term()
preexec_fn = _set_gpg_tty
if sys.platform == 'win32':
# Win32 doesn't support preexec_fn, but wouldn't support TTY anyway.
preexec_fn = None
try:
process = subprocess.Popen(self._command_line(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
preexec_fn=preexec_fn)
try:
result = process.communicate(content)[0]
if process.returncode is None:
process.wait()
if process.returncode != 0:
raise errors.SigningFailed(self._command_line())
return result
except OSError, e:
if e.errno == errno.EPIPE:
raise errors.SigningFailed(self._command_line())
else:
raise
except ValueError:
# bad subprocess parameters, should never happen.
raise
except OSError, e:
if e.errno == errno.ENOENT:
# gpg is not installed
raise errors.SigningFailed(self._command_line())
else:
raise
def verify(self, content, testament):
"""Check content has a valid signature.
:param content: the commit signature
:param testament: the valid testament string for the commit
:return: SIGNATURE_VALID or a failed SIGNATURE_ value, key uid if valid
"""
try:
import gpgme
except ImportError, error:
raise errors.GpgmeNotInstalled(error)
signature = StringIO(content)
plain_output = StringIO()
try:
result = self.context.verify(signature, None, plain_output)
except gpgme.GpgmeError,error:
raise errors.SignatureVerificationFailed(error[2])
# No result if input is invalid.
# test_verify_invalid()
if len(result) == 0:
return SIGNATURE_NOT_VALID, None
# User has specified a list of acceptable keys, check our result is in
# it. test_verify_unacceptable_key()
fingerprint = result[0].fpr
if self.acceptable_keys is not None:
if not fingerprint in self.acceptable_keys:
return SIGNATURE_KEY_MISSING, fingerprint[-8:]
# Check the signature actually matches the testament.
# test_verify_bad_testament()
if testament != plain_output.getvalue():
return SIGNATURE_NOT_VALID, None
# Yay gpgme set the valid bit.
# Can't write a test for this one as you can't set a key to be
# trusted using gpgme.
if result[0].summary & gpgme.SIGSUM_VALID:
key = self.context.get_key(fingerprint)
name = key.uids[0].name
email = key.uids[0].email
return SIGNATURE_VALID, name + " <" + email + ">"
        # SIGSUM_RED indicates a problem; unfortunately I have not been able
# to write any tests which actually set this.
if result[0].summary & gpgme.SIGSUM_RED:
return SIGNATURE_NOT_VALID, None
# GPG does not know this key.
# test_verify_unknown_key()
if result[0].summary & gpgme.SIGSUM_KEY_MISSING:
return SIGNATURE_KEY_MISSING, fingerprint[-8:]
        # Summary isn't set if the sig is valid but the key is untrusted;
        # if the user has explicitly set the key as acceptable, validate it.
if result[0].summary == 0 and self.acceptable_keys is not None:
if fingerprint in self.acceptable_keys:
# test_verify_untrusted_but_accepted()
return SIGNATURE_VALID, None
# test_verify_valid_but_untrusted()
if result[0].summary == 0 and self.acceptable_keys is None:
return SIGNATURE_NOT_VALID, None
if result[0].summary & gpgme.SIGSUM_KEY_EXPIRED:
expires = self.context.get_key(result[0].fpr).subkeys[0].expires
if expires > result[0].timestamp:
# The expired key was not expired at time of signing.
# test_verify_expired_but_valid()
return SIGNATURE_EXPIRED, fingerprint[-8:]
else:
# I can't work out how to create a test where the signature
# was expired at the time of signing.
return SIGNATURE_NOT_VALID, None
# A signature from a revoked key gets this.
# test_verify_revoked_signature()
if result[0].summary & gpgme.SIGSUM_SYS_ERROR:
return SIGNATURE_NOT_VALID, None
# Other error types such as revoked keys should (I think) be caught by
# SIGSUM_RED so anything else means something is buggy.
raise errors.SignatureVerificationFailed("Unknown GnuPG key "\
"verification result")
def set_acceptable_keys(self, command_line_input):
"""Set the acceptable keys for verifying with this GPGStrategy.
:param command_line_input: comma separated list of patterns from
command line
:return: nothing
"""
key_patterns = None
acceptable_keys_config = self._config_stack.get('acceptable_keys')
try:
if isinstance(acceptable_keys_config, unicode):
acceptable_keys_config = str(acceptable_keys_config)
except UnicodeEncodeError:
# gpg Context.keylist(pattern) does not like unicode
raise errors.BzrCommandError(
gettext('Only ASCII permitted in option names'))
if acceptable_keys_config is not None:
key_patterns = acceptable_keys_config
if command_line_input is not None: # command line overrides config
key_patterns = command_line_input
if key_patterns is not None:
patterns = key_patterns.split(",")
self.acceptable_keys = []
for pattern in patterns:
result = self.context.keylist(pattern)
found_key = False
for key in result:
found_key = True
self.acceptable_keys.append(key.subkeys[0].fpr)
trace.mutter("Added acceptable key: " + key.subkeys[0].fpr)
if not found_key:
trace.note(gettext(
"No GnuPG key results for pattern: {0}"
).format(pattern))
@deprecated_method(deprecated_in((2, 6, 0)))
def do_verifications(self, revisions, repository,
process_events_callback=None):
"""do verifications on a set of revisions
:param revisions: list of revision ids to verify
:param repository: repository object
:param process_events_callback: method to call for GUI frontends that
want to keep their UI refreshed
:return: count dictionary of results of each type,
result list for each revision,
boolean True if all results are verified successfully
"""
return bulk_verify_signatures(repository, revisions, self,
process_events_callback)
@deprecated_method(deprecated_in((2, 6, 0)))
def verbose_valid_message(self, result):
"""takes a verify result and returns list of signed commits strings"""
return verbose_valid_message(result)
@deprecated_method(deprecated_in((2, 6, 0)))
def verbose_not_valid_message(self, result, repo):
"""takes a verify result and returns list of not valid commit info"""
return verbose_not_valid_message(result, repo)
@deprecated_method(deprecated_in((2, 6, 0)))
def verbose_not_signed_message(self, result, repo):
"""takes a verify result and returns list of not signed commit info"""
return verbose_not_valid_message(result, repo)
@deprecated_method(deprecated_in((2, 6, 0)))
def verbose_missing_key_message(self, result):
"""takes a verify result and returns list of missing key info"""
return verbose_missing_key_message(result)
@deprecated_method(deprecated_in((2, 6, 0)))
def verbose_expired_key_message(self, result, repo):
"""takes a verify result and returns list of expired key info"""
return verbose_expired_key_message(result, repo)
@deprecated_method(deprecated_in((2, 6, 0)))
def valid_commits_message(self, count):
"""returns message for number of commits"""
return valid_commits_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def unknown_key_message(self, count):
"""returns message for number of commits"""
return unknown_key_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def commit_not_valid_message(self, count):
"""returns message for number of commits"""
return commit_not_valid_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def commit_not_signed_message(self, count):
"""returns message for number of commits"""
return commit_not_signed_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def expired_commit_message(self, count):
"""returns message for number of commits"""
return expired_commit_message(count)
def valid_commits_message(count):
"""returns message for number of commits"""
return gettext(u"{0} commits with valid signatures").format(
count[SIGNATURE_VALID])
def unknown_key_message(count):
"""returns message for number of commits"""
return ngettext(u"{0} commit with unknown key",
u"{0} commits with unknown keys",
count[SIGNATURE_KEY_MISSING]).format(
count[SIGNATURE_KEY_MISSING])
def commit_not_valid_message(count):
"""returns message for number of commits"""
return ngettext(u"{0} commit not valid",
u"{0} commits not valid",
count[SIGNATURE_NOT_VALID]).format(
count[SIGNATURE_NOT_VALID])
def commit_not_signed_message(count):
"""returns message for number of commits"""
return ngettext(u"{0} commit not signed",
u"{0} commits not signed",
count[SIGNATURE_NOT_SIGNED]).format(
count[SIGNATURE_NOT_SIGNED])
def expired_commit_message(count):
"""returns message for number of commits"""
return ngettext(u"{0} commit with key now expired",
u"{0} commits with key now expired",
count[SIGNATURE_EXPIRED]).format(
count[SIGNATURE_EXPIRED])
def verbose_expired_key_message(result, repo):
"""takes a verify result and returns list of expired key info"""
signers = {}
fingerprint_to_authors = {}
for rev_id, validity, fingerprint in result:
if validity == SIGNATURE_EXPIRED:
revision = repo.get_revision(rev_id)
authors = ', '.join(revision.get_apparent_authors())
signers.setdefault(fingerprint, 0)
signers[fingerprint] += 1
fingerprint_to_authors[fingerprint] = authors
result = []
for fingerprint, number in signers.items():
result.append(
ngettext(u"{0} commit by author {1} with key {2} now expired",
u"{0} commits by author {1} with key {2} now expired",
number).format(
number, fingerprint_to_authors[fingerprint], fingerprint))
return result
def verbose_valid_message(result):
"""takes a verify result and returns list of signed commits strings"""
signers = {}
for rev_id, validity, uid in result:
if validity == SIGNATURE_VALID:
signers.setdefault(uid, 0)
signers[uid] += 1
result = []
for uid, number in signers.items():
result.append(ngettext(u"{0} signed {1} commit",
u"{0} signed {1} commits",
number).format(uid, number))
return result
def verbose_not_valid_message(result, repo):
"""takes a verify result and returns list of not valid commit info"""
signers = {}
for rev_id, validity, empty in result:
if validity == SIGNATURE_NOT_VALID:
revision = repo.get_revision(rev_id)
authors = ', '.join(revision.get_apparent_authors())
signers.setdefault(authors, 0)
signers[authors] += 1
result = []
for authors, number in signers.items():
result.append(ngettext(u"{0} commit by author {1}",
u"{0} commits by author {1}",
number).format(number, authors))
return result
def verbose_not_signed_message(result, repo):
"""takes a verify result and returns list of not signed commit info"""
signers = {}
for rev_id, validity, empty in result:
if validity == SIGNATURE_NOT_SIGNED:
revision = repo.get_revision(rev_id)
authors = ', '.join(revision.get_apparent_authors())
signers.setdefault(authors, 0)
signers[authors] += 1
result = []
for authors, number in signers.items():
result.append(ngettext(u"{0} commit by author {1}",
u"{0} commits by author {1}",
number).format(number, authors))
return result
def verbose_missing_key_message(result):
"""takes a verify result and returns list of missing key info"""
signers = {}
for rev_id, validity, fingerprint in result:
if validity == SIGNATURE_KEY_MISSING:
signers.setdefault(fingerprint, 0)
signers[fingerprint] += 1
result = []
for fingerprint, number in signers.items():
result.append(ngettext(u"Unknown key {0} signed {1} commit",
u"Unknown key {0} signed {1} commits",
number).format(fingerprint, number))
return result
| gpl-2.0 | 8,883,162,089,343,562,000 | 37.023339 | 80 | 0.601067 | false |
ticosax/django | django/contrib/gis/geos/polygon.py | 450 | 6843 | from ctypes import byref, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.libgeos import GEOM_PTR, get_pointer_arr
from django.contrib.gis.geos.linestring import LinearRing
from django.utils import six
from django.utils.six.moves import range
class Polygon(GEOSGeometry):
_minlength = 1
def __init__(self, *args, **kwargs):
"""
Initializes on an exterior ring and a sequence of holes (both
instances may be either LinearRing instances, or a tuple/list
that may be constructed into a LinearRing).
Examples of initialization, where shell, hole1, and hole2 are
valid LinearRing geometries:
>>> from django.contrib.gis.geos import LinearRing, Polygon
>>> shell = hole1 = hole2 = LinearRing()
>>> poly = Polygon(shell, hole1, hole2)
>>> poly = Polygon(shell, (hole1, hole2))
        >>> # Example where tuple parameters are used:
        >>> poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)),
... ((4, 4), (4, 6), (6, 6), (6, 4), (4, 4)))
"""
if not args:
raise TypeError('Must provide at least one LinearRing, or a tuple, to initialize a Polygon.')
# Getting the ext_ring and init_holes parameters from the argument list
ext_ring = args[0]
init_holes = args[1:]
n_holes = len(init_holes)
# If initialized as Polygon(shell, (LinearRing, LinearRing)) [for backward-compatibility]
if n_holes == 1 and isinstance(init_holes[0], (tuple, list)):
if len(init_holes[0]) == 0:
init_holes = ()
n_holes = 0
elif isinstance(init_holes[0][0], LinearRing):
init_holes = init_holes[0]
n_holes = len(init_holes)
polygon = self._create_polygon(n_holes + 1, (ext_ring,) + init_holes)
super(Polygon, self).__init__(polygon, **kwargs)
def __iter__(self):
"Iterates over each ring in the polygon."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Returns the number of rings in this Polygon."
return self.num_interior_rings + 1
@classmethod
def from_bbox(cls, bbox):
"Constructs a Polygon from a bounding box (4-tuple)."
x0, y0, x1, y1 = bbox
for z in bbox:
if not isinstance(z, six.integer_types + (float,)):
return GEOSGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' %
(x0, y0, x0, y1, x1, y1, x1, y0, x0, y0))
return Polygon(((x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)))
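    # Illustrative use of from_bbox() (values assumed, not from the source):
    #   >>> Polygon.from_bbox((0, 0, 10, 10)).extent
    #   (0.0, 0.0, 10.0, 10.0)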
# ### These routines are needed for list-like operation w/ListMixin ###
def _create_polygon(self, length, items):
# Instantiate LinearRing objects if necessary, but don't clone them yet
# _construct_ring will throw a TypeError if a parameter isn't a valid ring
# If we cloned the pointers here, we wouldn't be able to clean up
# in case of error.
rings = []
for r in items:
if isinstance(r, GEOM_PTR):
rings.append(r)
else:
rings.append(self._construct_ring(r))
shell = self._clone(rings.pop(0))
n_holes = length - 1
if n_holes:
holes = get_pointer_arr(n_holes)
for i, r in enumerate(rings):
holes[i] = self._clone(r)
holes_param = byref(holes)
else:
holes_param = None
return capi.create_polygon(shell, holes_param, c_uint(n_holes))
def _clone(self, g):
if isinstance(g, GEOM_PTR):
return capi.geom_clone(g)
else:
return capi.geom_clone(g.ptr)
def _construct_ring(self, param, msg=(
'Parameter must be a sequence of LinearRings or objects that can initialize to LinearRings')):
"Helper routine for trying to construct a ring from the given parameter."
if isinstance(param, LinearRing):
return param
try:
ring = LinearRing(param)
return ring
except TypeError:
raise TypeError(msg)
def _set_list(self, length, items):
# Getting the current pointer, replacing with the newly constructed
# geometry, and destroying the old geometry.
prev_ptr = self.ptr
srid = self.srid
self.ptr = self._create_polygon(length, items)
if srid:
self.srid = srid
capi.destroy_geom(prev_ptr)
def _get_single_internal(self, index):
"""
Returns the ring at the specified index. The first index, 0, will
always return the exterior ring. Indices > 0 will return the
interior ring at the given index (e.g., poly[1] and poly[2] would
return the first and second interior ring, respectively).
CAREFUL: Internal/External are not the same as Interior/Exterior!
_get_single_internal returns a pointer from the existing geometries for use
internally by the object's methods. _get_single_external returns a clone
of the same geometry for use by external code.
"""
if index == 0:
return capi.get_extring(self.ptr)
else:
# Getting the interior ring, have to subtract 1 from the index.
return capi.get_intring(self.ptr, index - 1)
def _get_single_external(self, index):
return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
_set_single = GEOSGeometry._set_single_rebuild
_assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
# #### Polygon Properties ####
@property
def num_interior_rings(self):
"Returns the number of interior rings."
# Getting the number of rings
return capi.get_nrings(self.ptr)
def _get_ext_ring(self):
"Gets the exterior ring of the Polygon."
return self[0]
def _set_ext_ring(self, ring):
"Sets the exterior ring of the Polygon."
self[0] = ring
# Properties for the exterior ring/shell.
exterior_ring = property(_get_ext_ring, _set_ext_ring)
shell = exterior_ring
@property
def tuple(self):
"Gets the tuple for each ring in this Polygon."
return tuple(self[i].tuple for i in range(len(self)))
coords = tuple
@property
def kml(self):
"Returns the KML representation of this Polygon."
inner_kml = ''.join("<innerBoundaryIs>%s</innerBoundaryIs>" % self[i + 1].kml
for i in range(self.num_interior_rings))
return "<Polygon><outerBoundaryIs>%s</outerBoundaryIs>%s</Polygon>" % (self[0].kml, inner_kml)
| bsd-3-clause | 7,671,372,188,479,111,000 | 37.661017 | 106 | 0.596668 | false |
0x0all/nupic | py/regions/ImageSensorFilters/LogPolar.py | 17 | 9875 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file defines the LogPolar filter, an ImageSensor filter that distorts
incoming images in a "fish-eye" manner.
"""
from PIL import Image
import numpy
from nupic.regions.ImageSensorFilters.BaseFilter import BaseFilter
class LogPolar(BaseFilter):
"""
Apply a LogPolar transformation to the original image
"""
def __init__(self, xsize, ysize, c, preserveCenterResolution=False, Debug=False):
"""
Initializes the kernel matrices, a one-time cost, which are then applied to
each image.
@param xsize -- The x-dimension size of the desired output
@param ysize -- The y-dimension size of the desired output
    @param c       -- Parameterizes how much the image bends (i.e. how steep the
fish-eye. c=0 is no distortion. c=3 is severe.
@param preserveCenterResolution -- if True, the resolution of the center of the
image will be preserved and the edges will be sub-sampled.
If False, the center pixels will be blown up to keep the
outside corners at the original resolution.
@param Debug -- Determines whether to compute and save some intermediate
data structures for debugging (deltaxmat, deltaymat, scales).
"""
BaseFilter.__init__(self)
# Init params
self._lastOutputImage = None
self._xsize = xsize
self._ysize = ysize
self._c = c
self._pcr = preserveCenterResolution
self._debug = Debug
self._kernelx = None
self._kernely = None
self._kernel = None
self._imgSize = (-1,-1)
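  # Minimal usage sketch (illustrative, not from the original source; the
  # 256x256 output size and c=1.5 are arbitrary assumptions):
  #   logpolar = LogPolar(256, 256, c=1.5)
  #   warped = logpolar.process(pil_image)  # PIL image with alpha mask attached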
def process(self, image):
"""
Perform LogPolar filtering on the input image and return the response
@param image -- The image to process
"""
#image.save("logPolarDebugInput.png")
BaseFilter.process(self, image)
#image.save("logPolarDebugPostBase.png")
out = self._applyKernel(image, 0)
outImg = Image.fromarray(out.astype(numpy.int8))
#outImg.save("logPolarDebug.png")
maskOut = self._applyKernel(image, 1)
maskOutImg = Image.fromarray(maskOut.astype(numpy.int8))
outImg.putalpha(maskOutImg)
#outImg.save("logPolarDebugMask.png")
self._lastOutputImage = outImg
return outImg
def _applyKernel(self, img, channel=0, Mirror=False):
"""
The "guts" of the filter. Takes an input PIL image and returns a numpy
array containing the output image data
@param img -- The image to process
@param channel -- Which part of the image to process
0: The image
1: The mask
@param Mirror -- If the image is smaller than the output, whether to mirror
the image (or to fill with zeros)
"""
# Create the kernel if we haven't done so already
if self._kernelx is None:
self._makeKernel(self._xsize, self._ysize, self._c, Debug=self._debug, Save=False)
# Get the input image into a flattened array
data = numpy.array(img.split()[channel].getdata())
# Add a sentinel pixel at the end which is set to the background color
data = numpy.resize(data, data.size+1)
data[-1] = self.background
# See if we need to re-compute our clipped, flattened kernel, which depends on the
# image size
if img.size != self._imgSize:
      # Convert our kernel matrix center to the center of the input image, and mark indices
# that are outside the bounds of the input image with a sentinel
sentinel = -1 * img.size[0] * img.size[1]
kxBig = self._ceilFoor(self._kernelx, img.size[1], None, Mirror).astype('int')
kyBig = self._ceilFoor(self._kernely, img.size[0], None, Mirror).astype('int')
# When preserving the resolution at the edges, we make the output image size the
# same as the input image size. So, when the input image size is smaller than our
# kernel, we have to clip the outside edges of our kernel
if not self._pcr:
kx = self._cropMatCenter(kxBig, (img.size[1],img.size[0]))
ky = self._cropMatCenter(kyBig, (img.size[1],img.size[0]))
matSize = (img.size[1],img.size[0])
else:
kx = kxBig
ky = kyBig
matSize = (self._ysize, self._xsize)
# Convert our kernel to indices into the flattened array of the input image.
kernel = (kx + ky*img.size[0]).flatten()
# Convert all negative indices (sentinels) to reference the last element of the data
kernel[kernel < 0] = -1
self._kernel = kernel
self._imgSize = img.size
self._matSize = matSize
# Map the output from the kernel
output = data[self._kernel].reshape(self._matSize)
return output
def _ceilFoor(self, mat, width, sentinel, Mirror=False):
"""
Center our kernel matrix around the center of the given input image and ensure that
the kernel matrix does not try to access pixels outside the input data array.
"""
out = mat.copy()
# Re-center around the image center
maxIdx = width-1
out += maxIdx / 2.0
# Mark the indices that go outside the source image with a sentinel, we will use these as
# indicators to plug-in the background pixel value
if Mirror:
out[out < 0] = -out[out < 0]
out[out > maxIdx] = 2 * maxIdx-out[out > maxIdx]
else:
if sentinel is not None:
out[out < 0] = sentinel
out[out > maxIdx] = sentinel
else:
out[out < 0] = 0
out[out > maxIdx] = maxIdx
return out
def _cropMatCenter(self, mat, outSize):
"""
Crops mat to be outSize, maintaining the original center.
"""
(xsize, ysize) = outSize
if mat.shape[0] < xsize or mat.shape[1] < ysize:
raise ValueError("Mat shape %s must be >= (xsize=%i,ysize=%i)" %(str(mat.shape), xsize,ysize))
mcenterx = mat.shape[0]/2.
mcentery = mat.shape[1]/2.
x0 = int(mcenterx - xsize/2.)
y0 = int(mcentery - ysize/2.)
return mat[x0:x0+xsize, y0:y0+ysize]
def _makeKernel(self, xsize, ysize, c, Debug=False, Save=True):
"""
Make the original kernel matrices, of size (xsize,ysize) and with bending
parameter c. Debug determines whether to compute and store data structures
useful for debugging (deltaxmat, deltaymat, scales). Save determines whether
to save the kernel matrices to disk, eg. to be loaded later instead of
recomputed.
"""
# Numeric errors if c is exactly zero:
if c == 0:
c = 0.001
    centerx = (xsize-1)/2.
    centery = (ysize-1)/2.
self._kernelx = numpy.zeros((ysize,xsize))
self._kernely = numpy.zeros((ysize,xsize))
if Debug:
self._deltaxmat = numpy.zeros((ysize,xsize))
self._deltaymat = numpy.zeros((ysize,xsize))
self._scales = numpy.zeros((ysize,xsize))
hypotmax = numpy.sqrt(numpy.power(xsize-1-centerx,2) + \
numpy.power(ysize-1-centery,2))
k = 1 / (numpy.exp(c) - 1)
# Are we preserving the center resolution? If so, compute the factor required
# to give a scale of 1 to the center pixels
if self._pcr:
scaleInCenter = k * (numpy.exp(c*1.0/hypotmax)-1) / (1.0/hypotmax)
scaleFactor = 1.0/scaleInCenter
else:
scaleFactor = 1.0
for row in range(ysize):
for col in range(xsize):
if (col != centerx) or (row != centery):
deltax = col-centerx
deltay = row-centery
hypot = numpy.sqrt(numpy.power(deltax,2) + numpy.power(deltay,2))
scale = scaleFactor * k * (numpy.exp(c*hypot/hypotmax)-1) / (hypot/hypotmax)
# scale = numpy.power(hypot/centerx, 1.1) / hypot
self._kernelx[row,col] = scale*deltax
self._kernely[row,col] = scale*deltay
if Debug:
self._deltaxmat[row,col] = deltax
self._deltaymat[row,col] = deltay
self._scales[row,col] = scale
    # Compute the optimum input image size so that the output image fills the self._xsize,
# self._ysize destination image
if self._pcr:
optSrcWidth = self._kernelx[centery][-1] * 2
optSrcHeight = self._kernely[-1][centerx] * 2
print "LogPolar Filter: Optimum input image size for this value of c (%f)" % (c), \
"is %d x %d (width x height)" % (optSrcWidth, optSrcHeight)
if Save:
import cPickle
f = open('kernelx%ix%ic%.2f.dat' %(xsize,ysize,c),'w')
cPickle.dump(self._kernelx, f)
f.close()
f = open('kernely%ix%ic%.2f.dat' %(xsize,ysize,c),'w')
cPickle.dump(self._kernely, f)
f.close()
if Debug:
f = open('deltax%ix%i.dat' %(xsize,ysize),'w')
cPickle.dump(self._deltaxmat, f)
f.close()
f = open('deltay%ix%i.dat' %(xsize,ysize),'w')
cPickle.dump(self._deltaymat, f)
f.close()
f = open('scales%ix%ic%.2f.dat' %(xsize,ysize,c),'w')
cPickle.dump(self._scales, f)
f.close()
| gpl-3.0 | 4,320,849,482,652,025,300 | 36.405303 | 100 | 0.629772 | false |
Colorfulstan/robotframework | src/robot/variables/tablesetter.py | 17 | 5033 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from robot.errors import DataError
from robot.utils import split_from_equals, unic, is_string, DotDict
from .isvar import validate_var
from .splitter import VariableSplitter
class VariableTableSetter(object):
def __init__(self, store):
self._store = store
def set(self, variables, overwrite=False):
for name, value in VariableTableReader().read(variables):
self._store.add(name, value, overwrite, decorated=False)
class VariableTableReader(object):
def read(self, variables):
for var in variables:
if not var:
continue
try:
yield self._get_name_and_value(var.name, var.value,
var.report_invalid_syntax)
except DataError as err:
var.report_invalid_syntax(err)
def _get_name_and_value(self, name, value, error_reporter):
return name[2:-1], VariableTableValue(value, name, error_reporter)
def VariableTableValue(value, name, error_reporter=None):
validate_var(name)
VariableTableValue = {'$': ScalarVariableTableValue,
'@': ListVariableTableValue,
'&': DictVariableTableValue}[name[0]]
return VariableTableValue(value, error_reporter)
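# Illustrative mapping performed by the factory above (variable names assumed):
#   VariableTableValue('42', '${SCALAR}')    -> ScalarVariableTableValue
#   VariableTableValue(['a'], '@{LIST}')     -> ListVariableTableValue
#   VariableTableValue(['k=v'], '&{DICT}')   -> DictVariableTableValue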
class VariableTableValueBase(object):
def __init__(self, values, error_reporter=None):
self._values = self._format_values(values)
self._error_reporter = error_reporter
self._resolving = False
def _format_values(self, values):
return values
def resolve(self, variables):
with self._avoid_recursion:
return self._replace_variables(self._values, variables)
@property
@contextmanager
def _avoid_recursion(self):
if self._resolving:
raise DataError('Recursive variable definition.')
self._resolving = True
try:
yield
finally:
self._resolving = False
def _replace_variables(self, value, variables):
raise NotImplementedError
def report_error(self, error):
if self._error_reporter:
self._error_reporter(unicode(error))
class ScalarVariableTableValue(VariableTableValueBase):
def _format_values(self, values):
separator = None
if is_string(values):
values = [values]
elif values and values[0].startswith('SEPARATOR='):
separator = values.pop(0)[10:]
return separator, values
def _replace_variables(self, values, variables):
separator, values = values
if (separator is None and len(values) == 1 and
not VariableSplitter(values[0]).is_list_variable()):
return variables.replace_scalar(values[0])
if separator is None:
separator = ' '
separator = variables.replace_string(separator)
values = variables.replace_list(values)
return separator.join(unic(item) for item in values)
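    # Worked example of the SEPARATOR handling above (illustrative values):
    # a table row ``${GREETING}    SEPARATOR=-    hello    world`` is stored
    # as ('-', ['hello', 'world']) and resolves to 'hello-world'.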
class ListVariableTableValue(VariableTableValueBase):
def _replace_variables(self, values, variables):
return variables.replace_list(values)
class DictVariableTableValue(VariableTableValueBase):
def _format_values(self, values):
return list(self._yield_formatted(values))
def _yield_formatted(self, values):
for item in values:
if VariableSplitter(item).is_dict_variable():
yield item
else:
name, value = split_from_equals(item)
if value is None:
raise DataError("Dictionary item '%s' does not contain "
"'=' separator." % item)
yield name, value
def _replace_variables(self, values, variables):
try:
return DotDict(self._yield_replaced(values,
variables.replace_scalar))
except TypeError as err:
raise DataError('Creating dictionary failed: %s' % err)
def _yield_replaced(self, values, replace_scalar):
for item in values:
if isinstance(item, tuple):
key, values = item
yield replace_scalar(key), replace_scalar(values)
else:
for key, values in replace_scalar(item).items():
yield key, values
| apache-2.0 | 326,204,359,237,540,030 | 32.778523 | 76 | 0.62269 | false |
hpcloud-mon/tempest | tempest/cmd/run_stress.py | 14 | 4969 | #!/usr/bin/env python
# Copyright 2013 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import json
import sys
try:
from unittest import loader
except ImportError:
# unittest in python 2.6 does not contain loader, so uses unittest2
from unittest2 import loader
from oslo_log import log as logging
from testtools import testsuite
from tempest.stress import driver
LOG = logging.getLogger(__name__)
def discover_stress_tests(path="./", filter_attr=None, call_inherited=False):
"""Discovers all tempest tests and create action out of them
"""
LOG.info("Start test discovery")
tests = []
testloader = loader.TestLoader()
    suite = testloader.discover(path)
    for func in testsuite.iterate_tests(suite):
attrs = []
try:
method_name = getattr(func, '_testMethodName')
full_name = "%s.%s.%s" % (func.__module__,
func.__class__.__name__,
method_name)
test_func = getattr(func, method_name)
# NOTE(mkoderer): this contains a list of all type attributes
attrs = getattr(test_func, "__testtools_attrs")
except Exception:
            continue
if 'stress' in attrs:
if filter_attr is not None and filter_attr not in attrs:
continue
class_setup_per = getattr(test_func, "st_class_setup_per")
action = {'action':
"tempest.stress.actions.unit_test.UnitTest",
'kwargs': {"test_method": full_name,
"class_setup_per": class_setup_per
}
}
if (not call_inherited and
getattr(test_func, "st_allow_inheritance") is not True):
class_structure = inspect.getmro(test_func.im_class)
if test_func.__name__ not in class_structure[0].__dict__:
continue
tests.append(action)
return tests
parser = argparse.ArgumentParser(description='Run stress tests')
parser.add_argument('-d', '--duration', default=300, type=int,
help="Duration of test in secs")
parser.add_argument('-s', '--serial', action='store_true',
help="Trigger running tests serially")
parser.add_argument('-S', '--stop', action='store_true',
default=False, help="Stop on first error")
parser.add_argument('-n', '--number', type=int,
help="How often an action is executed for each process")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-a', '--all', action='store_true',
help="Execute all stress tests")
parser.add_argument('-T', '--type',
help="Filters tests of a certain type (e.g. gate)")
parser.add_argument('-i', '--call-inherited', action='store_true',
default=False,
help="Call also inherited function with stress attribute")
group.add_argument('-t', "--tests", nargs='?',
help="Name of the file with test description")
def main():
ns = parser.parse_args()
result = 0
if not ns.all:
tests = json.load(open(ns.tests, 'r'))
else:
tests = discover_stress_tests(filter_attr=ns.type,
call_inherited=ns.call_inherited)
if ns.serial:
# Duration is total time
duration = ns.duration / len(tests)
for test in tests:
step_result = driver.stress_openstack([test],
duration,
ns.number,
ns.stop)
# NOTE(mkoderer): we just save the last result code
if (step_result != 0):
result = step_result
if ns.stop:
return result
else:
result = driver.stress_openstack(tests,
ns.duration,
ns.number,
ns.stop)
return result
if __name__ == "__main__":
try:
sys.exit(main())
except Exception:
LOG.exception("Failure in the stress test framework")
sys.exit(1)
| apache-2.0 | -8,572,493,170,697,134,000 | 37.223077 | 78 | 0.554639 | false |
yugui/grpc | tools/run_tests/artifacts/distribtest_targets.py | 10 | 12759 | #!/usr/bin/env python2.7
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Definition of targets run distribution package tests."""
import os.path
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
flake_retries=0, timeout_retries=0):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
environ['RELATIVE_COPY_PATH'] = 'test/distrib'
docker_args=[]
for k,v in environ.items():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'}
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
environ=docker_env,
shortname='distribtest.%s' % (name),
timeout_seconds=30*60,
flake_retries=flake_retries,
timeout_retries=timeout_retries)
return jobspec
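# Illustrative call of the helper above (name and paths are placeholders
# modelled on the build_jobspec methods below, not from the original source):
#   create_docker_jobspec('python_linux_x64_jessie',
#                         'tools/dockerfile/distribtest/python_jessie_x64',
#                         'test/distrib/python/run_distrib_test.sh')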
def create_jobspec(name, cmdline, environ=None, shell=False,
flake_retries=0, timeout_retries=0):
"""Creates jobspec."""
jobspec = jobset.JobSpec(
cmdline=cmdline,
environ=environ,
shortname='distribtest.%s' % (name),
timeout_seconds=10*60,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell)
return jobspec
class CSharpDistribTest(object):
"""Tests C# NuGet package"""
def __init__(self, platform, arch, docker_suffix=None, use_dotnet_cli=False):
self.name = 'csharp_nuget_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'csharp', platform, arch]
self.script_suffix = ''
if docker_suffix:
self.name += '_%s' % docker_suffix
self.labels.append(docker_suffix)
if use_dotnet_cli:
self.name += '_dotnetcli'
self.script_suffix = '_dotnetcli'
self.labels.append('dotnetcli')
else:
self.labels.append('olddotnet')
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/csharp_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix)
elif self.platform == 'macos':
return create_jobspec(self.name,
['test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix],
environ={'EXTERNAL_GIT_ROOT': '../../..'})
elif self.platform == 'windows':
if self.arch == 'x64':
environ={'MSBUILD_EXTRA_ARGS': '/p:Platform=x64',
'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'}
else:
        environ={'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
return create_jobspec(self.name,
['test\\distrib\\csharp\\run_distrib_test%s.bat' % self.script_suffix],
environ=environ)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class NodeDistribTest(object):
"""Tests Node package"""
def __init__(self, platform, arch, docker_suffix, node_version):
self.name = 'node_npm_%s_%s_%s' % (platform, arch, node_version)
self.platform = platform
self.arch = arch
self.node_version = node_version
self.labels = ['distribtest', 'node', platform, arch,
'node-%s' % node_version]
if docker_suffix is not None:
self.name += '_%s' % docker_suffix
self.docker_suffix = docker_suffix
self.labels.append(docker_suffix)
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
linux32 = ''
if self.arch == 'x86':
linux32 = 'linux32'
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/node_%s_%s' % (
self.docker_suffix,
self.arch),
'%s test/distrib/node/run_distrib_test.sh %s' % (
linux32,
self.node_version))
elif self.platform == 'macos':
return create_jobspec(self.name,
['test/distrib/node/run_distrib_test.sh',
str(self.node_version)],
environ={'EXTERNAL_GIT_ROOT': '../../..'})
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class PythonDistribTest(object):
"""Tests Python package"""
def __init__(self, platform, arch, docker_suffix):
self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if not self.platform == 'linux':
raise Exception("Not supported yet.")
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/python_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/python/run_distrib_test.sh')
def __str__(self):
return self.name
class RubyDistribTest(object):
"""Tests Ruby package"""
def __init__(self, platform, arch, docker_suffix):
self.name = 'ruby_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if not self.platform == 'linux':
raise Exception("Not supported yet.")
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/ruby_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/ruby/run_distrib_test.sh')
def __str__(self):
return self.name
class PHPDistribTest(object):
"""Tests PHP package"""
def __init__(self, platform, arch, docker_suffix=None):
self.name = 'php_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'php', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/php_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/php/run_distrib_test.sh')
elif self.platform == 'macos':
return create_jobspec(self.name,
['test/distrib/php/run_distrib_test.sh'],
environ={'EXTERNAL_GIT_ROOT': '../../..'})
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class CppDistribTest(object):
"""Tests Cpp make intall by building examples."""
def __init__(self, platform, arch, docker_suffix=None):
self.name = 'cpp_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'cpp', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/cpp_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/cpp/run_distrib_test.sh')
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
def targets():
"""Gets list of supported targets"""
return [CppDistribTest('linux', 'x64', 'jessie'),
CSharpDistribTest('linux', 'x64', 'wheezy'),
CSharpDistribTest('linux', 'x64', 'jessie'),
CSharpDistribTest('linux', 'x86', 'jessie'),
CSharpDistribTest('linux', 'x64', 'centos7'),
CSharpDistribTest('linux', 'x64', 'ubuntu1404'),
CSharpDistribTest('linux', 'x64', 'ubuntu1504'),
CSharpDistribTest('linux', 'x64', 'ubuntu1510'),
CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
CSharpDistribTest('linux', 'x64', 'ubuntu1404', use_dotnet_cli=True),
CSharpDistribTest('macos', 'x86'),
CSharpDistribTest('windows', 'x86'),
CSharpDistribTest('windows', 'x64'),
PythonDistribTest('linux', 'x64', 'wheezy'),
PythonDistribTest('linux', 'x64', 'jessie'),
PythonDistribTest('linux', 'x86', 'jessie'),
PythonDistribTest('linux', 'x64', 'centos6'),
PythonDistribTest('linux', 'x64', 'centos7'),
PythonDistribTest('linux', 'x64', 'fedora20'),
PythonDistribTest('linux', 'x64', 'fedora21'),
PythonDistribTest('linux', 'x64', 'fedora22'),
PythonDistribTest('linux', 'x64', 'fedora23'),
PythonDistribTest('linux', 'x64', 'opensuse'),
PythonDistribTest('linux', 'x64', 'arch'),
PythonDistribTest('linux', 'x64', 'ubuntu1204'),
PythonDistribTest('linux', 'x64', 'ubuntu1404'),
PythonDistribTest('linux', 'x64', 'ubuntu1504'),
PythonDistribTest('linux', 'x64', 'ubuntu1510'),
PythonDistribTest('linux', 'x64', 'ubuntu1604'),
RubyDistribTest('linux', 'x64', 'wheezy'),
RubyDistribTest('linux', 'x64', 'jessie'),
RubyDistribTest('linux', 'x86', 'jessie'),
RubyDistribTest('linux', 'x64', 'centos6'),
RubyDistribTest('linux', 'x64', 'centos7'),
RubyDistribTest('linux', 'x64', 'fedora20'),
RubyDistribTest('linux', 'x64', 'fedora21'),
RubyDistribTest('linux', 'x64', 'fedora22'),
RubyDistribTest('linux', 'x64', 'fedora23'),
RubyDistribTest('linux', 'x64', 'opensuse'),
RubyDistribTest('linux', 'x64', 'ubuntu1204'),
RubyDistribTest('linux', 'x64', 'ubuntu1404'),
RubyDistribTest('linux', 'x64', 'ubuntu1504'),
RubyDistribTest('linux', 'x64', 'ubuntu1510'),
RubyDistribTest('linux', 'x64', 'ubuntu1604'),
NodeDistribTest('macos', 'x64', None, '4'),
NodeDistribTest('macos', 'x64', None, '5'),
NodeDistribTest('linux', 'x86', 'jessie', '4'),
PHPDistribTest('linux', 'x64', 'jessie'),
PHPDistribTest('macos', 'x64'),
] + [
NodeDistribTest('linux', 'x64', os, version)
for os in ('wheezy', 'jessie', 'ubuntu1204', 'ubuntu1404',
'ubuntu1504', 'ubuntu1510', 'ubuntu1604')
for version in ('0.12', '3', '4', '5')
]
| bsd-3-clause | 1,437,540,140,894,283,300 | 37.086567 | 86 | 0.600831 | false |
josh-willis/pycbc | tools/einsteinathome/check_GW150914_detection.py | 9 | 1290 | # Read a pycbc_inspiral HDF5 trigger file and check that it contains triggers
# compatible with GW150914
# 2016 Tito Dal Canton
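# Example invocation (illustrative; the trigger file name is a placeholder):
#   python check_GW150914_detection.py H1-INSPIRAL_GW150914.hdf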
import sys
import h5py
import numpy as np
# GW150914 params from my run
# https://www.atlas.aei.uni-hannover.de/~tito/LSC/er8/er8b_c00_1.2.0_run1
gw150914_time = 1126259462.4
gw150914_snr = {'H1': 19.71, 'L1': 13.28}
gw150914_chi2r = {'H1': 1.05, 'L1': 0.45}
f = h5py.File(sys.argv[1], 'r')
detector = tuple(f.keys())[0]
end_times = f[detector]['end_time'][:]
snrs = f[detector]['snr'][:]
chi2rs = f[detector]['chisq'][:] / (2 * f[detector]['chisq_dof'][:] - 2)
# search for trigs compatible with GW150914
mask = np.logical_and.reduce([abs(end_times - gw150914_time) < 0.1,
snrs > 0.8 * gw150914_snr[detector],
snrs < 1.2 * gw150914_snr[detector],
chi2rs > 0.8 * gw150914_chi2r[detector],
chi2rs < 1.2 * gw150914_chi2r[detector]])
if mask.any():
print('Pass: %d GW150914-like triggers' % sum(mask))
print('end_time snr reduced_chi2')
for t, s, c in zip(end_times[mask], snrs[mask], chi2rs[mask]):
print('%.3f %.3f %.3f' % (t, s, c))
sys.exit(0)
else:
print('Fail: no GW150914-like triggers')
sys.exit(1)
| gpl-3.0 | -299,593,791,056,402,200 | 33.864865 | 77 | 0.593798 | false |
Eksmo/calibre | src/calibre/gui2/dialogs/restore_library.py | 4 | 4621 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
from PyQt4.Qt import (QDialog, QLabel, QVBoxLayout, QDialogButtonBox,
QProgressBar, QSize, QTimer, pyqtSignal, Qt)
from calibre.library.restore import Restore
from calibre.gui2 import (error_dialog, question_dialog, warning_dialog,
info_dialog)
from calibre import force_unicode
from calibre.constants import filesystem_encoding
class DBRestore(QDialog):
update_signal = pyqtSignal(object, object)
def __init__(self, parent, library_path):
QDialog.__init__(self, parent)
self.l = QVBoxLayout()
self.setLayout(self.l)
self.l1 = QLabel('<b>'+_('Restoring database from backups, do not'
' interrupt, this will happen in three stages')+'...')
self.setWindowTitle(_('Restoring database'))
self.l.addWidget(self.l1)
self.pb = QProgressBar(self)
self.l.addWidget(self.pb)
self.pb.setMaximum(0)
self.pb.setMinimum(0)
self.msg = QLabel('')
self.l.addWidget(self.msg)
self.msg.setWordWrap(True)
self.bb = QDialogButtonBox(QDialogButtonBox.Cancel)
self.l.addWidget(self.bb)
self.bb.rejected.connect(self.reject)
self.resize(self.sizeHint() + QSize(100, 50))
self.error = None
self.rejected = False
self.library_path = library_path
self.update_signal.connect(self.do_update, type=Qt.QueuedConnection)
self.restorer = Restore(library_path, self)
self.restorer.daemon = True
# Give the metadata backup thread time to stop
QTimer.singleShot(2000, self.start)
def start(self):
self.restorer.start()
QTimer.singleShot(10, self.update)
def reject(self):
self.rejected = True
self.restorer.progress_callback = lambda x, y: x
QDialog.reject(self)
def update(self):
if self.restorer.is_alive():
QTimer.singleShot(10, self.update)
else:
self.restorer.progress_callback = lambda x, y: x
self.accept()
def __call__(self, msg, step):
self.update_signal.emit(msg, step)
def do_update(self, msg, step):
if msg is None:
self.pb.setMaximum(step)
else:
self.msg.setText(msg)
self.pb.setValue(step)
def _show_success_msg(restorer, parent=None):
r = restorer
olddb = _('The old database was saved as: %s')%force_unicode(r.olddb,
filesystem_encoding)
if r.errors_occurred:
warning_dialog(parent, _('Success'),
_('Restoring the database succeeded with some warnings'
' click Show details to see the details. %s')%olddb,
det_msg=r.report, show=True)
else:
info_dialog(parent, _('Success'),
_('Restoring database was successful. %s')%olddb, show=True,
show_copy_button=False)
def restore_database(db, parent=None):
if not question_dialog(parent, _('Are you sure?'), '<p>'+
_('Your list of books, with all their metadata is '
'stored in a single file, called a database. '
'In addition, metadata for each individual '
'book is stored in that books\' folder, as '
'a backup.'
'<p>This operation will rebuild '
'the database from the individual book '
'metadata. This is useful if the '
'database has been corrupted and you get a '
'blank list of books.'
'<p>Do you want to restore the database?')):
return False
db.conn.close()
d = DBRestore(parent, db.library_path)
d.exec_()
r = d.restorer
d.restorer = None
if d.rejected:
return True
if r.tb is not None:
error_dialog(parent, _('Failed'),
_('Restoring database failed, click Show details to see details'),
det_msg=r.tb, show=True)
else:
_show_success_msg(r, parent=parent)
return True
def repair_library_at(library_path, parent=None):
d = DBRestore(parent, library_path)
d.exec_()
if d.rejected:
return False
r = d.restorer
if r.tb is not None:
error_dialog(parent, _('Failed'),
_('Restoring database failed, click Show details to see details'),
det_msg=r.tb, show=True)
return False
_show_success_msg(r, parent=parent)
return True
| gpl-3.0 | 4,900,813,173,412,322,000 | 33.22963 | 76 | 0.602034 | false |
djwbrown/swift | benchmark/scripts/compare_perf_tests.py | 10 | 16354 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ===--- compare_perf_tests.py -------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===//
from __future__ import print_function
import argparse
import csv
import sys
from math import sqrt
class PerformanceTestResult(object):
"""PerformanceTestResult holds results from executing an individual
benchmark from the Swift Benchmark Suite as reported by the test driver
(Benchmark_O, Benchmark_Onone, Benchmark_Ounchecked or Benchmark_Driver).
It depends on the log format emitted by the test driver in the form:
#,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),MAX_RSS(B)
The last column, MAX_RSS, is emitted only for runs instrumented by the
Benchmark_Driver to measure rough memory use during the execution of the
benchmark.
"""
def __init__(self, csv_row):
"""PerformanceTestResult instance is created from an iterable with
length of 8 or 9. (Like a row provided by the CSV parser.)
"""
# csv_row[0] is just an ordinal number of the test - skip that
self.name = csv_row[1] # Name of the performance test
self.samples = int(csv_row[2]) # Number of measurement samples taken
        self.min = int(csv_row[3])      # Minimum runtime (μs)
        self.max = int(csv_row[4])      # Maximum runtime (μs)
        self.mean = int(csv_row[5])     # Mean (average) runtime (μs)
        sd = int(csv_row[6])            # Standard Deviation (μs)
# For computing running variance
self.S_runtime = (0 if self.samples < 2 else
(sd * sd) * (self.samples - 1))
        self.median = int(csv_row[7])   # Median runtime (μs)
self.max_rss = ( # Maximum Resident Set Size (B)
int(csv_row[8]) if len(csv_row) > 8 else None)
def __repr__(self):
return (
'<PerformanceTestResult name:{0.name!r} '
'samples:{0.samples!r} min:{0.min!r} max:{0.max!r} '
'mean:{0.mean!r} sd:{0.sd!r} median:{0.median!r}>'.format(self))
@property
def sd(self):
"""Standard Deviation (ms)"""
return (0 if self.samples < 2 else
sqrt(self.S_runtime / (self.samples - 1)))
@staticmethod
def running_mean_variance((k, M_, S_), x):
"""
Compute running variance, B. P. Welford's method
See Knuth TAOCP vol 2, 3rd edition, page 232, or
https://www.johndcook.com/blog/standard_deviation/
M is mean, Standard Deviation is defined as sqrt(S/k-1)
"""
k = float(k + 1)
M = M_ + (x - M_) / k
S = S_ + (x - M_) * (x - M)
return (k, M, S)
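    # Worked example of one Welford update (illustrative numbers):
    #   >>> PerformanceTestResult.running_mean_variance((1, 10.0, 0.0), 14)
    #   (2.0, 12.0, 8.0)
    # i.e. M = 10 + (14 - 10)/2 = 12.0 and S = 0 + (14 - 10)*(14 - 12) = 8.0,
    # so the sample variance is S/(k-1) = 8.0 and sd = sqrt(8.0) ~= 2.83.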
def merge(self, r):
"""Merging test results recomputes min and max.
It attempts to recompute mean and standard deviation when all_samples
are available. There is no correct way to compute these values from
test results that are summaries from more than 3 samples.
The use case here is comparing tests results parsed from concatenated
log files from multiple runs of benchmark driver.
"""
self.min = min(self.min, r.min)
self.max = max(self.max, r.max)
# self.median = None # unclear what to do here
def push(x):
state = (self.samples, self.mean, self.S_runtime)
state = self.running_mean_variance(state, x)
(self.samples, self.mean, self.S_runtime) = state
# Merging test results with up to 3 samples is exact
values = [r.min, r.max, r.median][:min(r.samples, 3)]
map(push, values)
# Column labels for header row in results table
header = ('TEST', 'MIN', 'MAX', 'MEAN', 'MAX_RSS')
def values(self):
"""Values property for display in results table comparisons
in format: ('TEST', 'MIN', 'MAX', 'MEAN', 'MAX_RSS').
"""
return (
self.name,
str(self.min), str(self.max), str(int(self.mean)),
str(self.max_rss) if self.max_rss else '—'
)
class ResultComparison(object):
"""ResultComparison compares MINs from new and old PerformanceTestResult.
It computes speedup ratio and improvement delta (%).
"""
def __init__(self, old, new):
self.old = old
self.new = new
assert(old.name == new.name)
self.name = old.name # Test name, convenience accessor
# Speedup ratio
self.ratio = (old.min + 0.001) / (new.min + 0.001)
# Test runtime improvement in %
ratio = (new.min + 0.001) / (old.min + 0.001)
self.delta = ((ratio - 1) * 100)
# Add ' (?)' to the speedup column as indication of dubious changes:
# result's MIN falls inside the (MIN, MAX) interval of result they are
# being compared with.
self.is_dubious = (
' (?)' if ((old.min < new.min and new.min < old.max) or
(new.min < old.min and old.min < new.max))
else '')
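        # Illustrative numbers (not from the source): old.min=100, new.min=80
        # gives ratio ~= 1.25 (a 1.25x speedup) and delta ~= -20.0%.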
# Column labels for header row in results table
header = ('TEST', 'OLD', 'NEW', 'DELTA', 'SPEEDUP')
def values(self):
"""Values property for display in results table comparisons
in format: ('TEST', 'OLD', 'NEW', 'DELTA', 'SPEEDUP').
"""
return (self.name,
str(self.old.min), str(self.new.min),
'{0:+.1f}%'.format(self.delta),
'{0:.2f}x{1}'.format(self.ratio, self.is_dubious))
class TestComparator(object):
"""TestComparator parses `PerformanceTestResult`s from CSV log files.
Then it determines which tests were `added`, `removed` and which can be
compared. It then splits the `ResultComparison`s into 3 groups according to
the `delta_threshold` by the change in performance: `increased`,
    `decreased` and `unchanged`.
The lists of `added`, `removed` and `unchanged` tests are sorted
alphabetically. The `increased` and `decreased` lists are sorted in
descending order by the amount of change.
"""
def __init__(self, old_file, new_file, delta_threshold):
def load_from_CSV(filename): # handles output from Benchmark_O and
def skip_totals(row): # Benchmark_Driver (added MAX_RSS column)
return len(row) > 7 and row[0].isdigit()
tests = map(PerformanceTestResult,
filter(skip_totals, csv.reader(open(filename))))
def add_or_merge(names, r):
if r.name not in names:
names[r.name] = r
else:
names[r.name].merge(r)
return names
return reduce(add_or_merge, tests, dict())
old_results = load_from_CSV(old_file)
new_results = load_from_CSV(new_file)
old_tests = set(old_results.keys())
new_tests = set(new_results.keys())
comparable_tests = new_tests.intersection(old_tests)
added_tests = new_tests.difference(old_tests)
removed_tests = old_tests.difference(new_tests)
self.added = sorted([new_results[t] for t in added_tests],
key=lambda r: r.name)
self.removed = sorted([old_results[t] for t in removed_tests],
key=lambda r: r.name)
def compare(name):
return ResultComparison(old_results[name], new_results[name])
comparisons = map(compare, comparable_tests)
def partition(l, p):
return reduce(lambda x, y: x[not p(y)].append(y) or x, l, ([], []))
decreased, not_decreased = partition(
comparisons, lambda c: c.ratio < (1 - delta_threshold))
increased, unchanged = partition(
not_decreased, lambda c: c.ratio > (1 + delta_threshold))
# sorted partitions
names = [c.name for c in comparisons]
comparisons = dict(zip(names, comparisons))
self.decreased = [comparisons[c.name]
for c in sorted(decreased, key=lambda c: -c.delta)]
self.increased = [comparisons[c.name]
for c in sorted(increased, key=lambda c: c.delta)]
self.unchanged = [comparisons[c.name]
for c in sorted(unchanged, key=lambda c: c.name)]
class ReportFormatter(object):
"""ReportFormatter formats the `PerformanceTestResult`s and
`ResultComparison`s provided by `TestComparator` using their `header` and
`values()` into report table. Supported formats are: `markdown` (used for
displaying benchmark results on GitHub), `git` and `html`.
"""
def __init__(self, comparator, old_branch, new_branch, changes_only):
self.comparator = comparator
self.old_branch = old_branch
self.new_branch = new_branch
self.changes_only = changes_only
MARKDOWN_DETAIL = """
<details {3}>
<summary>{0} ({1})</summary>
{2}
</details>
"""
GIT_DETAIL = """
{0} ({1}): {2}"""
def markdown(self):
return self._formatted_text(
ROW='{0} | {1} | {2} | {3} | {4} \n',
HEADER_SEPARATOR='---',
DETAIL=self.MARKDOWN_DETAIL)
def git(self):
return self._formatted_text(
ROW='{0} {1} {2} {3} {4} \n',
HEADER_SEPARATOR=' ',
DETAIL=self.GIT_DETAIL)
def _column_widths(self):
changed = self.comparator.decreased + self.comparator.increased
comparisons = (changed if self.changes_only else
changed + self.comparator.unchanged)
comparisons += self.comparator.added + self.comparator.removed
widths = [
map(len, columns) for columns in
[PerformanceTestResult.header, ResultComparison.header] +
[c.values() for c in comparisons]
]
def max_widths(maximum, widths):
return tuple(map(max, zip(maximum, widths)))
return reduce(max_widths, widths, tuple([0] * 5))
def _formatted_text(self, ROW, HEADER_SEPARATOR, DETAIL):
widths = self._column_widths()
def justify_columns(contents):
return tuple([c.ljust(w) for w, c in zip(widths, contents)])
def row(contents):
return ROW.format(*justify_columns(contents))
def header(header):
return '\n' + row(header) + row(tuple([HEADER_SEPARATOR] * 5))
def format_columns(r, strong):
return (r if not strong else
r[:-1] + ('**{0}**'.format(r[-1]), ))
def table(title, results, is_strong=False, is_open=False):
rows = [
row(format_columns(result_comparison.values(), is_strong))
for result_comparison in results
]
return ('' if not rows else
DETAIL.format(*[
title, len(results),
(header(results[0].header) + ''.join(rows)),
('open' if is_open else '')
]))
return ''.join([
# FIXME print self.old_branch, self.new_branch
table('Regression', self.comparator.decreased, True, True),
table('Improvement', self.comparator.increased, True),
('' if self.changes_only else
table('No Changes', self.comparator.unchanged)),
table('Added', self.comparator.added, is_open=True),
table('Removed', self.comparator.removed, is_open=True)
])
HTML = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<style>
body {{ font-family: -apple-system, sans-serif; font-size: 14px; }}
table {{ border-spacing: 2px; border-color: gray; border-spacing: 0;
border-collapse: collapse; }}
table tr {{ background-color: #fff; border-top: 1px solid #c6cbd1; }}
table th, table td {{ padding: 6px 13px; border: 1px solid #dfe2e5; }}
th {{ text-align: center; padding-top: 130px; }}
td {{ text-align: right; }}
table td:first-child {{ text-align: left; }}
tr:nth-child(even) {{ background-color: #000000; }}
tr:nth-child(2n) {{ background-color: #f6f8fa; }}
</style>
</head>
<body>
<table>
{0}
</table>
</body>
</html>"""
HTML_HEADER_ROW = """
<tr>
<th align='left'>{0} ({1})</th>
<th align='left'>{2}</th>
<th align='left'>{3}</th>
<th align='left'>{4}</th>
<th align='left'>{5}</th>
</tr>
"""
HTML_ROW = """
<tr>
<td align='left'>{0}</td>
<td align='left'>{1}</td>
<td align='left'>{2}</td>
<td align='left'>{3}</td>
<td align='left'><font color='{4}'>{5}</font></td>
</tr>
"""
def html(self):
def row(name, old, new, delta, speedup, speedup_color):
return self.HTML_ROW.format(
name, old, new, delta, speedup_color, speedup)
def header(contents):
return self.HTML_HEADER_ROW.format(* contents)
def table(title, results, speedup_color):
rows = [
row(*(result_comparison.values() + (speedup_color,)))
for result_comparison in results
]
return ('' if not rows else
header((title, len(results)) + results[0].header[1:]) +
''.join(rows))
return self.HTML.format(
''.join([
# FIXME print self.old_branch, self.new_branch
table('Regression', self.comparator.decreased, 'red'),
table('Improvement', self.comparator.increased, 'green'),
('' if self.changes_only else
table('No Changes', self.comparator.unchanged, 'black')),
table('Added', self.comparator.added, ''),
table('Removed', self.comparator.removed, '')
]))
def parse_args(args):
"""Parse command line arguments and set default values."""
parser = argparse.ArgumentParser(description='Compare Performance tests.')
parser.add_argument('--old-file',
help='Baseline performance test suite (csv file)',
required=True)
parser.add_argument('--new-file',
help='New performance test suite (csv file)',
required=True)
parser.add_argument('--format',
choices=['markdown', 'git', 'html'],
help='Output format. Default is markdown.',
default="markdown")
parser.add_argument('--output', help='Output file name')
parser.add_argument('--changes-only',
help='Output only affected tests', action='store_true')
parser.add_argument('--new-branch',
help='Name of the new branch', default='NEW_MIN')
parser.add_argument('--old-branch',
help='Name of the old branch', default='OLD_MIN')
parser.add_argument('--delta-threshold',
help='Delta threshold. Default 0.05.',
type=float, default=0.05)
return parser.parse_args(args)
def main():
args = parse_args(sys.argv[1:])
comparator = TestComparator(args.old_file, args.new_file,
args.delta_threshold)
formatter = ReportFormatter(comparator, args.old_branch, args.new_branch,
args.changes_only)
formats = {
'markdown': formatter.markdown,
'git': formatter.git,
'html': formatter.html
}
report = formats[args.format]()
print(report)
if args.output:
with open(args.output, 'w') as f:
f.write(report)
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | -5,498,540,851,723,611,000 | 37.283372 | 79 | 0.556983 | false |
BIDS-collaborative/cega-trace | q.py | 2 | 2030 | # -*- coding: utf-8 -*-
# import requests
# import urllib
import codecs  # needed by decode() below
import re
import urllib
import json
import sys
def search_doi(s):
url = "http://search.crossref.org/?q=" + convert_string(s)
htmlfile = urllib.urlopen(url)
htmltext = htmlfile.read()
regex ="href='http://dx.doi.org/"+ '(.*)' + "'>"
pattern = re.compile(regex)
match = re.search(pattern, htmltext)
if match:
doi = match.group(1)
else:
        print('DOI not found')
return None
print(doi)
return doi
def convert_string(s):
replaced = re.sub(',', '%2C' , s)
replaced = re.sub(';', '%3B', replaced)
replaced = re.sub(' ', '+', replaced)
replaced = re.sub(':', '%3A' , replaced)
replaced = re.sub('/', '%2F', replaced)
replaced = re.sub('&', '%26' , replaced)
replaced = re.sub(r'\(', '%28', replaced)
replaced = re.sub(r'\)', '%29', replaced)
return replaced
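# A minimal stdlib sketch of the same percent-encoding (assumption: Python 2
# urllib, matching the urllib.urlopen calls above); behaviour may differ for
# characters that convert_string does not handle explicitly.
def convert_string_stdlib(s):
    return urllib.quote_plus(s, safe='')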
# s = 'Benjamin, D., & Brandt, L.2002.Property rights, labour markets, and efficiency in a transition economy: the case of rural China.Canadian Journal of Economics, 35(4), 689-716.'
# search_doi(s)
# def check_matching(doi_citation, doi_title):
# if doi_citation == doi_title:
# return doi_title
# else:
# url_citation = "http://api.crossref.org/works/" + doi_citation
# url_title = "http://api.crossref.org/works/" + doi_title
# htmlfile_citation = urllib.urlopen(url_citation)
# htmlfile_title = urllib.urlopen(url_citation)
# htmltext_citation = htmlfile_citation.read()
# htmltext_titel = htmlfile_title.read()
def decode(parse_file):
with codecs.open(parse_file, 'r+', encoding='utf-8', errors='ignore') as txt_file:
txt = txt_file.readlines()
return txt
def main():
for i in range(2, 8): #range of files to run
name = (str(i) + '.txt')
data = decode(name) #decode if necessary
# data = open(name, 'r')
if data:
my_list = data
out = (str(i) + 'doi.txt')
outfile = open(out, 'w')
for line in my_list:
print(line)
                doi = search_doi(line)
                if doi:
                    outfile.write(doi + '\n')
outfile.close()
if __name__ == '__main__':
main()
| bsd-2-clause | 5,924,794,721,406,216,000 | 26.432432 | 182 | 0.632512 | false |
nbargnesi/tornado | tornado/iostream.py | 38 | 64263 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility classes to write to and read from non-blocking files and sockets.
Contents:
* `BaseIOStream`: Generic interface for reading and writing.
* `IOStream`: Implementation of BaseIOStream using non-blocking sockets.
* `SSLIOStream`: SSL-aware version of IOStream.
* `PipeIOStream`: Pipe-based IOStream implementation.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import errno
import numbers
import os
import socket
import sys
import re
from tornado.concurrent import TracebackFuture
from tornado import ioloop
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError, _client_ssl_defaults, _server_ssl_defaults
from tornado import stack_context
from tornado.util import errno_from_exception
try:
from tornado.platform.posix import _set_nonblocking
except ImportError:
_set_nonblocking = None
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine
ssl = None
# These errnos indicate that a non-blocking operation must be retried
# at a later time. On most platforms they're the same value, but on
# some they differ.
_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
if hasattr(errno, "WSAEWOULDBLOCK"):
_ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,)
# These errnos indicate that a connection has been abruptly terminated.
# They should be caught and handled less noisily than other errors.
_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE,
errno.ETIMEDOUT)
if hasattr(errno, "WSAECONNRESET"):
_ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT)
if sys.platform == 'darwin':
# OSX appears to have a race condition that causes send(2) to return
# EPROTOTYPE if called while a socket is being torn down:
# http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
# Since the socket is being closed anyway, treat this as an ECONNRESET
# instead of an unexpected error.
_ERRNO_CONNRESET += (errno.EPROTOTYPE,)
# More non-portable errnos:
_ERRNO_INPROGRESS = (errno.EINPROGRESS,)
if hasattr(errno, "WSAEINPROGRESS"):
_ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,)
class StreamClosedError(IOError):
"""Exception raised by `IOStream` methods when the stream is closed.
Note that the close callback is scheduled to run *after* other
callbacks on the stream (to allow for buffered data to be processed),
so you may see this error before you see the close callback.
"""
pass
class UnsatisfiableReadError(Exception):
"""Exception raised when a read cannot be satisfied.
Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes``
argument.
"""
pass
class StreamBufferFullError(Exception):
"""Exception raised by `IOStream` methods when the buffer is full.
"""
class BaseIOStream(object):
"""A utility class to write to and read from a non-blocking file or socket.
We support a non-blocking ``write()`` and a family of ``read_*()`` methods.
All of the methods take an optional ``callback`` argument and return a
`.Future` only if no callback is given. When the operation completes,
the callback will be run or the `.Future` will resolve with the data
read (or ``None`` for ``write()``). All outstanding ``Futures`` will
resolve with a `StreamClosedError` when the stream is closed; users
of the callback interface will be notified via
`.BaseIOStream.set_close_callback` instead.
When a stream is closed due to an error, the IOStream's ``error``
attribute contains the exception object.
Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
`read_from_fd`, and optionally `get_fd_error`.
"""
def __init__(self, io_loop=None, max_buffer_size=None,
read_chunk_size=None, max_write_buffer_size=None):
"""`BaseIOStream` constructor.
:arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`.
Deprecated since Tornado 4.1.
:arg max_buffer_size: Maximum amount of incoming data to buffer;
defaults to 100MB.
:arg read_chunk_size: Amount of data to read at one time from the
underlying transport; defaults to 64KB.
:arg max_write_buffer_size: Amount of outgoing data to buffer;
defaults to unlimited.
.. versionchanged:: 4.0
Add the ``max_write_buffer_size`` parameter. Changed default
``read_chunk_size`` to 64KB.
"""
self.io_loop = io_loop or ioloop.IOLoop.current()
self.max_buffer_size = max_buffer_size or 104857600
# A chunk size that is too close to max_buffer_size can cause
# spurious failures.
self.read_chunk_size = min(read_chunk_size or 65536,
self.max_buffer_size // 2)
self.max_write_buffer_size = max_write_buffer_size
self.error = None
self._read_buffer = collections.deque()
self._write_buffer = collections.deque()
self._read_buffer_size = 0
self._write_buffer_size = 0
self._write_buffer_frozen = False
self._read_delimiter = None
self._read_regex = None
self._read_max_bytes = None
self._read_bytes = None
self._read_partial = False
self._read_until_close = False
self._read_callback = None
self._read_future = None
self._streaming_callback = None
self._write_callback = None
self._write_future = None
self._close_callback = None
self._connect_callback = None
self._connect_future = None
# _ssl_connect_future should be defined in SSLIOStream
# but it's here so we can clean it up in maybe_run_close_callback.
# TODO: refactor that so subclasses can add additional futures
# to be cancelled.
self._ssl_connect_future = None
self._connecting = False
self._state = None
self._pending_callbacks = 0
self._closed = False
def fileno(self):
"""Returns the file descriptor for this stream."""
raise NotImplementedError()
def close_fd(self):
"""Closes the file underlying this stream.
``close_fd`` is called by `BaseIOStream` and should not be called
elsewhere; other users should call `close` instead.
"""
raise NotImplementedError()
def write_to_fd(self, data):
"""Attempts to write ``data`` to the underlying file.
Returns the number of bytes written.
"""
raise NotImplementedError()
def read_from_fd(self):
"""Attempts to read from the underlying file.
Returns ``None`` if there was nothing to read (the socket
returned `~errno.EWOULDBLOCK` or equivalent), otherwise
returns the data. When possible, should return no more than
``self.read_chunk_size`` bytes at a time.
"""
raise NotImplementedError()
def get_fd_error(self):
"""Returns information about any error on the underlying file.
This method is called after the `.IOLoop` has signaled an error on the
        file descriptor, and should return an Exception (such as `socket.error`
        with additional information), or None if no such information is
        available.
"""
return None
def read_until_regex(self, regex, callback=None, max_bytes=None):
"""Asynchronously read until we have matched the given regex.
The result includes the data that matches the regex and anything
that came before it. If a callback is given, it will be run
with the data as an argument; if not, this method returns a
`.Future`.
If ``max_bytes`` is not None, the connection will be closed
if more than ``max_bytes`` bytes have been read and the regex is
not satisfied.
.. versionchanged:: 4.0
Added the ``max_bytes`` argument. The ``callback`` argument is
now optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._read_regex = re.compile(regex)
self._read_max_bytes = max_bytes
try:
self._try_inline_read()
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
return future
except:
if future is not None:
# Ensure that the future doesn't log an error because its
# failure was never examined.
future.add_done_callback(lambda f: f.exception())
raise
return future
def read_until(self, delimiter, callback=None, max_bytes=None):
"""Asynchronously read until we have found the given delimiter.
The result includes all the data read including the delimiter.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
If ``max_bytes`` is not None, the connection will be closed
if more than ``max_bytes`` bytes have been read and the delimiter
is not found.
.. versionchanged:: 4.0
Added the ``max_bytes`` argument. The ``callback`` argument is
now optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._read_delimiter = delimiter
self._read_max_bytes = max_bytes
try:
self._try_inline_read()
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
return future
except:
if future is not None:
future.add_done_callback(lambda f: f.exception())
raise
return future
def read_bytes(self, num_bytes, callback=None, streaming_callback=None,
partial=False):
"""Asynchronously read a number of bytes.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the final result will be empty.
Otherwise, the result is all the data that was read.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
If ``partial`` is true, the callback is run as soon as we have
any bytes to return (but never more than ``num_bytes``)
.. versionchanged:: 4.0
Added the ``partial`` argument. The callback argument is now
optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
assert isinstance(num_bytes, numbers.Integral)
self._read_bytes = num_bytes
self._read_partial = partial
self._streaming_callback = stack_context.wrap(streaming_callback)
try:
self._try_inline_read()
except:
if future is not None:
future.add_done_callback(lambda f: f.exception())
raise
return future
def read_until_close(self, callback=None, streaming_callback=None):
"""Asynchronously reads all data from the socket until it is closed.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the final result will be empty.
Otherwise, the result is all the data that was read.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
Note that if a ``streaming_callback`` is used, data will be
read from the socket as quickly as it becomes available; there
is no way to apply backpressure or cancel the reads. If flow
control or cancellation are desired, use a loop with
`read_bytes(partial=True) <.read_bytes>` instead.
.. versionchanged:: 4.0
The callback argument is now optional and a `.Future` will
be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._streaming_callback = stack_context.wrap(streaming_callback)
if self.closed():
if self._streaming_callback is not None:
self._run_read_callback(self._read_buffer_size, True)
self._run_read_callback(self._read_buffer_size, False)
return future
self._read_until_close = True
try:
self._try_inline_read()
except:
future.add_done_callback(lambda f: f.exception())
raise
return future
def write(self, data, callback=None):
"""Asynchronously write the given data to this stream.
If ``callback`` is given, we call it when all of the buffered write
data has been successfully written to the stream. If there was
previously buffered write data and an old write callback, that
callback is simply overwritten with this new callback.
If no ``callback`` is given, this method returns a `.Future` that
resolves (with a result of ``None``) when the write has been
completed. If `write` is called again before that `.Future` has
resolved, the previous future will be orphaned and will never resolve.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
"""
assert isinstance(data, bytes)
self._check_closed()
# We use bool(_write_buffer) as a proxy for write_buffer_size>0,
# so never put empty strings in the buffer.
if data:
if (self.max_write_buffer_size is not None and
self._write_buffer_size + len(data) > self.max_write_buffer_size):
raise StreamBufferFullError("Reached maximum write buffer size")
# Break up large contiguous strings before inserting them in the
# write buffer, so we don't have to recopy the entire thing
# as we slice off pieces to send to the socket.
WRITE_BUFFER_CHUNK_SIZE = 128 * 1024
for i in range(0, len(data), WRITE_BUFFER_CHUNK_SIZE):
self._write_buffer.append(data[i:i + WRITE_BUFFER_CHUNK_SIZE])
self._write_buffer_size += len(data)
if callback is not None:
self._write_callback = stack_context.wrap(callback)
future = None
else:
future = self._write_future = TracebackFuture()
future.add_done_callback(lambda f: f.exception())
if not self._connecting:
self._handle_write()
if self._write_buffer:
self._add_io_state(self.io_loop.WRITE)
self._maybe_add_error_listener()
return future
def set_close_callback(self, callback):
"""Call the given callback when the stream is closed.
This is not necessary for applications that use the `.Future`
interface; all outstanding ``Futures`` will resolve with a
`StreamClosedError` when the stream is closed.
"""
self._close_callback = stack_context.wrap(callback)
self._maybe_add_error_listener()
def close(self, exc_info=False):
"""Close this stream.
If ``exc_info`` is true, set the ``error`` attribute to the current
exception from `sys.exc_info` (or if ``exc_info`` is a tuple,
use that instead of `sys.exc_info`).
"""
if not self.closed():
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
if any(exc_info):
self.error = exc_info[1]
if self._read_until_close:
if (self._streaming_callback is not None and
self._read_buffer_size):
self._run_read_callback(self._read_buffer_size, True)
self._read_until_close = False
self._run_read_callback(self._read_buffer_size, False)
if self._state is not None:
self.io_loop.remove_handler(self.fileno())
self._state = None
self.close_fd()
self._closed = True
self._maybe_run_close_callback()
def _maybe_run_close_callback(self):
# If there are pending callbacks, don't run the close callback
# until they're done (see _maybe_add_error_handler)
if self.closed() and self._pending_callbacks == 0:
futures = []
if self._read_future is not None:
futures.append(self._read_future)
self._read_future = None
if self._write_future is not None:
futures.append(self._write_future)
self._write_future = None
if self._connect_future is not None:
futures.append(self._connect_future)
self._connect_future = None
if self._ssl_connect_future is not None:
futures.append(self._ssl_connect_future)
self._ssl_connect_future = None
for future in futures:
if self._is_connreset(self.error):
# Treat connection resets as closed connections so
# clients only have to catch one kind of exception
# to avoid logging.
future.set_exception(StreamClosedError())
else:
future.set_exception(self.error or StreamClosedError())
if self._close_callback is not None:
cb = self._close_callback
self._close_callback = None
self._run_callback(cb)
# Delete any unfinished callbacks to break up reference cycles.
self._read_callback = self._write_callback = None
# Clear the buffers so they can be cleared immediately even
# if the IOStream object is kept alive by a reference cycle.
# TODO: Clear the read buffer too; it currently breaks some tests.
self._write_buffer = None
def reading(self):
"""Returns true if we are currently reading from the stream."""
return self._read_callback is not None or self._read_future is not None
def writing(self):
"""Returns true if we are currently writing to the stream."""
return bool(self._write_buffer)
def closed(self):
"""Returns true if the stream has been closed."""
return self._closed
def set_nodelay(self, value):
"""Sets the no-delay flag for this stream.
By default, data written to TCP streams may be held for a time
to make the most efficient use of bandwidth (according to
Nagle's algorithm). The no-delay flag requests that data be
written as soon as possible, even if doing so would consume
additional bandwidth.
This flag is currently defined only for TCP-based ``IOStreams``.
.. versionadded:: 3.1
"""
pass
def _handle_events(self, fd, events):
if self.closed():
gen_log.warning("Got events for closed stream %s", fd)
return
try:
if self._connecting:
# Most IOLoops will report a write failed connect
# with the WRITE event, but SelectIOLoop reports a
# READ as well so we must check for connecting before
# either.
self._handle_connect()
if self.closed():
return
if events & self.io_loop.READ:
self._handle_read()
if self.closed():
return
if events & self.io_loop.WRITE:
self._handle_write()
if self.closed():
return
if events & self.io_loop.ERROR:
self.error = self.get_fd_error()
# We may have queued up a user callback in _handle_read or
# _handle_write, so don't close the IOStream until those
# callbacks have had a chance to run.
self.io_loop.add_callback(self.close)
return
state = self.io_loop.ERROR
if self.reading():
state |= self.io_loop.READ
if self.writing():
state |= self.io_loop.WRITE
if state == self.io_loop.ERROR and self._read_buffer_size == 0:
# If the connection is idle, listen for reads too so
# we can tell if the connection is closed. If there is
# data in the read buffer we won't run the close callback
# yet anyway, so we don't need to listen in this case.
state |= self.io_loop.READ
if state != self._state:
assert self._state is not None, \
"shouldn't happen: _handle_events without self._state"
self._state = state
self.io_loop.update_handler(self.fileno(), self._state)
except UnsatisfiableReadError as e:
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
except Exception:
gen_log.error("Uncaught exception, closing connection.",
exc_info=True)
self.close(exc_info=True)
raise
def _run_callback(self, callback, *args):
def wrapper():
self._pending_callbacks -= 1
try:
return callback(*args)
except Exception:
app_log.error("Uncaught exception, closing connection.",
exc_info=True)
# Close the socket on an uncaught exception from a user callback
# (It would eventually get closed when the socket object is
# gc'd, but we don't want to rely on gc happening before we
# run out of file descriptors)
self.close(exc_info=True)
# Re-raise the exception so that IOLoop.handle_callback_exception
# can see it and log the error
raise
finally:
self._maybe_add_error_listener()
# We schedule callbacks to be run on the next IOLoop iteration
# rather than running them directly for several reasons:
# * Prevents unbounded stack growth when a callback calls an
# IOLoop operation that immediately runs another callback
# * Provides a predictable execution context for e.g.
# non-reentrant mutexes
# * Ensures that the try/except in wrapper() is run outside
# of the application's StackContexts
with stack_context.NullContext():
# stack_context was already captured in callback, we don't need to
# capture it again for IOStream's wrapper. This is especially
# important if the callback was pre-wrapped before entry to
# IOStream (as in HTTPConnection._header_callback), as we could
# capture and leak the wrong context here.
self._pending_callbacks += 1
self.io_loop.add_callback(wrapper)
def _read_to_buffer_loop(self):
# This method is called from _handle_read and _try_inline_read.
try:
if self._read_bytes is not None:
target_bytes = self._read_bytes
elif self._read_max_bytes is not None:
target_bytes = self._read_max_bytes
elif self.reading():
# For read_until without max_bytes, or
# read_until_close, read as much as we can before
# scanning for the delimiter.
target_bytes = None
else:
target_bytes = 0
next_find_pos = 0
# Pretend to have a pending callback so that an EOF in
# _read_to_buffer doesn't trigger an immediate close
# callback. At the end of this method we'll either
# establish a real pending callback via
# _read_from_buffer or run the close callback.
#
# We need two try statements here so that
# pending_callbacks is decremented before the `except`
# clause below (which calls `close` and does need to
# trigger the callback)
self._pending_callbacks += 1
while not self.closed():
# Read from the socket until we get EWOULDBLOCK or equivalent.
# SSL sockets do some internal buffering, and if the data is
# sitting in the SSL object's buffer select() and friends
# can't see it; the only way to find out if it's there is to
# try to read it.
if self._read_to_buffer() == 0:
break
self._run_streaming_callback()
# If we've read all the bytes we can use, break out of
# this loop. We can't just call read_from_buffer here
# because of subtle interactions with the
# pending_callback and error_listener mechanisms.
#
# If we've reached target_bytes, we know we're done.
if (target_bytes is not None and
self._read_buffer_size >= target_bytes):
break
# Otherwise, we need to call the more expensive find_read_pos.
# It's inefficient to do this on every read, so instead
# do it on the first read and whenever the read buffer
# size has doubled.
if self._read_buffer_size >= next_find_pos:
pos = self._find_read_pos()
if pos is not None:
return pos
next_find_pos = self._read_buffer_size * 2
return self._find_read_pos()
finally:
self._pending_callbacks -= 1
def _handle_read(self):
try:
pos = self._read_to_buffer_loop()
except UnsatisfiableReadError:
raise
except Exception:
gen_log.warning("error on read", exc_info=True)
self.close(exc_info=True)
return
if pos is not None:
self._read_from_buffer(pos)
return
else:
self._maybe_run_close_callback()
def _set_read_callback(self, callback):
assert self._read_callback is None, "Already reading"
assert self._read_future is None, "Already reading"
if callback is not None:
self._read_callback = stack_context.wrap(callback)
else:
self._read_future = TracebackFuture()
return self._read_future
def _run_read_callback(self, size, streaming):
if streaming:
callback = self._streaming_callback
else:
callback = self._read_callback
self._read_callback = self._streaming_callback = None
if self._read_future is not None:
assert callback is None
future = self._read_future
self._read_future = None
future.set_result(self._consume(size))
if callback is not None:
assert (self._read_future is None) or streaming
self._run_callback(callback, self._consume(size))
else:
# If we scheduled a callback, we will add the error listener
# afterwards. If we didn't, we have to do it now.
self._maybe_add_error_listener()
def _try_inline_read(self):
"""Attempt to complete the current read operation from buffered data.
If the read can be completed without blocking, schedules the
read callback on the next IOLoop iteration; otherwise starts
listening for reads on the socket.
"""
# See if we've already got the data from a previous read
self._run_streaming_callback()
pos = self._find_read_pos()
if pos is not None:
self._read_from_buffer(pos)
return
self._check_closed()
try:
pos = self._read_to_buffer_loop()
except Exception:
            # If there was an error in _read_to_buffer, we called close() already,
# but couldn't run the close callback because of _pending_callbacks.
# Before we escape from this function, run the close callback if
# applicable.
self._maybe_run_close_callback()
raise
if pos is not None:
self._read_from_buffer(pos)
return
# We couldn't satisfy the read inline, so either close the stream
# or listen for new data.
if self.closed():
self._maybe_run_close_callback()
else:
self._add_io_state(ioloop.IOLoop.READ)
def _read_to_buffer(self):
"""Reads from the socket and appends the result to the read buffer.
Returns the number of bytes read. Returns 0 if there is nothing
to read (i.e. the read returns EWOULDBLOCK or equivalent). On
error closes the socket and raises an exception.
"""
try:
chunk = self.read_from_fd()
except (socket.error, IOError, OSError) as e:
# ssl.SSLError is a subclass of socket.error
if self._is_connreset(e):
# Treat ECONNRESET as a connection close rather than
# an error to minimize log spam (the exception will
# be available on self.error for apps that care).
self.close(exc_info=True)
return
self.close(exc_info=True)
raise
if chunk is None:
return 0
self._read_buffer.append(chunk)
self._read_buffer_size += len(chunk)
if self._read_buffer_size > self.max_buffer_size:
gen_log.error("Reached maximum read buffer size")
self.close()
raise StreamBufferFullError("Reached maximum read buffer size")
return len(chunk)
def _run_streaming_callback(self):
if self._streaming_callback is not None and self._read_buffer_size:
bytes_to_consume = self._read_buffer_size
if self._read_bytes is not None:
bytes_to_consume = min(self._read_bytes, bytes_to_consume)
self._read_bytes -= bytes_to_consume
self._run_read_callback(bytes_to_consume, True)
def _read_from_buffer(self, pos):
"""Attempts to complete the currently-pending read from the buffer.
The argument is either a position in the read buffer or None,
as returned by _find_read_pos.
"""
self._read_bytes = self._read_delimiter = self._read_regex = None
self._read_partial = False
self._run_read_callback(pos, False)
def _find_read_pos(self):
"""Attempts to find a position in the read buffer that satisfies
the currently-pending read.
Returns a position in the buffer if the current read can be satisfied,
or None if it cannot.
"""
if (self._read_bytes is not None and
(self._read_buffer_size >= self._read_bytes or
(self._read_partial and self._read_buffer_size > 0))):
num_bytes = min(self._read_bytes, self._read_buffer_size)
return num_bytes
elif self._read_delimiter is not None:
# Multi-byte delimiters (e.g. '\r\n') may straddle two
# chunks in the read buffer, so we can't easily find them
# without collapsing the buffer. However, since protocols
# using delimited reads (as opposed to reads of a known
# length) tend to be "line" oriented, the delimiter is likely
# to be in the first few chunks. Merge the buffer gradually
# since large merges are relatively expensive and get undone in
# _consume().
if self._read_buffer:
while True:
loc = self._read_buffer[0].find(self._read_delimiter)
if loc != -1:
delimiter_len = len(self._read_delimiter)
self._check_max_bytes(self._read_delimiter,
loc + delimiter_len)
return loc + delimiter_len
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
self._check_max_bytes(self._read_delimiter,
len(self._read_buffer[0]))
elif self._read_regex is not None:
if self._read_buffer:
while True:
m = self._read_regex.search(self._read_buffer[0])
if m is not None:
self._check_max_bytes(self._read_regex, m.end())
return m.end()
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
self._check_max_bytes(self._read_regex,
len(self._read_buffer[0]))
return None
def _check_max_bytes(self, delimiter, size):
if (self._read_max_bytes is not None and
size > self._read_max_bytes):
raise UnsatisfiableReadError(
"delimiter %r not found within %d bytes" % (
delimiter, self._read_max_bytes))
def _handle_write(self):
while self._write_buffer:
try:
if not self._write_buffer_frozen:
# On windows, socket.send blows up if given a
# write buffer that's too large, instead of just
# returning the number of bytes it was able to
# process. Therefore we must not call socket.send
# with more than 128KB at a time.
_merge_prefix(self._write_buffer, 128 * 1024)
num_bytes = self.write_to_fd(self._write_buffer[0])
if num_bytes == 0:
# With OpenSSL, if we couldn't write the entire buffer,
# the very same string object must be used on the
# next call to send. Therefore we suppress
# merging the write buffer after an incomplete send.
# A cleaner solution would be to set
# SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
# not yet accessible from python
# (http://bugs.python.org/issue8240)
self._write_buffer_frozen = True
break
self._write_buffer_frozen = False
_merge_prefix(self._write_buffer, num_bytes)
self._write_buffer.popleft()
self._write_buffer_size -= num_bytes
except (socket.error, IOError, OSError) as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
self._write_buffer_frozen = True
break
else:
if not self._is_connreset(e):
# Broken pipe errors are usually caused by connection
                        # reset, and it's better to not log EPIPE errors to
# minimize log spam
gen_log.warning("Write error on %s: %s",
self.fileno(), e)
self.close(exc_info=True)
return
if not self._write_buffer:
if self._write_callback:
callback = self._write_callback
self._write_callback = None
self._run_callback(callback)
if self._write_future:
future = self._write_future
self._write_future = None
future.set_result(None)
def _consume(self, loc):
if loc == 0:
return b""
_merge_prefix(self._read_buffer, loc)
self._read_buffer_size -= loc
return self._read_buffer.popleft()
def _check_closed(self):
if self.closed():
raise StreamClosedError("Stream is closed")
def _maybe_add_error_listener(self):
# This method is part of an optimization: to detect a connection that
# is closed when we're not actively reading or writing, we must listen
# for read events. However, it is inefficient to do this when the
# connection is first established because we are going to read or write
# immediately anyway. Instead, we insert checks at various times to
# see if the connection is idle and add the read listener then.
if self._pending_callbacks != 0:
return
if self._state is None or self._state == ioloop.IOLoop.ERROR:
if self.closed():
self._maybe_run_close_callback()
elif (self._read_buffer_size == 0 and
self._close_callback is not None):
self._add_io_state(ioloop.IOLoop.READ)
def _add_io_state(self, state):
"""Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler.
Implementation notes: Reads and writes have a fast path and a
slow path. The fast path reads synchronously from socket
buffers, while the slow path uses `_add_io_state` to schedule
an IOLoop callback. Note that in both cases, the callback is
run asynchronously with `_run_callback`.
To detect closed connections, we must have called
`_add_io_state` at some point, but we want to delay this as
much as possible so we don't have to set an `IOLoop.ERROR`
listener that will be overwritten by the next slow-path
operation. As long as there are callbacks scheduled for
fast-path ops, those callbacks may do more reads.
If a sequence of fast-path ops do not end in a slow-path op,
(e.g. for an @asynchronous long-poll request), we must add
the error handler. This is done in `_run_callback` and `write`
(since the write callback is optional so we can have a
fast-path write with no `_run_callback`)
"""
if self.closed():
# connection has been closed, so there can be no future events
return
if self._state is None:
self._state = ioloop.IOLoop.ERROR | state
with stack_context.NullContext():
self.io_loop.add_handler(
self.fileno(), self._handle_events, self._state)
elif not self._state & state:
self._state = self._state | state
self.io_loop.update_handler(self.fileno(), self._state)
def _is_connreset(self, exc):
"""Return true if exc is ECONNRESET or equivalent.
May be overridden in subclasses.
"""
return (isinstance(exc, (socket.error, IOError)) and
errno_from_exception(exc) in _ERRNO_CONNRESET)
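# --- Illustrative example (added for exposition; not part of Tornado). ---
# A flow-controlled read loop over any BaseIOStream subclass, as suggested by
# the read_until_close docstring: read in bounded chunks instead of letting
# unbounded data accumulate. Assumes Tornado's gen module is available.
def _example_read_loop(stream, handle_chunk, chunk_size=64 * 1024):
    from tornado import gen
    @gen.coroutine
    def run():
        while True:
            try:
                chunk = yield stream.read_bytes(chunk_size, partial=True)
            except StreamClosedError:
                break
            handle_chunk(chunk)
    return run()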
class IOStream(BaseIOStream):
r"""Socket-based `IOStream` implementation.
This class supports the read and write methods from `BaseIOStream`
plus a `connect` method.
The ``socket`` parameter may either be connected or unconnected.
For server operations the socket is the result of calling
`socket.accept <socket.socket.accept>`. For client operations the
socket is created with `socket.socket`, and may either be
connected before passing it to the `IOStream` or connected with
`IOStream.connect`.
A very simple (and broken) HTTP client using this class:
.. testcode::
import tornado.ioloop
import tornado.iostream
import socket
def send_request():
stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
stream.read_until(b"\r\n\r\n", on_headers)
def on_headers(data):
headers = {}
for line in data.split(b"\r\n"):
parts = line.split(b":")
if len(parts) == 2:
headers[parts[0].strip()] = parts[1].strip()
stream.read_bytes(int(headers[b"Content-Length"]), on_body)
def on_body(data):
print(data)
stream.close()
tornado.ioloop.IOLoop.current().stop()
if __name__ == '__main__':
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
stream = tornado.iostream.IOStream(s)
stream.connect(("friendfeed.com", 80), send_request)
tornado.ioloop.IOLoop.current().start()
.. testoutput::
:hide:
"""
def __init__(self, socket, *args, **kwargs):
self.socket = socket
self.socket.setblocking(False)
super(IOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.socket
def close_fd(self):
self.socket.close()
self.socket = None
def get_fd_error(self):
errno = self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_ERROR)
return socket.error(errno, os.strerror(errno))
def read_from_fd(self):
try:
chunk = self.socket.recv(self.read_chunk_size)
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def write_to_fd(self, data):
return self.socket.send(data)
def connect(self, address, callback=None, server_hostname=None):
"""Connects the socket to a remote address without blocking.
May only be called if the socket passed to the constructor was
not previously connected. The address parameter is in the
same format as for `socket.connect <socket.socket.connect>` for
the type of socket passed to the IOStream constructor,
e.g. an ``(ip, port)`` tuple. Hostnames are accepted here,
but will be resolved synchronously and block the IOLoop.
If you have a hostname instead of an IP address, the `.TCPClient`
class is recommended instead of calling this method directly.
`.TCPClient` will do asynchronous DNS resolution and handle
both IPv4 and IPv6.
If ``callback`` is specified, it will be called with no
arguments when the connection is completed; if not this method
returns a `.Future` (whose result after a successful
connection will be the stream itself).
In SSL mode, the ``server_hostname`` parameter will be used
for certificate validation (unless disabled in the
``ssl_options``) and SNI (if supported; requires Python
2.7.9+).
Note that it is safe to call `IOStream.write
<BaseIOStream.write>` while the connection is pending, in
which case the data will be written as soon as the connection
is ready. Calling `IOStream` read methods before the socket is
connected works on some platforms but is non-portable.
.. versionchanged:: 4.0
If no callback is given, returns a `.Future`.
.. versionchanged:: 4.2
SSL certificates are validated by default; pass
``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a
suitably-configured `ssl.SSLContext` to the
`SSLIOStream` constructor to disable.
"""
self._connecting = True
if callback is not None:
self._connect_callback = stack_context.wrap(callback)
future = None
else:
future = self._connect_future = TracebackFuture()
try:
self.socket.connect(address)
except socket.error as e:
# In non-blocking mode we expect connect() to raise an
# exception with EINPROGRESS or EWOULDBLOCK.
#
# On freebsd, other errors such as ECONNREFUSED may be
# returned immediately when attempting to connect to
# localhost, so handle them the same way as an error
# reported later in _handle_connect.
if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
if future is None:
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), e)
self.close(exc_info=True)
return future
self._add_io_state(self.io_loop.WRITE)
return future
def start_tls(self, server_side, ssl_options=None, server_hostname=None):
"""Convert this `IOStream` to an `SSLIOStream`.
This enables protocols that begin in clear-text mode and
switch to SSL after some initial negotiation (such as the
``STARTTLS`` extension to SMTP and IMAP).
This method cannot be used if there are outstanding reads
or writes on the stream, or if there is any data in the
IOStream's buffer (data in the operating system's socket
buffer is allowed). This means it must generally be used
immediately after reading or writing the last clear-text
data. It can also be used immediately after connecting,
before any reads or writes.
The ``ssl_options`` argument may be either an `ssl.SSLContext`
object or a dictionary of keyword arguments for the
`ssl.wrap_socket` function. The ``server_hostname`` argument
will be used for certificate validation unless disabled
in the ``ssl_options``.
This method returns a `.Future` whose result is the new
`SSLIOStream`. After this method has been called,
any other operation on the original stream is undefined.
If a close callback is defined on this stream, it will be
transferred to the new stream.
.. versionadded:: 4.0
.. versionchanged:: 4.2
SSL certificates are validated by default; pass
``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a
suitably-configured `ssl.SSLContext` to disable.
"""
if (self._read_callback or self._read_future or
self._write_callback or self._write_future or
self._connect_callback or self._connect_future or
self._pending_callbacks or self._closed or
self._read_buffer or self._write_buffer):
raise ValueError("IOStream is not idle; cannot convert to SSL")
if ssl_options is None:
if server_side:
ssl_options = _server_ssl_defaults
else:
ssl_options = _client_ssl_defaults
socket = self.socket
self.io_loop.remove_handler(socket)
self.socket = None
socket = ssl_wrap_socket(socket, ssl_options,
server_hostname=server_hostname,
server_side=server_side,
do_handshake_on_connect=False)
orig_close_callback = self._close_callback
self._close_callback = None
future = TracebackFuture()
ssl_stream = SSLIOStream(socket, ssl_options=ssl_options,
io_loop=self.io_loop)
# Wrap the original close callback so we can fail our Future as well.
# If we had an "unwrap" counterpart to this method we would need
# to restore the original callback after our Future resolves
# so that repeated wrap/unwrap calls don't build up layers.
def close_callback():
if not future.done():
future.set_exception(ssl_stream.error or StreamClosedError())
if orig_close_callback is not None:
orig_close_callback()
ssl_stream.set_close_callback(close_callback)
ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream)
ssl_stream.max_buffer_size = self.max_buffer_size
ssl_stream.read_chunk_size = self.read_chunk_size
return future
def _handle_connect(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
self.error = socket.error(err, os.strerror(err))
# IOLoop implementations may vary: some of them return
# an error state before the socket becomes writable, so
# in that case a connection failure would be handled by the
# error path in _handle_events instead of here.
if self._connect_future is None:
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), errno.errorcode[err])
self.close()
return
if self._connect_callback is not None:
callback = self._connect_callback
self._connect_callback = None
self._run_callback(callback)
if self._connect_future is not None:
future = self._connect_future
self._connect_future = None
future.set_result(self)
self._connecting = False
def set_nodelay(self, value):
if (self.socket is not None and
self.socket.family in (socket.AF_INET, socket.AF_INET6)):
try:
self.socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1 if value else 0)
except socket.error as e:
# Sometimes setsockopt will fail if the socket is closed
# at the wrong time. This can happen with HTTPServer
# resetting the value to false between requests.
if e.errno != errno.EINVAL and not self._is_connreset(e):
raise
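# --- Illustrative example (added for exposition; not part of Tornado). ---
# A minimal Future-based echo client built on IOStream. The host, port and
# protocol are hypothetical; no actual server is assumed to exist.
def _example_echo_client(host='127.0.0.1', port=8888):
    from tornado import gen
    @gen.coroutine
    def run():
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        stream = IOStream(sock)
        yield stream.connect((host, port))
        yield stream.write(b"ping\n")
        reply = yield stream.read_until(b"\n")
        stream.close()
        raise gen.Return(reply)
    return ioloop.IOLoop.current().run_sync(run)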
class SSLIOStream(IOStream):
"""A utility class to write to and read from a non-blocking SSL socket.
If the socket passed to the constructor is already connected,
it should be wrapped with::
ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs)
before constructing the `SSLIOStream`. Unconnected sockets will be
wrapped when `IOStream.connect` is finished.
"""
def __init__(self, *args, **kwargs):
"""The ``ssl_options`` keyword argument may either be an
`ssl.SSLContext` object or a dictionary of keywords arguments
for `ssl.wrap_socket`
"""
self._ssl_options = kwargs.pop('ssl_options', _client_ssl_defaults)
super(SSLIOStream, self).__init__(*args, **kwargs)
self._ssl_accepting = True
self._handshake_reading = False
self._handshake_writing = False
self._ssl_connect_callback = None
self._server_hostname = None
# If the socket is already connected, attempt to start the handshake.
try:
self.socket.getpeername()
except socket.error:
pass
else:
# Indirectly start the handshake, which will run on the next
# IOLoop iteration and then the real IO state will be set in
# _handle_events.
self._add_io_state(self.io_loop.WRITE)
def reading(self):
return self._handshake_reading or super(SSLIOStream, self).reading()
def writing(self):
return self._handshake_writing or super(SSLIOStream, self).writing()
def _do_ssl_handshake(self):
# Based on code from test_ssl.py in the python stdlib
try:
self._handshake_reading = False
self._handshake_writing = False
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
self._handshake_reading = True
return
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
self._handshake_writing = True
return
elif err.args[0] in (ssl.SSL_ERROR_EOF,
ssl.SSL_ERROR_ZERO_RETURN):
return self.close(exc_info=True)
elif err.args[0] == ssl.SSL_ERROR_SSL:
try:
peer = self.socket.getpeername()
except Exception:
peer = '(not connected)'
gen_log.warning("SSL Error on %s %s: %s",
self.socket.fileno(), peer, err)
return self.close(exc_info=True)
raise
except socket.error as err:
# Some port scans (e.g. nmap in -sT mode) have been known
# to cause do_handshake to raise EBADF, so make that error
# quiet as well.
# https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0
if self._is_connreset(err) or err.args[0] == errno.EBADF:
return self.close(exc_info=True)
raise
except AttributeError:
# On Linux, if the connection was reset before the call to
# wrap_socket, do_handshake will fail with an
# AttributeError.
return self.close(exc_info=True)
else:
self._ssl_accepting = False
if not self._verify_cert(self.socket.getpeercert()):
self.close()
return
self._run_ssl_connect_callback()
def _run_ssl_connect_callback(self):
if self._ssl_connect_callback is not None:
callback = self._ssl_connect_callback
self._ssl_connect_callback = None
self._run_callback(callback)
if self._ssl_connect_future is not None:
future = self._ssl_connect_future
self._ssl_connect_future = None
future.set_result(self)
def _verify_cert(self, peercert):
"""Returns True if peercert is valid according to the configured
validation mode and hostname.
The ssl handshake already tested the certificate for a valid
CA signature; the only thing that remains is to check
the hostname.
"""
if isinstance(self._ssl_options, dict):
verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE)
elif isinstance(self._ssl_options, ssl.SSLContext):
verify_mode = self._ssl_options.verify_mode
assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL)
if verify_mode == ssl.CERT_NONE or self._server_hostname is None:
return True
cert = self.socket.getpeercert()
if cert is None and verify_mode == ssl.CERT_REQUIRED:
gen_log.warning("No SSL certificate given")
return False
try:
ssl_match_hostname(peercert, self._server_hostname)
except SSLCertificateError:
gen_log.warning("Invalid SSL certificate", exc_info=True)
return False
else:
return True
def _handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_read()
def _handle_write(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_write()
def connect(self, address, callback=None, server_hostname=None):
self._server_hostname = server_hostname
# Pass a dummy callback to super.connect(), which is slightly
# more efficient than letting it return a Future we ignore.
super(SSLIOStream, self).connect(address, callback=lambda: None)
return self.wait_for_handshake(callback)
def _handle_connect(self):
# Call the superclass method to check for errors.
super(SSLIOStream, self)._handle_connect()
if self.closed():
return
# When the connection is complete, wrap the socket for SSL
# traffic. Note that we do this by overriding _handle_connect
# instead of by passing a callback to super().connect because
# user callbacks are enqueued asynchronously on the IOLoop,
# but since _handle_events calls _handle_connect immediately
# followed by _handle_write we need this to be synchronous.
#
# The IOLoop will get confused if we swap out self.socket while the
# fd is registered, so remove it now and re-register after
# wrap_socket().
self.io_loop.remove_handler(self.socket)
old_state = self._state
self._state = None
self.socket = ssl_wrap_socket(self.socket, self._ssl_options,
server_hostname=self._server_hostname,
do_handshake_on_connect=False)
self._add_io_state(old_state)
def wait_for_handshake(self, callback=None):
"""Wait for the initial SSL handshake to complete.
If a ``callback`` is given, it will be called with no
arguments once the handshake is complete; otherwise this
method returns a `.Future` which will resolve to the
stream itself after the handshake is complete.
Once the handshake is complete, information such as
the peer's certificate and NPN/ALPN selections may be
accessed on ``self.socket``.
This method is intended for use on server-side streams
or after using `IOStream.start_tls`; it should not be used
with `IOStream.connect` (which already waits for the
handshake to complete). It may only be called once per stream.
.. versionadded:: 4.2
"""
if (self._ssl_connect_callback is not None or
self._ssl_connect_future is not None):
raise RuntimeError("Already waiting")
if callback is not None:
self._ssl_connect_callback = stack_context.wrap(callback)
future = None
else:
future = self._ssl_connect_future = TracebackFuture()
if not self._ssl_accepting:
self._run_ssl_connect_callback()
return future
def write_to_fd(self, data):
try:
return self.socket.send(data)
except ssl.SSLError as e:
if e.args[0] == ssl.SSL_ERROR_WANT_WRITE:
# In Python 3.5+, SSLSocket.send raises a WANT_WRITE error if
# the socket is not writeable; we need to transform this into
# an EWOULDBLOCK socket.error or a zero return value,
# either of which will be recognized by the caller of this
# method. Prior to Python 3.5, an unwriteable socket would
# simply return 0 bytes written.
return 0
raise
def read_from_fd(self):
if self._ssl_accepting:
# If the handshake hasn't finished yet, there can't be anything
# to read (attempting to read may or may not raise an exception
# depending on the SSL version)
return None
try:
# SSLSocket objects have both a read() and recv() method,
# while regular sockets only have recv().
# The recv() method blocks (at least in python 2.6) if it is
# called when there is nothing to read, so we have to use
# read() instead.
chunk = self.socket.read(self.read_chunk_size)
except ssl.SSLError as e:
# SSLError is a subclass of socket.error, so this except
# block must come first.
if e.args[0] == ssl.SSL_ERROR_WANT_READ:
return None
else:
raise
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def _is_connreset(self, e):
if isinstance(e, ssl.SSLError) and e.args[0] == ssl.SSL_ERROR_EOF:
return True
return super(SSLIOStream, self)._is_connreset(e)
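# --- Illustrative example (added for exposition; not part of Tornado). ---
# Upgrading a clear-text connection to TLS with IOStream.start_tls, as in a
# STARTTLS-style protocol. The command/acknowledgement exchange and the
# hostname are hypothetical.
def _example_starttls(stream, hostname):
    from tornado import gen
    @gen.coroutine
    def run():
        yield stream.write(b"STARTTLS\r\n")
        yield stream.read_until(b"\r\n")  # assume the peer acknowledges
        ssl_stream = yield stream.start_tls(server_side=False,
                                            server_hostname=hostname)
        raise gen.Return(ssl_stream)
    return run()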
class PipeIOStream(BaseIOStream):
"""Pipe-based `IOStream` implementation.
The constructor takes an integer file descriptor (such as one returned
by `os.pipe`) rather than an open file object. Pipes are generally
one-way, so a `PipeIOStream` can be used for reading or writing but not
both.
"""
def __init__(self, fd, *args, **kwargs):
self.fd = fd
_set_nonblocking(fd)
super(PipeIOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.fd
def close_fd(self):
os.close(self.fd)
def write_to_fd(self, data):
return os.write(self.fd, data)
def read_from_fd(self):
try:
chunk = os.read(self.fd, self.read_chunk_size)
except (IOError, OSError) as e:
if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
return None
elif errno_from_exception(e) == errno.EBADF:
# If the writing half of a pipe is closed, select will
# report it as readable but reads will fail with EBADF.
self.close(exc_info=True)
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
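# --- Illustrative example (added for exposition; not part of Tornado). ---
# Wiring two PipeIOStreams over os.pipe(). Pipes are one-way, so one stream
# is used only for writing and the other only for reading.
def _example_pipe_pair():
    from tornado import gen
    r, w = os.pipe()
    read_stream = PipeIOStream(r)
    write_stream = PipeIOStream(w)
    @gen.coroutine
    def run():
        yield write_stream.write(b"hello\n")
        data = yield read_stream.read_until(b"\n")
        write_stream.close()
        read_stream.close()
        raise gen.Return(data)
    return ioloop.IOLoop.current().run_sync(run)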
def _double_prefix(deque):
"""Grow by doubling, but don't split the second chunk just because the
first one is small.
"""
new_len = max(len(deque[0]) * 2,
(len(deque[0]) + len(deque[1])))
_merge_prefix(deque, new_len)
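# Example: for collections.deque([b'ab', b'cdef', b'g']), new_len is
# max(2 * 2, 2 + 4) = 6, so the deque becomes deque([b'abcdef', b'g']).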
def _merge_prefix(deque, size):
"""Replace the first entries in a deque of strings with a single
string of up to size bytes.
>>> d = collections.deque(['abc', 'de', 'fghi', 'j'])
>>> _merge_prefix(d, 5); print(d)
deque(['abcde', 'fghi', 'j'])
Strings will be split as necessary to reach the desired size.
>>> _merge_prefix(d, 7); print(d)
deque(['abcdefg', 'hi', 'j'])
>>> _merge_prefix(d, 3); print(d)
deque(['abc', 'defg', 'hi', 'j'])
>>> _merge_prefix(d, 100); print(d)
deque(['abcdefghij'])
"""
if len(deque) == 1 and len(deque[0]) <= size:
return
prefix = []
remaining = size
while deque and remaining > 0:
chunk = deque.popleft()
if len(chunk) > remaining:
deque.appendleft(chunk[remaining:])
chunk = chunk[:remaining]
prefix.append(chunk)
remaining -= len(chunk)
# This data structure normally just contains byte strings, but
# the unittest gets messy if it doesn't use the default str() type,
# so do the merge based on the type of data that's actually present.
if prefix:
deque.appendleft(type(prefix[0])().join(prefix))
if not deque:
deque.appendleft(b"")
def doctests():
import doctest
return doctest.DocTestSuite()
| apache-2.0 | 8,708,752,972,629,134,000 | 40.919765 | 128 | 0.58992 | false |
foufou55/Sick-Beard | sickbeard/frenchFinder.py | 21 | 6074 | # Author: Ludovic SARAKHA
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import operator
import sickbeard
from sickbeard import db
from sickbeard import helpers, logger, show_name_helpers
from sickbeard import providers
from sickbeard import search
from sickbeard.common import SNATCHED_FRENCH
from sickbeard.common import showLanguages
import re
resultFilters = ["sub(pack|s|bed)", "nlsub(bed|s)?", "swesub(bed)?",
"(dir|sample|nfo)fix", "sample", "(dvd)?extras"]
class FrenchFinder():
def __init__(self, force=None, show=None):
        # TODO: if not sickbeard.DOWNLOAD_FRENCH:
        #     return
        if sickbeard.showList is None:
return
logger.log(u"Beginning the search for french episodes older than "+ str(sickbeard.FRENCH_DELAY) +" days")
frenchlist=[]
#get list of english episodes that we want to search in french
myDB = db.DBConnection()
today = datetime.date.today().toordinal()
if show:
frenchsql=myDB.select("SELECT showid, season, episode from tv_episodes where audio_langs='en' and tv_episodes.showid =? and (? - tv_episodes.airdate) > ? order by showid, airdate asc",[show,today,sickbeard.FRENCH_DELAY])
count=myDB.select("SELECT count(*) from tv_episodes where audio_langs='en' and tv_episodes.showid =? and (? - tv_episodes.airdate) > ?",[show,today,sickbeard.FRENCH_DELAY])
else:
frenchsql=myDB.select("SELECT showid, season, episode from tv_episodes, tv_shows where audio_langs='en' and tv_episodes.showid = tv_shows.tvdb_id and tv_shows.frenchsearch = 1 and (? - tv_episodes.airdate) > ? order by showid, airdate asc",[today,sickbeard.FRENCH_DELAY])
count=myDB.select("SELECT count(*) from tv_episodes, tv_shows where audio_langs='en' and tv_episodes.showid = tv_shows.tvdb_id and tv_shows.frenchsearch = 1 and (? - tv_episodes.airdate) > ?",[today,sickbeard.FRENCH_DELAY])
#make the episodes objects
logger.log(u"Searching for "+str(count[0][0]) +" episodes in french")
for episode in frenchsql:
showObj = helpers.findCertainShow(sickbeard.showList, episode[0])
epObj = showObj.getEpisode(episode[1], episode[2])
frenchlist.append(epObj)
#for each episode in frenchlist fire a search in french
delay=[]
rest=count[0][0]
for frepisode in frenchlist:
rest=rest-1
if frepisode.show.tvdbid in delay:
logger.log(u"Previous episode for show "+str(frepisode.show.tvdbid)+" not found in french so skipping this search", logger.DEBUG)
continue
result=[]
for curProvider in providers.sortedProviderList():
if not curProvider.isActive():
continue
logger.log(u"Searching for french episode on "+curProvider.name +" for " +frepisode.show.name +" season "+str(frepisode.season)+" episode "+str(frepisode.episode))
                curfrench = None  # guard: avoid a NameError below if findFrench raises
                try:
                    curfrench = curProvider.findFrench(frepisode, manualSearch=True)
                except:
                    logger.log(u"Exception", logger.DEBUG)
                    pass
                if curfrench:
                    for x in curfrench:
                        test = 0  # reset per release; previously one bad hit rejected all later candidates
if not show_name_helpers.filterBadReleases(x.name):
logger.log(u"French "+x.name+" isn't a valid scene release that we want, ignoring it", logger.DEBUG)
test+=1
continue
if sickbeard.IGNORE_WORDS == "":
ignore_words="ztreyfgut"
else:
ignore_words=str(sickbeard.IGNORE_WORDS)
for fil in resultFilters + ignore_words.split(','):
if fil == showLanguages.get(u"fr"):
continue
if re.search('(^|[\W_])'+fil+'($|[\W_])', x.url, re.I) or re.search('(^|[\W_])'+fil+'($|[\W_])', x.name, re.I) :
logger.log(u"Invalid scene release: "+x.url+" contains "+fil+", ignoring it", logger.DEBUG)
test+=1
if test==0:
result.append(x)
best=None
try:
epi={}
epi[1]=frepisode
best = search.pickBestResult(result, episode = epi)
except:
pass
if best:
best.name=best.name + ' snatchedfr'
logger.log(u"Found french episode for " +frepisode.show.name +" season "+str(frepisode.season)+" episode "+str(frepisode.episode))
try:
search.snatchEpisode(best, SNATCHED_FRENCH)
except:
logger.log(u"Exception", logger.DEBUG)
pass
else:
delay.append(frepisode.show.tvdbid)
logger.log(u"No french episode found for " +frepisode.show.name +" season "+str(frepisode.season)+" episode "+str(frepisode.episode))
logger.log(str(rest) + u" episodes left")
| gpl-3.0 | -1,993,961,498,501,692,200 | 48.198347 | 283 | 0.572605 | false |
branchard/django-react-scrapy-sample | djangoapp/components/models.py | 1 | 3265 | from django.db import models
class Component(models.Model):
name = models.CharField(max_length=50, unique=True)
photoUrl = models.URLField(null=True, blank=True)
brand = models.ForeignKey(
'Brand',
on_delete=models.CASCADE,
)
class Brand(models.Model):
name = models.CharField(max_length=50)
class Processor(Component):
    frequency = models.FloatField() # GHz
cores = models.IntegerField()
socket = models.ForeignKey(
'Socket',
on_delete=models.CASCADE,
)
class Motherboard(Component):
ramSlots = models.IntegerField() # number of slots
    maxRam = models.IntegerField() # GB
ramtype = models.ForeignKey(
'RamType',
on_delete=models.CASCADE,
)
    ramfrequency = models.ManyToManyField("RamFrequency") # a motherboard is compatible with several RAM frequencies
socket = models.ForeignKey(
'Socket',
on_delete=models.CASCADE,
)
    pcitypes = models.ManyToManyField("PciType") # a motherboard can have several PCI slots
formfactor = models.ForeignKey(
'MotherBoardFormFactor',
on_delete=models.CASCADE,
)
class Socket(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return "{{id: {}, name: {}}}".format(self.id, self.name)
class Ram(Component):
    capacity = models.IntegerField() # GB per stick, capacity * quantity = total memory
    quantity = models.IntegerField() # number of sticks
ramtype = models.ForeignKey(
'RamType',
on_delete=models.CASCADE,
)
frequency = models.ForeignKey(
'RamFrequency',
on_delete=models.CASCADE,
)
class RamFrequency(models.Model):
    frequency = models.IntegerField() # MHz
class RamType(models.Model):
typeName = models.CharField(max_length=10) # DDR2, DDR3, DDR4
class GraphicCard(Component):
    memory = models.IntegerField() # MB
pcitype = models.ForeignKey(
'PciType',
on_delete=models.CASCADE,
)
class PciType(models.Model):
name = models.CharField(max_length=50) # PCI-E 3.0, PCI-E 2.0
class Case(Component):
weight = models.FloatField() # in Kg
width = models.IntegerField() # in mm
height = models.IntegerField() # in mm
depth = models.IntegerField() # in mm
    motherBoardFormFactors = models.ManyToManyField("MotherBoardFormFactor") # a case can be compatible with several motherboard form factors
powerSupplyFormFactor = models.ForeignKey(
'PowerSupplyFormFactor',
on_delete=models.CASCADE,
)
class MotherBoardFormFactor(models.Model):
name = models.CharField(max_length=10)
class PowerSupply(Component):
watts = models.IntegerField() # in watt
modular = models.BooleanField()
factorForm = models.ForeignKey(
'PowerSupplyFormFactor',
on_delete=models.CASCADE,
)
class PowerSupplyFormFactor(models.Model):
name = models.CharField(max_length=10)
class HardDrive(Component):
    capacity = models.IntegerField() # GB
hardDriveType = models.ForeignKey(
'HardDriveType',
on_delete=models.CASCADE,
)
class HardDriveType(models.Model):
    name = models.CharField(max_length=10) # SSD or HDD
| gpl-3.0 | 5,144,723,501,824,918,000 | 25.544715 | 137 | 0.667994 | false |
renaelectronics/linuxcnc | src/emc/usr_intf/axis/scripts/teach-in.py | 5 | 2641 | #!/usr/bin/env python
"""Usage:
python teach.py nmlfile outputfile
If outputfile is not specified, writes to standard output.
You must ". scripts/rip-environment" before running this script, if you use
run-in-place.
"""
# Copyright 2007 Jeff Epler <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import linuxcnc
import Tkinter
import sys
linenumber = 1
if len(sys.argv) > 1:
linuxcnc.nmlfile = sys.argv[1]
if len(sys.argv) > 2:
outfile = sys.argv[2]
sys.stdout = open(outfile, 'w')
s = linuxcnc.stat()
def get_cart():
s.poll()
position = ""
for i,a in enumerate("XYZABCUVW"):
if s.axis_mask & (1<<i):
position = position + "%-8.4f " % (s.position[i])
return position[:-1] # remove the final space char
def get_joint():
s.poll()
position = " ".join(["%-8.4f"] * s.joints)
return position % s.joint_actual_position[:s.joints]
def log():
    global linenumber
if world.get():
p = get_cart()
else:
p = get_joint()
label1.configure(text='Learned: %s' % p)
    print linenumber, p, s.flood, s.mist, s.lube, s.spindle_enabled
    linenumber += 1
def show():
s.poll()
if world.get():
p = get_cart()
else:
p = get_joint()
label2.configure(text='Position: %s' % p)
app.after(100, show)
app = Tkinter.Tk(); app.wm_title('LinuxCNC Teach-In')
world = Tkinter.IntVar(app)
button = Tkinter.Button(app, command=log, text='Learn', font=("helvetica", 14))
button.pack(side='left')
label2 = Tkinter.Label(app, width=60, font='fixed', anchor="w")
label2.pack(side='top')
label1 = Tkinter.Label(app, width=60, font='fixed', text="Learned: (nothing yet)", anchor="w")
label1.pack(side='top')
r1 = Tkinter.Radiobutton(app, text="Joint", variable=world, value=0)
r1.pack(side='left')
r2 = Tkinter.Radiobutton(app, text="World", variable=world, value=1)
r2.pack(side='left')
show()
app.mainloop()
| gpl-2.0 | -6,859,997,627,438,694,000 | 28.021978 | 95 | 0.660356 | false |
harayz/raspberry_pwn | src/pentest/fimap/singleScan.py | 8 | 5441 | #
# This file is part of fimap.
#
# Copyright(c) 2009-2010 Iman Karim([email protected]).
# http://fimap.googlecode.com
#
# This file may be licensed under the terms of of the
# GNU General Public License Version 2 (the ``GPL'').
#
# Software distributed under the License is distributed
# on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
# express or implied. See the GPL for the specific language
# governing rights and limitations.
#
# You should have received a copy of the GPL along with this
# program. If not, go to http://www.gnu.org/licenses/gpl.html
# or write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from baseClass import baseClass
from targetScanner import targetScanner
import sys, time
__author__="Iman Karim([email protected])"
__date__ ="$03.09.2009 01:29:37$"
class singleScan(baseClass):
def _load(self):
self.URL = None
self.quite = False
def setURL(self, URL):
self.URL = URL
def setQuite(self, b):
self.quite = b
def scan(self):
try:
self.localLog("SingleScan is testing URL: '%s'" %self.URL)
t = targetScanner(self.config)
t.MonkeyTechnique = self.config["p_monkeymode"]
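            # idx numbers each discovered vulnerability so the report boxes can be labelled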
idx = 0
if (t.prepareTarget(self.URL)):
res = t.testTargetVuln()
if (len(res) == 0):
self.localLog("Target URL isn't affected by any file inclusion bug :(")
else:
for i in res:
report = i[0]
files = i[1]
idx = idx +1
boxarr = []
header = "[%d] Possible File Inclusion"%(idx)
if (report.getLanguage() != None):
header = "[%d] Possible %s-File Inclusion"%(idx, report.getLanguage())
boxarr.append(" [URL] %s"%report.getURL())
if (report.getPostData() != None and report.getPostData() != ""): boxarr.append(" [POST] %s"%report.getPostData())
if (report.isPost):
boxarr.append(" [POSTPARM] %s"%report.getVulnKey())
else:
boxarr.append(" [PARAM] %s"%report.getVulnKey())
if (report.isBlindDiscovered()):
boxarr.append(" [PATH] Not received (Blindmode)")
else:
boxarr.append(" [PATH] %s"%report.getServerPath())
if (report.isUnix()):
boxarr.append(" [OS] Unix")
else:
boxarr.append(" [OS] Windows")
boxarr.append(" [TYPE] %s"%report.getType())
if (not report.isBlindDiscovered()):
if (report.isNullbytePossible() == None):
boxarr.append(" [NULLBYTE] No Need. It's clean.")
else:
if (report.isNullbytePossible()):
boxarr.append(" [NULLBYTE] Works. :)")
else:
boxarr.append(" [NULLBYTE] Doesn't work. :(")
else:
if (report.isNullbytePossible()):
boxarr.append(" [NULLBYTE] Is needed.")
else:
boxarr.append(" [NULLBYTE] Not tested.")
boxarr.append(" [READABLE FILES]")
if (len(files) == 0):
boxarr.append(" No Readable files found :(")
else:
fidx = 0
for file in files:
payload = "%s%s%s"%(report.getPrefix(), file, report.getSurfix())
if (file != payload):
if report.isWindows() and file[1]==":":
file = file[3:]
txt = " [%d] %s -> %s"%(fidx, file, payload)
#if (fidx == 0): txt = txt.strip()
boxarr.append(txt)
else:
txt = " [%d] %s"%(fidx, file)
#if (fidx == 0): txt = txt.strip()
boxarr.append(txt)
fidx = fidx +1
self.drawBox(header, boxarr)
except KeyboardInterrupt:
if (self.quite): # We are in google mode.
print "\nCancelled current target..."
print "Press CTRL+C again in the next second to terminate fimap."
try:
time.sleep(1)
except KeyboardInterrupt:
raise
else: # We are in single mode. Simply raise the exception.
raise
def localLog(self, txt):
if (not self.quite):
print txt | gpl-3.0 | -7,330,241,218,164,925,000 | 44.35 | 143 | 0.433192 | false |
merckhung/bokken | ui/treeviews.py | 3 | 11285 | # treeviews.py
# -*- coding: utf-8 -*-
#
# Copyright 2011 Hugo Teso <[email protected]>
# Copyright 2014 David Martínez Moreno <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import gtk
from lib.common import datafile_path
class TreeViews(gtk.TreeView):
'''Main TextView elements'''
def __init__(self, core, textviews):
self.store = gtk.ListStore(gtk.gdk.Pixbuf, str, str, str, str)
super(TreeViews,self).__init__(self.store)
self.uicore = core
self.textviews = textviews
self.set_rules_hint(True)
self.set_has_tooltip(True)
# Connect right click popup search menu
self.popup_handler = self.connect('button-press-event', self.popup_menu)
self.popup_handler = self.connect('row-activated', self.popup_menu)
def create_functions_columns(self):
rendererText = gtk.CellRendererText()
rendererText.tooltip_handle = self.connect('motion-notify-event', self.fcn_tooltip)
rendererPix = gtk.CellRendererPixbuf()
self.fcn_pix = gtk.gdk.pixbuf_new_from_file(datafile_path('function.png'))
self.bb_pix = gtk.gdk.pixbuf_new_from_file(datafile_path('block.png'))
column = gtk.TreeViewColumn("Function")
column.set_spacing(5)
column.pack_start(rendererPix, False)
column.pack_start(rendererText, True)
column.set_attributes(rendererText, text=1)
column.set_attributes(rendererPix, pixbuf=0)
column.set_sort_column_id(1)
self.store.set_sort_column_id(1,gtk.SORT_ASCENDING)
self.append_column(column)
self.set_model(self.store)
def create_relocs_columns(self):
self.data_sec_pix = gtk.gdk.pixbuf_new_from_file(datafile_path('data-sec.png'))
rendererPix = gtk.CellRendererPixbuf()
rendererText = gtk.CellRendererText()
column = gtk.TreeViewColumn("Name")
column.set_spacing(5)
column.pack_start(rendererPix, False)
column.pack_start(rendererText, True)
column.set_attributes(rendererText, text=1)
column.set_attributes(rendererPix, pixbuf=0)
column.set_sort_column_id(0)
self.append_column(column)
rendererText = gtk.CellRendererText()
column = gtk.TreeViewColumn("Virtual Address", rendererText, text=2)
self.store.set_sort_column_id(2,gtk.SORT_ASCENDING)
column.set_sort_column_id(2)
self.append_column(column)
rendererText = gtk.CellRendererText()
column = gtk.TreeViewColumn("Size", rendererText, text=3)
column.set_sort_column_id(3)
self.append_column(column)
def create_exports_columns(self):
self.exp_pix = gtk.gdk.pixbuf_new_from_file(datafile_path('export.png'))
rendererPix = gtk.CellRendererPixbuf()
rendererText = gtk.CellRendererText()
column = gtk.TreeViewColumn("Offset")
column.set_spacing(5)
column.pack_start(rendererPix, False)
column.pack_start(rendererText, True)
column.set_attributes(rendererText, text=1)
column.set_attributes(rendererPix, pixbuf=0)
self.store.set_sort_column_id(1,gtk.SORT_ASCENDING)
column.set_sort_column_id(1)
self.append_column(column)
rendererText = gtk.CellRendererText()
column = gtk.TreeViewColumn("Name", rendererText, text=2)
column.set_sort_column_id(2)
self.append_column(column)
rendererText = gtk.CellRendererText()
column = gtk.TreeViewColumn("Ordinal", rendererText, text=3)
column.set_sort_column_id(3)
self.append_column(column)
self.set_model(self.store)
def remove_columns(self):
columns = self.get_columns()
for column in columns:
self.remove_column(column)
def create_tree(self, imps):
# Create the column
imports = gtk.TreeViewColumn()
imports.set_title("Imports")
imports.set_spacing(5)
self.treestore = gtk.TreeStore(gtk.gdk.Pixbuf, str)
self.imp_pix = gtk.gdk.pixbuf_new_from_file(datafile_path('import.png'))
rendererPix = gtk.CellRendererPixbuf()
rendererText = gtk.CellRendererText()
imports.pack_start(rendererPix, False)
imports.pack_start(rendererText, True)
imports.set_attributes(rendererText, text=1)
imports.set_attributes(rendererPix, pixbuf=0)
# Iterate imports and add to the tree
for element in imps.keys():
it = self.treestore.append(None, [self.fcn_pix, element])
for imp in imps[element]:
self.treestore.append(it, [self.imp_pix, imp[0] + '\t' + imp[1]])
# Add column to tree
self.append_column(imports)
self.set_model(self.treestore)
self.expand_all()
def search_and_graph(self, widget, link_name):
self.textviews.search(widget, link_name)
if self.dograph:
self.textviews.update_graph(widget, link_name)
def fcn_tooltip(self, widget, event):
x = int(event.x)
y = int(event.y)
tup = widget.get_path_at_pos(x, y)
if "Function" == tup[1].get_title():
model = widget.get_model()
tree_iter = model.get_iter(tup[0])
fcn = model.get_value(tree_iter, 1)
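            # ask radare2 to disassemble the first 15 instructions of the function for the tooltip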
value = self.uicore.send_cmd_str('pdi 15 @ ' + fcn)
widget.set_tooltip_markup("<span font_family=\"monospace\">" + value.rstrip() + "</span>")
else:
widget.set_tooltip_markup("")
def popup_menu(self, tv, event, row=None):
'''Controls the behavior of the treeviews on the left:
Left-click or Enter/Space: Goes to the corresponding graph/address/etc.
Right-click: Shows a menu.
@param tv: The treeview.
@parameter event: The GTK event (gtk.gdk.Event) in case this is a mouse
click. Otherwise it's the activated row index in format (n,).
@parameter row: A gtk.TreeViewColumn object in case it's a keypress,
otherwise None.
The function works by abstracting the event type and then defining
        primary_action (True if left-click or Enter on a row, False if
        right-click).
'''
self.dograph = False
# Let's get the row clicked whether it was by mouse or keyboard.
if row:
# Keyboard.
path = event
primary_action = True
else:
# Mouse.
# I do this to return fast (and to avoid leaking memory in 'e io.va' for now).
            if (event.button != 1) and (event.button != 3):
return False
elif event.button == 1:
# Left-click.
primary_action = True
else:
primary_action = False
coordinates = tv.get_path_at_pos(int(event.x), int(event.y))
# coordinates is None if the click is outside the rows but inside
# the widget.
if not coordinates:
return False
(path, column, x, y) = coordinates
# FIXME: We should do this on the uicore, possibly in every operation.
if self.uicore.use_va:
self.uicore.core.cmd0('e io.va=0')
else:
self.uicore.core.cmd0('e io.va=1')
# Main loop, deciding whether to take an action or display a pop-up.
if primary_action:
# It's a left click or Enter on a row.
# Is it over a plugin name?
# Get the information about the row.
if len(path) == 1:
link_name = self.store[path][1]
# Special for exports
if '0x' in link_name:
link_name = self.store[path][2]
else:
link_name = self.treestore[path][1]
# Detect if search string is from URL or PE/ELF
link_name = link_name.split("\t")
# Elf/PE (function)
if len( link_name ) == 1:
if '0x' in link_name[0]:
link_name = link_name[0]
elif 'reloc.' in link_name[0]:
link_name = link_name[0]
else:
# Just get graph for functions
if not 'loc.' in link_name[0] and link_name[0][0] != '.':
self.dograph = True
# Adjust section name to search inside r2 flags
link_name = "0x%08x" % self.uicore.core.num.get(link_name[0])
# Elf/PE (import/export)
elif len( link_name ) == 2 and link_name[1] != '':
link_name = "0x%08x" % int(link_name[0], 16)
self.search_and_graph(self, link_name)
self.dograph = False
else:
# It's a right click!
_time = event.time
# Is it over a plugin name?
# Get the information about the click.
if len(path) == 1:
link_name = self.store[path][1]
else:
link_name = self.treestore[path][1]
# Detect if search string is from URL or PE/ELF
link_name = link_name.split("\t")
# Elf/PE (function)
if len( link_name ) == 1:
if '0x' in link_name[0]:
link_name = link_name[0]
elif 'reloc.' in link_name[0]:
link_name = link_name[0]
else:
# Just get graph for functions
if not 'loc.' in link_name[0] and link_name[0][0] != '.':
self.dograph = True
# Adjust section name to search inside r2 flags
link_name = "0x%08x" % self.uicore.core.num.get(link_name[0])
# Elf/PE (import/export)
elif len( link_name ) == 2 and link_name[1] != '':
link_name = "0x%08x" % int(link_name[0], 16)
# Ok, now I show the popup menu !
# Create the popup menu
gm = gtk.Menu()
# And the items
e = gtk.MenuItem("Go to")
e.connect('activate', self.search_and_graph, link_name)
gm.append( e )
if self.dograph:
e = gtk.MenuItem("Show graph")
e.connect('activate', self.textviews.update_graph, link_name)
gm.append( e )
gm.show_all()
gm.popup( None, None, None, event.button, _time)
| gpl-2.0 | -6,145,221,357,693,862,000 | 38.592982 | 102 | 0.576391 | false |
malkoto1/just_cook | SQLAlchemy-1.0.4/lib/sqlalchemy/dialects/mysql/zxjdbc.py | 59 | 3942 | # mysql/zxjdbc.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+zxjdbc
:name: zxjdbc for Jython
:dbapi: zxjdbc
:connectstring: mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/\
<database>
:driverurl: http://dev.mysql.com/downloads/connector/j/
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
Character Sets
--------------
SQLAlchemy zxjdbc dialects pass unicode straight through to the
zxjdbc/JDBC layer. To allow multiple character sets to be sent from the
MySQL Connector/J JDBC driver, by default SQLAlchemy sets its
``characterEncoding`` connection property to ``UTF-8``. It may be
overridden via a ``create_engine`` URL parameter.
"""
import re
from ... import types as sqltypes, util
from ...connectors.zxJDBC import ZxJDBCConnector
from .base import BIT, MySQLDialect, MySQLExecutionContext
class _ZxJDBCBit(BIT):
def result_processor(self, dialect, coltype):
"""Converts boolean or byte arrays from MySQL Connector/J to longs."""
def process(value):
if value is None:
return value
if isinstance(value, bool):
return int(value)
v = 0
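            # fold the byte array into one integer, most significant byte first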
for i in value:
v = v << 8 | (i & 0xff)
value = v
return value
return process
class MySQLExecutionContext_zxjdbc(MySQLExecutionContext):
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT LAST_INSERT_ID()")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
jdbc_db_name = 'mysql'
jdbc_driver_name = 'com.mysql.jdbc.Driver'
execution_ctx_cls = MySQLExecutionContext_zxjdbc
colspecs = util.update_copy(
MySQLDialect.colspecs,
{
sqltypes.Time: sqltypes.Time,
BIT: _ZxJDBCBit
}
)
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If it's decided that issuing that sort of SQL leaves you SOL, then
# this can prefer the driver value.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs))
for key in ('character_set_connection', 'character_set'):
if opts.get(key, None):
return opts[key]
util.warn("Could not detect the connection character set. "
"Assuming latin1.")
return 'latin1'
def _driver_kwargs(self):
"""return kw arg dict to be sent to connect()."""
return dict(characterEncoding='UTF-8', yearIsDateType='false')
def _extract_error_code(self, exception):
# e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist
# [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' ()
m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.args))
        if m:
            return int(m.group(1))
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = []
r = re.compile('[.\-]')
for n in r.split(dbapi_con.dbversion):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
dialect = MySQLDialect_zxjdbc
| gpl-2.0 | 3,371,445,033,006,024,700 | 32.692308 | 78 | 0.626332 | false |
espadrine/opera | chromium/src/third_party/python_26/Tools/scripts/linktree.py | 101 | 2425 | #! /usr/bin/env python
# linktree
#
# Make a copy of a directory tree with symbolic links to all files in the
# original tree.
# All symbolic links go to a special symbolic link at the top, so you
# can easily fix things if the original source tree moves.
# See also "mkreal".
#
# usage: mklinks oldtree newtree
import sys, os
LINK = '.LINK' # Name of special symlink at the top.
debug = 0
def main():
if not 3 <= len(sys.argv) <= 4:
print 'usage:', sys.argv[0], 'oldtree newtree [linkto]'
return 2
oldtree, newtree = sys.argv[1], sys.argv[2]
if len(sys.argv) > 3:
link = sys.argv[3]
link_may_fail = 1
else:
link = LINK
link_may_fail = 0
if not os.path.isdir(oldtree):
print oldtree + ': not a directory'
return 1
try:
os.mkdir(newtree, 0777)
except os.error, msg:
print newtree + ': cannot mkdir:', msg
return 1
linkname = os.path.join(newtree, link)
try:
os.symlink(os.path.join(os.pardir, oldtree), linkname)
except os.error, msg:
if not link_may_fail:
print linkname + ': cannot symlink:', msg
return 1
else:
print linkname + ': warning: cannot symlink:', msg
linknames(oldtree, newtree, link)
return 0
def linknames(old, new, link):
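    # Recursively mirror 'old' under 'new', pointing symlinks through the relative 'link' indirection.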
if debug: print 'linknames', (old, new, link)
try:
names = os.listdir(old)
except os.error, msg:
print old + ': warning: cannot listdir:', msg
return
for name in names:
if name not in (os.curdir, os.pardir):
oldname = os.path.join(old, name)
linkname = os.path.join(link, name)
newname = os.path.join(new, name)
if debug > 1: print oldname, newname, linkname
if os.path.isdir(oldname) and \
not os.path.islink(oldname):
try:
os.mkdir(newname, 0777)
ok = 1
                except os.error, msg:
print newname + \
': warning: cannot mkdir:', msg
ok = 0
if ok:
linkname = os.path.join(os.pardir,
linkname)
linknames(oldname, newname, linkname)
else:
os.symlink(linkname, newname)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -8,982,702,451,634,868,000 | 29.3125 | 73 | 0.531546 | false |
pombreda/seascope | src/backend/plugins/gtags/GtagsProject.py | 2 | 3525 | #!/usr/bin/python
# Copyright (c) 2010 Anil Kumar
# All rights reserved.
#
# License: BSD
import os, string, re
from ..PluginBase import PluginFeatureBase, ProjectBase, ConfigBase, QueryBase
from ..PluginBase import PluginProcess
from ..CtagsCache import CtagsThread
class GtagsFeature(PluginFeatureBase):
def __init__(self):
PluginFeatureBase.__init__(self)
self.feat_desc = [
['REF', '-r'],
['DEF', ''],
#['<--', '2'],
['-->', '-r'],
			#['TXT', '4'],
['GREP','-g'],
['FIL', '-P'],
['INC', '-g'],
['QDEF', ''],
['CTREE','12'],
['CLGRAPH', '13'],
['CLGRAPHD', '14'],
['FFGRAPH', '14'],
['UPD', '25'],
]
self.ctree_query_args = [
['-->', '--> F', 'Calling tree' ],
#['<--', 'F -->', 'Called tree' ],
['REF', '==> F', 'Advanced calling tree' ],
]
def query_dlg_cb(self, req, cmd_str, in_opt):
if req != '' and in_opt['substring']:
req = '.*' + req + '.*'
opt = None
if in_opt['ignorecase']:
opt = '-i'
res = (cmd_str, req, opt)
return res
class ConfigGtags(ConfigBase):
def __init__(self):
ConfigBase.__init__(self, 'gtags')
class ProjectGtags(ProjectBase):
def __init__(self):
ProjectBase.__init__(self)
@staticmethod
def _prj_new_or_open(conf):
prj = ProjectGtags()
prj.feat = GtagsFeature()
prj.conf = conf
prj.qry = QueryGtags(prj.conf, prj.feat)
return (prj)
@staticmethod
def prj_new(proj_args):
d = proj_args[0]
prj = ProjectGtags.prj_open(d)
return None
@staticmethod
def prj_open(proj_path):
conf = ConfigGtags()
conf.proj_open(proj_path)
prj = ProjectGtags._prj_new_or_open(conf)
return (prj)
class GtProcess(PluginProcess):
def __init__(self, wdir, rq):
PluginProcess.__init__(self, wdir, rq)
self.name = 'gtags process'
def parse_result(self, text, sig):
text = re.split('\r?\n', text)
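		# 'global --result=cscope' emits lines of the form: <file> <function> <line> <text>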
if self.cmd_str == 'FIL':
res = [ ['', line.split(' ')[0], '', '' ] for line in text if line != '' ]
return res
res = []
for line in text:
if line == '':
continue
line = line.split(' ', 3)
line = ['<unknown>', line[0], line[2], line[3]]
res.append(line)
CtagsThread(sig).apply_fix(self.cmd_str, res, ['<unknown>'])
return None
class QueryGtags(QueryBase):
def __init__(self, conf, feat):
QueryBase.__init__(self)
self.conf = conf
self.feat = feat
def query(self, rquery):
if (not self.conf):
#or not self.conf.is_ready()):
print "pm_query not is_ready"
return None
cmd_str = rquery['cmd']
req = rquery['req']
opt = rquery['opt']
if opt == None or opt == '':
opt = []
else:
opt = opt.split()
cmd_opt = self.feat.cmd_str2id[cmd_str]
pargs = [ 'global', '-a', '--result=cscope', '-x' ] + opt
if cmd_opt != '':
pargs += [ cmd_opt ]
pargs += [ '--', req ]
qsig = GtProcess(self.conf.c_dir, [cmd_str, req]).run_query_process(pargs, req, rquery)
return qsig
def rebuild(self):
if (not self.conf.is_ready()):
print "pm_query not is_ready"
return None
if (os.path.exists(os.path.join(self.conf.c_dir, 'GTAGS'))):
pargs = [ 'global', '-u' ]
else:
pargs = [ 'gtags', '-i' ]
qsig = GtProcess(self.conf.c_dir, None).run_rebuild_process(pargs)
return qsig
def query_fl(self):
if not os.path.exists(os.path.join(self.conf.c_dir, 'GTAGS')):
return []
pargs = [ 'global', '-P', '-a' ]
qsig = GtProcess(self.conf.c_dir, None).run_query_fl(pargs)
return qsig
def gt_is_open(self):
return self.conf != None
def gt_is_ready(self):
return self.conf.is_ready()
| bsd-3-clause | -8,958,743,971,184,090,000 | 21.741935 | 89 | 0.584681 | false |
RPI-OPENEDX/edx-platform | common/test/acceptance/pages/lms/problem.py | 23 | 5236 | """
Problem Page.
"""
from bok_choy.page_object import PageObject
class ProblemPage(PageObject):
"""
View of problem page.
"""
url = None
CSS_PROBLEM_HEADER = '.problem-header'
def is_browser_on_page(self):
return self.q(css='.xblock-student_view').present
@property
def problem_name(self):
"""
Return the current problem name.
"""
return self.q(css='.problem-header').text[0]
@property
def problem_text(self):
"""
Return the text of the question of the problem.
"""
return self.q(css="div.problem p").text
@property
def message_text(self):
"""
Return the "message" text of the question of the problem.
"""
return self.q(css="div.problem span.message").text[0]
@property
def hint_text(self):
"""
Return the "hint" text of the problem from its div.
"""
return self.q(css="div.problem div.problem-hint").text[0]
def verify_mathjax_rendered_in_problem(self):
"""
Check that MathJax have been rendered in problem hint
"""
def mathjax_present():
""" Returns True if MathJax css is present in the problem body """
mathjax_container = self.q(css="div.problem p .MathJax .math")
return mathjax_container.visible and mathjax_container.present
self.wait_for(
mathjax_present,
description="MathJax rendered in problem body"
)
def verify_mathjax_rendered_in_hint(self):
"""
Check that MathJax have been rendered in problem hint
"""
def mathjax_present():
""" Returns True if MathJax css is present in the problem body """
mathjax_container = self.q(css="div.problem div.problem-hint .MathJax .math")
return mathjax_container.visible and mathjax_container.present
self.wait_for(
mathjax_present,
description="MathJax rendered in hint"
)
def fill_answer(self, text, input_num=None):
"""
Fill in the answer to the problem.
args:
text: String to fill the input with.
kwargs:
input_num: If provided, fills only the input_numth field. Else, all
input fields will be filled.
"""
fields = self.q(css='div.problem div.capa_inputtype.textline input')
fields = fields.nth(input_num) if input_num is not None else fields
fields.fill(text)
def fill_answer_numerical(self, text):
"""
Fill in the answer to a numerical problem.
"""
self.q(css='div.problem section.inputtype input').fill(text)
self.wait_for_ajax()
def click_check(self):
"""
Click the Check button!
"""
self.q(css='div.problem button.check').click()
self.wait_for_ajax()
def wait_for_status_icon(self):
"""
        Wait for the status icon to appear.
"""
self.wait_for_element_visibility('div.problem section.inputtype div .status', 'wait for status icon')
def click_hint(self):
"""
Click the Hint button.
"""
self.q(css='div.problem button.hint-button').click()
self.wait_for_ajax()
def click_choice(self, choice_value):
"""
Click the choice input(radio, checkbox or option) where value matches `choice_value` in choice group.
"""
self.q(css='div.problem .choicegroup input[value="' + choice_value + '"]').click()
self.wait_for_ajax()
def is_correct(self):
"""
Is there a "correct" status showing?
"""
return self.q(css="div.problem div.capa_inputtype.textline div.correct span.status").is_present()
def simpleprob_is_correct(self):
"""
Is there a "correct" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.correct span.status").is_present()
def simpleprob_is_partially_correct(self):
"""
Is there a "partially correct" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.partially-correct span.status").is_present()
def simpleprob_is_incorrect(self):
"""
Is there an "incorrect" status showing? Works with simple problem types.
"""
return self.q(css="div.problem section.inputtype div.incorrect span.status").is_present()
def click_clarification(self, index=0):
"""
Click on an inline icon that can be included in problem text using an HTML <clarification> element:
Problem <clarification>clarification text hidden by an icon in rendering</clarification> Text
"""
self.q(css='div.problem .clarification:nth-child({index}) i[data-tooltip]'.format(index=index + 1)).click()
@property
def visible_tooltip_text(self):
"""
Get the text seen in any tooltip currently visible on the page.
"""
self.wait_for_element_visibility('body > .tooltip', 'A tooltip is visible.')
return self.q(css='body > .tooltip').text[0]
| agpl-3.0 | 7,734,806,684,764,329,000 | 31.521739 | 115 | 0.600267 | false |
rudischilder/MAV_TU_Delft_gr10 | sw/ground_segment/python/settings_app/settingsframe.py | 29 | 6685 | #Boa:Frame:PlotFrame
from __future__ import division
import wx
import sys
import os
PPRZ_SRC = os.getenv("PAPARAZZI_SRC", os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../../..')))
sys.path.append(PPRZ_SRC + "/sw/lib/python")
from settings_tool import IvySettingsInterface
def create(parent, ac_ids):
return SettingsFrame(parent, ac_ids)
SLIDER_ID_OFFSET = 250000
BUTTON_ID_OFFSET = 2 * 250000
SLIDER_FACTOR = 100
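# Widget ids encode the owning setting's index plus an offset so events can be
# mapped back to their setting; SLIDER_FACTOR scales float setting values onto
# the integer-valued wx sliders.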
# Wraps TextCtrl to provide added functionality
class TextCtrlSetting(wx.TextCtrl):
update_callback = None
def __init__(self, parent, setting):
self.setting = setting
wx.TextCtrl.__init__(self, parent=parent, name=setting.shortname, id=setting.index)
self.Bind(wx.EVT_TEXT, self.onEvtText, self)
def RegisterUpdateCallback(self, cb):
self.update_callback = cb
def onEvtText(self, event):
index = int(self.GetId())
try:
value = float(self.GetValue())
self.update_callback(index, value)
except:
return
# helper function to toggle edit box boldness (bold = user-set, normal=downlink-received)
def setBold(self, bold):
font = self.GetFont()
if (bold):
font.SetWeight(wx.FONTWEIGHT_BOLD)
else:
font.SetWeight(wx.FONTWEIGHT_NORMAL)
self.SetFont(font)
def SetSettingValue(self, value):
if (self.setting.step < 1):
self.SetValue("%.2f" % float(value))
else:
self.SetValue("%i" % int(float(value)))
# Wraps slider
class SettingCtrl(wx.Slider):
update_callback = None
def __init__(self, parent, setting):
self.setting = setting
max_v = int(setting.max_value) * SLIDER_FACTOR
min_v = int(setting.min_value) * SLIDER_FACTOR
if (min_v >= max_v):
max_v = max_v + 1
wx.Slider.__init__(self, parent=parent, minValue=min_v, maxValue=max_v, style=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS, size=(200, 30), id=setting.index + SLIDER_ID_OFFSET)
self.SetLineSize(setting.step * SLIDER_FACTOR)
self.Bind(wx.EVT_MOUSEWHEEL, self.sliderWheel, self)
self.Bind(wx.EVT_SLIDER, self.OnEvtSlider, self)
def RegisterUpdateCallback(self, cb):
self.update_callback = cb
def OnEvtSlider(self, event):
slider = event.GetEventObject()
self.update_callback(slider.GetSettingIndex(), slider.GetSettingValue())
# Called on mouse wheel events (default handler seems backwards?)
def sliderWheel(self, event):
slider = event.GetEventObject()
if (event.GetWheelRotation() > 0):
slider.SetValue(slider.GetValue() + slider.GetLineSize())
else:
slider.SetValue(slider.GetValue() - slider.GetLineSize())
self.update_callback(slider.GetSettingIndex(), slider.GetSettingValue())
def GetSettingIndex(self):
index = int(self.GetId())
if index >= SLIDER_ID_OFFSET:
index = index - SLIDER_ID_OFFSET
return index
def SetSettingValue(self, value):
self.SetValue(int(float(value)) * SLIDER_FACTOR)
def GetSettingValue(self):
if (self.setting.step < 1):
return float(self.GetValue()) / SLIDER_FACTOR
else:
return int(self.GetValue()) // SLIDER_FACTOR
class SettingsFrame(wx.Frame):
edits = []
sliders = []
def __init__(self, parent, ac_ids):
self.settings = IvySettingsInterface(ac_ids)
title = "Settings %s (%s)" % (ac_ids, self.settings.GetACName())
wx.Frame.__init__(self, name=u'SettingsFrame', parent=parent, title=title, size=(480, 320))
self.book = wx.Notebook(self)
self.updates = []
self.Bind( wx.EVT_CLOSE, self.OnClose)
for setting_group in self.settings.groups:
page = wx.Panel(self.book)
vert_box = wx.BoxSizer(orient=wx.VERTICAL)
for setting in setting_group.member_list:
horz_box = wx.BoxSizer(orient=wx.HORIZONTAL)
text = wx.StaticText(page, label=setting.shortname, size=(100,30))
# Edit
edit = TextCtrlSetting(page, setting)
edit.RegisterUpdateCallback(self.editUpdate)
self.edits.append(edit)
# Slider
slider = SettingCtrl(page, setting)
slider.RegisterUpdateCallback(self.updateEditFromSlider)
self.sliders.append(slider)
# Button
button = wx.Button(page, id=setting.index + BUTTON_ID_OFFSET, label="Apply")
self.Bind(wx.EVT_BUTTON, self.onButton)
horz_box.AddWindow(text)
horz_box.AddWindow(edit)
horz_box.AddWindow(slider)
horz_box.AddWindow(button)
vert_box.AddWindow(horz_box)
page.SetSizer(vert_box)
self.book.AddPage(page, setting_group.name)
self.settings.RegisterCallback(self.onUpdate)
# Copy slider value into associated edit box
def updateEditFromSlider(self, index, value):
self.edits[index].ChangeValue(str(value))
self.edits[index].setBold(True)
# Called on edit box update
def editUpdate(self, index, value):
self.sliders[index].SetSettingValue(value)
self.edits[index].setBold(True)
# Called on button push
def onButton(self, event):
button = event.GetEventObject()
index = int(button.GetId())
if index >= BUTTON_ID_OFFSET:
index = index - BUTTON_ID_OFFSET
self.settings.lookup[index].value = self.sliders[index].GetSettingValue()
self.settings.SendSetting(index)
# Called for remote settings updates
def onUpdate(self, index, value, fromRemote):
# Schedule the call for later via wx (run after events)
# to prevent crashy crashy
wx.CallAfter(self.update_value, index, value, fromRemote)
# Called to update GUI with new values
def update_value(self, index, value, fromRemote):
editCtrl = self.edits[index]
if fromRemote and editCtrl.FindFocus() == editCtrl:
# don't process remote updates if the control is focused
return
editCtrl.SetSettingValue(value)
editCtrl.setBold(not fromRemote)
self.sliders[index].SetSettingValue(value)
def OnClose(self, event):
# need to forward close to canvas so that ivy is shut down, otherwise ivy hangs the shutdown
self.settings.OnClose()
self.Destroy()
| gpl-2.0 | -5,386,395,156,695,230,000 | 34.94086 | 174 | 0.615258 | false |
temasek/android_external_chromium_org_third_party_WebKit | Source/devtools/scripts/generate_devtools_html.py | 9 | 3173 | #!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from os import path
import os
import sys
def generate_include_tag(resource_path):
if (resource_path.endswith('.js')):
return ' <script type="text/javascript" src="%s"></script>\n' % resource_path
elif (resource_path.endswith('.css')):
return ' <link rel="stylesheet" type="text/css" href="%s">\n' % resource_path
else:
        assert False, 'Unknown resource type: %s' % resource_path
def write_app_input_html(app_input_file, app_output_file, application_name, debug):
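    # In release builds, drop the per-file <script>/<link> tags and inject the
    # single concatenated <application>.js/.css pair right before </head>.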
for line in app_input_file:
if not debug:
if '<script ' in line or '<link ' in line:
continue
if '</head>' in line:
app_output_file.write(generate_include_tag("%s.css" % application_name))
app_output_file.write(generate_include_tag("%s.js" % application_name))
app_output_file.write(line)
def main(argv):
if len(argv) < 4:
print('usage: %s app_input_html generated_app_html debug' % argv[0])
return 1
# The first argument is ignored. We put 'web.gyp' in the inputs list
# for this script, so every time the list of script gets changed, our html
# file is rebuilt.
app_input_html_name = argv[1]
app_output_html_name = argv[2]
debug = argv[3] != '0'
application_name = path.splitext(path.basename(app_input_html_name))[0]
with open(app_input_html_name, 'r') as app_input_html:
with open(app_output_html_name, 'w') as app_output_html:
write_app_input_html(app_input_html, app_output_html, application_name, debug)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause | 7,900,873,253,431,654,000 | 41.878378 | 90 | 0.695556 | false |
sriki18/scipy | scipy/sparse/linalg/tests/test_interface.py | 38 | 12724 | """Test functions for the sparse.linalg.interface module
"""
from __future__ import division, print_function, absolute_import
from functools import partial
from itertools import product
import operator
import nose
from numpy.testing import TestCase, assert_, assert_equal, \
assert_raises
import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import interface
# Only test matmul operator (A @ B) when available (Python 3.5+)
TEST_MATMUL = hasattr(operator, 'matmul')
class TestLinearOperator(TestCase):
def setUp(self):
self.A = np.array([[1,2,3],
[4,5,6]])
self.B = np.array([[1,2],
[3,4],
[5,6]])
self.C = np.array([[1,2],
[3,4]])
def test_matvec(self):
def get_matvecs(A):
return [{
'shape': A.shape,
'matvec': lambda x: np.dot(A, x).reshape(A.shape[0]),
'rmatvec': lambda x: np.dot(A.T.conj(),
x).reshape(A.shape[1])
},
{
'shape': A.shape,
'matvec': lambda x: np.dot(A, x),
'rmatvec': lambda x: np.dot(A.T.conj(), x),
'matmat': lambda x: np.dot(A, x)
}]
for matvecs in get_matvecs(self.A):
A = interface.LinearOperator(**matvecs)
assert_(A.args == ())
assert_equal(A.matvec(np.array([1,2,3])), [14,32])
assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(A * np.array([1,2,3]), [14,32])
assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])
assert_equal(A.dot(np.array([1,2,3])), [14,32])
assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(A.matvec(np.matrix([[1],[2],[3]])), [[14],[32]])
assert_equal(A * np.matrix([[1],[2],[3]]), [[14],[32]])
assert_equal(A.dot(np.matrix([[1],[2],[3]])), [[14],[32]])
assert_equal((2*A)*[1,1,1], [12,30])
assert_equal((2*A).rmatvec([1,1]), [10, 14, 18])
assert_equal((2*A).H.matvec([1,1]), [10, 14, 18])
assert_equal((2*A)*[[1],[1],[1]], [[12],[30]])
assert_equal((2*A).matmat([[1],[1],[1]]), [[12],[30]])
assert_equal((A*2)*[1,1,1], [12,30])
assert_equal((A*2)*[[1],[1],[1]], [[12],[30]])
assert_equal((2j*A)*[1,1,1], [12j,30j])
assert_equal((A+A)*[1,1,1], [12, 30])
assert_equal((A+A).rmatvec([1,1]), [10, 14, 18])
assert_equal((A+A).H.matvec([1,1]), [10, 14, 18])
assert_equal((A+A)*[[1],[1],[1]], [[12], [30]])
assert_equal((A+A).matmat([[1],[1],[1]]), [[12], [30]])
assert_equal((-A)*[1,1,1], [-6,-15])
assert_equal((-A)*[[1],[1],[1]], [[-6],[-15]])
assert_equal((A-A)*[1,1,1], [0,0])
assert_equal((A-A)*[[1],[1],[1]], [[0],[0]])
z = A+A
assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is A)
z = 2*A
assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] == 2)
assert_(isinstance(A.matvec([1, 2, 3]), np.ndarray))
assert_(isinstance(A.matvec(np.array([[1],[2],[3]])), np.ndarray))
assert_(isinstance(A * np.array([1,2,3]), np.ndarray))
assert_(isinstance(A * np.array([[1],[2],[3]]), np.ndarray))
assert_(isinstance(A.dot(np.array([1,2,3])), np.ndarray))
assert_(isinstance(A.dot(np.array([[1],[2],[3]])), np.ndarray))
assert_(isinstance(A.matvec(np.matrix([[1],[2],[3]])), np.ndarray))
assert_(isinstance(A * np.matrix([[1],[2],[3]]), np.ndarray))
assert_(isinstance(A.dot(np.matrix([[1],[2],[3]])), np.ndarray))
assert_(isinstance(2*A, interface._ScaledLinearOperator))
assert_(isinstance(2j*A, interface._ScaledLinearOperator))
assert_(isinstance(A+A, interface._SumLinearOperator))
assert_(isinstance(-A, interface._ScaledLinearOperator))
assert_(isinstance(A-A, interface._SumLinearOperator))
assert_((2j*A).dtype == np.complex_)
assert_raises(ValueError, A.matvec, np.array([1,2]))
assert_raises(ValueError, A.matvec, np.array([1,2,3,4]))
assert_raises(ValueError, A.matvec, np.array([[1],[2]]))
assert_raises(ValueError, A.matvec, np.array([[1],[2],[3],[4]]))
assert_raises(ValueError, lambda: A*A)
assert_raises(ValueError, lambda: A**2)
for matvecsA, matvecsB in product(get_matvecs(self.A),
get_matvecs(self.B)):
A = interface.LinearOperator(**matvecsA)
B = interface.LinearOperator(**matvecsB)
assert_equal((A*B)*[1,1], [50,113])
assert_equal((A*B)*[[1],[1]], [[50],[113]])
assert_equal((A*B).matmat([[1],[1]]), [[50],[113]])
assert_equal((A*B).rmatvec([1,1]), [71,92])
assert_equal((A*B).H.matvec([1,1]), [71,92])
assert_(isinstance(A*B, interface._ProductLinearOperator))
assert_raises(ValueError, lambda: A+B)
assert_raises(ValueError, lambda: A**2)
z = A*B
assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is B)
for matvecsC in get_matvecs(self.C):
C = interface.LinearOperator(**matvecsC)
assert_equal((C**2)*[1,1], [17,37])
assert_equal((C**2).rmatvec([1,1]), [22,32])
assert_equal((C**2).H.matvec([1,1]), [22,32])
assert_equal((C**2).matmat([[1],[1]]), [[17],[37]])
assert_(isinstance(C**2, interface._PowerLinearOperator))
def test_matmul(self):
if not TEST_MATMUL:
raise nose.SkipTest("matmul is only tested in Python 3.5+")
D = {'shape': self.A.shape,
'matvec': lambda x: np.dot(self.A, x).reshape(self.A.shape[0]),
'rmatvec': lambda x: np.dot(self.A.T.conj(),
x).reshape(self.A.shape[1]),
'matmat': lambda x: np.dot(self.A, x)}
A = interface.LinearOperator(**D)
B = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
b = B[0]
assert_equal(operator.matmul(A, b), A * b)
assert_equal(operator.matmul(A, B), A * B)
assert_raises(ValueError, operator.matmul, A, 2)
assert_raises(ValueError, operator.matmul, 2, A)
class TestAsLinearOperator(TestCase):
def setUp(self):
self.cases = []
def make_cases(dtype):
self.cases.append(np.matrix([[1,2,3],[4,5,6]], dtype=dtype))
self.cases.append(np.array([[1,2,3],[4,5,6]], dtype=dtype))
self.cases.append(sparse.csr_matrix([[1,2,3],[4,5,6]], dtype=dtype))
# Test default implementations of _adjoint and _rmatvec, which
# refer to each other.
def mv(x, dtype):
y = np.array([1 * x[0] + 2 * x[1] + 3 * x[2],
4 * x[0] + 5 * x[1] + 6 * x[2]], dtype=dtype)
if len(x.shape) == 2:
y = y.reshape(-1, 1)
return y
def rmv(x, dtype):
return np.array([1 * x[0] + 4 * x[1],
2 * x[0] + 5 * x[1],
3 * x[0] + 6 * x[1]], dtype=dtype)
class BaseMatlike(interface.LinearOperator):
def __init__(self, dtype):
self.dtype = np.dtype(dtype)
self.shape = (2,3)
def _matvec(self, x):
return mv(x, self.dtype)
class HasRmatvec(BaseMatlike):
def _rmatvec(self,x):
return rmv(x, self.dtype)
class HasAdjoint(BaseMatlike):
def _adjoint(self):
shape = self.shape[1], self.shape[0]
matvec = partial(rmv, dtype=self.dtype)
rmatvec = partial(mv, dtype=self.dtype)
return interface.LinearOperator(matvec=matvec,
rmatvec=rmatvec,
dtype=self.dtype,
shape=shape)
self.cases.append(HasRmatvec(dtype))
self.cases.append(HasAdjoint(dtype))
make_cases('int32')
make_cases('float32')
make_cases('float64')
def test_basic(self):
for M in self.cases:
A = interface.aslinearoperator(M)
M,N = A.shape
assert_equal(A.matvec(np.array([1,2,3])), [14,32])
assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(A * np.array([1,2,3]), [14,32])
assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])
assert_equal(A.rmatvec(np.array([1,2])), [9,12,15])
assert_equal(A.rmatvec(np.array([[1],[2]])), [[9],[12],[15]])
assert_equal(A.H.matvec(np.array([1,2])), [9,12,15])
assert_equal(A.H.matvec(np.array([[1],[2]])), [[9],[12],[15]])
assert_equal(
A.matmat(np.array([[1,4],[2,5],[3,6]])),
[[14,32],[32,77]])
assert_equal(A * np.array([[1,4],[2,5],[3,6]]), [[14,32],[32,77]])
if hasattr(M,'dtype'):
assert_equal(A.dtype, M.dtype)
def test_dot(self):
for M in self.cases:
A = interface.aslinearoperator(M)
M,N = A.shape
assert_equal(A.dot(np.array([1,2,3])), [14,32])
assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]])
assert_equal(
A.dot(np.array([[1,4],[2,5],[3,6]])),
[[14,32],[32,77]])
def test_repr():
A = interface.LinearOperator(shape=(1, 1), matvec=lambda x: 1)
repr_A = repr(A)
assert_('unspecified dtype' not in repr_A, repr_A)
def test_identity():
ident = interface.IdentityOperator((3, 3))
assert_equal(ident * [1, 2, 3], [1, 2, 3])
assert_equal(ident.dot(np.arange(9).reshape(3, 3)).ravel(), np.arange(9))
assert_raises(ValueError, ident.matvec, [1, 2, 3, 4])
def test_attributes():
A = interface.aslinearoperator(np.arange(16).reshape(4, 4))
def always_four_ones(x):
x = np.asarray(x)
assert_(x.shape == (3,) or x.shape == (3, 1))
return np.ones(4)
B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones)
for op in [A, B, A * B, A.H, A + A, B + B, A ** 4]:
assert_(hasattr(op, "dtype"))
assert_(hasattr(op, "shape"))
assert_(hasattr(op, "_matvec"))
def matvec(x):
""" Needed for test_pickle as local functions are not pickleable """
return np.zeros(3)
def test_pickle():
import pickle
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
A = interface.LinearOperator((3, 3), matvec)
s = pickle.dumps(A, protocol=protocol)
B = pickle.loads(s)
for k in A.__dict__:
assert_equal(getattr(A, k), getattr(B, k))
def test_inheritance():
class Empty(interface.LinearOperator):
pass
assert_raises(TypeError, Empty)
class Identity(interface.LinearOperator):
def __init__(self, n):
super(Identity, self).__init__(dtype=None, shape=(n, n))
def _matvec(self, x):
return x
id3 = Identity(3)
assert_equal(id3.matvec([1, 2, 3]), [1, 2, 3])
assert_raises(NotImplementedError, id3.rmatvec, [4, 5, 6])
class MatmatOnly(interface.LinearOperator):
def __init__(self, A):
super(MatmatOnly, self).__init__(A.dtype, A.shape)
self.A = A
def _matmat(self, x):
return self.A.dot(x)
mm = MatmatOnly(np.random.randn(5, 3))
assert_equal(mm.matvec(np.random.randn(3)).shape, (5,))
def test_dtypes_of_operator_sum():
# gh-6078
mat_complex = np.random.rand(2,2) + 1j * np.random.rand(2,2)
mat_real = np.random.rand(2,2)
complex_operator = interface.aslinearoperator(mat_complex)
real_operator = interface.aslinearoperator(mat_real)
sum_complex = complex_operator + complex_operator
sum_real = real_operator + real_operator
assert_equal(sum_real.dtype, np.float64)
assert_equal(sum_complex.dtype, np.complex128)
| bsd-3-clause | -1,209,716,646,552,355,300 | 36.09621 | 80 | 0.493634 | false |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Tools/framer/framer/member.py | 50 | 1933 | from framer import template
from framer.util import cstring, unindent
T_SHORT = "T_SHORT"
T_INT = "T_INT"
T_LONG = "T_LONG"
T_FLOAT = "T_FLOAT"
T_DOUBLE = "T_DOUBLE"
T_STRING = "T_STRING"
T_OBJECT = "T_OBJECT"
T_CHAR = "T_CHAR"
T_BYTE = "T_BYTE"
T_UBYTE = "T_UBYTE"
T_UINT = "T_UINT"
T_ULONG = "T_ULONG"
T_STRING_INPLACE = "T_STRING_INPLACE"
T_OBJECT_EX = "T_OBJECT_EX"
RO = READONLY = "READONLY"
READ_RESTRICTED = "READ_RESTRICTED"
WRITE_RESTRICTED = "WRITE_RESTRICTED"
RESTRICT = "RESTRICTED"
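# Map C declaration types to the corresponding PyMemberDef T_* type codes.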
c2t = {"int" : T_INT,
"unsigned int" : T_UINT,
"long" : T_LONG,
"unsigned long" : T_LONG,
"float" : T_FLOAT,
"double" : T_DOUBLE,
"char *" : T_CHAR,
"PyObject *" : T_OBJECT,
}
class member(object):
def __init__(self, cname=None, type=None, flags=None, doc=None):
self.type = type
self.flags = flags
self.cname = cname
self.doc = doc
self.name = None
self.struct = None
def register(self, name, struct):
self.name = name
self.struct = struct
self.initvars()
def initvars(self):
v = self.vars = {}
v["PythonName"] = self.name
if self.cname is not None:
v["CName"] = self.cname
else:
v["CName"] = self.name
v["Flags"] = self.flags or "0"
v["Type"] = self.get_type()
if self.doc is not None:
v["Docstring"] = cstring(unindent(self.doc))
v["StructName"] = self.struct.name
def get_type(self):
"""Deduce type code from struct specification if not defined"""
if self.type is not None:
return self.type
ctype = self.struct.get_type(self.name)
return c2t[ctype]
def dump(self, f):
if self.doc is None:
print >> f, template.memberdef_def % self.vars
else:
print >> f, template.memberdef_def_doc % self.vars
| mit | -8,502,403,472,369,238,000 | 25.479452 | 71 | 0.561821 | false |
vedujoshi/tempest | tempest/scenario/test_minimum_basic.py | 1 | 7041 | # Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.common import custom_matchers
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
class TestMinimumBasicScenario(manager.ScenarioTest):
"""This is a basic minimum scenario test.
This test below:
* across the multiple components
* as a regular user
* with and without optional parameters
* check command outputs
Steps:
1. Create image
2. Create keypair
3. Boot instance with keypair and get list of instances
4. Create volume and show list of volumes
    5. Attach volume to instance and get list of volumes
6. Add IP to instance
7. Create and add security group to instance
8. Check SSH connection to instance
9. Reboot instance
10. Check SSH connection to instance after reboot
"""
def nova_show(self, server):
got_server = (self.servers_client.show_server(server['id'])
['server'])
excluded_keys = ['OS-EXT-AZ:availability_zone']
# Exclude these keys because of LP:#1486475
excluded_keys.extend(['OS-EXT-STS:power_state', 'updated'])
self.assertThat(
server, custom_matchers.MatchesDictExceptForKeys(
got_server, excluded_keys=excluded_keys))
def cinder_show(self, volume):
got_volume = self.volumes_client.show_volume(volume['id'])['volume']
self.assertEqual(volume, got_volume)
def nova_reboot(self, server):
self.servers_client.reboot_server(server['id'], type='SOFT')
waiters.wait_for_server_status(self.servers_client,
server['id'], 'ACTIVE')
def check_disks(self):
# NOTE(andreaf) The device name may be different on different guest OS
disks = self.linux_client.get_disks()
self.assertEqual(1, disks.count(CONF.compute.volume_device_name))
def create_and_add_security_group_to_server(self, server):
secgroup = self._create_security_group()
self.servers_client.add_security_group(server['id'],
name=secgroup['name'])
self.addCleanup(self.servers_client.remove_security_group,
server['id'], name=secgroup['name'])
def wait_for_secgroup_add():
body = (self.servers_client.show_server(server['id'])
['server'])
return {'name': secgroup['name']} in body['security_groups']
if not test_utils.call_until_true(wait_for_secgroup_add,
CONF.compute.build_timeout,
CONF.compute.build_interval):
msg = ('Timed out waiting for adding security group %s to server '
'%s' % (secgroup['id'], server['id']))
raise exceptions.TimeoutException(msg)
def _get_floating_ip_in_server_addresses(self, floating_ip, server):
for addresses in server['addresses'].values():
for address in addresses:
if (address['OS-EXT-IPS:type'] == 'floating' and
address['addr'] == floating_ip['ip']):
return address
@decorators.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
@testtools.skipUnless(CONF.network_feature_enabled.floating_ips,
'Floating ips are not available')
@test.services('compute', 'volume', 'image', 'network')
def test_minimum_basic_scenario(self):
image = self.glance_image_create()
keypair = self.create_keypair()
server = self.create_server(image_id=image, key_name=keypair['name'])
servers = self.servers_client.list_servers()['servers']
self.assertIn(server['id'], [x['id'] for x in servers])
self.nova_show(server)
volume = self.create_volume()
volumes = self.volumes_client.list_volumes()['volumes']
self.assertIn(volume['id'], [x['id'] for x in volumes])
self.cinder_show(volume)
volume = self.nova_volume_attach(server, volume)
self.addCleanup(self.nova_volume_detach, server, volume)
self.cinder_show(volume)
floating_ip = self.create_floating_ip(server)
# fetch the server again to make sure the addresses were refreshed
# after associating the floating IP
server = self.servers_client.show_server(server['id'])['server']
address = self._get_floating_ip_in_server_addresses(
floating_ip, server)
self.assertIsNotNone(
address,
"Failed to find floating IP '%s' in server addresses: %s" %
(floating_ip['ip'], server['addresses']))
self.create_and_add_security_group_to_server(server)
# check that we can SSH to the server before reboot
self.linux_client = self.get_remote_client(
floating_ip['ip'], private_key=keypair['private_key'],
server=server)
self.nova_reboot(server)
# check that we can SSH to the server after reboot
# (both connections are part of the scenario)
self.linux_client = self.get_remote_client(
floating_ip['ip'], private_key=keypair['private_key'],
server=server)
self.check_disks()
# delete the floating IP, this should refresh the server addresses
self.compute_floating_ips_client.delete_floating_ip(floating_ip['id'])
def is_floating_ip_detached_from_server():
server_info = self.servers_client.show_server(
server['id'])['server']
address = self._get_floating_ip_in_server_addresses(
floating_ip, server_info)
return (not address)
if not test_utils.call_until_true(
is_floating_ip_detached_from_server,
CONF.compute.build_timeout,
CONF.compute.build_interval):
msg = ("Floating IP '%s' should not be in server addresses: %s" %
(floating_ip['ip'], server['addresses']))
raise exceptions.TimeoutException(msg)
| apache-2.0 | -234,094,730,467,651,780 | 39.699422 | 78 | 0.626758 | false |
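The scenario above relies twice on tempest's `test_utils.call_until_true` polling loop. A minimal standalone sketch of that pattern (a re-implementation for illustration, not tempest's actual code; the names `predicate`, `timeout` and `interval` are assumptions):

import time

def call_until_true(predicate, timeout, interval):
    """Poll predicate() every `interval` seconds until it returns True
    or `timeout` seconds have elapsed. Returns the last result."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# Usage mirroring the floating-IP detach wait in the test:
# if not call_until_true(is_floating_ip_detached_from_server, 300, 10):
#     raise TimeoutError("floating IP still attached")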
UnrememberMe/pants | tests/python/pants_test/backend/project_info/tasks/resolve_jars_test_mixin.py | 17 | 3782 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.util.contextutil import temporary_dir
class ResolveJarsTestMixin(object):
"""Mixin for evaluating tasks which resolve their own source and javadoc jars (such as Export)."""
def evaluate_subtask(self, targets, workdir, load_extra_confs, extra_args, expected_jars):
"""Evaluate the underlying task with the given target specs.
:param targets: the list of targets.
:param string workdir: the working directory to execute in.
:param bool load_extra_confs: whether to attempt to download sources and javadocs.
:param list extra_args: extra args to pass to the task.
:param list expected_jars: list of jars that were expected to be resolved.
"""
raise NotImplementedError()
def _test_jar_lib_with_url(self, load_all):
with self.temporary_workdir() as workdir:
with self.temporary_sourcedir() as source_dir:
with temporary_dir() as dist_dir:
os.makedirs(os.path.join(source_dir, 'src'))
with open(os.path.join(source_dir, 'src', 'BUILD.one'), 'w+') as f:
f.write(dedent("""
jvm_binary(name='synthetic',
source='Main.java',
)
"""))
with open(os.path.join(source_dir, 'src', 'Main.java'), 'w+') as f:
f.write(dedent("""
public class Main {
public static void main(String[] args) {
System.out.println("Hello.");
}
}
"""))
with open(os.path.join(source_dir, 'src', 'Foo.java'), 'w+') as f:
f.write(dedent("""
public class Foo {
public static void main(String[] args) {
Main.main(args);
}
}
"""))
binary_target = '{}:synthetic'.format(os.path.join(source_dir, 'src'))
pants_run = self.run_pants_with_workdir(['binary', binary_target,
'--pants-distdir={}'.format(dist_dir)], workdir)
self.assert_success(pants_run)
jar_path = os.path.realpath(os.path.join(dist_dir, 'synthetic.jar'))
self.assertTrue(os.path.exists(jar_path), 'Synthetic binary was not created!')
jar_url = 'file://{}'.format(os.path.abspath(jar_path))
with open(os.path.join(source_dir, 'src', 'BUILD.two'), 'w+') as f:
f.write(dedent("""
jar_library(name='lib_with_url',
jars=[
jar(org='org.pantsbuild', name='synthetic-test-jar', rev='1.2.3',
url='{jar_url}')
],
)
java_library(name='src',
sources=['Foo.java'],
dependencies=[':lib_with_url'],
)
""").format(jar_url=jar_url))
spec_names = ['lib_with_url', 'src']
targets = ['{0}:{1}'.format(os.path.join(source_dir, 'src'), name) for name in spec_names]
with temporary_dir() as ivy_temp_dir:
extra_args = ['--ivy-cache-dir={}'.format(ivy_temp_dir)]
self.evaluate_subtask(targets, workdir, load_all, extra_args=extra_args,
expected_jars=['org.pantsbuild:synthetic-test-jar:1.2.3'])
def test_jar_lib_with_url_resolve_default(self):
self._test_jar_lib_with_url(False)
def test_jar_lib_with_url_resolve_all(self):
self._test_jar_lib_with_url(True)
| apache-2.0 | 5,588,423,080,037,379,000 | 40.108696 | 100 | 0.565045 | false |
MattsFleaMarket/python-for-android | python3-alpha/python3-src/PC/VS8.0/build_ssl.py | 48 | 10103 | # Script for building the _ssl and _hashlib modules for Windows.
# Uses Perl to setup the OpenSSL environment correctly
# and build OpenSSL, then invokes a simple nmake session
# for the actual _ssl.pyd and _hashlib.pyd DLLs.
# THEORETICALLY, you can:
# * Unpack the latest SSL release one level above your main Python source
# directory. It is likely you will already find the zlib library and
# any other external packages there.
# * Install ActivePerl and ensure it is somewhere on your path.
# * Run this script from the PC/VS8.0 directory.
#
# it should configure and build SSL, then build the _ssl and _hashlib
# Python extensions without intervention.
# Modified by Christian Heimes
# Now this script supports pre-generated makefiles and assembly files.
# Developers don't need an installation of Perl anymore to build Python. An
# svn checkout from our svn repository is enough.
#
# In Order to create the files in the case of an update you still need Perl.
# Run build_ssl in this order:
# python.exe build_ssl.py Release x64
# python.exe build_ssl.py Release Win32
import os, sys, re, shutil
# Find all "foo.exe" files on the PATH.
def find_all_on_path(filename, extras = None):
entries = os.environ["PATH"].split(os.pathsep)
ret = []
for p in entries:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
if extras:
for p in extras:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
return ret
# Find a suitable Perl installation for OpenSSL.
# cygwin perl does *not* work. ActivePerl does.
# Being a Perl dummy, the simplest way I can check is if the "Win32" package
# is available.
def find_working_perl(perls):
for perl in perls:
fh = os.popen('"%s" -e "use Win32;"' % perl)
fh.read()
rc = fh.close()
if rc:
continue
return perl
print("Can not find a suitable PERL:")
if perls:
print(" the following perl interpreters were found:")
for p in perls:
print(" ", p)
print(" None of these versions appear suitable for building OpenSSL")
else:
print(" NO perl interpreters were found on this machine at all!")
print(" Please install ActivePerl and ensure it appears on your path")
return None
# Locate the best SSL directory given a few roots to look into.
def find_best_ssl_dir(sources):
candidates = []
for s in sources:
try:
# note: do not abspath s; the build will fail if any
# higher up directory name has spaces in it.
fnames = os.listdir(s)
except os.error:
fnames = []
for fname in fnames:
fqn = os.path.join(s, fname)
if os.path.isdir(fqn) and fname.startswith("openssl-"):
candidates.append(fqn)
# Now we have all the candidates, locate the best.
best_parts = []
best_name = None
for c in candidates:
parts = re.split("[.-]", os.path.basename(c))[1:]
# eg - openssl-0.9.7-beta1 - ignore all "beta" or any other qualifiers
if len(parts) >= 4:
continue
if parts > best_parts:
best_parts = parts
best_name = c
if best_name is not None:
print("Found an SSL directory at '%s'" % (best_name,))
else:
print("Could not find an SSL directory in '%s'" % (sources,))
sys.stdout.flush()
return best_name
def create_makefile64(makefile, m32):
"""Create and fix makefile for 64bit
Replace 32 with 64bit directories
"""
if not os.path.isfile(m32):
return
with open(m32) as fin:
with open(makefile, 'w') as fout:
for line in fin:
line = line.replace("=tmp32", "=tmp64")
line = line.replace("=out32", "=out64")
line = line.replace("=inc32", "=inc64")
# force 64 bit machine
line = line.replace("MKLIB=lib", "MKLIB=lib /MACHINE:X64")
line = line.replace("LFLAGS=", "LFLAGS=/MACHINE:X64 ")
# don't link against the lib on 64bit systems
line = line.replace("bufferoverflowu.lib", "")
fout.write(line)
os.unlink(m32)
def fix_makefile(makefile):
"""Fix some stuff in all makefiles
"""
if not os.path.isfile(makefile):
return
with open(makefile) as fin:
lines = fin.readlines()
with open(makefile, 'w') as fout:
for line in lines:
if line.startswith("PERL="):
continue
if line.startswith("CP="):
line = "CP=copy\n"
if line.startswith("MKDIR="):
line = "MKDIR=mkdir\n"
if line.startswith("CFLAG="):
line = line.strip()
for algo in ("RC5", "MDC2", "IDEA"):
noalgo = " -DOPENSSL_NO_%s" % algo
if noalgo not in line:
line = line + noalgo
line = line + '\n'
fout.write(line)
def run_configure(configure, do_script):
print("perl Configure "+configure+" no-idea no-mdc2")
os.system("perl Configure "+configure+" no-idea no-mdc2")
print(do_script)
os.system(do_script)
def cmp(f1, f2):
bufsize = 1024 * 8
with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
return True
def copy(src, dst):
if os.path.isfile(dst) and cmp(src, dst):
return
shutil.copy(src, dst)
def main():
build_all = "-a" in sys.argv
if sys.argv[1] == "Release":
debug = False
elif sys.argv[1] == "Debug":
debug = True
else:
raise ValueError(str(sys.argv))
if sys.argv[2] == "Win32":
arch = "x86"
configure = "VC-WIN32"
do_script = "ms\\do_nasm"
makefile="ms\\nt.mak"
m32 = makefile
dirsuffix = "32"
elif sys.argv[2] == "x64":
arch="amd64"
configure = "VC-WIN64A"
do_script = "ms\\do_win64a"
makefile = "ms\\nt64.mak"
m32 = makefile.replace('64', '')
dirsuffix = "64"
#os.environ["VSEXTCOMP_USECL"] = "MS_OPTERON"
else:
raise ValueError(str(sys.argv))
make_flags = ""
if build_all:
make_flags = "-a"
# perl should be on the path, but we also look in "\perl" and "c:\\perl"
# as "well known" locations
perls = find_all_on_path("perl.exe", ["\\perl\\bin", "C:\\perl\\bin"])
perl = find_working_perl(perls)
if perl:
print("Found a working perl at '%s'" % (perl,))
else:
print("No Perl installation was found. Existing Makefiles are used.")
sys.stdout.flush()
# Look for SSL 3 levels up from PC/VS8.0 - ie, same place zlib etc all live.
ssl_dir = find_best_ssl_dir(("..\\..\\..",))
if ssl_dir is None:
sys.exit(1)
old_cd = os.getcwd()
try:
os.chdir(ssl_dir)
# rebuild makefile when we roll over from the 32 to the 64 bit build
if arch == "amd64" and os.path.isfile(m32) and not os.path.isfile(makefile):
os.unlink(m32)
# If the ssl makefiles do not exist, we invoke Perl to generate them.
# Due to a bug in this script, the makefile sometimes ended up empty
# Force a regeneration if it is.
if not os.path.isfile(makefile) or os.path.getsize(makefile)==0:
if perl is None:
print("Perl is required to build the makefiles!")
sys.exit(1)
print("Creating the makefiles...")
sys.stdout.flush()
# Put our working Perl at the front of our path
os.environ["PATH"] = os.path.dirname(perl) + \
os.pathsep + \
os.environ["PATH"]
run_configure(configure, do_script)
if debug:
print("OpenSSL debug builds aren't supported.")
#if arch=="x86" and debug:
# # the do_masm script in openssl doesn't generate a debug
# # build makefile so we generate it here:
# os.system("perl util\mk1mf.pl debug "+configure+" >"+makefile)
if arch == "amd64":
create_makefile64(makefile, m32)
fix_makefile(makefile)
copy(r"crypto\buildinf.h", r"crypto\buildinf_%s.h" % arch)
copy(r"crypto\opensslconf.h", r"crypto\opensslconf_%s.h" % arch)
# If the assembler files don't exist in tmpXX, copy them there
if perl is None and os.path.exists("asm"+dirsuffix):
if not os.path.exists("tmp"+dirsuffix):
os.mkdir("tmp"+dirsuffix)
for f in os.listdir("asm"+dirsuffix):
if not f.endswith(".asm"): continue
if os.path.isfile(r"tmp%s\%s" % (dirsuffix, f)): continue
shutil.copy(r"asm%s\%s" % (dirsuffix, f), "tmp"+dirsuffix)
# Now run make.
if arch == "amd64":
rc = os.system("ml64 -c -Foms\\uptable.obj ms\\uptable.asm")
if rc:
print("ml64 assembler has failed.")
sys.exit(rc)
copy(r"crypto\buildinf_%s.h" % arch, r"crypto\buildinf.h")
copy(r"crypto\opensslconf_%s.h" % arch, r"crypto\opensslconf.h")
#makeCommand = "nmake /nologo PERL=\"%s\" -f \"%s\"" %(perl, makefile)
makeCommand = "nmake /nologo -f \"%s\"" % makefile
print("Executing ssl makefiles:", makeCommand)
sys.stdout.flush()
rc = os.system(makeCommand)
if rc:
print("Executing "+makefile+" failed")
print(rc)
sys.exit(rc)
finally:
os.chdir(old_cd)
sys.exit(rc)
if __name__=='__main__':
main()
| apache-2.0 | -5,549,738,077,198,014,000 | 35.472924 | 84 | 0.566465 | false |
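The `cmp`/`copy` pair above implements an idempotent copy: the destination is rewritten only when its bytes differ, so a changed timestamp alone never triggers a rebuild. The same idea as a self-contained sketch (`copy_if_changed` is an illustrative name, not part of the script):

import os
import shutil

def copy_if_changed(src, dst, bufsize=8192):
    """Copy src over dst only when the contents differ."""
    if os.path.isfile(dst):
        with open(src, 'rb') as f1, open(dst, 'rb') as f2:
            while True:
                b1, b2 = f1.read(bufsize), f2.read(bufsize)
                if b1 != b2:
                    break          # contents differ, fall through to copy
                if not b1:
                    return         # identical files, nothing to do
    shutil.copy(src, dst)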
vmax-feihu/hue | desktop/core/ext-py/Pygments-1.3.1/pygments/formatters/other.py | 75 | 3857 | # -*- coding: utf-8 -*-
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import OptionError, get_choice_opt, b
from pygments.token import Token
from pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter']
class NullFormatter(Formatter):
"""
Output the text unchanged without any formatting.
"""
name = 'Text only'
aliases = ['text', 'null']
filenames = ['*.txt']
def format(self, tokensource, outfile):
enc = self.encoding
for ttype, value in tokensource:
if enc:
outfile.write(value.encode(enc))
else:
outfile.write(value)
class RawTokenFormatter(Formatter):
r"""
Format tokens as a raw representation for storing token streams.
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
`lexer list <lexers.txt>`_.
Only two options are accepted:
`compress`
If set to ``'gz'`` or ``'bz2'``, compress the output with the given
compression algorithm after encoding (default: ``''``).
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
*New in Pygments 0.11.*
"""
name = 'Raw tokens'
aliases = ['raw', 'tokens']
filenames = ['*.raw']
unicodeoutput = False
def __init__(self, **options):
Formatter.__init__(self, **options)
if self.encoding:
raise OptionError('the raw formatter does not support the '
'encoding option')
self.encoding = 'ascii' # let pygments.format() do the right thing
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
self.error_color = options.get('error_color', None)
if self.error_color is True:
self.error_color = 'red'
if self.error_color is not None:
try:
colorize(self.error_color, '')
except KeyError:
raise ValueError("Invalid color %r specified" %
self.error_color)
def format(self, tokensource, outfile):
try:
outfile.write(b(''))
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
if self.compress == 'gz':
import gzip
outfile = gzip.GzipFile('', 'wb', 9, outfile)
def write(text):
outfile.write(text.encode())
flush = outfile.flush
elif self.compress == 'bz2':
import bz2
compressor = bz2.BZ2Compressor(9)
def write(text):
outfile.write(compressor.compress(text.encode()))
def flush():
outfile.write(compressor.flush())
outfile.flush()
else:
def write(text):
outfile.write(text.encode())
flush = outfile.flush
lasttype = None
lastval = u''
if self.error_color:
for ttype, value in tokensource:
line = "%s\t%r\n" % (ttype, value)
if ttype is Token.Error:
write(colorize(self.error_color, line))
else:
write(line)
else:
for ttype, value in tokensource:
write("%s\t%r\n" % (ttype, value))
flush()
| apache-2.0 | 6,905,584,914,308,392,000 | 31.965812 | 77 | 0.547057 | false |
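The raw dump format documented above (`tokentype<TAB>repr(tokenstring)` per line) is easy to read back without Pygments. A hedged sketch of a reader for the uncompressed, ASCII-encoded output (Python 3; `read_raw_tokens` is an illustrative name, not a Pygments API):

import ast

def read_raw_tokens(path):
    """Yield (token_type, value) pairs from a RawTokenFormatter dump.
    Assumes the uncompressed format; token types stay plain strings."""
    with open(path, encoding='ascii') as fh:
        for line in fh:
            ttype, _, raw = line.rstrip('\n').partition('\t')
            # the value was written with repr(), so literal_eval restores it
            yield ttype, ast.literal_eval(raw)

# for ttype, value in read_raw_tokens('dump.raw'):
#     print(ttype, value)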
thurt/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/pty.py | 109 | 4869 | """Pseudo terminal utilities."""
# Bugs: No signal handling. Doesn't set slave termios and window size.
# Only tested on Linux.
# See: W. Richard Stevens. 1992. Advanced Programming in the
# UNIX Environment. Chapter 19.
# Author: Steen Lumholt -- with additions by Guido.
from select import select
import os
import tty
__all__ = ["openpty","fork","spawn"]
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
CHILD = 0
def openpty():
"""openpty() -> (master_fd, slave_fd)
Open a pty master/slave pair, using os.openpty() if possible."""
try:
return os.openpty()
except (AttributeError, OSError):
pass
master_fd, slave_name = _open_terminal()
slave_fd = slave_open(slave_name)
return master_fd, slave_fd
def master_open():
"""master_open() -> (master_fd, slave_name)
Open a pty master and return the fd, and the filename of the slave end.
Deprecated, use openpty() instead."""
try:
master_fd, slave_fd = os.openpty()
except (AttributeError, OSError):
pass
else:
slave_name = os.ttyname(slave_fd)
os.close(slave_fd)
return master_fd, slave_name
return _open_terminal()
def _open_terminal():
"""Open pty master and return (master_fd, tty_name).
SGI and generic BSD version, for when openpty() fails."""
try:
import sgi
except ImportError:
pass
else:
try:
tty_name, master_fd = sgi._getpty(os.O_RDWR, 0666, 0)
except IOError, msg:
raise os.error, msg
return master_fd, tty_name
for x in 'pqrstuvwxyzPQRST':
for y in '0123456789abcdef':
pty_name = '/dev/pty' + x + y
try:
fd = os.open(pty_name, os.O_RDWR)
except os.error:
continue
return (fd, '/dev/tty' + x + y)
raise os.error, 'out of pty devices'
def slave_open(tty_name):
"""slave_open(tty_name) -> slave_fd
Open the pty slave and acquire the controlling terminal, returning
the opened file descriptor.
Deprecated, use openpty() instead."""
result = os.open(tty_name, os.O_RDWR)
try:
from fcntl import ioctl, I_PUSH
except ImportError:
return result
try:
ioctl(result, I_PUSH, "ptem")
ioctl(result, I_PUSH, "ldterm")
except IOError:
pass
return result
def fork():
"""fork() -> (pid, master_fd)
Fork and make the child a session leader with a controlling terminal."""
try:
pid, fd = os.forkpty()
except (AttributeError, OSError):
pass
else:
if pid == CHILD:
try:
os.setsid()
except OSError:
# os.forkpty() already set us session leader
pass
return pid, fd
master_fd, slave_fd = openpty()
pid = os.fork()
if pid == CHILD:
# Establish a new session.
os.setsid()
os.close(master_fd)
# Slave becomes stdin/stdout/stderr of child.
os.dup2(slave_fd, STDIN_FILENO)
os.dup2(slave_fd, STDOUT_FILENO)
os.dup2(slave_fd, STDERR_FILENO)
if (slave_fd > STDERR_FILENO):
os.close (slave_fd)
# Explicitly open the tty to make it become a controlling tty.
tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
os.close(tmp_fd)
else:
os.close(slave_fd)
# Parent and child process.
return pid, master_fd
def _writen(fd, data):
"""Write all the data to a descriptor."""
while data != '':
n = os.write(fd, data)
data = data[n:]
def _read(fd):
"""Default read function."""
return os.read(fd, 1024)
def _copy(master_fd, master_read=_read, stdin_read=_read):
"""Parent copy loop.
Copies
pty master -> standard output (master_read)
standard input -> pty master (stdin_read)"""
while 1:
rfds, wfds, xfds = select(
[master_fd, STDIN_FILENO], [], [])
if master_fd in rfds:
data = master_read(master_fd)
os.write(STDOUT_FILENO, data)
if STDIN_FILENO in rfds:
data = stdin_read(STDIN_FILENO)
_writen(master_fd, data)
def spawn(argv, master_read=_read, stdin_read=_read):
"""Create a spawned process."""
if type(argv) == type(''):
argv = (argv,)
pid, master_fd = fork()
if pid == CHILD:
os.execlp(argv[0], *argv)
try:
mode = tty.tcgetattr(STDIN_FILENO)
tty.setraw(STDIN_FILENO)
restore = 1
except tty.error: # This is the same as termios.error
restore = 0
try:
_copy(master_fd, master_read, stdin_read)
except (IOError, OSError):
if restore:
tty.tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
os.close(master_fd)
| apache-2.0 | -7,293,037,869,893,835,000 | 26.982759 | 76 | 0.575478 | false |
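A typical use of `spawn()` above is to run a child under a pty while recording everything it prints; the `master_read` hook exists for exactly that. A minimal sketch against the same `spawn()` API (POSIX only, written for the modern Python 3 stdlib module; `tee_read` is an illustrative name):

import os
import pty

captured = bytearray()

def tee_read(fd):
    """master_read hook: record everything the child writes to its tty."""
    data = os.read(fd, 1024)
    captured.extend(data)
    return data                    # still forwarded to our stdout by spawn()

pty.spawn(['ls', '-l'], master_read=tee_read)
print('child wrote %d bytes' % len(captured))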
jvrsantacruz/XlsxWriter | xlsxwriter/test/comparison/test_textbox16.py | 8 | 1117 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'textbox16.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox('E9', 'This is some text',
{'align': {'vertical': 'middle'}})
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | -2,977,649,407,571,888,600 | 25.595238 | 79 | 0.573859 | false |
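For reference, a standalone sketch of what this comparison test exercises, using the public XlsxWriter API (the output filename is illustrative):

import xlsxwriter

workbook = xlsxwriter.Workbook('textbox_demo.xlsx')
worksheet = workbook.add_worksheet()

# A textbox anchored at cell E9 with vertically centred text,
# matching the options exercised by the test above.
worksheet.insert_textbox('E9', 'This is some text',
                         {'align': {'vertical': 'middle'}})
workbook.close()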
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/aaa/aaasession.py | 1 | 8759 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class aaasession(base_resource) :
""" Configuration for active connection resource. """
def __init__(self) :
self._username = ""
self._groupname = ""
self._iip = ""
self._netmask = ""
self._all = False
self._publicip = ""
self._publicport = 0
self._ipaddress = ""
self._port = 0
self._privateip = ""
self._privateport = 0
self._destip = ""
self._destport = 0
self._intranetip = ""
self._peid = 0
self.___count = 0
@property
def username(self) :
"""Name of the AAA user.<br/>Minimum length = 1.
"""
try :
return self._username
except Exception as e:
raise e
@username.setter
def username(self, username) :
"""Name of the AAA user.<br/>Minimum length = 1
"""
try :
self._username = username
except Exception as e:
raise e
@property
def groupname(self) :
"""Name of the AAA group.<br/>Minimum length = 1.
"""
try :
return self._groupname
except Exception as e:
raise e
@groupname.setter
def groupname(self, groupname) :
"""Name of the AAA group.<br/>Minimum length = 1
"""
try :
self._groupname = groupname
except Exception as e:
raise e
@property
def iip(self) :
"""IP address or the first address in the intranet IP range.<br/>Minimum length = 1.
"""
try :
return self._iip
except Exception as e:
raise e
@iip.setter
def iip(self, iip) :
"""IP address or the first address in the intranet IP range.<br/>Minimum length = 1
"""
try :
self._iip = iip
except Exception as e:
raise e
@property
def netmask(self) :
"""Subnet mask for the intranet IP range.<br/>Minimum length = 1.
"""
try :
return self._netmask
except Exception as e:
raise e
@netmask.setter
def netmask(self, netmask) :
"""Subnet mask for the intranet IP range.<br/>Minimum length = 1
"""
try :
self._netmask = netmask
except Exception as e:
raise e
@property
def all(self) :
"""Terminate all active AAA-TM/VPN sessions.
"""
try :
return self._all
except Exception as e:
raise e
@all.setter
def all(self, all) :
"""Terminate all active AAA-TM/VPN sessions.
"""
try :
self._all = all
except Exception as e:
raise e
@property
def publicip(self) :
"""Client's public IP address.
"""
try :
return self._publicip
except Exception as e:
raise e
@property
def publicport(self) :
"""Client's public port.<br/>Range 1 - 65535.
"""
try :
return self._publicport
except Exception as e:
raise e
@property
def ipaddress(self) :
"""NetScaler's IP address.
"""
try :
return self._ipaddress
except Exception as e:
raise e
@property
def port(self) :
"""NetScaler's port.<br/>Range 1 - 65535.
"""
try :
return self._port
except Exception as e:
raise e
@property
def privateip(self) :
"""Client's private/mapped IP address.
"""
try :
return self._privateip
except Exception as e:
raise e
@property
def privateport(self) :
"""Client's private/mapped port.<br/>Range 1 - 65535.
"""
try :
return self._privateport
except Exception as e:
raise e
@property
def destip(self) :
"""Destination IP address.
"""
try :
return self._destip
except Exception as e:
raise e
@property
def destport(self) :
"""Destination port.<br/>Range 1 - 65535.
"""
try :
return self._destport
except Exception as e:
raise e
@property
def intranetip(self) :
"""Specifies the Intranet IP.
"""
try :
return self._intranetip
except Exception as e:
raise e
@property
def peid(self) :
"""Core id of the session owner.
"""
try :
return self._peid
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(aaasession_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.aaasession
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def kill(cls, client, resource) :
""" Use this API to kill aaasession.
"""
try :
if type(resource) is not list :
killresource = aaasession()
killresource.username = resource.username
killresource.groupname = resource.groupname
killresource.iip = resource.iip
killresource.netmask = resource.netmask
killresource.all = resource.all
return killresource.perform_operation(client,"kill")
else :
if (resource and len(resource) > 0) :
killresources = [ aaasession() for _ in range(len(resource))]
for i in range(len(resource)) :
killresources[i].username = resource[i].username
killresources[i].groupname = resource[i].groupname
killresources[i].iip = resource[i].iip
killresources[i].netmask = resource[i].netmask
killresources[i].all = resource[i].all
result = cls.perform_operation_bulk_request(client, killresources,"kill")
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the aaasession resources that are configured on netscaler.
"""
try :
if not name :
obj = aaasession()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_args(cls, client, args) :
""" Use this API to fetch all the aaasession resources that are configured on netscaler.
# This uses aaasession_args which is a way to provide additional arguments while fetching the resources.
"""
try :
obj = aaasession()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(args)
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of aaasession resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = aaasession()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the aaasession resources configured on NetScaler.
"""
try :
obj = aaasession()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of aaasession resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = aaasession()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class aaasession_response(base_response) :
def __init__(self, length=1) :
self.aaasession = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.aaasession = [aaasession() for _ in range(length)]
| apache-2.0 | -1,915,675,048,806,550,000 | 23.196133 | 112 | 0.669483 | false |
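A hedged usage sketch for the resource above. The `nitro_service` login/logout flow is the SDK's usual pattern, but the host, credentials and filter value here are placeholders:

from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
from nssrc.com.citrix.netscaler.nitro.resource.config.aaa.aaasession import aaasession

# Placeholder NetScaler address and credentials.
client = nitro_service("192.0.2.10", "https")
client.login("nsroot", "password")

# Fetch sessions matching a filter (syntax: "attr:value,attr:value").
sessions = aaasession.get_filtered(client, "username:bob")

# Terminate every active AAA-TM/VPN session.
killreq = aaasession()
killreq.all = True
aaasession.kill(client, killreq)

client.logout()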
pschmitt/home-assistant | homeassistant/components/verisure/alarm_control_panel.py | 9 | 3404 | """Support for Verisure alarm control panels."""
import logging
from time import sleep
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
)
from . import CONF_ALARM, CONF_CODE_DIGITS, CONF_GIID, HUB as hub
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Verisure platform."""
alarms = []
if int(hub.config.get(CONF_ALARM, 1)):
hub.update_overview()
alarms.append(VerisureAlarm())
add_entities(alarms)
def set_arm_state(state, code=None):
"""Send set arm state command."""
transaction_id = hub.session.set_arm_state(code, state)[
"armStateChangeTransactionId"
]
_LOGGER.info("verisure set arm state %s", state)
transaction = {}
while "result" not in transaction:
sleep(0.5)
transaction = hub.session.get_arm_state_transaction(transaction_id)
hub.update_overview(no_throttle=True)
class VerisureAlarm(alarm.AlarmControlPanelEntity):
"""Representation of a Verisure alarm status."""
def __init__(self):
"""Initialize the Verisure alarm panel."""
self._state = None
self._digits = hub.config.get(CONF_CODE_DIGITS)
self._changed_by = None
@property
def name(self):
"""Return the name of the device."""
giid = hub.config.get(CONF_GIID)
if giid is not None:
aliass = {i["giid"]: i["alias"] for i in hub.session.installations}
if giid in aliass.keys():
return "{} alarm".format(aliass[giid])
_LOGGER.error("Verisure installation giid not found: %s", giid)
return "{} alarm".format(hub.session.installations[0]["alias"])
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
@property
def code_format(self):
"""Return one or more digits/characters."""
return alarm.FORMAT_NUMBER
@property
def changed_by(self):
"""Return the last change triggered by."""
return self._changed_by
def update(self):
"""Update alarm status."""
hub.update_overview()
status = hub.get_first("$.armState.statusType")
if status == "DISARMED":
self._state = STATE_ALARM_DISARMED
elif status == "ARMED_HOME":
self._state = STATE_ALARM_ARMED_HOME
elif status == "ARMED_AWAY":
self._state = STATE_ALARM_ARMED_AWAY
elif status != "PENDING":
_LOGGER.error("Unknown alarm state %s", status)
self._changed_by = hub.get_first("$.armState.name")
def alarm_disarm(self, code=None):
"""Send disarm command."""
set_arm_state("DISARMED", code)
def alarm_arm_home(self, code=None):
"""Send arm home command."""
set_arm_state("ARMED_HOME", code)
def alarm_arm_away(self, code=None):
"""Send arm away command."""
set_arm_state("ARMED_AWAY", code)
| apache-2.0 | 7,620,519,112,160,614,000 | 30.229358 | 79 | 0.622797 | false |
Kingclove/ChannelAPI-Demo | server/lib/itsdangerous.py | 296 | 30509 | # -*- coding: utf-8 -*-
"""
itsdangerous
~~~~~~~~~~~~
A module that implements various functions to deal with untrusted
sources. Mainly useful for web applications.
:copyright: (c) 2011 by Armin Ronacher and the Django Software Foundation.
:license: BSD, see LICENSE for more details.
"""
import sys
import hmac
import zlib
import time
import base64
import hashlib
import operator
from datetime import datetime
PY2 = sys.version_info[0] == 2
if PY2:
from itertools import izip
text_type = unicode
int_to_byte = chr
number_types = (int, long, float)
else:
from functools import reduce
izip = zip
text_type = str
int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
number_types = (int, float)
try:
import simplejson as json
except ImportError:
import json
class _CompactJSON(object):
"""Wrapper around simplejson that strips whitespace.
"""
def loads(self, payload):
return json.loads(payload)
def dumps(self, obj):
return json.dumps(obj, separators=(',', ':'))
compact_json = _CompactJSON()
# 2011/01/01 in UTC
EPOCH = 1293840000
def want_bytes(s, encoding='utf-8', errors='strict'):
if isinstance(s, text_type):
s = s.encode(encoding, errors)
return s
def is_text_serializer(serializer):
"""Checks wheather a serializer generates text or binary."""
return isinstance(serializer.dumps({}), text_type)
# Starting with 3.3 the standard library has a c-implementation for
# constant time string compares.
_builtin_constant_time_compare = getattr(hmac, 'compare_digest', None)
def constant_time_compare(val1, val2):
"""Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match. Do
not use this function for anything other than comparison with known
length targets.
This should be implemented in C in order to get it completely right.
"""
if _builtin_constant_time_compare is not None:
return _builtin_constant_time_compare(val1, val2)
len_eq = len(val1) == len(val2)
if len_eq:
result = 0
left = val1
else:
result = 1
left = val2
for x, y in izip(bytearray(left), bytearray(val2)):
result |= x ^ y
return result == 0
class BadData(Exception):
"""Raised if bad data of any sort was encountered. This is the
base for all exceptions that itsdangerous is currently using.
.. versionadded:: 0.15
"""
message = None
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
def __str__(self):
return text_type(self.message)
if PY2:
__unicode__ = __str__
def __str__(self):
return self.__unicode__().encode('utf-8')
class BadPayload(BadData):
"""This error is raised in situations when payload is loaded without
checking the signature first and an exception happend as a result of
that. The original exception that caused that will be stored on the
exception as :attr:`original_error`.
.. versionadded:: 0.15
"""
def __init__(self, message, original_error=None):
BadData.__init__(self, message)
#: If available, the error that indicates why the payload
#: was not valid. This might be `None`.
self.original_error = original_error
class BadSignature(BadData):
"""This error is raised if a signature does not match. As of
itsdangerous 0.14 there are helpful attributes on the exception
instances. You can also catch down the baseclass :exc:`BadData`.
"""
def __init__(self, message, payload=None):
BadData.__init__(self, message)
#: The payload that failed the signature test. In some
#: situations you might still want to inspect this, even if
#: you know it was tampered with.
#:
#: .. versionadded:: 0.14
self.payload = payload
class BadTimeSignature(BadSignature):
"""Raised for time based signatures that fail. This is a subclass
of :class:`BadSignature` so you can catch those down as well.
"""
def __init__(self, message, payload=None, date_signed=None):
BadSignature.__init__(self, message, payload)
#: If the signature expired this exposes the date of when the
#: signature was created. This can be helpful in order to
#: tell the user how long ago a link went stale.
#:
#: .. versionadded:: 0.14
self.date_signed = date_signed
class SignatureExpired(BadTimeSignature):
"""Signature timestamp is older than required max_age. This is a
subclass of :exc:`BadTimeSignature` so you can use the baseclass for
catching the error.
"""
def base64_encode(string):
"""base64 encodes a single bytestring (and is tolerant to getting
called with a unicode string).
The resulting bytestring is safe for putting into URLs.
"""
string = want_bytes(string)
return base64.urlsafe_b64encode(string).strip(b'=')
def base64_decode(string):
"""base64 decodes a single bytestring (and is tolerant to getting
called with a unicode string).
The result is also a bytestring.
"""
string = want_bytes(string, encoding='ascii', errors='ignore')
return base64.urlsafe_b64decode(string + b'=' * (-len(string) % 4))
def int_to_bytes(num):
assert num >= 0
rv = []
while num:
rv.append(int_to_byte(num & 0xff))
num >>= 8
return b''.join(reversed(rv))
def bytes_to_int(bytestr):
return reduce(lambda a, b: a << 8 | b, bytearray(bytestr), 0)
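# e.g. int_to_bytes(0x010203) == b'\x01\x02\x03' and bytes_to_int() reverses
# it; the pair is used below to pack timestamps compactly before they are
# base64 encoded into signatures.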
class SigningAlgorithm(object):
"""Subclasses of `SigningAlgorithm` have to implement `get_signature` to
provide signature generation functionality.
"""
def get_signature(self, key, value):
"""Returns the signature for the given key and value"""
raise NotImplementedError()
def verify_signature(self, key, value, sig):
"""Verifies the given signature matches the expected signature"""
return constant_time_compare(sig, self.get_signature(key, value))
class NoneAlgorithm(SigningAlgorithm):
"""This class provides a algorithm that does not perform any signing and
returns an empty signature.
"""
def get_signature(self, key, value):
return b''
class HMACAlgorithm(SigningAlgorithm):
"""This class provides signature generation using HMACs."""
#: The digest method to use with the MAC algorithm. This defaults to sha1
#: but can be changed for any other function in the hashlib module.
default_digest_method = staticmethod(hashlib.sha1)
def __init__(self, digest_method=None):
if digest_method is None:
digest_method = self.default_digest_method
self.digest_method = digest_method
def get_signature(self, key, value):
mac = hmac.new(key, msg=value, digestmod=self.digest_method)
return mac.digest()
class Signer(object):
"""This class can sign bytes and unsign it and validate the signature
provided.
Salt can be used to namespace the hash, so that a signed string is only
valid for a given namespace. Leaving this at the default value or re-using
a salt value across different parts of your application where the same
signed value in one part can mean something different in another part
is a security risk.
See :ref:`the-salt` for an example of what the salt is doing and how you
can utilize it.
.. versionadded:: 0.14
`key_derivation` and `digest_method` were added as arguments to the
class constructor.
.. versionadded:: 0.18
`algorithm` was added as an argument to the class constructor.
"""
#: The digest method to use for the signer. This defaults to sha1 but can
#: be changed for any other function in the hashlib module.
#:
#: .. versionchanged:: 0.14
default_digest_method = staticmethod(hashlib.sha1)
#: Controls how the key is derived. The default is Django style
#: concatenation. Possible values are ``concat``, ``django-concat``
#: and ``hmac``. This is used for deriving a key from the secret key
#: with an added salt.
#:
#: .. versionadded:: 0.14
default_key_derivation = 'django-concat'
def __init__(self, secret_key, salt=None, sep='.', key_derivation=None,
digest_method=None, algorithm=None):
self.secret_key = want_bytes(secret_key)
self.sep = sep
self.salt = 'itsdangerous.Signer' if salt is None else salt
if key_derivation is None:
key_derivation = self.default_key_derivation
self.key_derivation = key_derivation
if digest_method is None:
digest_method = self.default_digest_method
self.digest_method = digest_method
if algorithm is None:
algorithm = HMACAlgorithm(self.digest_method)
self.algorithm = algorithm
def derive_key(self):
"""This method is called to derive the key. If you're unhappy with
the default key derivation choices you can override them here.
Keep in mind that the key derivation in itsdangerous is not intended
to be used as a security method to make a complex key out of a short
password. Instead you should use large random secret keys.
"""
salt = want_bytes(self.salt)
if self.key_derivation == 'concat':
return self.digest_method(salt + self.secret_key).digest()
elif self.key_derivation == 'django-concat':
return self.digest_method(salt + b'signer' +
self.secret_key).digest()
elif self.key_derivation == 'hmac':
mac = hmac.new(self.secret_key, digestmod=self.digest_method)
mac.update(salt)
return mac.digest()
elif self.key_derivation == 'none':
return self.secret_key
else:
raise TypeError('Unknown key derivation method')
def get_signature(self, value):
"""Returns the signature for the given value"""
value = want_bytes(value)
key = self.derive_key()
sig = self.algorithm.get_signature(key, value)
return base64_encode(sig)
def sign(self, value):
"""Signs the given string."""
return value + want_bytes(self.sep) + self.get_signature(value)
def verify_signature(self, value, sig):
"""Verifies the signature for the given value."""
key = self.derive_key()
sig = base64_decode(sig)
return self.algorithm.verify_signature(key, value, sig)
def unsign(self, signed_value):
"""Unsigns the given string."""
signed_value = want_bytes(signed_value)
sep = want_bytes(self.sep)
if sep not in signed_value:
raise BadSignature('No %r found in value' % self.sep)
value, sig = signed_value.rsplit(sep, 1)
if self.verify_signature(value, sig):
return value
raise BadSignature('Signature %r does not match' % sig,
payload=value)
def validate(self, signed_value):
"""Just validates the given signed value. Returns `True` if the
signature exists and is valid, `False` otherwise."""
try:
self.unsign(signed_value)
return True
except BadSignature:
return False
class TimestampSigner(Signer):
"""Works like the regular :class:`Signer` but also records the time
of the signing and can be used to expire signatures. The unsign
method can rause a :exc:`SignatureExpired` method if the unsigning
failed because the signature is expired. This exception is a subclass
of :exc:`BadSignature`.
"""
def get_timestamp(self):
"""Returns the current timestamp. This implementation returns the
seconds since 1/1/2011. The function must return an integer.
"""
return int(time.time() - EPOCH)
def timestamp_to_datetime(self, ts):
"""Used to convert the timestamp from `get_timestamp` into a
datetime object.
"""
return datetime.utcfromtimestamp(ts + EPOCH)
def sign(self, value):
"""Signs the given string and also attaches a time information."""
value = want_bytes(value)
timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
sep = want_bytes(self.sep)
value = value + sep + timestamp
return value + sep + self.get_signature(value)
def unsign(self, value, max_age=None, return_timestamp=False):
"""Works like the regular :meth:`~Signer.unsign` but can also
validate the time. See the base docstring of the class for
the general behavior. If `return_timestamp` is set to `True`
the timestamp of the signature will be returned as naive
:class:`datetime.datetime` object in UTC.
"""
try:
result = Signer.unsign(self, value)
sig_error = None
except BadSignature as e:
sig_error = e
result = e.payload or b''
sep = want_bytes(self.sep)
# If there is no timestamp in the result there is something
# seriously wrong. In case there was a signature error, we raise
# that one directly; otherwise we are in a weird situation that we
# shouldn't get into unless someone uses a time-based serializer
# on non-timestamp data, so catch that.
if not sep in result:
if sig_error:
raise sig_error
raise BadTimeSignature('timestamp missing', payload=result)
value, timestamp = result.rsplit(sep, 1)
try:
timestamp = bytes_to_int(base64_decode(timestamp))
except Exception:
timestamp = None
# Signature is *not* okay. Raise a proper error now that we have
# split the value and the timestamp.
if sig_error is not None:
raise BadTimeSignature(text_type(sig_error), payload=value,
date_signed=timestamp)
# Signature was okay but the timestamp is actually not there or
# malformed. Should not happen, but well. We handle it nonetheless
if timestamp is None:
raise BadTimeSignature('Malformed timestamp', payload=value)
# Check timestamp is not older than max_age
if max_age is not None:
age = self.get_timestamp() - timestamp
if age > max_age:
raise SignatureExpired(
'Signature age %s > %s seconds' % (age, max_age),
payload=value,
date_signed=self.timestamp_to_datetime(timestamp))
if return_timestamp:
return value, self.timestamp_to_datetime(timestamp)
return value
def validate(self, signed_value, max_age=None):
"""Just validates the given signed value. Returns `True` if the
signature exists and is valid, `False` otherwise."""
try:
self.unsign(signed_value, max_age=max_age)
return True
except BadSignature:
return False
class Serializer(object):
"""This class provides a serialization interface on top of the
signer. It provides a similar API to json/pickle and other modules but is
slightly differently structured internally. If you want to change the
underlying implementation for parsing and loading you have to override the
:meth:`load_payload` and :meth:`dump_payload` functions.
This implementation uses simplejson if available for dumping and loading
and will fall back to the standard library's json module if it's not
available.
Starting with 0.14 you do not need to subclass this class in order to
switch out or customize the :class:`Signer`. You can instead also pass a
different class to the constructor as well as keyword arguments as
dictionary that should be forwarded::
s = Serializer(signer_kwargs={'key_derivation': 'hmac'})
.. versionchanged:: 0.14:
The `signer` and `signer_kwargs` parameters were added to the
constructor.
"""
#: If a serializer module or class is not passed to the constructor
#: this one is picked up. This currently defaults to :mod:`json`.
default_serializer = json
#: The default :class:`Signer` class that is being used by this
#: serializer.
#:
#: .. versionadded:: 0.14
default_signer = Signer
def __init__(self, secret_key, salt=b'itsdangerous', serializer=None,
signer=None, signer_kwargs=None):
self.secret_key = want_bytes(secret_key)
self.salt = want_bytes(salt)
if serializer is None:
serializer = self.default_serializer
self.serializer = serializer
self.is_text_serializer = is_text_serializer(serializer)
if signer is None:
signer = self.default_signer
self.signer = signer
self.signer_kwargs = signer_kwargs or {}
def load_payload(self, payload, serializer=None):
"""Loads the encoded object. This function raises :class:`BadPayload`
if the payload is not valid. The `serializer` parameter can be used to
override the serializer stored on the class. The encoded payload is
always byte based.
"""
if serializer is None:
serializer = self.serializer
is_text = self.is_text_serializer
else:
is_text = is_text_serializer(serializer)
try:
if is_text:
payload = payload.decode('utf-8')
return serializer.loads(payload)
except Exception as e:
raise BadPayload('Could not load the payload because an '
'exception occurred on unserializing the data',
original_error=e)
def dump_payload(self, obj):
"""Dumps the encoded object. The return value is always a
bytestring. If the internal serializer is text based the value
will automatically be encoded to utf-8.
"""
return want_bytes(self.serializer.dumps(obj))
def make_signer(self, salt=None):
"""A method that creates a new instance of the signer to be used.
The default implementation uses the :class:`Signer` baseclass.
"""
if salt is None:
salt = self.salt
return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)
def dumps(self, obj, salt=None):
"""Returns a signed string serialized with the internal serializer.
The return value can be either a byte or unicode string depending
on the format of the internal serializer.
"""
payload = want_bytes(self.dump_payload(obj))
rv = self.make_signer(salt).sign(payload)
if self.is_text_serializer:
rv = rv.decode('utf-8')
return rv
def dump(self, obj, f, salt=None):
"""Like :meth:`dumps` but dumps into a file. The file handle has
to be compatible with what the internal serializer expects.
"""
f.write(self.dumps(obj, salt))
def loads(self, s, salt=None):
"""Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
signature validation fails.
"""
s = want_bytes(s)
return self.load_payload(self.make_signer(salt).unsign(s))
def load(self, f, salt=None):
"""Like :meth:`loads` but loads from a file."""
return self.loads(f.read(), salt)
def loads_unsafe(self, s, salt=None):
"""Like :meth:`loads` but without verifying the signature. This is
potentially very dangerous to use depending on how your serializer
works. The return value is ``(signature_okay, payload)`` instead of
just the payload. The first item will be a boolean that indicates
if the signature is okay (``True``) or if it failed. This function
never fails.
Use it for debugging only and if you know that your serializer module
is not exploitable (eg: do not use it with a pickle serializer).
.. versionadded:: 0.15
"""
return self._loads_unsafe_impl(s, salt)
def _loads_unsafe_impl(self, s, salt, load_kwargs=None,
load_payload_kwargs=None):
"""Lowlevel helper function to implement :meth:`loads_unsafe` in
serializer subclasses.
"""
try:
return True, self.loads(s, salt=salt, **(load_kwargs or {}))
except BadSignature as e:
if e.payload is None:
return False, None
try:
return False, self.load_payload(e.payload,
**(load_payload_kwargs or {}))
except BadPayload:
return False, None
def load_unsafe(self, f, *args, **kwargs):
"""Like :meth:`loads_unsafe` but loads from a file.
.. versionadded:: 0.15
"""
return self.loads_unsafe(f.read(), *args, **kwargs)
class TimedSerializer(Serializer):
"""Uses the :class:`TimestampSigner` instead of the default
:class:`Signer`.
"""
default_signer = TimestampSigner
def loads(self, s, max_age=None, return_timestamp=False, salt=None):
"""Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
signature validation fails. If a `max_age` is provided it will
ensure the signature is not older than that time in seconds. In
case the signature is outdated, :exc:`SignatureExpired` is raised
which is a subclass of :exc:`BadSignature`. All arguments are
forwarded to the signer's :meth:`~TimestampSigner.unsign` method.
"""
base64d, timestamp = self.make_signer(salt) \
.unsign(s, max_age, return_timestamp=True)
payload = self.load_payload(base64d)
if return_timestamp:
return payload, timestamp
return payload
def loads_unsafe(self, s, max_age=None, salt=None):
load_kwargs = {'max_age': max_age}
load_payload_kwargs = {}
return self._loads_unsafe_impl(s, salt, load_kwargs, load_payload_kwargs)
class JSONWebSignatureSerializer(Serializer):
"""This serializer implements JSON Web Signature (JWS) support. Only
supports the JWS Compact Serialization.
"""
jws_algorithms = {
'HS256': HMACAlgorithm(hashlib.sha256),
'HS384': HMACAlgorithm(hashlib.sha384),
'HS512': HMACAlgorithm(hashlib.sha512),
'none': NoneAlgorithm(),
}
#: The default algorithm to use for signature generation
default_algorithm = 'HS256'
default_serializer = compact_json
def __init__(self, secret_key, salt=None, serializer=None,
signer=None, signer_kwargs=None, algorithm_name=None):
Serializer.__init__(self, secret_key, salt, serializer,
signer, signer_kwargs)
if algorithm_name is None:
algorithm_name = self.default_algorithm
self.algorithm_name = algorithm_name
self.algorithm = self.make_algorithm(algorithm_name)
def load_payload(self, payload, return_header=False):
payload = want_bytes(payload)
if b'.' not in payload:
raise BadPayload('No "." found in value')
base64d_header, base64d_payload = payload.split(b'.', 1)
try:
json_header = base64_decode(base64d_header)
json_payload = base64_decode(base64d_payload)
except Exception as e:
raise BadPayload('Could not base64 decode the payload because of '
'an exception', original_error=e)
header = Serializer.load_payload(self, json_header,
serializer=json)
if not isinstance(header, dict):
raise BadPayload('Header payload is not a JSON object')
payload = Serializer.load_payload(self, json_payload)
if return_header:
return payload, header
return payload
def dump_payload(self, header, obj):
base64d_header = base64_encode(self.serializer.dumps(header))
base64d_payload = base64_encode(self.serializer.dumps(obj))
return base64d_header + b'.' + base64d_payload
def make_algorithm(self, algorithm_name):
try:
return self.jws_algorithms[algorithm_name]
except KeyError:
raise NotImplementedError('Algorithm not supported')
def make_signer(self, salt=None, algorithm=None):
if salt is None:
salt = self.salt
key_derivation = 'none' if salt is None else None
if algorithm is None:
algorithm = self.algorithm
return self.signer(self.secret_key, salt=salt, sep='.',
key_derivation=key_derivation, algorithm=algorithm)
def make_header(self, header_fields):
header = header_fields.copy() if header_fields else {}
header['alg'] = self.algorithm_name
return header
def dumps(self, obj, salt=None, header_fields=None):
"""Like :meth:`~Serializer.dumps` but creates a JSON Web Signature. It
also allows for specifying additional fields to be included in the JWS
Header.
"""
header = self.make_header(header_fields)
signer = self.make_signer(salt, self.algorithm)
return signer.sign(self.dump_payload(header, obj))
def loads(self, s, salt=None, return_header=False):
"""Reverse of :meth:`dumps`. If requested via `return_header` it will
return a tuple of payload and header.
"""
payload, header = self.load_payload(
self.make_signer(salt, self.algorithm).unsign(want_bytes(s)),
return_header=True)
if header.get('alg') != self.algorithm_name:
raise BadSignature('Algorithm mismatch')
if return_header:
return payload, header
return payload
def loads_unsafe(self, s, salt=None, return_header=False):
kwargs = {'return_header': return_header}
return self._loads_unsafe_impl(s, salt, kwargs, kwargs)
class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer):
"""Works like the regular :class:`JSONWebSignatureSerializer` but also
records the time of the signing and can be used to expire signatures.
JWS currently does not specify this behavior but it mentions a possibility
extension like this in the spec. Expiry date is encoded into the header
similarly to what is specified in `draft-ietf-oauth-json-web-token
<http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#expDef>`_.
The unsign method can raise a :exc:`SignatureExpired` exception if the
unsigning failed because the signature is expired. This exception is a
subclass of :exc:`BadSignature`.
"""
DEFAULT_EXPIRES_IN = 3600
def __init__(self, secret_key, expires_in=None, **kwargs):
JSONWebSignatureSerializer.__init__(self, secret_key, **kwargs)
if expires_in is None:
expires_in = self.DEFAULT_EXPIRES_IN
self.expires_in = expires_in
def make_header(self, header_fields):
header = JSONWebSignatureSerializer.make_header(self, header_fields)
iat = self.now()
exp = iat + self.expires_in
header['iat'] = iat
header['exp'] = exp
return header
def loads(self, s, salt=None, return_header=False):
payload, header = JSONWebSignatureSerializer.loads(
self, s, salt, return_header=True)
if 'exp' not in header:
raise BadSignature('Missing expiry date', payload=payload)
if not (isinstance(header['exp'], number_types)
and header['exp'] > 0):
raise BadSignature('expiry date is not an IntDate',
payload=payload)
if header['exp'] < self.now():
raise SignatureExpired('Signature expired', payload=payload,
date_signed=self.get_issue_date(header))
if return_header:
return payload, header
return payload
def get_issue_date(self, header):
rv = header.get('iat')
if isinstance(rv, number_types):
return datetime.utcfromtimestamp(int(rv))
def now(self):
return int(time.time())
class URLSafeSerializerMixin(object):
"""Mixed in with a regular serializer it will attempt to zlib compress
the string to make it shorter if necessary. It will also base64 encode
the string so that it can safely be placed in a URL.
"""
def load_payload(self, payload):
decompress = False
if payload.startswith(b'.'):
payload = payload[1:]
decompress = True
try:
json = base64_decode(payload)
except Exception as e:
raise BadPayload('Could not base64 decode the payload because of '
'an exception', original_error=e)
if decompress:
try:
json = zlib.decompress(json)
except Exception as e:
raise BadPayload('Could not zlib decompress the payload before '
'decoding the payload', original_error=e)
return super(URLSafeSerializerMixin, self).load_payload(json)
def dump_payload(self, obj):
json = super(URLSafeSerializerMixin, self).dump_payload(obj)
is_compressed = False
compressed = zlib.compress(json)
if len(compressed) < (len(json) - 1):
json = compressed
is_compressed = True
base64d = base64_encode(json)
if is_compressed:
base64d = b'.' + base64d
return base64d
class URLSafeSerializer(URLSafeSerializerMixin, Serializer):
"""Works like :class:`Serializer` but dumps and loads into a URL
safe string consisting of the upper and lowercase character of the
alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
"""
default_serializer = compact_json
class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer):
"""Works like :class:`TimedSerializer` but dumps and loads into a URL
safe string consisting of the upper and lowercase character of the
alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
"""
default_serializer = compact_json
| apache-2.0 | -6,719,257,700,779,446,000 | 35.581535 | 81 | 0.633256 | false |
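A short usage sketch of the timed, URL-safe serializer defined above (the secret key and salt are placeholders):

from itsdangerous import URLSafeTimedSerializer, SignatureExpired, BadSignature

s = URLSafeTimedSerializer('secret-key', salt='activate-account')
token = s.dumps({'user_id': 42})

try:
    data = s.loads(token, max_age=3600)   # reject tokens older than one hour
except SignatureExpired:
    print('token expired')
except BadSignature:
    print('token tampered with or malformed')
else:
    print('ok:', data['user_id'])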
rruebner/odoo | addons/claim_from_delivery/__init__.py | 374 | 1053 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_picking
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,106,425,932,407,991,400 | 42.875 | 79 | 0.614435 | false |
leonevo/euao | tornadows/xmltypes.py | 1 | 6469 | #!/usr/bin/env python
#
# Copyright 2011 Rodrigo Ancavil del Pino
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module implements the primitive datatypes defined by XML.
Array is defined for the use of arrays of elements and their respective datatype.
"""
import inspect
from tornadows import complextypes
def createElementXML(name,type,prefix='xsd'):
""" Function used for the creation of xml elements. """
return b'<%s:element name="%s" type="%s:%s"/>'%(prefix,name,prefix,type)
def createArrayXML(name,type,prefix='xsd',maxoccurs=None):
""" Function used for the creation of xml complexElements """
complexType = b'<%s:complexType name="%sParams">\n'%(prefix,name)
complexType += b'<%s:sequence>\n'%prefix
if maxoccurs == None:
complexType += b'<%s:element name="value" type="%s:%s" maxOccurs="unbounded"/>\n'%(prefix,prefix,type)
else:
complexType += b'<%s:element name="value" type="%s:%s" maxOccurs="%d"/>\n'%(prefix,prefix,type,maxoccurs)
complexType += b'</%s:sequence>\n'%prefix
complexType += b'</%s:complexType>\n'%prefix
complexType += b'<%s:element name="%s" type="tns:%sParams"/>\n'%(prefix,name,name)
return complexType
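# Editor's note: illustrative helper, not part of the original source.  For
# createArrayXML('ids', 'integer') the generated schema fragment is:
#   <xsd:complexType name="idsParams">
#   <xsd:sequence>
#   <xsd:element name="value" type="xsd:integer" maxOccurs="unbounded"/>
#   </xsd:sequence>
#   </xsd:complexType>
#   <xsd:element name="ids" type="tns:idsParams"/>
def _demo_array_xml():
    return createArrayXML('ids', 'integer')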
class Array:
""" Create arrays of xml elements.
Here an example:
@webservices(_params=xmltypes.Array(xmltypes.Integer),_returns=xmltypes.Integer)
def function(sefl, list_of_elements):
for e in list_of_elements:
# Do something with the element
return len(list_of_elements)
xmltypes.Array(xmltype.Integer) generate an xml element into schema definition:
<xsd:element name="arrayOfElement" type="xsd:integer" maxOccurs="unbounded"/>
this make the parameter of the function list_of_elements is a python list.
if you specify xmltypes.Array(xmltypes.Integer,10), is generated:
<xsd:element name="arrayOfElement" type="xsd:integer" maxOccurs="10"/>
"""
def __init__(self,type,maxOccurs=None):
self._type = type
self._n = maxOccurs
def createArray(self,name):
type = None
if inspect.isclass(self._type) and not issubclass(self._type,PrimitiveType):
type = complextypes.createPythonType2XMLType(self._type.__name__)
else:
type = self._type.getType(self._type)
return createArrayXML(name,type,'xsd',self._n)
def createType(self,name):
prefix = 'xsd'
type = None
if inspect.isclass(self._type) and not issubclass(self._type,PrimitiveType):
type = complextypes.createPythonType2XMLType(self._type.__name__)
else:
type = self._type.getType(self._type)
maxoccurs = self._n
complexType = b''
if self._n == None:
complexType += b'<%s:element name="%s" type="%s:%s" maxOccurs="unbounded"/>\n'%(prefix,name,prefix,type)
else:
complexType += b'<%s:element name="%s" type="%s:%s" maxOccurs="%d"/>\n'%(prefix,name,prefix,type,maxoccurs)
return complexType
def genType(self,v):
value = None
if inspect.isclass(self._type) and issubclass(self._type,PrimitiveType):
value = self._type.genType(v)
elif hasattr(self._type,'__name__'):
value = complextypes.convert(self._type.__name__,v)
# Convert str to bool
if value == 'true':
value = True
elif value == 'false':
value = False
return value
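# Editor's note: illustrative helper, not part of the original source; genType
# converts the raw string received in a request into the Python type declared
# for the array elements.
def _demo_array_gentype():
    arr = Array(Integer)      # unbounded array of xsd:integer
    return arr.genType('7')   # -> 7 (int)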
class PrimitiveType:
""" Class father for all derived types. """
pass
class Integer(PrimitiveType):
""" 1. XML primitive type : integer """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'integer')
@staticmethod
def getType(self):
return 'integer'
@classmethod
def genType(self,v):
return int(v)
class Decimal(PrimitiveType):
""" 2. XML primitive type : decimal """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'decimal')
@staticmethod
def getType(self):
return 'decimal'
@classmethod
def genType(self,v):
return float(v)
class Double(PrimitiveType):
""" 3. XML primitive type : double """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'double')
@staticmethod
def getType(self):
return 'double'
@classmethod
def genType(self,v):
return float(v)
class Float(PrimitiveType):
""" 4. XML primitive type : float """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'float')
@staticmethod
def getType(self):
return 'float'
@classmethod
def genType(self,v):
return float(v)
class Duration(PrimitiveType):
""" 5. XML primitive type : duration """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'duration')
@staticmethod
def getType(self):
return 'duration'
@classmethod
def genType(self,v):
return str(v)
class Date(PrimitiveType):
""" 6. XML primitive type : date """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'date')
@staticmethod
def getType(self):
return 'date'
@classmethod
def genType(self,v):
return str(v)
class Time(PrimitiveType):
""" 7. XML primitive type : time """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'time')
@staticmethod
def getType(self):
return 'time'
@classmethod
def genType(self,v):
return str(v)
class DateTime(PrimitiveType):
""" 8. XML primitive type : dateTime """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'dateTime')
@staticmethod
def getType(self):
return 'dateTime'
@classmethod
def genType(self,v):
return str(v)
class String(PrimitiveType):
""" 9. XML primitive type : string """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'string')
@staticmethod
def getType(self):
return 'string'
@classmethod
def genType(self,v):
return str(v)
class Boolean(PrimitiveType):
""" 10. XML primitive type : boolean """
@staticmethod
def createElement(name,prefix='xsd'):
return createElementXML(name,'boolean')
@staticmethod
def getType(self):
return 'boolean'
@classmethod
def genType(self,v):
return str(v).lower()
| apache-2.0 | 6,267,989,904,934,680,000 | 28.008969 | 110 | 0.710156 | false |
CXQERP/ODOOERP | addons/note/note.py | 223 | 8893 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import html2plaintext
class note_stage(osv.osv):
""" Category of Note """
_name = "note.stage"
_description = "Note Stage"
_columns = {
'name': fields.char('Stage Name', translate=True, required=True),
'sequence': fields.integer('Sequence', help="Used to order the note stages"),
'user_id': fields.many2one('res.users', 'Owner', help="Owner of the note stage.", required=True, ondelete='cascade'),
'fold': fields.boolean('Folded by Default'),
}
_order = 'sequence asc'
_defaults = {
'fold': 0,
'user_id': lambda self, cr, uid, ctx: uid,
'sequence' : 1,
}
class note_tag(osv.osv):
_name = "note.tag"
_description = "Note Tag"
_columns = {
'name' : fields.char('Tag Name', required=True),
}
class note_note(osv.osv):
""" Note """
_name = 'note.note'
_inherit = ['mail.thread']
_description = "Note"
#writing method (no modification of values)
def name_create(self, cr, uid, name, context=None):
rec_id = self.create(cr, uid, {'memo': name}, context=context)
return self.name_get(cr, uid, [rec_id], context)[0]
    #read the first line (convert html into text)
def _get_note_first_line(self, cr, uid, ids, name="", args={}, context=None):
res = {}
for note in self.browse(cr, uid, ids, context=context):
res[note.id] = (note.memo and html2plaintext(note.memo) or "").strip().replace('*','').split("\n")[0]
return res
def onclick_note_is_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'open': False, 'date_done': fields.date.today()}, context=context)
def onclick_note_not_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'open': True}, context=context)
#return the default stage for the uid user
def _get_default_stage_id(self,cr,uid,context=None):
ids = self.pool.get('note.stage').search(cr,uid,[('user_id','=',uid)], context=context)
return ids and ids[0] or False
def _set_stage_per_user(self, cr, uid, id, name, value, args=None, context=None):
note = self.browse(cr, uid, id, context=context)
if not value: return False
stage_ids = [value] + [stage.id for stage in note.stage_ids if stage.user_id.id != uid ]
return self.write(cr, uid, [id], {'stage_ids': [(6, 0, set(stage_ids))]}, context=context)
def _get_stage_per_user(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for record in self.browse(cr, uid, ids, context=context):
for stage in record.stage_ids:
if stage.user_id.id == uid:
result[record.id] = stage.id
return result
_columns = {
'name': fields.function(_get_note_first_line,
string='Note Summary',
type='text', store=True),
'user_id': fields.many2one('res.users', 'Owner'),
'memo': fields.html('Note Content'),
'sequence': fields.integer('Sequence'),
'stage_id': fields.function(_get_stage_per_user,
fnct_inv=_set_stage_per_user,
string='Stage',
type='many2one',
relation='note.stage'),
'stage_ids': fields.many2many('note.stage','note_stage_rel','note_id','stage_id','Stages of Users'),
'open': fields.boolean('Active', track_visibility='onchange'),
'date_done': fields.date('Date done'),
'color': fields.integer('Color Index'),
'tag_ids' : fields.many2many('note.tag','note_tags_rel','note_id','tag_id','Tags'),
}
_defaults = {
'user_id': lambda self, cr, uid, ctx=None: uid,
'open' : 1,
'stage_id' : _get_default_stage_id,
}
_order = 'sequence'
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
if groupby and groupby[0]=="stage_id":
#search all stages
current_stage_ids = self.pool.get('note.stage').search(cr,uid,[('user_id','=',uid)], context=context)
if current_stage_ids: #if the user have some stages
stages = self.pool['note.stage'].browse(cr, uid, current_stage_ids, context=context)
result = [{ #notes by stage for stages user
'__context': {'group_by': groupby[1:]},
'__domain': domain + [('stage_ids.id', '=', stage.id)],
'stage_id': (stage.id, stage.name),
'stage_id_count': self.search(cr,uid, domain+[('stage_ids', '=', stage.id)], context=context, count=True),
'__fold': stage.fold,
} for stage in stages]
#note without user's stage
nb_notes_ws = self.search(cr,uid, domain+[('stage_ids', 'not in', current_stage_ids)], context=context, count=True)
if nb_notes_ws:
# add note to the first column if it's the first stage
dom_not_in = ('stage_ids', 'not in', current_stage_ids)
if result and result[0]['stage_id'][0] == current_stage_ids[0]:
dom_in = result[0]['__domain'].pop()
result[0]['__domain'] = domain + ['|', dom_in, dom_not_in]
result[0]['stage_id_count'] += nb_notes_ws
else:
# add the first stage column
result = [{
'__context': {'group_by': groupby[1:]},
'__domain': domain + [dom_not_in],
'stage_id': (stages[0].id, stages[0].name),
'stage_id_count':nb_notes_ws,
                            '__fold': stages[0].fold,
}] + result
else: # if stage_ids is empty
#note without user's stage
nb_notes_ws = self.search(cr,uid, domain, context=context, count=True)
if nb_notes_ws:
result = [{ #notes for unknown stage
'__context': {'group_by': groupby[1:]},
'__domain': domain,
'stage_id': False,
'stage_id_count':nb_notes_ws
}]
else:
result = []
return result
else:
return super(note_note, self).read_group(cr, uid, domain, fields, groupby,
offset=offset, limit=limit, context=context, orderby=orderby,lazy=lazy)
#upgrade config settings page to configure pad, fancy and tags mode
class note_base_config_settings(osv.osv_memory):
_inherit = 'base.config.settings'
_columns = {
'module_note_pad': fields.boolean('Use collaborative pads (etherpad)'),
'group_note_fancy': fields.boolean('Use fancy layouts for notes', implied_group='note.group_note_fancy'),
}
class res_users(osv.Model):
_name = 'res.users'
_inherit = ['res.users']
def create(self, cr, uid, data, context=None):
user_id = super(res_users, self).create(cr, uid, data, context=context)
note_obj = self.pool['note.stage']
data_obj = self.pool['ir.model.data']
is_employee = self.has_group(cr, user_id, 'base.group_user')
if is_employee:
for n in range(5):
xmlid = 'note_stage_%02d' % (n,)
try:
_model, stage_id = data_obj.get_object_reference(cr, SUPERUSER_ID, 'note', xmlid)
except ValueError:
continue
note_obj.copy(cr, SUPERUSER_ID, stage_id, default={'user_id': user_id}, context=context)
return user_id
| agpl-3.0 | -8,391,852,542,603,725,000 | 43.688442 | 131 | 0.547959 | false |
sivaprakashniet/push_pull | p2p/lib/python2.7/site-packages/kombu/tests/transport/virtual/test_scheduling.py | 38 | 1835 | from __future__ import absolute_import
from kombu.transport.virtual.scheduling import FairCycle
from kombu.tests.case import Case
class MyEmpty(Exception):
pass
def consume(fun, n):
r = []
for i in range(n):
r.append(fun())
return r
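# Editor's note (not in the original test module): a sketch of the semantics
# exercised below, inferred from these tests.  FairCycle(fun, resources,
# predicate) round-robins over `resources`; each `cycle.get()` returns
# (fun(resource), resource), skips any resource whose call raises the
# configured "empty" exception, and lets that exception propagate only once
# every resource has raised it.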
class test_FairCycle(Case):
def test_cycle(self):
resources = ['a', 'b', 'c', 'd', 'e']
def echo(r, timeout=None):
return r
# cycle should be ['a', 'b', 'c', 'd', 'e', ... repeat]
cycle = FairCycle(echo, resources, MyEmpty)
for i in range(len(resources)):
self.assertEqual(cycle.get(), (resources[i],
resources[i]))
for i in range(len(resources)):
self.assertEqual(cycle.get(), (resources[i],
resources[i]))
def test_cycle_breaks(self):
resources = ['a', 'b', 'c', 'd', 'e']
def echo(r):
if r == 'c':
raise MyEmpty(r)
return r
cycle = FairCycle(echo, resources, MyEmpty)
self.assertEqual(
consume(cycle.get, len(resources)),
[('a', 'a'), ('b', 'b'), ('d', 'd'),
('e', 'e'), ('a', 'a')],
)
self.assertEqual(
consume(cycle.get, len(resources)),
[('b', 'b'), ('d', 'd'), ('e', 'e'),
('a', 'a'), ('b', 'b')],
)
cycle2 = FairCycle(echo, ['c', 'c'], MyEmpty)
with self.assertRaises(MyEmpty):
consume(cycle2.get, 3)
def test_cycle_no_resources(self):
cycle = FairCycle(None, [], MyEmpty)
cycle.pos = 10
with self.assertRaises(MyEmpty):
cycle._next()
def test__repr__(self):
self.assertTrue(repr(FairCycle(lambda x: x, [1, 2, 3], MyEmpty)))
| bsd-3-clause | -2,505,846,133,721,004,500 | 26.38806 | 73 | 0.480109 | false |
J861449197/edx-platform | lms/djangoapps/staticbook/views.py | 91 | 6351 | """
Views for serving static textbooks.
"""
from django.contrib.auth.decorators import login_required
from django.http import Http404
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.annotator_token import retrieve_token
from courseware.access import has_access
from courseware.courses import get_course_with_access
from notes.utils import notes_enabled_for_course
from static_replace import replace_static_urls
@login_required
def index(request, course_id, book_index, page=None):
"""
Serve static image-based textbooks.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.textbooks[book_index]
table_of_contents = textbook.table_of_contents
if page is None:
page = textbook.start_page
return render_to_response(
'staticbook.html',
{
'book_index': book_index, 'page': int(page),
'course': course,
'book_url': textbook.book_url,
'table_of_contents': table_of_contents,
'start_page': textbook.start_page,
'end_page': textbook.end_page,
'staff_access': staff_access,
},
)
def remap_static_url(original_url, course):
"""Remap a URL in the ways the course requires."""
# Ick: this should be possible without having to quote and unquote the URL...
input_url = "'" + original_url + "'"
output_url = replace_static_urls(
input_url,
getattr(course, 'data_dir', None),
course_id=course.id,
static_asset_path=course.static_asset_path
)
# strip off the quotes again...
return output_url[1:-1]
@login_required
def pdf_index(request, course_id, book_index, chapter=None, page=None):
"""
Display a PDF textbook.
course_id: course for which to display text. The course should have
"pdf_textbooks" property defined.
book index: zero-based index of which PDF textbook to display.
chapter: (optional) one-based index into the chapter array of textbook PDFs to display.
Defaults to first chapter. Specifying this assumes that there are separate PDFs for
each chapter in a textbook.
page: (optional) one-based page number to display within the PDF. Defaults to first page.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.pdf_textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.pdf_textbooks[book_index]
viewer_params = '&file='
current_url = ''
if 'url' in textbook:
textbook['url'] = remap_static_url(textbook['url'], course)
viewer_params += textbook['url']
current_url = textbook['url']
# then remap all the chapter URLs as well, if they are provided.
current_chapter = None
if 'chapters' in textbook:
for entry in textbook['chapters']:
entry['url'] = remap_static_url(entry['url'], course)
if chapter is not None:
current_chapter = textbook['chapters'][int(chapter) - 1]
else:
current_chapter = textbook['chapters'][0]
viewer_params += current_chapter['url']
current_url = current_chapter['url']
viewer_params += '#zoom=page-fit&disableRange=true'
if page is not None:
viewer_params += '&page={}'.format(page)
if request.GET.get('viewer', '') == 'true':
template = 'pdf_viewer.html'
else:
template = 'static_pdfbook.html'
return render_to_response(
template,
{
'book_index': book_index,
'course': course,
'textbook': textbook,
'chapter': chapter,
'page': page,
'viewer_params': viewer_params,
'current_chapter': current_chapter,
'staff_access': staff_access,
'current_url': current_url,
},
)
@login_required
def html_index(request, course_id, book_index, chapter=None):
"""
Display an HTML textbook.
course_id: course for which to display text. The course should have
"html_textbooks" property defined.
book index: zero-based index of which HTML textbook to display.
chapter: (optional) one-based index into the chapter array of textbook HTML files to display.
Defaults to first chapter. Specifying this assumes that there are separate HTML files for
each chapter in a textbook.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
notes_enabled = notes_enabled_for_course(course)
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.html_textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.html_textbooks[book_index]
if 'url' in textbook:
textbook['url'] = remap_static_url(textbook['url'], course)
# then remap all the chapter URLs as well, if they are provided.
if 'chapters' in textbook:
for entry in textbook['chapters']:
entry['url'] = remap_static_url(entry['url'], course)
student = request.user
return render_to_response(
'static_htmlbook.html',
{
'book_index': book_index,
'course': course,
'textbook': textbook,
'chapter': chapter,
'student': student,
'staff_access': staff_access,
'notes_enabled': notes_enabled,
'storage': course.annotation_storage_url,
'token': retrieve_token(student.email, course.annotation_token_secret),
},
)
| agpl-3.0 | 4,760,974,006,913,074,000 | 34.480447 | 98 | 0.641316 | false |
jcpowermac/ansible | lib/ansible/module_utils/facts/hardware/linux.py | 7 | 27299 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import errno
import glob
import json
import os
import re
import sys
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import bytes_to_human
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.utils import get_file_content, get_file_lines, get_mount_size
# import this as a module to ensure we get the same module instance
from ansible.module_utils.facts import timeout
def get_partition_uuid(partname):
try:
uuids = os.listdir("/dev/disk/by-uuid")
except OSError:
return
for uuid in uuids:
dev = os.path.realpath("/dev/disk/by-uuid/" + uuid)
if dev == ("/dev/" + partname):
return uuid
return None
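# Editor's note (not in the original module): e.g. get_partition_uuid('sda1')
# returns a UUID string such as '32caaec3-ef40-4691-a3b6-438c3f9bc1c0' when a
# /dev/disk/by-uuid symlink resolves to /dev/sda1, and None otherwise.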
class LinuxHardware(Hardware):
"""
Linux-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
In addition, it also defines number of DMI facts and device facts.
"""
platform = 'Linux'
# Originally only had these four as toplevelfacts
ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
# Now we have all of these in a dict structure
MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))
# regex used against findmnt output to detect bind mounts
BIND_MOUNT_RE = re.compile(r'.*\]')
# regex used against mtab content to find entries that are bind mounts
MTAB_BIND_MOUNT_RE = re.compile(r'.*bind.*"')
def populate(self, collected_facts=None):
hardware_facts = {}
cpu_facts = self.get_cpu_facts(collected_facts=collected_facts)
memory_facts = self.get_memory_facts()
dmi_facts = self.get_dmi_facts()
device_facts = self.get_device_facts()
uptime_facts = self.get_uptime_facts()
lvm_facts = self.get_lvm_facts()
mount_facts = {}
try:
mount_facts = self.get_mount_facts()
except timeout.TimeoutError:
pass
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(dmi_facts)
hardware_facts.update(device_facts)
hardware_facts.update(uptime_facts)
hardware_facts.update(lvm_facts)
hardware_facts.update(mount_facts)
return hardware_facts
def get_memory_facts(self):
memory_facts = {}
if not os.access("/proc/meminfo", os.R_OK):
return memory_facts
memstats = {}
for line in get_file_lines("/proc/meminfo"):
data = line.split(":", 1)
key = data[0]
if key in self.ORIGINAL_MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
memory_facts["%s_mb" % key.lower()] = int(val) // 1024
if key in self.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
memstats[key.lower()] = int(val) // 1024
if None not in (memstats.get('memtotal'), memstats.get('memfree')):
memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']
memory_facts['memory_mb'] = {
'real': {
'total': memstats.get('memtotal'),
'used': memstats.get('real:used'),
'free': memstats.get('memfree'),
},
'nocache': {
'free': memstats.get('nocache:free'),
'used': memstats.get('nocache:used'),
},
'swap': {
'total': memstats.get('swaptotal'),
'free': memstats.get('swapfree'),
'used': memstats.get('swap:used'),
'cached': memstats.get('swapcached'),
},
}
return memory_facts
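    # Editor's note (not in the original module): get_memory_facts() returns a
    # flat set of *_mb keys plus a nested structure, roughly:
    #   {'memtotal_mb': 2048, 'memfree_mb': 512, 'swaptotal_mb': 1024,
    #    'swapfree_mb': 1024,
    #    'memory_mb': {'real': {...}, 'nocache': {...}, 'swap': {...}}}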
def get_cpu_facts(self, collected_facts=None):
cpu_facts = {}
collected_facts = collected_facts or {}
i = 0
vendor_id_occurrence = 0
model_name_occurrence = 0
physid = 0
coreid = 0
sockets = {}
cores = {}
xen = False
xen_paravirt = False
try:
if os.path.exists('/proc/xen'):
xen = True
else:
for line in get_file_lines('/sys/hypervisor/type'):
if line.strip() == 'xen':
xen = True
# Only interested in the first line
break
except IOError:
pass
if not os.access("/proc/cpuinfo", os.R_OK):
return cpu_facts
cpu_facts['processor'] = []
for line in get_file_lines('/proc/cpuinfo'):
data = line.split(":", 1)
key = data[0].strip()
if xen:
if key == 'flags':
# Check for vme cpu flag, Xen paravirt does not expose this.
# Need to detect Xen paravirt because it exposes cpuinfo
# differently than Xen HVM or KVM and causes reporting of
# only a single cpu core.
if 'vme' not in data:
xen_paravirt = True
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
# 'ncpus active' is SPARC attribute
if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor', 'processor']:
if 'processor' not in cpu_facts:
cpu_facts['processor'] = []
cpu_facts['processor'].append(data[1].strip())
if key == 'vendor_id':
vendor_id_occurrence += 1
if key == 'model name':
model_name_occurrence += 1
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'core id':
coreid = data[1].strip()
if coreid not in sockets:
cores[coreid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
elif key == 'siblings':
cores[coreid] = int(data[1].strip())
elif key == '# processors':
cpu_facts['processor_cores'] = int(data[1].strip())
elif key == 'ncpus active':
i = int(data[1].strip())
# Skip for platforms without vendor_id/model_name in cpuinfo (e.g ppc64le)
if vendor_id_occurrence > 0:
if vendor_id_occurrence == model_name_occurrence:
i = vendor_id_occurrence
# FIXME
if collected_facts.get('ansible_architecture') != 's390x':
if xen_paravirt:
cpu_facts['processor_count'] = i
cpu_facts['processor_cores'] = i
cpu_facts['processor_threads_per_core'] = 1
cpu_facts['processor_vcpus'] = i
else:
if sockets:
cpu_facts['processor_count'] = len(sockets)
else:
cpu_facts['processor_count'] = i
socket_values = list(sockets.values())
if socket_values and socket_values[0]:
cpu_facts['processor_cores'] = socket_values[0]
else:
cpu_facts['processor_cores'] = 1
core_values = list(cores.values())
if core_values:
cpu_facts['processor_threads_per_core'] = core_values[0] // cpu_facts['processor_cores']
else:
cpu_facts['processor_threads_per_core'] = 1 // cpu_facts['processor_cores']
cpu_facts['processor_vcpus'] = (cpu_facts['processor_threads_per_core'] *
cpu_facts['processor_count'] * cpu_facts['processor_cores'])
return cpu_facts
def get_dmi_facts(self):
''' learn dmi facts from system
Try /sys first for dmi related facts.
If that is not available, fall back to dmidecode executable '''
dmi_facts = {}
if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
# Use kernel DMI info, if available
# DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
FORM_FACTOR = ["Unknown", "Other", "Unknown", "Desktop",
"Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
"Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
"All In One", "Sub Notebook", "Space-saving", "Lunch Box",
"Main Server Chassis", "Expansion Chassis", "Sub Chassis",
"Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
"Rack Mount Chassis", "Sealed-case PC", "Multi-system",
"CompactPCI", "AdvancedTCA", "Blade"]
DMI_DICT = {
'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
'product_name': '/sys/devices/virtual/dmi/id/product_name',
'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
'product_version': '/sys/devices/virtual/dmi/id/product_version',
'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
}
for (key, path) in DMI_DICT.items():
data = get_file_content(path)
if data is not None:
if key == 'form_factor':
try:
dmi_facts['form_factor'] = FORM_FACTOR[int(data)]
except IndexError:
dmi_facts['form_factor'] = 'unknown (%s)' % data
else:
dmi_facts[key] = data
else:
dmi_facts[key] = 'NA'
else:
# Fall back to using dmidecode, if available
dmi_bin = self.module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_version': 'bios-version',
'form_factor': 'chassis-type',
'product_name': 'system-product-name',
'product_serial': 'system-serial-number',
'product_uuid': 'system-uuid',
'product_version': 'system-version',
'system_vendor': 'system-manufacturer'
}
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
thisvalue = ''.join([line for line in out.splitlines() if not line.startswith('#')])
try:
json.dumps(thisvalue)
except UnicodeDecodeError:
thisvalue = "NA"
dmi_facts[k] = thisvalue
else:
dmi_facts[k] = 'NA'
else:
dmi_facts[k] = 'NA'
return dmi_facts
def _run_lsblk(self, lsblk_path):
# call lsblk and collect all uuids
# --exclude 2 makes lsblk ignore floppy disks, which are slower to answer than typical timeouts
# this uses the linux major device number
# for details see https://www.kernel.org/doc/Documentation/devices.txt
args = ['--list', '--noheadings', '--paths', '--output', 'NAME,UUID', '--exclude', '2']
cmd = [lsblk_path] + args
rc, out, err = self.module.run_command(cmd)
return rc, out, err
def _lsblk_uuid(self):
uuids = {}
lsblk_path = self.module.get_bin_path("lsblk")
if not lsblk_path:
return uuids
rc, out, err = self._run_lsblk(lsblk_path)
if rc != 0:
return uuids
# each line will be in format:
# <devicename><some whitespace><uuid>
# /dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
for lsblk_line in out.splitlines():
if not lsblk_line:
continue
line = lsblk_line.strip()
fields = line.rsplit(None, 1)
if len(fields) < 2:
continue
device_name, uuid = fields[0].strip(), fields[1].strip()
if device_name in uuids:
continue
uuids[device_name] = uuid
return uuids
def _run_findmnt(self, findmnt_path):
args = ['--list', '--noheadings', '--notruncate']
cmd = [findmnt_path] + args
rc, out, err = self.module.run_command(cmd, errors='surrogate_then_replace')
return rc, out, err
def _find_bind_mounts(self):
bind_mounts = set()
findmnt_path = self.module.get_bin_path("findmnt")
if not findmnt_path:
return bind_mounts
rc, out, err = self._run_findmnt(findmnt_path)
if rc != 0:
return bind_mounts
# find bind mounts, in case /etc/mtab is a symlink to /proc/mounts
for line in out.splitlines():
fields = line.split()
# fields[0] is the TARGET, fields[1] is the SOURCE
if len(fields) < 2:
continue
# bind mounts will have a [/directory_name] in the SOURCE column
if self.BIND_MOUNT_RE.match(fields[1]):
bind_mounts.add(fields[0])
return bind_mounts
def _mtab_entries(self):
mtab_file = '/etc/mtab'
if not os.path.exists(mtab_file):
mtab_file = '/proc/mounts'
mtab = get_file_content(mtab_file, '')
mtab_entries = []
for line in mtab.splitlines():
fields = line.split()
if len(fields) < 4:
continue
mtab_entries.append(fields)
return mtab_entries
@timeout.timeout()
def get_mount_facts(self):
mount_facts = {}
mount_facts['mounts'] = []
bind_mounts = self._find_bind_mounts()
uuids = self._lsblk_uuid()
mtab_entries = self._mtab_entries()
mounts = []
for fields in mtab_entries:
device, mount, fstype, options = fields[0], fields[1], fields[2], fields[3]
if not device.startswith('/') and ':/' not in device:
continue
if fstype == 'none':
continue
mount_statvfs_info = get_mount_size(mount)
if mount in bind_mounts:
# only add if not already there, we might have a plain /etc/mtab
if not self.MTAB_BIND_MOUNT_RE.match(options):
options += ",bind"
mount_info = {'mount': mount,
'device': device,
'fstype': fstype,
'options': options,
'uuid': uuids.get(device, 'N/A')}
mount_info.update(mount_statvfs_info)
mounts.append(mount_info)
mount_facts['mounts'] = mounts
return mount_facts
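    # Editor's note (not in the original module): each entry appended to
    # mount_facts['mounts'] looks roughly like
    #   {'mount': '/', 'device': '/dev/sda1', 'fstype': 'ext4',
    #    'options': 'rw,relatime', 'uuid': '32caaec3-...'}
    # merged with the size fields returned by get_mount_size().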
def get_device_links(self, link_dir):
if not os.path.exists(link_dir):
return {}
try:
retval = collections.defaultdict(set)
for entry in os.listdir(link_dir):
try:
target = os.path.basename(os.readlink(os.path.join(link_dir, entry)))
retval[target].add(entry)
except OSError:
continue
return dict((k, list(sorted(v))) for (k, v) in iteritems(retval))
except OSError:
return {}
def get_all_device_owners(self):
try:
retval = collections.defaultdict(set)
for path in glob.glob('/sys/block/*/slaves/*'):
elements = path.split('/')
device = elements[3]
target = elements[5]
retval[target].add(device)
return dict((k, list(sorted(v))) for (k, v) in iteritems(retval))
except OSError:
return {}
def get_all_device_links(self):
return {
'ids': self.get_device_links('/dev/disk/by-id'),
'uuids': self.get_device_links('/dev/disk/by-uuid'),
'labels': self.get_device_links('/dev/disk/by-label'),
'masters': self.get_all_device_owners(),
}
def get_holders(self, block_dev_dict, sysdir):
block_dev_dict['holders'] = []
if os.path.isdir(sysdir + "/holders"):
for folder in os.listdir(sysdir + "/holders"):
if not folder.startswith("dm-"):
continue
name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
if name:
block_dev_dict['holders'].append(name)
else:
block_dev_dict['holders'].append(folder)
def get_device_facts(self):
device_facts = {}
device_facts['devices'] = {}
lspci = self.module.get_bin_path('lspci')
if lspci:
rc, pcidata, err = self.module.run_command([lspci, '-D'], errors='surrogate_then_replace')
else:
pcidata = None
try:
block_devs = os.listdir("/sys/block")
except OSError:
return device_facts
devs_wwn = {}
try:
devs_by_id = os.listdir("/dev/disk/by-id")
except OSError:
pass
else:
for link_name in devs_by_id:
if link_name.startswith("wwn-"):
try:
wwn_link = os.readlink(os.path.join("/dev/disk/by-id", link_name))
except OSError:
continue
devs_wwn[os.path.basename(wwn_link)] = link_name[4:]
links = self.get_all_device_links()
device_facts['device_links'] = links
for block in block_devs:
virtual = 1
sysfs_no_links = 0
try:
path = os.readlink(os.path.join("/sys/block/", block))
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.EINVAL:
path = block
sysfs_no_links = 1
else:
continue
sysdir = os.path.join("/sys/block", path)
if sysfs_no_links == 1:
for folder in os.listdir(sysdir):
if "device" in folder:
virtual = 0
break
d = {}
d['virtual'] = virtual
d['links'] = {}
for (link_type, link_values) in iteritems(links):
d['links'][link_type] = link_values.get(block, [])
diskname = os.path.basename(sysdir)
for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']:
d[key] = get_file_content(sysdir + "/device/" + key)
sg_inq = self.module.get_bin_path('sg_inq')
if sg_inq:
device = "/dev/%s" % (block)
rc, drivedata, err = self.module.run_command([sg_inq, device])
if rc == 0:
serial = re.search(r"Unit serial number:\s+(\w+)", drivedata)
if serial:
d['serial'] = serial.group(1)
for key in ['vendor', 'model']:
d[key] = get_file_content(sysdir + "/device/" + key)
for key, test in [('removable', '/removable'),
('support_discard', '/queue/discard_granularity'),
]:
d[key] = get_file_content(sysdir + test)
if diskname in devs_wwn:
d['wwn'] = devs_wwn[diskname]
d['partitions'] = {}
for folder in os.listdir(sysdir):
m = re.search("(" + diskname + r"\d+)", folder)
if m:
part = {}
partname = m.group(1)
part_sysdir = sysdir + "/" + partname
part['links'] = {}
for (link_type, link_values) in iteritems(links):
part['links'][link_type] = link_values.get(partname, [])
part['start'] = get_file_content(part_sysdir + "/start", 0)
part['sectors'] = get_file_content(part_sysdir + "/size", 0)
part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size", 512)
part['size'] = bytes_to_human((float(part['sectors']) * 512.0))
part['uuid'] = get_partition_uuid(partname)
self.get_holders(part, part_sysdir)
d['partitions'][partname] = part
d['rotational'] = get_file_content(sysdir + "/queue/rotational")
d['scheduler_mode'] = ""
scheduler = get_file_content(sysdir + "/queue/scheduler")
if scheduler is not None:
m = re.match(r".*?(\[(.*)\])", scheduler)
if m:
d['scheduler_mode'] = m.group(2)
d['sectors'] = get_file_content(sysdir + "/size")
if not d['sectors']:
d['sectors'] = 0
d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size", 512)
d['size'] = bytes_to_human(float(d['sectors']) * 512.0)
d['host'] = ""
# domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
m = re.match(r".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
if m and pcidata:
pciid = m.group(1)
did = re.escape(pciid)
m = re.search("^" + did + r"\s(.*)$", pcidata, re.MULTILINE)
if m:
d['host'] = m.group(1)
self.get_holders(d, sysdir)
device_facts['devices'][diskname] = d
return device_facts
def get_uptime_facts(self):
uptime_facts = {}
uptime_file_content = get_file_content('/proc/uptime')
if uptime_file_content:
uptime_seconds_string = uptime_file_content.split(' ')[0]
uptime_facts['uptime_seconds'] = int(float(uptime_seconds_string))
return uptime_facts
def _find_mapper_device_name(self, dm_device):
dm_prefix = '/dev/dm-'
mapper_device = dm_device
if dm_device.startswith(dm_prefix):
dmsetup_cmd = self.module.get_bin_path('dmsetup', True)
mapper_prefix = '/dev/mapper/'
rc, dm_name, err = self.module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
if rc == 0:
mapper_device = mapper_prefix + dm_name.rstrip()
return mapper_device
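    # Editor's note (not in the original module): e.g. '/dev/dm-0' is resolved
    # via `dmsetup info -C --noheadings -o name /dev/dm-0` to a path such as
    # '/dev/mapper/vg_root-lv_root'; paths without the '/dev/dm-' prefix are
    # returned unchanged.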
def get_lvm_facts(self):
""" Get LVM Facts if running as root and lvm utils are available """
lvm_facts = {}
if os.getuid() == 0 and self.module.get_bin_path('vgs'):
lvm_util_options = '--noheadings --nosuffix --units g --separator ,'
vgs_path = self.module.get_bin_path('vgs')
# vgs fields: VG #PV #LV #SN Attr VSize VFree
vgs = {}
if vgs_path:
rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_path, lvm_util_options))
for vg_line in vg_lines.splitlines():
items = vg_line.strip().split(',')
vgs[items[0]] = {'size_g': items[-2],
'free_g': items[-1],
'num_lvs': items[2],
'num_pvs': items[1]}
lvs_path = self.module.get_bin_path('lvs')
# lvs fields:
# LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
lvs = {}
if lvs_path:
rc, lv_lines, err = self.module.run_command('%s %s' % (lvs_path, lvm_util_options))
for lv_line in lv_lines.splitlines():
items = lv_line.strip().split(',')
lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}
pvs_path = self.module.get_bin_path('pvs')
# pvs fields: PV VG #Fmt #Attr PSize PFree
pvs = {}
if pvs_path:
rc, pv_lines, err = self.module.run_command('%s %s' % (pvs_path, lvm_util_options))
for pv_line in pv_lines.splitlines():
items = pv_line.strip().split(',')
pvs[self._find_mapper_device_name(items[0])] = {
'size_g': items[4],
'free_g': items[5],
'vg': items[1]}
lvm_facts['lvm'] = {'lvs': lvs, 'vgs': vgs, 'pvs': pvs}
return lvm_facts
class LinuxHardwareCollector(HardwareCollector):
_platform = 'Linux'
_fact_class = LinuxHardware
required_facts = set(['platform'])
| gpl-3.0 | -4,739,569,470,297,255,000 | 37.233894 | 119 | 0.508041 | false |
lcpt/xc | misc/test/defSections.py | 1 | 3450 | # Source Generated with Decompyle++
# File: defSections.pyc (Python 2.7)
from __future__ import division
import xc_base
import geom
import xc
from materials.ehe import EHE_materials
areaFi8 = 5e-05
areaFi10 = 7.85e-05
areaFi12 = 0.000113
areaFi16 = 0.000201
areaFi20 = 0.000314
areaFi25 = 0.0004608
def getDiagIntSection2(mdlr):
ancho = 1
canto = 0.3
recpos = 0.06 + 0.008
recneg = 0.06 + 0.008
geomSecHA1 = mdlr.getMaterialHandler.newSectionGeometry('geomSecHA1')
regions = geomSecHA1.getRegions
    concrete = regions.newQuadRegion(EHE_materials.HA30.nmbDiagD)
concrete.nDivIJ = 10
concrete.nDivJK = 10
concrete.pMin = geom.Pos2d(-ancho / 2, -canto / 2)
concrete.pMax = geom.Pos2d(ancho / 2, canto / 2)
reinforcement = geomSecHA1.getReinfLayers
bottomReinforcement = reinforcement.newStraightReinfLayer(EHE_materials.B500S.nmbDiagD)
bottomReinforcement.numReinfBars = 5
bottomReinforcement.barArea = areaFi12
bottomReinforcement.p1 = geom.Pos2d(-ancho / 2 + recneg, -canto / 2 + recneg)
bottomReinforcement.p2 = geom.Pos2d(ancho / 2 - recneg, -canto / 2 + recneg)
topReinforcement = reinforcement.newStraightReinfLayer(EHE_materials.B500S.nmbDiagD)
topReinforcement.numReinfBars = 5
topReinforcement.barArea = areaFi12
topReinforcement.p1 = geom.Pos2d(-ancho / 2 + recpos, canto / 2 - recpos)
topReinforcement.p2 = geom.Pos2d(ancho / 2 - recpos, canto / 2 - recpos)
materiales = mdlr.getMaterialHandler
secHA1 = materiales.newMaterial('fiber_section_3d', 'secHA1')
fiberSectionRepr = secHA1.getFiberSectionRepr()
fiberSectionRepr.setGeomNamed('geomSecHA1')
secHA1.setupFibers()
param = xc.InteractionDiagramParameters()
param.concreteTag = EHE_materials.HA30.tagDiagD
param.reinforcementTag = EHE_materials.B500S.tagDiagD
diagIntSecHA1 = materiales.calcInteractionDiagram('secHA1', param)
return diagIntSecHA1
def getDiagIntSection1(mdlr):
ancho = 1
canto = 0.3
recpos = 0.076 + 0.008
recneg = 0.076 + 0.008
geomSecHA2 = mdlr.getMaterialHandler.newSectionGeometry('geomSecHA2')
regions = geomSecHA2.getRegions
    concrete = regions.newQuadRegion(EHE_materials.HA30.nmbDiagD)
concrete.nDivIJ = 10
concrete.nDivJK = 10
concrete.pMin = geom.Pos2d(-ancho / 2, -canto / 2)
concrete.pMax = geom.Pos2d(ancho / 2, canto / 2)
reinforcement = geomSecHA2.getReinfLayers
bottomReinforcement = reinforcement.newStraightReinfLayer(EHE_materials.B500S.nmbDiagD)
bottomReinforcement.numReinfBars = 7
bottomReinforcement.barArea = areaFi20
bottomReinforcement.p1 = geom.Pos2d(-ancho / 2 + recneg, -canto / 2 + recneg)
bottomReinforcement.p2 = geom.Pos2d(ancho / 2 - recneg, -canto / 2 + recneg)
topReinforcement = reinforcement.newStraightReinfLayer(EHE_materials.B500S.nmbDiagD)
topReinforcement.numReinfBars = 7
topReinforcement.barArea = areaFi20
topReinforcement.p1 = geom.Pos2d(-ancho / 2 + recpos, canto / 2 - recpos)
topReinforcement.p2 = geom.Pos2d(ancho / 2 - recpos, canto / 2 - recpos)
materiales = mdlr.getMaterialHandler
secHA2 = materiales.newMaterial('fiber_section_3d', 'secHA2')
fiberSectionRepr = secHA2.getFiberSectionRepr()
fiberSectionRepr.setGeomNamed('geomSecHA2')
secHA2.setupFibers()
param = xc.InteractionDiagramParameters()
param.concreteTag = EHE_materials.HA30.tagDiagD
param.reinforcementTag = EHE_materials.B500S.tagDiagD
diagIntSecHA2 = materiales.calcInteractionDiagram('secHA2', param)
return diagIntSecHA2
| gpl-3.0 | 1,103,720,121,774,635,600 | 40.071429 | 89 | 0.761159 | false |
vainotuisk/icecreamratings | ENV/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/__about__.py | 101 | 1073 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "15.1"
__author__ = "Donald Stufft"
__email__ = "[email protected]"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2014 %s" % __author__
| bsd-3-clause | 4,840,966,784,882,902,000 | 33.612903 | 74 | 0.680336 | false |
SpectreJan/gnuradio | gr-utils/python/modtool/gr-newmod/docs/doxygen/doxyxml/base.py | 333 | 6794 | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
This module defines a base class.
Classes based upon this are used to make more user-friendly interfaces
to the doxygen xml docs than the generated classes provide.
"""
import os
import pdb
from xml.parsers.expat import ExpatError
from generated import compound
class Base(object):
class Duplicate(StandardError):
pass
class NoSuchMember(StandardError):
pass
class ParsingError(StandardError):
pass
def __init__(self, parse_data, top=None):
self._parsed = False
self._error = False
self._parse_data = parse_data
self._members = []
self._dict_members = {}
self._in_category = {}
self._data = {}
if top is not None:
self._xml_path = top._xml_path
# Set up holder of references
else:
top = self
self._refs = {}
self._xml_path = parse_data
self.top = top
@classmethod
def from_refid(cls, refid, top=None):
""" Instantiate class from a refid rather than parsing object. """
# First check to see if its already been instantiated.
if top is not None and refid in top._refs:
return top._refs[refid]
# Otherwise create a new instance and set refid.
inst = cls(None, top=top)
inst.refid = refid
inst.add_ref(inst)
return inst
@classmethod
def from_parse_data(cls, parse_data, top=None):
refid = getattr(parse_data, 'refid', None)
if refid is not None and top is not None and refid in top._refs:
return top._refs[refid]
inst = cls(parse_data, top=top)
if refid is not None:
inst.refid = refid
inst.add_ref(inst)
return inst
def add_ref(self, obj):
if hasattr(obj, 'refid'):
self.top._refs[obj.refid] = obj
mem_classes = []
def get_cls(self, mem):
for cls in self.mem_classes:
if cls.can_parse(mem):
return cls
raise StandardError(("Did not find a class for object '%s'." \
% (mem.get_name())))
def convert_mem(self, mem):
try:
cls = self.get_cls(mem)
converted = cls.from_parse_data(mem, self.top)
if converted is None:
raise StandardError('No class matched this object.')
self.add_ref(converted)
return converted
except StandardError, e:
print e
@classmethod
def includes(cls, inst):
return isinstance(inst, cls)
@classmethod
def can_parse(cls, obj):
return False
def _parse(self):
self._parsed = True
def _get_dict_members(self, cat=None):
"""
For given category a dictionary is returned mapping member names to
members of that category. For names that are duplicated the name is
mapped to None.
"""
self.confirm_no_error()
if cat not in self._dict_members:
new_dict = {}
for mem in self.in_category(cat):
if mem.name() not in new_dict:
new_dict[mem.name()] = mem
else:
new_dict[mem.name()] = self.Duplicate
self._dict_members[cat] = new_dict
return self._dict_members[cat]
def in_category(self, cat):
self.confirm_no_error()
if cat is None:
return self._members
if cat not in self._in_category:
self._in_category[cat] = [mem for mem in self._members
if cat.includes(mem)]
return self._in_category[cat]
def get_member(self, name, cat=None):
self.confirm_no_error()
# Check if it's in a namespace or class.
bits = name.split('::')
first = bits[0]
rest = '::'.join(bits[1:])
member = self._get_dict_members(cat).get(first, self.NoSuchMember)
# Raise any errors that are returned.
if member in set([self.NoSuchMember, self.Duplicate]):
raise member()
if rest:
return member.get_member(rest, cat=cat)
return member
def has_member(self, name, cat=None):
try:
mem = self.get_member(name, cat=cat)
return True
except self.NoSuchMember:
return False
def data(self):
self.confirm_no_error()
return self._data
def members(self):
self.confirm_no_error()
return self._members
def process_memberdefs(self):
mdtss = []
for sec in self._retrieved_data.compounddef.sectiondef:
mdtss += sec.memberdef
# At the moment we lose all information associated with sections.
# Sometimes a memberdef is in several sectiondef.
# We make sure we don't get duplicates here.
uniques = set([])
for mem in mdtss:
converted = self.convert_mem(mem)
pair = (mem.name, mem.__class__)
if pair not in uniques:
uniques.add(pair)
self._members.append(converted)
def retrieve_data(self):
filename = os.path.join(self._xml_path, self.refid + '.xml')
try:
self._retrieved_data = compound.parse(filename)
except ExpatError:
print('Error in xml in file %s' % filename)
self._error = True
self._retrieved_data = None
def check_parsed(self):
if not self._parsed:
self._parse()
def confirm_no_error(self):
self.check_parsed()
if self._error:
raise self.ParsingError()
def error(self):
self.check_parsed()
return self._error
def name(self):
# first see if we can do it without processing.
if self._parse_data is not None:
return self._parse_data.name
self.check_parsed()
return self._retrieved_data.compounddef.name
| gpl-3.0 | -6,082,990,284,957,464,000 | 30.022831 | 76 | 0.580807 | false |
adityacs/ansible | test/units/plugins/connection/test_connection.py | 52 | 6390 | # (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
from ansible.compat.tests import mock
from ansible.compat.tests import unittest
from ansible.errors import AnsibleError
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import ConnectionBase
#from ansible.plugins.connection.accelerate import Connection as AccelerateConnection
#from ansible.plugins.connection.chroot import Connection as ChrootConnection
#from ansible.plugins.connection.funcd import Connection as FuncdConnection
#from ansible.plugins.connection.jail import Connection as JailConnection
#from ansible.plugins.connection.libvirt_lxc import Connection as LibvirtLXCConnection
from ansible.plugins.connection.lxc import Connection as LxcConnection
from ansible.plugins.connection.local import Connection as LocalConnection
from ansible.plugins.connection.paramiko_ssh import Connection as ParamikoConnection
from ansible.plugins.connection.ssh import Connection as SSHConnection
from ansible.plugins.connection.docker import Connection as DockerConnection
#from ansible.plugins.connection.winrm import Connection as WinRmConnection
from ansible.plugins.connection.network_cli import Connection as NetworkCliConnection
class TestConnectionBaseClass(unittest.TestCase):
def setUp(self):
self.play_context = PlayContext()
self.in_stream = StringIO()
def tearDown(self):
pass
def test_subclass_error(self):
class ConnectionModule1(ConnectionBase):
pass
with self.assertRaises(TypeError):
ConnectionModule1()
class ConnectionModule2(ConnectionBase):
def get(self, key):
super(ConnectionModule2, self).get(key)
with self.assertRaises(TypeError):
ConnectionModule2()
def test_subclass_success(self):
class ConnectionModule3(ConnectionBase):
@property
def transport(self):
pass
def _connect(self):
pass
def exec_command(self):
pass
def put_file(self):
pass
def fetch_file(self):
pass
def close(self):
pass
self.assertIsInstance(ConnectionModule3(self.play_context, self.in_stream), ConnectionModule3)
# def test_accelerate_connection_module(self):
# self.assertIsInstance(AccelerateConnection(), AccelerateConnection)
#
# def test_chroot_connection_module(self):
# self.assertIsInstance(ChrootConnection(), ChrootConnection)
#
# def test_funcd_connection_module(self):
# self.assertIsInstance(FuncdConnection(), FuncdConnection)
#
# def test_jail_connection_module(self):
# self.assertIsInstance(JailConnection(), JailConnection)
#
# def test_libvirt_lxc_connection_module(self):
# self.assertIsInstance(LibvirtLXCConnection(), LibvirtLXCConnection)
def test_lxc_connection_module(self):
self.assertIsInstance(LxcConnection(self.play_context, self.in_stream), LxcConnection)
def test_local_connection_module(self):
self.assertIsInstance(LocalConnection(self.play_context, self.in_stream), LocalConnection)
def test_paramiko_connection_module(self):
self.assertIsInstance(ParamikoConnection(self.play_context, self.in_stream), ParamikoConnection)
def test_ssh_connection_module(self):
self.assertIsInstance(SSHConnection(self.play_context, self.in_stream), SSHConnection)
@mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
@mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('docker version', '1.2.3', '', 0))
    def test_docker_connection_module_too_old(self, mock_new_docker_version, mock_old_docker_version):
self.assertRaisesRegexp(AnsibleError, '^docker connection type requires docker 1.3 or higher$',
DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker')
@mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
@mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('docker version', '1.3.4', '', 0))
    def test_docker_connection_module(self, mock_new_docker_version, mock_old_docker_version):
self.assertIsInstance(DockerConnection(self.play_context, self.in_stream, docker_command='/fake/docker'),
DockerConnection)
# old version and new version fail
@mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
@mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('false', 'garbage', '', 1))
def test_docker_connection_module_wrong_cmd(self, mock_new_docker_version, mock_old_docker_version):
self.assertRaisesRegexp(AnsibleError, '^Docker version check (.*?) failed: ',
DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker')
# def test_winrm_connection_module(self):
# self.assertIsInstance(WinRmConnection(), WinRmConnection)
def test_network_cli_connection_module(self):
self.assertIsInstance(NetworkCliConnection(self.play_context, self.in_stream), NetworkCliConnection)
self.assertIsInstance(NetworkCliConnection(self.play_context, self.in_stream), ParamikoConnection)
| gpl-3.0 | -2,160,114,617,911,164,400 | 46.686567 | 132 | 0.723787 | false |
upsidetravel/bucket-antivirus-function | scan_bucket.py | 1 | 4671 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Upside Travel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import sys
import boto3
from common import AV_STATUS_METADATA, LAMBDA_ENDPOINT
from common import AV_TIMESTAMP_METADATA
from common import S3_ENDPOINT
# Get all objects in an S3 bucket that have not been previously scanned
def get_objects(s3_client, s3_bucket_name):
s3_object_list = []
s3_list_objects_result = {"IsTruncated": True}
while s3_list_objects_result["IsTruncated"]:
s3_list_objects_config = {"Bucket": s3_bucket_name}
continuation_token = s3_list_objects_result.get("NextContinuationToken")
if continuation_token:
s3_list_objects_config["ContinuationToken"] = continuation_token
s3_list_objects_result = s3_client.list_objects_v2(**s3_list_objects_config)
if "Contents" not in s3_list_objects_result:
break
for key in s3_list_objects_result["Contents"]:
key_name = key["Key"]
# Don't include objects that have been scanned
if not object_previously_scanned(s3_client, s3_bucket_name, key_name):
s3_object_list.append(key_name)
return s3_object_list
# Determine if an object has been previously scanned for viruses
def object_previously_scanned(s3_client, s3_bucket_name, key_name):
s3_object_tags = s3_client.get_object_tagging(Bucket=s3_bucket_name, Key=key_name)
if "TagSet" not in s3_object_tags:
return False
for tag in s3_object_tags["TagSet"]:
if tag["Key"] in [AV_STATUS_METADATA, AV_TIMESTAMP_METADATA]:
return True
return False
# Scan an S3 object for viruses by invoking the lambda function
# Skip any objects that have already been scanned
def scan_object(lambda_client, lambda_function_name, s3_bucket_name, key_name):
print("Scanning: {}/{}".format(s3_bucket_name, key_name))
s3_event = format_s3_event(s3_bucket_name, key_name)
lambda_invoke_result = lambda_client.invoke(
FunctionName=lambda_function_name,
InvocationType="Event",
Payload=json.dumps(s3_event),
)
if lambda_invoke_result["ResponseMetadata"]["HTTPStatusCode"] != 202:
print("Error invoking lambda: {}".format(lambda_invoke_result))
# Format an S3 Event to use when invoking the lambda function
# https://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
def format_s3_event(s3_bucket_name, key_name):
s3_event = {
"Records": [
{"s3": {"bucket": {"name": s3_bucket_name}, "object": {"key": key_name}}}
]
}
return s3_event
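# Illustrative output for a hypothetical bucket and key (names are examples):
#   format_s3_event("my-bucket", "docs/report.pdf")
#   -> {"Records": [{"s3": {"bucket": {"name": "my-bucket"},
#                           "object": {"key": "docs/report.pdf"}}}]}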
def main(lambda_function_name, s3_bucket_name, limit):
# Verify the lambda exists
lambda_client = boto3.client("lambda", endpoint_url=LAMBDA_ENDPOINT)
try:
lambda_client.get_function(FunctionName=lambda_function_name)
except Exception:
print("Lambda Function '{}' does not exist".format(lambda_function_name))
sys.exit(1)
# Verify the S3 bucket exists
s3_client = boto3.client("s3", endpoint_url=S3_ENDPOINT)
try:
s3_client.head_bucket(Bucket=s3_bucket_name)
except Exception:
print("S3 Bucket '{}' does not exist".format(s3_bucket_name))
sys.exit(1)
# Scan the objects in the bucket
s3_object_list = get_objects(s3_client, s3_bucket_name)
if limit:
s3_object_list = s3_object_list[: min(limit, len(s3_object_list))]
for key_name in s3_object_list:
scan_object(lambda_client, lambda_function_name, s3_bucket_name, key_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Scan an S3 bucket for viruses.")
parser.add_argument(
"--lambda-function-name",
required=True,
help="The name of the lambda function to invoke",
)
parser.add_argument(
"--s3-bucket-name", required=True, help="The name of the S3 bucket to scan"
)
parser.add_argument("--limit", type=int, help="The number of records to limit to")
args = parser.parse_args()
main(args.lambda_function_name, args.s3_bucket_name, args.limit)
| apache-2.0 | -4,984,718,419,400,336,000 | 35.779528 | 86 | 0.678656 | false |
ecanzonieri/pyleus | examples/word_count/word_count/line_spout.py | 9 | 1818 | import logging
import random
from pyleus.storm import Spout
log = logging.getLogger('counter')
LINES = """
Lorem ipsum dolor sit amet, consectetur
adipiscing elit. Curabitur pharetra ante eget
nunc blandit vestibulum. Curabitur tempus mi
a risus lacinia egestas. Nulla faucibus
elit vitae dignissim euismod. Fusce ac
elementum leo, ut elementum dui. Ut
consequat est magna, eu posuere mi
pulvinar eget. Integer adipiscing, quam vitae
pretium facilisis, mi ligula viverra sapien,
nec elementum lacus metus ac mi.
Morbi sodales diam non velit accumsan
mollis. Donec eleifend quam in metus
faucibus auctor. Cras auctor sapien non
mauris vehicula, vel aliquam libero luctus.
Sed eu lobortis sapien. Maecenas eu
fringilla enim. Ut in velit nec
lectus tincidunt varius. Sed vel dictum
nunc. Morbi mollis nunc augue, eget
sagittis libero laoreet id. Suspendisse lobortis
nibh mauris, non bibendum magna iaculis
sed. Mauris interdum massa ut sagittis
vestibulum. In ipsum lacus, faucibus eu
hendrerit at, egestas non nisi. Duis
erat mauris, aliquam in hendrerit eget,
aliquam vel nibh. Proin molestie porta
imperdiet. Interdum et malesuada fames ac
ante ipsum primis in faucibus. Praesent
vitae cursus leo, a congue justo.
Ut interdum tellus non odio adipiscing
malesuada. Mauris in ante nec erat
lobortis eleifend. Morbi condimentum interdum elit,
quis iaculis ante pharetra id. In
""".strip().split('\n')
class LineSpout(Spout):
OUTPUT_FIELDS = ["line"]
def next_tuple(self):
line = random.choice(LINES)
log.debug(line)
self.emit((line,), tup_id=random.randrange(999999999))
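        # Passing a tup_id (presumably) marks the tuple as reliable so Storm
        # can track and ack or replay it; a random id is enough for this demo.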
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
filename='/tmp/word_count_lines.log',
format="%(message)s",
filemode='a',
)
LineSpout().run()
| apache-2.0 | -4,077,361,497,650,171,000 | 28.322581 | 62 | 0.749175 | false |
Smarsh/django | tests/regressiontests/templates/smartif.py | 50 | 2175 | import unittest
from django.template.smartif import IfParser, Literal
class SmartIfTests(unittest.TestCase):
def assertCalcEqual(self, expected, tokens):
self.assertEqual(expected, IfParser(tokens).parse().eval({}))
# We only test things here that are difficult to test elsewhere
# Many other tests are found in the main tests for builtin template tags
# Test parsing via the printed parse tree
def test_not(self):
var = IfParser(["not", False]).parse()
self.assertEqual("(not (literal False))", repr(var))
self.assert_(var.eval({}))
self.assertFalse(IfParser(["not", True]).parse().eval({}))
def test_or(self):
var = IfParser([True, "or", False]).parse()
self.assertEqual("(or (literal True) (literal False))", repr(var))
self.assert_(var.eval({}))
def test_in(self):
list_ = [1,2,3]
self.assertCalcEqual(True, [1, 'in', list_])
self.assertCalcEqual(False, [1, 'in', None])
self.assertCalcEqual(False, [None, 'in', list_])
def test_not_in(self):
list_ = [1,2,3]
self.assertCalcEqual(False, [1, 'not', 'in', list_])
self.assertCalcEqual(True, [4, 'not', 'in', list_])
self.assertCalcEqual(False, [1, 'not', 'in', None])
self.assertCalcEqual(True, [None, 'not', 'in', list_])
def test_precedence(self):
# (False and False) or True == True <- we want this one, like Python
# False and (False or True) == False
self.assertCalcEqual(True, [False, 'and', False, 'or', True])
# True or (False and False) == True <- we want this one, like Python
# (True or False) and False == False
self.assertCalcEqual(True, [True, 'or', False, 'and', False])
# (1 or 1) == 2 -> False
# 1 or (1 == 2) -> True <- we want this one
self.assertCalcEqual(True, [1, 'or', 1, '==', 2])
self.assertCalcEqual(True, [True, '==', True, 'or', True, '==', False])
self.assertEqual("(or (and (== (literal 1) (literal 2)) (literal 3)) (literal 4))",
repr(IfParser([1, '==', 2, 'and', 3, 'or', 4]).parse()))
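        # The repr strings above are parse trees printed prefix-style, so the
        # operator binding order chosen by the parser is directly visible.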
| bsd-3-clause | -8,601,469,431,514,981,000 | 40.037736 | 91 | 0.575632 | false |
cburgmer/eclectus | tomoeqt/handwritingwidget.py | 1 | 12861 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Qt widget integrating Tegaki/Tomoe handwriting character recognition for
Japanese Kanji and Chinese Hanzi.
Includes a QApplication demonstrating the widget.
10.02.2009 Christoph Burgmer ([email protected])
History:
* 11.02.2009, show boundaries and keep handwriting within them, resizeable.
* 12.02.2009, dictionary setting method, stroke count, maximum size,
graceful import failure
* 02.06.2009, ported to also work with Tegaki
Released under the LGPL (http://www.gnu.org/licenses/lgpl.html).
"""
import sys
import os
import signal
# imports needed by tomoe widget
from PyQt4 import QtGui, QtCore
try:
from tegaki.recognizer import Recognizer
from tegaki.character import Writing
recognizerType = 'tegaki'
except ImportError:
try:
from tomoe import Recognizer, Writing, Dict
recognizerType = 'tomoe'
except ImportError:
recognizerType = None
class HandwritingWidget(QtGui.QGraphicsView):
"""
Qt widget integrating Tegaki/Tomoe handwriting character recognition for
Japanese Kanji and Chinese Hanzi.
Example:
dictionary = os.path.join("/usr/local/share/tomoe/recognizer/",
'handwriting-zh_CN.xml')
        settings = {'tomoe': {'dictionary': dictionary}}
        widget = HandwritingWidget(mainWindow, settings, 200)
connect(widget, QtCore.SIGNAL("updated()"), showResults)
"""
class LineDrawingGraphicsScene(QtGui.QGraphicsScene):
"""Graphics scene for drawing strokes and handling recognizer."""
def __init__(self, parent, recognizerSettings=None, size=100):
QtGui.QGraphicsScene.__init__(self, parent)
self.size = 100
self.writing = None
# set pen for handwriting
self.pen = QtGui.QPen()
self.pen.setWidth(3)
self.strokeItemGroups = []
self.currentStrokeItems = []
self.setSize(size)
if recognizerSettings:
self.setDictionary(recognizerSettings)
def setDictionary(self, recognizerSettings={}):
#self.clear_strokes()
#initialize the default dictionary and a simple recognizer
if recognizerType == 'tomoe' \
and 'tomoe' in recognizerSettings \
and 'dictionary' in recognizerSettings['tomoe']:
tomoeDict = Dict("XML",
filename=recognizerSettings['tomoe']['dictionary'])
self.recognizer = Recognizer('Simple',
dictionary=tomoeDict)
# will encapsulate stroke data
if not self.writing:
self.writing = Writing()
elif recognizerType == 'tegaki':
recognizers = Recognizer.get_available_recognizers()
if not recognizers:
raise Exception('No recognizer available')
if 'tegaki' in recognizerSettings \
and 'recognizer' in recognizerSettings['tegaki']:
engine = recognizerSettings['tegaki']['recognizer']
if engine not in recognizers:
raise Exception('recognizer not available')
else:
engine = recognizers.keys()[0]
recognizer_klass = recognizers[engine]
self.recognizer = recognizer_klass()
if 'tegaki' in recognizerSettings \
and 'model' in recognizerSettings['tegaki']:
model = recognizerSettings['tegaki']['model']
if model not in recognizer_klass.get_available_models():
raise Exception('Model not available')
else:
model = recognizer_klass.get_available_models().keys()[0]
self.recognizer.set_model(model)
# will encapsulate stroke data
if not self.writing:
self.writing = Writing()
else:
self.writing = None
        def enabled(self):
            return self.writing is not None
def setSize(self, size):
for group in self.strokeItemGroups:
for item in group:
self.removeItem(item)
self.clear()
self.setSceneRect(0, 0, size, size)
# draw character grid
self.setBackgroundBrush(QtCore.Qt.lightGray)
self.addRect(-1, -1, size+2, size+2,
QtCore.Qt.white, QtCore.Qt.white).setZValue(-1)
self.addRect(0.1 * size, 0.1 * size, 0.8 * size, 0.8 * size)
self.addLine(0.5 * size, 0.1 * size, 0.5 * size, 0.9 * size,
QtGui.QPen(QtCore.Qt.DashLine))
self.addLine(0.1 * size, 0.5 * size, 0.9 * size, 0.5 * size,
QtGui.QPen(QtCore.Qt.DashLine))
# recalculate drawn strokes
scaleFactor = 1.0 * size / self.size
for group in self.strokeItemGroups:
for item in group:
self.addItem(item)
line = item.line()
line.setLine(line.x1() * scaleFactor,
line.y1() * scaleFactor, line.x2() * scaleFactor,
line.y2() * scaleFactor)
item.setLine(line)
self.size = size
def clear_strokes(self):
"""Removes all strokes and clears the drawing area."""
if self.strokeItemGroups:
for group in self.strokeItemGroups:
for item in group:
self.removeItem(item)
self.strokeItemGroups = []
if self.writing:
self.writing.clear()
def remove_last_stroke(self):
"""Removes the latest stroke."""
if self.strokeItemGroups:
for item in self.strokeItemGroups.pop():
self.removeItem(item)
if self.writing:
self.writing.remove_last_stroke()
def strokeCount(self):
return self.writing.get_n_strokes()
def doSearch(self, maxResults=10):
"""Searches for the current stroke input and returns the results."""
if self.writing and self.writing.get_n_strokes() > 0:
if recognizerType == 'tomoe':
res = self.recognizer.search(self.writing)
if maxResults != None:
res = res[:min(maxResults, len(res))]
return [(r.get_char().get_utf8().decode('utf8'),
r.get_score()) for r in res]
elif recognizerType == 'tegaki':
return [(c.decode('utf8'), score) for c, score \
in self.recognizer.recognize(self.writing, maxResults)]
else:
return []
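        # Both backends return a list of (unicode_character, score) pairs,
        # ranked by the recognizer, so callers can treat the result uniformly.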
def mouseReleaseEvent(self, mouseEvent):
if mouseEvent.button() & QtCore.Qt.LeftButton:
# left button released
#pos = mouseEvent.scenePos()
#self.keepBounds(pos)
#self.writing.line_to(pos.x() * 1000 / self.size,
#pos.y() * 1000 / self.size)
self.strokeItemGroups.append(self.currentStrokeItems)
self.currentStrokeItems = []
self.emit(QtCore.SIGNAL("strokeAdded()"))
def mousePressEvent(self, mouseEvent):
if mouseEvent.button() & QtCore.Qt.LeftButton:
# left button pressed
pos = mouseEvent.scenePos()
self.keepBounds(pos)
self.writing.move_to(int(pos.x() * 1000 / self.size),
int(pos.y() * 1000 / self.size))
def mouseMoveEvent(self, mouseEvent):
if mouseEvent.buttons() & QtCore.Qt.LeftButton:
# mouse is moved with the left button hold down
lastPos = mouseEvent.lastScenePos()
self.keepBounds(lastPos)
pos = mouseEvent.scenePos()
self.keepBounds(pos)
self.currentStrokeItems.append(
self.addLine(QtCore.QLineF(lastPos, pos), self.pen))
# tomoe seems to use a 1000x1000 pixel grid
self.writing.line_to(int(pos.x() * 1000 / self.size),
int(pos.y() * 1000 / self.size))
def keepBounds(self, point):
"""Keep the coordinates inside the scene rectangle."""
point.setX(min(max(0, point.x()), self.size))
point.setY(min(max(0, point.y()), self.size))
def __init__(self, parent, recognizerSettings=None, size=100):
self.scene = HandwritingWidget.LineDrawingGraphicsScene(parent,
recognizerSettings, size)
QtGui.QGraphicsView.__init__(self, self.scene, parent)
self.setRenderHints(QtGui.QPainter.Antialiasing)
self.connect(self.scene, QtCore.SIGNAL("strokeAdded()"),
lambda: self.emit(QtCore.SIGNAL("updated()")))
self.setInteractive(self.recognizerAvailable())
self.setMaximumSize(0)
@staticmethod
def recognizerAvailable():
return recognizerType != None
def setDictionary(self, recognizerSettings):
self.scene.setDictionary(recognizerSettings)
self.setInteractive(self.recognizerAvailable())
self.emit(QtCore.SIGNAL("updated()"))
def setMaximumSize(self, size):
self.maximumSize = size
    def results(self, maxResults=None):
        """
        Returns at most maxResults results for the current strokes (falling
        back to the recognizer's default when maxResults is None).
        """
        if self.scene.enabled():
            if maxResults is None:
                return self.scene.doSearch()
            return self.scene.doSearch(maxResults)
def strokeCount(self):
if self.scene.enabled():
return self.scene.strokeCount()
def clear(self):
"""Removes all strokes and clears the drawing area."""
if self.scene.enabled():
self.scene.clear_strokes()
self.emit(QtCore.SIGNAL("updated()"))
def remove_last_stroke(self):
"""Removes the latest stroke."""
if self.scene.enabled():
self.scene.remove_last_stroke()
self.emit(QtCore.SIGNAL("updated()"))
def resizeEvent(self, event):
QtGui.QGraphicsView.resizeEvent(self, event)
size = event.size()
minSize = min(size.width(), size.height())
if self.maximumSize:
minSize = min(minSize, self.maximumSize)
self.scene.setSize(minSize)
class MainWindow(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
# this is all you need to get the widget working
tomoeDictionary = os.path.join("/usr/local/share/tomoe/recognizer/",
'handwriting-zh_CN.xml')
recognizerSettings = {'tomoe': {'dictionary': tomoeDictionary},
'tegaki': {}}
self.widget = HandwritingWidget(self, recognizerSettings, 200)
self.connect(self.widget, QtCore.SIGNAL("updated()"), self.showResults)
# add some nice layout and buttons to clear strokes
self.centralwidget = QtGui.QWidget(self)
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.horizontalLayout = QtGui.QHBoxLayout()
self.clearButton = QtGui.QPushButton(self.centralwidget)
self.clearButton.setText('&Clear')
self.backButton = QtGui.QPushButton(self.centralwidget)
self.backButton.setText('&Back')
self.resultLabel = QtGui.QLineEdit(self.centralwidget)
self.horizontalLayout.addWidget(self.clearButton)
self.horizontalLayout.addWidget(self.backButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout.addWidget(self.widget)
self.verticalLayout.addWidget(self.resultLabel)
# add connections for clearing stroke input
self.connect(self.clearButton, QtCore.SIGNAL("clicked()"),
self.widget.clear)
self.connect(self.backButton, QtCore.SIGNAL("clicked()"),
self.widget.remove_last_stroke)
self.setCentralWidget(self.centralwidget)
def showResults(self):
resultList = self.widget.results(10)
#self.resultLabel.setText(
#', '.join([char + ' (' + str(s) + ')' for char, s in resultList]))
self.resultLabel.setText(''.join([char for char, _ in resultList]))
def main():
# create applicaton
app = QtGui.QApplication(sys.argv)
# create main window
window = MainWindow()
window.show()
# react to CTRL+C on the command line
signal.signal(signal.SIGINT, signal.SIG_DFL)
app.exec_()
if __name__ == '__main__':
main()
| gpl-3.0 | 5,516,803,540,838,374,000 | 35.851003 | 80 | 0.578027 | false |
keenerd/namcap | Namcap/tests/pkgbuild/test_invalidstartdir.py | 4 | 2034 | # -*- coding: utf-8 -*-
#
# namcap tests - invalidstartdir
# Copyright (C) 2011 Rémy Oudompheng <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
from Namcap.tests.pkgbuild_test import PkgbuildTest
import Namcap.rules.invalidstartdir as module
class NamcapInvalidStartdirTest(PkgbuildTest):
pkgbuild1 = """
# Maintainer: Arch Linux <[email protected]>
# Contributor: Arch Linux <[email protected]>
pkgname=mypackage
pkgver=1.0
pkgrel=1
pkgdesc="This program does foobar"
arch=('i686' 'x86_64')
url="http://www.example.com/"
license=('GPL')
depends=('glibc')
options=('!libtool')
source=(ftp://ftp.example.com/pub/mypackage-0.1.tar.gz)
md5sums=('abcdefabcdef12345678901234567890')
build() {
cd "$startdir/src/${pkgname}-${pkgver}"
patch -p1 ${startdir}/patch
./configure --prefix=/usr
make
}
package() {
cd "${srcdir}"/${pkgname}-${pkgver}
./configure --prefix=/usr
make DESTDIR="$startdir/pkg" install
}
"""
test_valid = PkgbuildTest.valid_tests
def preSetUp(self):
self.rule = module.package
def test_example1(self):
# Example 1
r = self.run_on_pkg(self.pkgbuild1)
self.assertEqual(set(r.errors), set([
("file-referred-in-startdir", ()),
("use-pkgdir", ()),
("use-srcdir", ())
]))
self.assertEqual(r.warnings, [])
self.assertEqual(r.infos, [])
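		# The three expected errors map directly onto the PKGBUILD above:
		# referring to a file under $startdir, and using $startdir/pkg and
		# $startdir/src where $pkgdir and $srcdir are required.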
# vim: set ts=4 sw=4 noet:
| gpl-2.0 | -8,273,263,974,540,367,000 | 26.849315 | 72 | 0.706345 | false |
ZenDevelopmentSystems/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of
    labels satisfies 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
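# Minimal usage sketch (hypothetical data; any fitted clustering works):
#   from sklearn.cluster import KMeans
#   labels = KMeans(n_clusters=3).fit_predict(X)
#   score = silhouette_score(X, labels)  # mean coefficient, in [-1, 1]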
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of
    labels satisfies 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
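# Worked example: distances_row = [0., 2., 4.], labels = [0, 0, 0], i = 0
# gives mask = [False, True, True], so a = mean([2., 4.]) = 3.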
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause | -4,046,836,699,392,720,400 | 36.640909 | 80 | 0.660427 | false |
polyaxon/polyaxon | examples/quick-start/model.py | 1 | 4890 | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Dropout, MaxPooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras import optimizers
from polyaxon import tracking
from polyaxon.tracking.contrib.keras import PolyaxonCallback
OPTIMIZERS = {
'adam': optimizers.Adam,
'rmsprop': optimizers.RMSprop,
'sgd': optimizers.SGD,
}
def create_model(
conv1_size,
conv2_size,
dropout,
hidden1_size,
conv_activation,
dense_activation,
optimizer,
learning_rate,
loss,
num_classes,
):
model = Sequential()
model.add(Conv2D(conv1_size, (5, 5), activation=conv_activation,
input_shape=(img_width, img_height, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(conv2_size, (5, 5), activation=conv_activation))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(dropout))
model.add(Flatten())
model.add(Dense(hidden1_size, activation=dense_activation))
model.add(Dense(num_classes, activation='softmax'))
model.compile(
optimizer=OPTIMIZERS[optimizer](learning_rate=learning_rate),
loss=loss,
metrics=['accuracy'],
)
return model
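# Standalone sketch (values mirror the argparse defaults below; note that
# create_model reads the module-level img_width/img_height, so those globals
# must be defined before it is called):
#   model = create_model(conv1_size=32, conv2_size=64, dropout=0.2,
#                        hidden1_size=500, conv_activation='relu',
#                        dense_activation='relu', optimizer='adam',
#                        learning_rate=0.001,
#                        loss='categorical_crossentropy', num_classes=10)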
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--conv1_size',
type=int,
default=32)
parser.add_argument(
'--conv2_size',
type=int,
default=64
)
parser.add_argument(
'--dropout',
type=float,
default=0.2
)
parser.add_argument(
'--hidden1_size',
type=int,
default=500
)
parser.add_argument(
'--conv_activation',
type=str,
default="relu"
)
parser.add_argument(
'--dense_activation',
type=str,
default="relu"
)
parser.add_argument(
'--optimizer',
type=str,
default='adam'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.001
)
parser.add_argument(
'--epochs',
type=int,
default=10
)
parser.add_argument(
'--loss',
type=str,
default="categorical_crossentropy"
)
args = parser.parse_args()
img_width, img_height = 28, 28
# Data
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
labels = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
"Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
X_train = X_train.astype('float32')
X_train /= 255.
X_test = X_test.astype('float32')
X_test /= 255.
# reshape input data
X_train = X_train.reshape(X_train.shape[0], img_width, img_height, 1)
X_test = X_test.reshape(X_test.shape[0], img_width, img_height, 1)
# one hot encode outputs
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
num_classes = y_test.shape[1]
# Polyaxon
tracking.init()
tracking.log_data_ref(content=X_train, name='x_train')
tracking.log_data_ref(content=y_train, name='y_train')
tracking.log_data_ref(content=X_test, name='X_test')
tracking.log_data_ref(content=y_test, name='y_train')
plx_callback = PolyaxonCallback()
log_dir = tracking.get_tensorboard_path()
print("log_dir", log_dir)
print("model_dir", plx_callback.filepath)
# TF Model
model = create_model(
conv1_size=args.conv1_size,
conv2_size=args.conv2_size,
dropout=args.dropout,
hidden1_size=args.hidden1_size,
conv_activation=args.conv_activation,
dense_activation=args.dense_activation,
optimizer=args.optimizer,
learning_rate=args.learning_rate,
loss=args.loss,
num_classes=y_test.shape[1]
)
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=log_dir,
histogram_freq=1,
update_freq=100
)
model.fit(x=X_train,
y=y_train,
epochs=args.epochs,
validation_data=(X_test, y_test),
callbacks=[tensorboard_callback, plx_callback])
| apache-2.0 | -4,045,998,437,653,401,600 | 25.868132 | 100 | 0.62454 | false |
tomlof/scikit-learn | examples/svm/plot_custom_kernel.py | 93 | 1562 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
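# With M = diag(2, 1) this computes k(x, y) = 2*x1*y1 + x2*y2: a linear kernel
# that simply weights the first feature twice as heavily as the second.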
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired, edgecolors='k')
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause | 6,155,640,212,110,244,000 | 26.403509 | 76 | 0.597311 | false |
orione7/plugin.video.streamondemand-pureita | lib/gdata/tlslite/utils/cryptomath.py | 172 | 11559 | """cryptomath module
This module has basic math/crypto code."""
import os
import math
import base64
import binascii
import sha
from compat import *
# **************************************************************************
# Load Optional Modules
# **************************************************************************
# Try to load M2Crypto/OpenSSL
try:
from M2Crypto import m2
m2cryptoLoaded = True
except ImportError:
m2cryptoLoaded = False
# Try to load cryptlib
try:
import cryptlib_py
try:
cryptlib_py.cryptInit()
except cryptlib_py.CryptException, e:
#If tlslite and cryptoIDlib are both present,
#they might each try to re-initialize this,
#so we're tolerant of that.
if e[0] != cryptlib_py.CRYPT_ERROR_INITED:
raise
cryptlibpyLoaded = True
except ImportError:
cryptlibpyLoaded = False
#Try to load GMPY
try:
import gmpy
gmpyLoaded = True
except ImportError:
gmpyLoaded = False
#Try to load pycrypto
try:
import Crypto.Cipher.AES
pycryptoLoaded = True
except ImportError:
pycryptoLoaded = False
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Get os.urandom PRNG
try:
os.urandom(1)
def getRandomBytes(howMany):
return stringToBytes(os.urandom(howMany))
prngName = "os.urandom"
except:
# Else get cryptlib PRNG
if cryptlibpyLoaded:
def getRandomBytes(howMany):
randomKey = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED,
cryptlib_py.CRYPT_ALGO_AES)
cryptlib_py.cryptSetAttribute(randomKey,
cryptlib_py.CRYPT_CTXINFO_MODE,
cryptlib_py.CRYPT_MODE_OFB)
cryptlib_py.cryptGenerateKey(randomKey)
bytes = createByteArrayZeros(howMany)
cryptlib_py.cryptEncrypt(randomKey, bytes)
return bytes
prngName = "cryptlib"
else:
#Else get UNIX /dev/urandom PRNG
try:
devRandomFile = open("/dev/urandom", "rb")
def getRandomBytes(howMany):
return stringToBytes(devRandomFile.read(howMany))
prngName = "/dev/urandom"
except IOError:
#Else get Win32 CryptoAPI PRNG
try:
import win32prng
def getRandomBytes(howMany):
s = win32prng.getRandomBytes(howMany)
if len(s) != howMany:
raise AssertionError()
return stringToBytes(s)
prngName ="CryptoAPI"
except ImportError:
#Else no PRNG :-(
def getRandomBytes(howMany):
raise NotImplementedError("No Random Number Generator "\
"available.")
prngName = "None"
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(bytes):
total = 0L
multiplier = 1L
for count in range(len(bytes)-1, -1, -1):
byte = bytes[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToBytes(n):
howManyBytes = numBytes(n)
bytes = createByteArrayZeros(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
bytes[count] = int(n % 256)
n >>= 8
return bytes
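# Round-trip sketch: numberToBytes(258) gives the big-endian bytes [1, 2], and
# bytesToNumber([1, 2]) gives 258L, so the two functions invert each other.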
def bytesToBase64(bytes):
s = bytesToString(bytes)
return stringToBase64(s)
def base64ToBytes(s):
s = base64ToString(s)
return stringToBytes(s)
def numberToBase64(n):
bytes = numberToBytes(n)
return bytesToBase64(bytes)
def base64ToNumber(s):
bytes = base64ToBytes(s)
return bytesToNumber(bytes)
def stringToNumber(s):
bytes = stringToBytes(s)
return bytesToNumber(bytes)
def numberToString(s):
bytes = numberToBytes(s)
return bytesToString(bytes)
def base64ToString(s):
try:
return base64.decodestring(s)
except binascii.Error, e:
raise SyntaxError(e)
except binascii.Incomplete, e:
raise SyntaxError(e)
def stringToBase64(s):
return base64.encodestring(s).replace("\n", "")
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
raise AssertionError()
bytes = stringToBytes(mpi[4:])
return bytesToNumber(bytes)
def numberToMPI(n):
bytes = numberToBytes(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
bytes = concatArrays(createByteArrayZeros(4+ext), bytes)
bytes[0] = (length >> 24) & 0xFF
bytes[1] = (length >> 16) & 0xFF
bytes[2] = (length >> 8) & 0xFF
bytes[3] = length & 0xFF
return bytesToString(bytes)
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
def hashAndBase64(s):
return stringToBase64(sha.sha(s).digest())
def getBase64Nonce(numChars=22): #defaults to a 132-bit nonce
bytes = getRandomBytes(numChars)
bytesStr = "".join([chr(b) for b in bytes])
return stringToBase64(bytesStr)[:numChars]
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
#This will break when python division changes, but we can't use // cause
#of Jython
return (a * b) / gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
#This will break when python division changes, but we can't use //
#cause of Jython
q = d / c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
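# Example: invMod(3, 11) == 4 since (3 * 4) % 11 == 1, and invMod(2, 4) == 0
# because 2 has no inverse modulo 4 (gcd(2, 4) != 1).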
if gmpyLoaded:
def powMod(base, power, modulus):
base = gmpy.mpz(base)
power = gmpy.mpz(power)
modulus = gmpy.mpz(modulus)
result = pow(base, power, modulus)
return long(result)
else:
#Copied from Bryan G. Olson's post to comp.lang.python
#Does left-to-right instead of pow()'s right-to-left,
#thus about 30% faster than the python built-in with small bases
    def powMod(base, power, modulus):
        """Return base**power mod modulus, using multi-bit scanning
        with nBitScan bits at a time."""
        nBitScan = 5
#TREV - Added support for negative exponents
negativeResult = False
if (power < 0):
power *= -1
negativeResult = True
exp2 = 2**nBitScan
mask = exp2 - 1
# Break power into a list of digits of nBitScan bits.
# The list is recursive so easy to read in reverse direction.
nibbles = None
while power:
nibbles = int(power & mask), nibbles
power = power >> nBitScan
# Make a table of powers of base up to 2**nBitScan - 1
lowPowers = [1]
for i in xrange(1, exp2):
lowPowers.append((lowPowers[i-1] * base) % modulus)
# To exponentiate by the first nibble, look it up in the table
nib, nibbles = nibbles
prod = lowPowers[nib]
# For the rest, square nBitScan times, then multiply by
# base^nibble
while nibbles:
nib, nibbles = nibbles
for i in xrange(nBitScan):
prod = (prod * prod) % modulus
if nib: prod = (prod * lowPowers[nib]) % modulus
#TREV - Added support for negative exponents
if negativeResult:
prodInv = invMod(prod, modulus)
#Check to make sure the inverse is correct
if (prod * prodInv) % modulus != 1:
raise AssertionError()
return prodInv
return prod
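# Example: powMod(2, 10, 1000) == 24, matching the builtin pow(2, 10, 1000);
# a negative exponent returns the modular inverse of the positive power.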
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
sieve = range(n)
    for count in range(2, int(math.sqrt(n)) + 1): #inclusive, so squares such as 961 = 31**2 are crossed off
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
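# e.g. makeSieve(10) == [2, 3, 5, 7]; the inclusive bound above ensures that
# squares of the largest relevant prime (961 = 31**2 for n=1000) are removed.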
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print "*",
s, t = n-1, 0
while s % 2 == 0:
s, t = s/2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2L ** (bits-1)) * 3/2
high = 2L ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print ".",
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3/2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print ".",
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
| gpl-3.0 | -7,732,716,543,097,964,000 | 27.8975 | 82 | 0.536811 | false |
katsikas/gnuradio | gnuradio-core/src/lib/filter/generate_gr_fir_sysconfig.py | 17 | 3117 | #!/bin/env python
# -*- python -*-
#
# Copyright 2003,2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from generate_utils import *
# ----------------------------------------------------------------
def make_gr_fir_sysconfig_h ():
out = open_and_log_name ('gr_fir_sysconfig.h', 'w')
if not out:
return
out.write (copyright)
out.write (
'''
/*
* WARNING: This file is automatically generated by generate_gr_fir_sysconfig.py
* Any changes made to this file will be overwritten.
*/
#ifndef INCLUDED_GR_FIR_SYSCONFIG_H
#define INCLUDED_GR_FIR_SYSCONFIG_H
#include <gr_types.h>
''')
# for sig in fir_signatures:
# out.write ('class gr_fir_' + sig + ';\n')
out.write ('#include <gr_fir_util.h>\n')
out.write (
'''
/*!
* \\brief abstract base class for configuring the automatic selection of the
* fastest gr_fir for your platform.
*
* This is used internally by gr_fir_util.
*/
class gr_fir_sysconfig {
public:
virtual ~gr_fir_sysconfig ();
''')
for sig in fir_signatures:
out.write ((' virtual gr_fir_%s *create_gr_fir_%s (const std::vector<%s> &taps) = 0;\n' %
(sig, sig, tap_type (sig))))
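    # Illustrative output (assuming 'fff' appears in fir_signatures and
    # tap_type('fff') yields 'float'), the loop above emits lines such as:
    #   virtual gr_fir_fff *create_gr_fir_fff (const std::vector<float> &taps) = 0;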
out.write ('\n')
for sig in fir_signatures:
out.write ((' virtual void get_gr_fir_%s_info (std::vector<gr_fir_%s_info> *info) = 0;\n' %
(sig, sig)))
out.write (
'''
};
/*
* This returns the single instance of the appropriate derived class.
* This function must be defined only once in the system, and should be defined
* in the platform specific code.
*/
gr_fir_sysconfig *gr_fir_sysconfig_singleton ();
#endif /* INCLUDED_GR_FIR_SYSCONFIG_H */
''')
out.close ()
# ----------------------------------------------------------------
def make_gr_fir_sysconfig_cc ():
out = open_and_log_name ('gr_fir_sysconfig.cc', 'w')
if not out:
return
out.write (copyright)
out.write (
'''
/*
* WARNING: This file is automatically generated by generate_gr_fir_sysconfig.py
* Any changes made to this file will be overwritten.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <gr_fir_sysconfig.h>
gr_fir_sysconfig::~gr_fir_sysconfig ()
{
}
''')
out.close ()
# ----------------------------------------------------------------
def generate ():
make_gr_fir_sysconfig_h ()
make_gr_fir_sysconfig_cc ()
if __name__ == '__main__':
generate ()
| gpl-3.0 | 6,920,180,942,158,627,000 | 22.43609 | 98 | 0.616939 | false |
BartoszCichecki/onlinepython | onlinepython/pypy-2.4.0-win32/lib-python/2.7/test/test_float.py | 23 | 62308 |
import unittest, struct
import os
from test import test_support
import math
from math import isinf, isnan, copysign, ldexp
import operator
import random
import fractions
import sys
INF = float("inf")
NAN = float("nan")
have_getformat = hasattr(float, "__getformat__")
requires_getformat = unittest.skipUnless(have_getformat,
"requires __getformat__")
requires_setformat = unittest.skipUnless(hasattr(float, "__setformat__"),
"requires __setformat__")
# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(have_getformat and
float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
#locate file with float format test values
test_dir = os.path.dirname(__file__) or os.curdir
format_testfile = os.path.join(test_dir, 'formatfloat_testcases.txt')
class GeneralFloatCases(unittest.TestCase):
def test_float(self):
self.assertEqual(float(3.14), 3.14)
self.assertEqual(float(314), 314.0)
self.assertEqual(float(314L), 314.0)
self.assertEqual(float(" 3.14 "), 3.14)
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertRaises(ValueError, float, "++3.14")
self.assertRaises(ValueError, float, "+-3.14")
self.assertRaises(ValueError, float, "-+3.14")
self.assertRaises(ValueError, float, "--3.14")
# check that we don't accept alternate exponent markers
self.assertRaises(ValueError, float, "-1.7d29")
self.assertRaises(ValueError, float, "3D-14")
if test_support.have_unicode:
self.assertEqual(float(unicode(" 3.14 ")), 3.14)
self.assertEqual(float(unicode(" \u0663.\u0661\u0664 ",'raw-unicode-escape')), 3.14)
# extra long strings should no longer be a problem
# (in 2.6, long unicode inputs to float raised ValueError)
float('.' + '1'*1000)
float(unicode('.' + '1'*1000))
def check_conversion_to_int(self, x):
"""Check that int(x) has the correct value and type, for a float x."""
n = int(x)
if x >= 0.0:
# x >= 0 and n = int(x) ==> n <= x < n + 1
self.assertLessEqual(n, x)
self.assertLess(x, n + 1)
else:
# x < 0 and n = int(x) ==> n >= x > n - 1
self.assertGreaterEqual(n, x)
self.assertGreater(x, n - 1)
# Result should be an int if within range, else a long.
if -sys.maxint-1 <= n <= sys.maxint:
self.assertEqual(type(n), int)
else:
self.assertEqual(type(n), long)
# Double check.
self.assertEqual(type(int(n)), type(n))
def test_conversion_to_int(self):
# Check that floats within the range of an int convert to type
# int, not long. (issue #11144.)
boundary = float(sys.maxint + 1)
epsilon = 2**-sys.float_info.mant_dig * boundary
# These 2 floats are either side of the positive int/long boundary on
# both 32-bit and 64-bit systems.
self.check_conversion_to_int(boundary - epsilon)
self.check_conversion_to_int(boundary)
# These floats are either side of the negative long/int boundary on
# 64-bit systems...
self.check_conversion_to_int(-boundary - 2*epsilon)
self.check_conversion_to_int(-boundary)
# ... and these ones are either side of the negative long/int
# boundary on 32-bit systems.
self.check_conversion_to_int(-boundary - 1.0)
self.check_conversion_to_int(-boundary - 1.0 + 2*epsilon)
@test_support.run_with_locale('LC_NUMERIC', 'fr_FR', 'de_DE')
def test_float_with_comma(self):
# set locale to something that doesn't use '.' for the decimal point
# float must not accept the locale specific decimal point but
# it still has to accept the normal python syntax
import locale
if not locale.localeconv()['decimal_point'] == ',':
self.skipTest('decimal_point is not ","')
self.assertEqual(float(" 3.14 "), 3.14)
self.assertEqual(float("+3.14 "), 3.14)
self.assertEqual(float("-3.14 "), -3.14)
self.assertEqual(float(".14 "), .14)
self.assertEqual(float("3. "), 3.0)
self.assertEqual(float("3.e3 "), 3000.0)
self.assertEqual(float("3.2e3 "), 3200.0)
self.assertEqual(float("2.5e-1 "), 0.25)
self.assertEqual(float("5e-1"), 0.5)
self.assertRaises(ValueError, float, " 3,14 ")
self.assertRaises(ValueError, float, " +3,14 ")
self.assertRaises(ValueError, float, " -3,14 ")
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertEqual(float(" 25.e-1 "), 2.5)
self.assertEqual(test_support.fcmp(float(" .25e-1 "), .025), 0)
def test_floatconversion(self):
# Make sure that calls to __float__() work properly
class Foo0:
def __float__(self):
return 42.
class Foo1(object):
def __float__(self):
return 42.
class Foo2(float):
def __float__(self):
return 42.
class Foo3(float):
def __new__(cls, value=0.):
return float.__new__(cls, 2*value)
def __float__(self):
return self
class Foo4(float):
def __float__(self):
return 42
# Issue 5759: __float__ not called on str subclasses (though it is on
# unicode subclasses).
class FooStr(str):
def __float__(self):
return float(str(self)) + 1
class FooUnicode(unicode):
def __float__(self):
return float(unicode(self)) + 1
self.assertAlmostEqual(float(Foo0()), 42.)
self.assertAlmostEqual(float(Foo1()), 42.)
self.assertAlmostEqual(float(Foo2()), 42.)
self.assertAlmostEqual(float(Foo3(21)), 42.)
self.assertRaises(TypeError, float, Foo4(42))
self.assertAlmostEqual(float(FooUnicode('8')), 9.)
self.assertAlmostEqual(float(FooStr('8')), 9.)
def test_is_integer(self):
self.assertFalse((1.1).is_integer())
self.assertTrue((1.).is_integer())
self.assertFalse(float("nan").is_integer())
self.assertFalse(float("inf").is_integer())
def test_floatasratio(self):
for f, ratio in [
(0.875, (7, 8)),
(-0.875, (-7, 8)),
(0.0, (0, 1)),
(11.5, (23, 2)),
]:
self.assertEqual(f.as_integer_ratio(), ratio)
for i in range(10000):
f = random.random()
f *= 10 ** random.randint(-100, 100)
n, d = f.as_integer_ratio()
self.assertEqual(float(n).__truediv__(d), f)
R = fractions.Fraction
self.assertEqual(R(0, 1),
R(*float(0.0).as_integer_ratio()))
self.assertEqual(R(5, 2),
R(*float(2.5).as_integer_ratio()))
self.assertEqual(R(1, 2),
R(*float(0.5).as_integer_ratio()))
self.assertEqual(R(4728779608739021, 2251799813685248),
R(*float(2.1).as_integer_ratio()))
self.assertEqual(R(-4728779608739021, 2251799813685248),
R(*float(-2.1).as_integer_ratio()))
self.assertEqual(R(-2100, 1),
R(*float(-2100.0).as_integer_ratio()))
self.assertRaises(OverflowError, float('inf').as_integer_ratio)
self.assertRaises(OverflowError, float('-inf').as_integer_ratio)
self.assertRaises(ValueError, float('nan').as_integer_ratio)
def assertEqualAndEqualSign(self, a, b):
# fail unless a == b and a and b have the same sign bit;
# the only difference from assertEqual is that this test
# distinguishes -0.0 and 0.0.
self.assertEqual((a, copysign(1.0, a)), (b, copysign(1.0, b)))
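        # copysign(1.0, -0.0) == -1.0 even though 0.0 == -0.0, so comparing
        # (value, sign) pairs is what lets this helper tell the zeros apart.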
@requires_IEEE_754
def test_float_mod(self):
# Check behaviour of % operator for IEEE 754 special cases.
# In particular, check signs of zeros.
mod = operator.mod
self.assertEqualAndEqualSign(mod(-1.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(-1e-100, 1.0), 1.0)
self.assertEqualAndEqualSign(mod(-0.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(0.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(1e-100, 1.0), 1e-100)
self.assertEqualAndEqualSign(mod(1.0, 1.0), 0.0)
self.assertEqualAndEqualSign(mod(-1.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(-1e-100, -1.0), -1e-100)
self.assertEqualAndEqualSign(mod(-0.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(0.0, -1.0), -0.0)
self.assertEqualAndEqualSign(mod(1e-100, -1.0), -1.0)
self.assertEqualAndEqualSign(mod(1.0, -1.0), -0.0)
@requires_IEEE_754
def test_float_pow(self):
# test builtin pow and ** operator for IEEE 754 special cases.
# Special cases taken from section F.9.4.4 of the C99 specification
for pow_op in pow, operator.pow:
# x**NAN is NAN for any x except 1
self.assertTrue(isnan(pow_op(-INF, NAN)))
self.assertTrue(isnan(pow_op(-2.0, NAN)))
self.assertTrue(isnan(pow_op(-1.0, NAN)))
self.assertTrue(isnan(pow_op(-0.5, NAN)))
self.assertTrue(isnan(pow_op(-0.0, NAN)))
self.assertTrue(isnan(pow_op(0.0, NAN)))
self.assertTrue(isnan(pow_op(0.5, NAN)))
self.assertTrue(isnan(pow_op(2.0, NAN)))
self.assertTrue(isnan(pow_op(INF, NAN)))
self.assertTrue(isnan(pow_op(NAN, NAN)))
# NAN**y is NAN for any y except +-0
self.assertTrue(isnan(pow_op(NAN, -INF)))
self.assertTrue(isnan(pow_op(NAN, -2.0)))
self.assertTrue(isnan(pow_op(NAN, -1.0)))
self.assertTrue(isnan(pow_op(NAN, -0.5)))
self.assertTrue(isnan(pow_op(NAN, 0.5)))
self.assertTrue(isnan(pow_op(NAN, 1.0)))
self.assertTrue(isnan(pow_op(NAN, 2.0)))
self.assertTrue(isnan(pow_op(NAN, INF)))
# (+-0)**y raises ZeroDivisionError for y a negative odd integer
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -1.0)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -1.0)
# (+-0)**y raises ZeroDivisionError for y finite and negative
# but not an odd integer
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -2.0)
self.assertRaises(ZeroDivisionError, pow_op, -0.0, -0.5)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -2.0)
self.assertRaises(ZeroDivisionError, pow_op, 0.0, -0.5)
# (+-0)**y is +-0 for y a positive odd integer
self.assertEqualAndEqualSign(pow_op(-0.0, 1.0), -0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 1.0), 0.0)
# (+-0)**y is 0 for y finite and positive but not an odd integer
self.assertEqualAndEqualSign(pow_op(-0.0, 0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.0, 2.0), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, 2.0), 0.0)
# (-1)**+-inf is 1
self.assertEqualAndEqualSign(pow_op(-1.0, -INF), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, INF), 1.0)
# 1**y is 1 for any y, even if y is an infinity or nan
self.assertEqualAndEqualSign(pow_op(1.0, -INF), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -1.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.5), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.5), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 1.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, INF), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, NAN), 1.0)
# x**+-0 is 1 for any x, even if x is a zero, infinity, or nan
self.assertEqualAndEqualSign(pow_op(-INF, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.5, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.5, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(INF, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(NAN, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-INF, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.5, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-0.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(0.5, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(INF, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(NAN, -0.0), 1.0)
# x**y raises ValueError for finite negative x and non-integral y
self.assertRaises(ValueError, pow_op, -2.0, -0.5)
self.assertRaises(ValueError, pow_op, -2.0, 0.5)
self.assertRaises(ValueError, pow_op, -1.0, -0.5)
self.assertRaises(ValueError, pow_op, -1.0, 0.5)
self.assertRaises(ValueError, pow_op, -0.5, -0.5)
self.assertRaises(ValueError, pow_op, -0.5, 0.5)
# x**-INF is INF for abs(x) < 1
self.assertEqualAndEqualSign(pow_op(-0.5, -INF), INF)
self.assertEqualAndEqualSign(pow_op(-0.0, -INF), INF)
self.assertEqualAndEqualSign(pow_op(0.0, -INF), INF)
self.assertEqualAndEqualSign(pow_op(0.5, -INF), INF)
# x**-INF is 0 for abs(x) > 1
self.assertEqualAndEqualSign(pow_op(-INF, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(-2.0, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -INF), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -INF), 0.0)
# x**INF is 0 for abs(x) < 1
self.assertEqualAndEqualSign(pow_op(-0.5, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.0, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(0.0, INF), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, INF), 0.0)
# x**INF is INF for abs(x) > 1
self.assertEqualAndEqualSign(pow_op(-INF, INF), INF)
self.assertEqualAndEqualSign(pow_op(-2.0, INF), INF)
self.assertEqualAndEqualSign(pow_op(2.0, INF), INF)
self.assertEqualAndEqualSign(pow_op(INF, INF), INF)
# (-INF)**y is -0.0 for y a negative odd integer
self.assertEqualAndEqualSign(pow_op(-INF, -1.0), -0.0)
# (-INF)**y is 0.0 for y negative but not an odd integer
self.assertEqualAndEqualSign(pow_op(-INF, -0.5), 0.0)
self.assertEqualAndEqualSign(pow_op(-INF, -2.0), 0.0)
# (-INF)**y is -INF for y a positive odd integer
self.assertEqualAndEqualSign(pow_op(-INF, 1.0), -INF)
# (-INF)**y is INF for y positive but not an odd integer
self.assertEqualAndEqualSign(pow_op(-INF, 0.5), INF)
self.assertEqualAndEqualSign(pow_op(-INF, 2.0), INF)
# INF**y is INF for y positive
self.assertEqualAndEqualSign(pow_op(INF, 0.5), INF)
self.assertEqualAndEqualSign(pow_op(INF, 1.0), INF)
self.assertEqualAndEqualSign(pow_op(INF, 2.0), INF)
# INF**y is 0.0 for y negative
self.assertEqualAndEqualSign(pow_op(INF, -2.0), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -1.0), 0.0)
self.assertEqualAndEqualSign(pow_op(INF, -0.5), 0.0)
# basic checks not covered by the special cases above
self.assertEqualAndEqualSign(pow_op(-2.0, -2.0), 0.25)
self.assertEqualAndEqualSign(pow_op(-2.0, -1.0), -0.5)
self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 1.0), -2.0)
self.assertEqualAndEqualSign(pow_op(-2.0, 2.0), 4.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -1.0), -1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 1.0), -1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 2.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2.0), 0.25)
self.assertEqualAndEqualSign(pow_op(2.0, -1.0), 0.5)
self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0)
self.assertEqualAndEqualSign(pow_op(2.0, 1.0), 2.0)
self.assertEqualAndEqualSign(pow_op(2.0, 2.0), 4.0)
# 1 ** large and -1 ** large; some libms apparently
# have problems with these
self.assertEqualAndEqualSign(pow_op(1.0, -1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(1.0, 1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, -1e100), 1.0)
self.assertEqualAndEqualSign(pow_op(-1.0, 1e100), 1.0)
# check sign for results that underflow to 0
self.assertEqualAndEqualSign(pow_op(-2.0, -2000.0), 0.0)
self.assertRaises(ValueError, pow_op, -2.0, -2000.5)
self.assertEqualAndEqualSign(pow_op(-2.0, -2001.0), -0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2000.0), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2000.5), 0.0)
self.assertEqualAndEqualSign(pow_op(2.0, -2001.0), 0.0)
self.assertEqualAndEqualSign(pow_op(-0.5, 2000.0), 0.0)
self.assertRaises(ValueError, pow_op, -0.5, 2000.5)
self.assertEqualAndEqualSign(pow_op(-0.5, 2001.0), -0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2000.0), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2000.5), 0.0)
self.assertEqualAndEqualSign(pow_op(0.5, 2001.0), 0.0)
# check we don't raise an exception for subnormal results,
# and validate signs. Tests currently disabled, since
# they fail on systems where a subnormal result from pow
# is flushed to zero (e.g. Debian/ia64.)
#self.assertTrue(0.0 < pow_op(0.5, 1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(-0.5, 1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(0.5, 1047) < 1e-315)
#self.assertTrue(0.0 > pow_op(-0.5, 1047) > -1e-315)
#self.assertTrue(0.0 < pow_op(2.0, -1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(-2.0, -1048) < 1e-315)
#self.assertTrue(0.0 < pow_op(2.0, -1047) < 1e-315)
#self.assertTrue(0.0 > pow_op(-2.0, -1047) > -1e-315)
@requires_setformat
class FormatFunctionsTestCase(unittest.TestCase):
def setUp(self):
self.save_formats = {'double':float.__getformat__('double'),
'float':float.__getformat__('float')}
def tearDown(self):
float.__setformat__('double', self.save_formats['double'])
float.__setformat__('float', self.save_formats['float'])
def test_getformat(self):
self.assertIn(float.__getformat__('double'),
['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
self.assertIn(float.__getformat__('float'),
['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
self.assertRaises(ValueError, float.__getformat__, 'chicken')
self.assertRaises(TypeError, float.__getformat__, 1)
def test_setformat(self):
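        # float.__setformat__ accepts 'unknown' for either float type, or a
        # re-assertion of the format originally detected for the platform;
        # every other combination must raise ValueError.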
for t in 'double', 'float':
float.__setformat__(t, 'unknown')
if self.save_formats[t] == 'IEEE, big-endian':
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, little-endian')
elif self.save_formats[t] == 'IEEE, little-endian':
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, big-endian')
else:
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, big-endian')
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, little-endian')
self.assertRaises(ValueError, float.__setformat__,
t, 'chicken')
self.assertRaises(ValueError, float.__setformat__,
'chicken', 'unknown')
BE_DOUBLE_INF = '\x7f\xf0\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_INF = ''.join(reversed(BE_DOUBLE_INF))
BE_DOUBLE_NAN = '\x7f\xf8\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_NAN = ''.join(reversed(BE_DOUBLE_NAN))
BE_FLOAT_INF = '\x7f\x80\x00\x00'
LE_FLOAT_INF = ''.join(reversed(BE_FLOAT_INF))
BE_FLOAT_NAN = '\x7f\xc0\x00\x00'
LE_FLOAT_NAN = ''.join(reversed(BE_FLOAT_NAN))
# on non-IEEE platforms, attempting to unpack a bit pattern
# representing an infinity or a NaN should raise an exception.
@requires_setformat
class UnknownFormatTestCase(unittest.TestCase):
def setUp(self):
self.save_formats = {'double':float.__getformat__('double'),
'float':float.__getformat__('float')}
float.__setformat__('double', 'unknown')
float.__setformat__('float', 'unknown')
def tearDown(self):
float.__setformat__('double', self.save_formats['double'])
float.__setformat__('float', self.save_formats['float'])
def test_double_specials_dont_unpack(self):
for fmt, data in [('>d', BE_DOUBLE_INF),
('>d', BE_DOUBLE_NAN),
('<d', LE_DOUBLE_INF),
('<d', LE_DOUBLE_NAN)]:
self.assertRaises(ValueError, struct.unpack, fmt, data)
def test_float_specials_dont_unpack(self):
for fmt, data in [('>f', BE_FLOAT_INF),
('>f', BE_FLOAT_NAN),
('<f', LE_FLOAT_INF),
('<f', LE_FLOAT_NAN)]:
self.assertRaises(ValueError, struct.unpack, fmt, data)
# on an IEEE platform, all we guarantee is that bit patterns
# representing infinities or NaNs do not raise an exception; all else
# is accident (today).
# let's also try to guarantee that -0.0 and 0.0 don't get confused.
class IEEEFormatTestCase(unittest.TestCase):
@requires_IEEE_754
def test_double_specials_do_unpack(self):
for fmt, data in [('>d', BE_DOUBLE_INF),
('>d', BE_DOUBLE_NAN),
('<d', LE_DOUBLE_INF),
('<d', LE_DOUBLE_NAN)]:
struct.unpack(fmt, data)
@requires_IEEE_754
def test_float_specials_do_unpack(self):
for fmt, data in [('>f', BE_FLOAT_INF),
('>f', BE_FLOAT_NAN),
('<f', LE_FLOAT_INF),
('<f', LE_FLOAT_NAN)]:
struct.unpack(fmt, data)
@requires_IEEE_754
def test_negative_zero(self):
def pos_pos():
return 0.0, math.atan2(0.0, -1)
def pos_neg():
return 0.0, math.atan2(-0.0, -1)
def neg_pos():
return -0.0, math.atan2(0.0, -1)
def neg_neg():
return -0.0, math.atan2(-0.0, -1)
self.assertEqual(pos_pos(), neg_pos())
self.assertEqual(pos_neg(), neg_neg())
@requires_IEEE_754
def test_underflow_sign(self):
# check that -1e-1000 gives -0.0, not 0.0
self.assertEqual(math.atan2(-1e-1000, -1), math.atan2(-0.0, -1))
self.assertEqual(math.atan2(float('-1e-1000'), -1),
math.atan2(-0.0, -1))
def test_format(self):
# these should be rewritten to use both format(x, spec) and
# x.__format__(spec)
self.assertEqual(format(0.0, 'f'), '0.000000')
# the default is 'g', except for empty format spec
self.assertEqual(format(0.0, ''), '0.0')
self.assertEqual(format(0.01, ''), '0.01')
self.assertEqual(format(0.01, 'g'), '0.01')
# empty presentation type should format in the same way as str
# (issue 5920)
x = 100/7.
self.assertEqual(format(x, ''), str(x))
self.assertEqual(format(x, '-'), str(x))
self.assertEqual(format(x, '>'), str(x))
self.assertEqual(format(x, '2'), str(x))
self.assertEqual(format(1.0, 'f'), '1.000000')
self.assertEqual(format(-1.0, 'f'), '-1.000000')
self.assertEqual(format( 1.0, ' f'), ' 1.000000')
self.assertEqual(format(-1.0, ' f'), '-1.000000')
self.assertEqual(format( 1.0, '+f'), '+1.000000')
self.assertEqual(format(-1.0, '+f'), '-1.000000')
# % formatting
self.assertEqual(format(-1.0, '%'), '-100.000000%')
# conversion to string should fail
self.assertRaises(ValueError, format, 3.0, "s")
# other format specifiers shouldn't work on floats,
# in particular int specifiers
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
            if format_spec not in 'eEfFgGn%':
self.assertRaises(ValueError, format, 0.0, format_spec)
self.assertRaises(ValueError, format, 1.0, format_spec)
self.assertRaises(ValueError, format, -1.0, format_spec)
self.assertRaises(ValueError, format, 1e100, format_spec)
self.assertRaises(ValueError, format, -1e100, format_spec)
self.assertRaises(ValueError, format, 1e-100, format_spec)
self.assertRaises(ValueError, format, -1e-100, format_spec)
# issue 3382: 'f' and 'F' with inf's and nan's
self.assertEqual('{0:f}'.format(INF), 'inf')
self.assertEqual('{0:F}'.format(INF), 'INF')
self.assertEqual('{0:f}'.format(-INF), '-inf')
self.assertEqual('{0:F}'.format(-INF), '-INF')
self.assertEqual('{0:f}'.format(NAN), 'nan')
self.assertEqual('{0:F}'.format(NAN), 'NAN')
@requires_IEEE_754
def test_format_testfile(self):
        with open(format_testfile) as testfile:
            for line in testfile:
if line.startswith('--'):
continue
line = line.strip()
if not line:
continue
lhs, rhs = map(str.strip, line.split('->'))
fmt, arg = lhs.split()
arg = float(arg)
self.assertEqual(fmt % arg, rhs)
if not math.isnan(arg) and copysign(1.0, arg) > 0.0:
self.assertEqual(fmt % -arg, '-' + rhs)
def test_issue5864(self):
self.assertEqual(format(123.456, '.4'), '123.5')
self.assertEqual(format(1234.56, '.4'), '1.235e+03')
self.assertEqual(format(12345.6, '.4'), '1.235e+04')
class ReprTestCase(unittest.TestCase):
def test_repr(self):
floats_file = open(os.path.join(os.path.split(__file__)[0],
'floating_points.txt'))
for line in floats_file:
line = line.strip()
if not line or line.startswith('#'):
continue
v = eval(line)
self.assertEqual(v, eval(repr(v)))
floats_file.close()
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"applies only when using short float repr style")
def test_short_repr(self):
# test short float repr introduced in Python 3.1. One aspect
# of this repr is that we get some degree of str -> float ->
# str roundtripping. In particular, for any numeric string
# containing 15 or fewer significant digits, those exact same
# digits (modulo trailing zeros) should appear in the output.
# No more repr(0.03) -> "0.029999999999999999"!
test_strings = [
# output always includes *either* a decimal point and at
# least one digit after that point, or an exponent.
'0.0',
'1.0',
'0.01',
'0.02',
'0.03',
'0.04',
'0.05',
'1.23456789',
'10.0',
'100.0',
# values >= 1e16 get an exponent...
'1000000000000000.0',
'9999999999999990.0',
'1e+16',
'1e+17',
# ... and so do values < 1e-4
'0.001',
'0.001001',
'0.00010000000000001',
'0.0001',
'9.999999999999e-05',
'1e-05',
# values designed to provoke failure if the FPU rounding
# precision isn't set correctly
'8.72293771110361e+25',
'7.47005307342313e+26',
'2.86438000439698e+28',
'8.89142905246179e+28',
'3.08578087079232e+35',
]
for s in test_strings:
negs = '-'+s
self.assertEqual(s, repr(float(s)))
self.assertEqual(negs, repr(float(negs)))
@requires_IEEE_754
class RoundTestCase(unittest.TestCase):
def test_second_argument_type(self):
# any type with an __index__ method should be permitted as
# a second argument
self.assertAlmostEqual(round(12.34, True), 12.3)
class MyIndex(object):
def __index__(self): return 4
self.assertAlmostEqual(round(-0.123456, MyIndex()), -0.1235)
# but floats should be illegal
self.assertRaises(TypeError, round, 3.14159, 2.0)
def test_inf_nan(self):
# rounding an infinity or nan returns the same number;
# (in py3k, rounding an infinity or nan raises an error,
# since the result can't be represented as a long).
self.assertEqual(round(INF), INF)
self.assertEqual(round(-INF), -INF)
self.assertTrue(math.isnan(round(NAN)))
for n in range(-5, 5):
self.assertEqual(round(INF, n), INF)
self.assertEqual(round(-INF, n), -INF)
self.assertTrue(math.isnan(round(NAN, n)))
self.assertRaises(TypeError, round, INF, 0.0)
self.assertRaises(TypeError, round, -INF, 1.0)
self.assertRaises(TypeError, round, NAN, "ceci n'est pas un integer")
self.assertRaises(TypeError, round, -0.0, 1j)
def test_large_n(self):
for n in [324, 325, 400, 2**31-1, 2**31, 2**32, 2**100]:
self.assertEqual(round(123.456, n), 123.456)
self.assertEqual(round(-123.456, n), -123.456)
self.assertEqual(round(1e300, n), 1e300)
self.assertEqual(round(1e-320, n), 1e-320)
self.assertEqual(round(1e150, 300), 1e150)
self.assertEqual(round(1e300, 307), 1e300)
self.assertEqual(round(-3.1415, 308), -3.1415)
self.assertEqual(round(1e150, 309), 1e150)
self.assertEqual(round(1.4e-315, 315), 1e-315)
def test_small_n(self):
for n in [-308, -309, -400, 1-2**31, -2**31, -2**31-1, -2**100]:
self.assertEqual(round(123.456, n), 0.0)
self.assertEqual(round(-123.456, n), -0.0)
self.assertEqual(round(1e300, n), 0.0)
self.assertEqual(round(1e-320, n), 0.0)
def test_overflow(self):
self.assertRaises(OverflowError, round, 1.6e308, -308)
self.assertRaises(OverflowError, round, -1.7e308, -308)
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"test applies only when using short float repr style")
def test_previous_round_bugs(self):
# particular cases that have occurred in bug reports
self.assertEqual(round(562949953421312.5, 1),
562949953421312.5)
self.assertEqual(round(56294995342131.5, 3),
56294995342131.5)
@unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
"test applies only when using short float repr style")
def test_halfway_cases(self):
# Halfway cases need special attention, since the current
# implementation has to deal with them specially. Note that
# 2.x rounds halfway values up (i.e., away from zero) while
# 3.x does round-half-to-even.
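        # Concretely: round(0.5) == 1.0 and round(2.5) == 3.0 here, whereas
        # Python 3 gives round(0.5) == 0 and round(2.5) == 2.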
self.assertAlmostEqual(round(0.125, 2), 0.13)
self.assertAlmostEqual(round(0.375, 2), 0.38)
self.assertAlmostEqual(round(0.625, 2), 0.63)
self.assertAlmostEqual(round(0.875, 2), 0.88)
self.assertAlmostEqual(round(-0.125, 2), -0.13)
self.assertAlmostEqual(round(-0.375, 2), -0.38)
self.assertAlmostEqual(round(-0.625, 2), -0.63)
self.assertAlmostEqual(round(-0.875, 2), -0.88)
self.assertAlmostEqual(round(0.25, 1), 0.3)
self.assertAlmostEqual(round(0.75, 1), 0.8)
self.assertAlmostEqual(round(-0.25, 1), -0.3)
self.assertAlmostEqual(round(-0.75, 1), -0.8)
self.assertEqual(round(-6.5, 0), -7.0)
self.assertEqual(round(-5.5, 0), -6.0)
self.assertEqual(round(-1.5, 0), -2.0)
self.assertEqual(round(-0.5, 0), -1.0)
self.assertEqual(round(0.5, 0), 1.0)
self.assertEqual(round(1.5, 0), 2.0)
self.assertEqual(round(2.5, 0), 3.0)
self.assertEqual(round(3.5, 0), 4.0)
self.assertEqual(round(4.5, 0), 5.0)
self.assertEqual(round(5.5, 0), 6.0)
self.assertEqual(round(6.5, 0), 7.0)
# same but without an explicit second argument; in 3.x these
# will give integers
self.assertEqual(round(-6.5), -7.0)
self.assertEqual(round(-5.5), -6.0)
self.assertEqual(round(-1.5), -2.0)
self.assertEqual(round(-0.5), -1.0)
self.assertEqual(round(0.5), 1.0)
self.assertEqual(round(1.5), 2.0)
self.assertEqual(round(2.5), 3.0)
self.assertEqual(round(3.5), 4.0)
self.assertEqual(round(4.5), 5.0)
self.assertEqual(round(5.5), 6.0)
self.assertEqual(round(6.5), 7.0)
self.assertEqual(round(-25.0, -1), -30.0)
self.assertEqual(round(-15.0, -1), -20.0)
self.assertEqual(round(-5.0, -1), -10.0)
self.assertEqual(round(5.0, -1), 10.0)
self.assertEqual(round(15.0, -1), 20.0)
self.assertEqual(round(25.0, -1), 30.0)
self.assertEqual(round(35.0, -1), 40.0)
self.assertEqual(round(45.0, -1), 50.0)
self.assertEqual(round(55.0, -1), 60.0)
self.assertEqual(round(65.0, -1), 70.0)
self.assertEqual(round(75.0, -1), 80.0)
self.assertEqual(round(85.0, -1), 90.0)
self.assertEqual(round(95.0, -1), 100.0)
self.assertEqual(round(12325.0, -1), 12330.0)
self.assertEqual(round(350.0, -2), 400.0)
self.assertEqual(round(450.0, -2), 500.0)
self.assertAlmostEqual(round(0.5e21, -21), 1e21)
self.assertAlmostEqual(round(1.5e21, -21), 2e21)
self.assertAlmostEqual(round(2.5e21, -21), 3e21)
self.assertAlmostEqual(round(5.5e21, -21), 6e21)
self.assertAlmostEqual(round(8.5e21, -21), 9e21)
self.assertAlmostEqual(round(-1.5e22, -22), -2e22)
self.assertAlmostEqual(round(-0.5e22, -22), -1e22)
self.assertAlmostEqual(round(0.5e22, -22), 1e22)
self.assertAlmostEqual(round(1.5e22, -22), 2e22)
@requires_IEEE_754
def test_format_specials(self):
# Test formatting of nans and infs.
def test(fmt, value, expected):
# Test with both % and format().
self.assertEqual(fmt % value, expected, fmt)
            if '#' not in fmt:
# Until issue 7094 is implemented, format() for floats doesn't
# support '#' formatting
fmt = fmt[1:] # strip off the %
self.assertEqual(format(value, fmt), expected, fmt)
for fmt in ['%e', '%f', '%g', '%.0e', '%.6f', '%.20g',
'%#e', '%#f', '%#g', '%#.20e', '%#.15f', '%#.3g']:
pfmt = '%+' + fmt[1:]
sfmt = '% ' + fmt[1:]
test(fmt, INF, 'inf')
test(fmt, -INF, '-inf')
test(fmt, NAN, 'nan')
test(fmt, -NAN, 'nan')
# When asking for a sign, it's always provided. nans are
# always positive.
test(pfmt, INF, '+inf')
test(pfmt, -INF, '-inf')
test(pfmt, NAN, '+nan')
test(pfmt, -NAN, '+nan')
# When using ' ' for a sign code, only infs can be negative.
# Others have a space.
test(sfmt, INF, ' inf')
test(sfmt, -INF, '-inf')
test(sfmt, NAN, ' nan')
test(sfmt, -NAN, ' nan')
# Beginning with Python 2.6, float has cross-platform compatible ways
# to create and represent inf and nan
class InfNanTest(unittest.TestCase):
def test_inf_from_str(self):
self.assertTrue(isinf(float("inf")))
self.assertTrue(isinf(float("+inf")))
self.assertTrue(isinf(float("-inf")))
self.assertTrue(isinf(float("infinity")))
self.assertTrue(isinf(float("+infinity")))
self.assertTrue(isinf(float("-infinity")))
self.assertEqual(repr(float("inf")), "inf")
self.assertEqual(repr(float("+inf")), "inf")
self.assertEqual(repr(float("-inf")), "-inf")
self.assertEqual(repr(float("infinity")), "inf")
self.assertEqual(repr(float("+infinity")), "inf")
self.assertEqual(repr(float("-infinity")), "-inf")
self.assertEqual(repr(float("INF")), "inf")
self.assertEqual(repr(float("+Inf")), "inf")
self.assertEqual(repr(float("-iNF")), "-inf")
self.assertEqual(repr(float("Infinity")), "inf")
self.assertEqual(repr(float("+iNfInItY")), "inf")
self.assertEqual(repr(float("-INFINITY")), "-inf")
self.assertEqual(str(float("inf")), "inf")
self.assertEqual(str(float("+inf")), "inf")
self.assertEqual(str(float("-inf")), "-inf")
self.assertEqual(str(float("infinity")), "inf")
self.assertEqual(str(float("+infinity")), "inf")
self.assertEqual(str(float("-infinity")), "-inf")
self.assertRaises(ValueError, float, "info")
self.assertRaises(ValueError, float, "+info")
self.assertRaises(ValueError, float, "-info")
self.assertRaises(ValueError, float, "in")
self.assertRaises(ValueError, float, "+in")
self.assertRaises(ValueError, float, "-in")
self.assertRaises(ValueError, float, "infinit")
self.assertRaises(ValueError, float, "+Infin")
self.assertRaises(ValueError, float, "-INFI")
self.assertRaises(ValueError, float, "infinitys")
def test_inf_as_str(self):
self.assertEqual(repr(1e300 * 1e300), "inf")
self.assertEqual(repr(-1e300 * 1e300), "-inf")
self.assertEqual(str(1e300 * 1e300), "inf")
self.assertEqual(str(-1e300 * 1e300), "-inf")
def test_nan_from_str(self):
self.assertTrue(isnan(float("nan")))
self.assertTrue(isnan(float("+nan")))
self.assertTrue(isnan(float("-nan")))
self.assertEqual(repr(float("nan")), "nan")
self.assertEqual(repr(float("+nan")), "nan")
self.assertEqual(repr(float("-nan")), "nan")
self.assertEqual(repr(float("NAN")), "nan")
self.assertEqual(repr(float("+NAn")), "nan")
self.assertEqual(repr(float("-NaN")), "nan")
self.assertEqual(str(float("nan")), "nan")
self.assertEqual(str(float("+nan")), "nan")
self.assertEqual(str(float("-nan")), "nan")
self.assertRaises(ValueError, float, "nana")
self.assertRaises(ValueError, float, "+nana")
self.assertRaises(ValueError, float, "-nana")
self.assertRaises(ValueError, float, "na")
self.assertRaises(ValueError, float, "+na")
self.assertRaises(ValueError, float, "-na")
def test_nan_as_str(self):
self.assertEqual(repr(1e300 * 1e300 * 0), "nan")
self.assertEqual(repr(-1e300 * 1e300 * 0), "nan")
self.assertEqual(str(1e300 * 1e300 * 0), "nan")
self.assertEqual(str(-1e300 * 1e300 * 0), "nan")
def notest_float_nan(self):
self.assertTrue(NAN.is_nan())
self.assertFalse(INF.is_nan())
self.assertFalse((0.).is_nan())
def notest_float_inf(self):
self.assertTrue(INF.is_inf())
self.assertFalse(NAN.is_inf())
self.assertFalse((0.).is_inf())
def test_hash_inf(self):
# the actual values here should be regarded as an
# implementation detail, but they need to be
# identical to those used in the Decimal module.
self.assertEqual(hash(float('inf')), 314159)
self.assertEqual(hash(float('-inf')), -271828)
self.assertEqual(hash(float('nan')), 0)
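        # For reference (assumed, not exercised here): the Decimal module
        # uses the same sentinels, e.g. hash(Decimal('Infinity')) == 314159.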
fromHex = float.fromhex
toHex = float.hex
class HexFloatTestCase(unittest.TestCase):
MAX = fromHex('0x.fffffffffffff8p+1024') # max normal
MIN = fromHex('0x1p-1022') # min normal
TINY = fromHex('0x0.0000000000001p-1022') # min subnormal
EPS = fromHex('0x0.0000000000001p0') # diff between 1.0 and next float up
def identical(self, x, y):
# check that floats x and y are identical, or that both
# are NaNs
if isnan(x) or isnan(y):
if isnan(x) == isnan(y):
return
elif x == y and (x != 0.0 or copysign(1.0, x) == copysign(1.0, y)):
return
self.fail('%r not identical to %r' % (x, y))
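        # For example, identical(0.0, -0.0) fails (the sign bits differ),
        # while identical(NAN, NAN) passes even though NAN != NAN.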
def test_ends(self):
self.identical(self.MIN, ldexp(1.0, -1022))
self.identical(self.TINY, ldexp(1.0, -1074))
self.identical(self.EPS, ldexp(1.0, -52))
self.identical(self.MAX, 2.*(ldexp(1.0, 1023) - ldexp(1.0, 970)))
def test_invalid_inputs(self):
invalid_inputs = [
'infi', # misspelt infinities and nans
'-Infinit',
'++inf',
'-+Inf',
'--nan',
'+-NaN',
'snan',
'NaNs',
'nna',
'an',
'nf',
'nfinity',
'inity',
'iinity',
'0xnan',
'',
' ',
'x1.0p0',
'0xX1.0p0',
'+ 0x1.0p0', # internal whitespace
'- 0x1.0p0',
'0 x1.0p0',
'0x 1.0p0',
'0x1 2.0p0',
'+0x1 .0p0',
'0x1. 0p0',
'-0x1.0 1p0',
'-0x1.0 p0',
'+0x1.0p +0',
'0x1.0p -0',
'0x1.0p 0',
'+0x1.0p+ 0',
'-0x1.0p- 0',
'++0x1.0p-0', # double signs
'--0x1.0p0',
'+-0x1.0p+0',
'-+0x1.0p0',
'0x1.0p++0',
'+0x1.0p+-0',
'-0x1.0p-+0',
'0x1.0p--0',
'0x1.0.p0',
'0x.p0', # no hex digits before or after point
'0x1,p0', # wrong decimal point character
'0x1pa',
u'0x1p\uff10', # fullwidth Unicode digits
u'\uff10x1p0',
u'0x\uff11p0',
u'0x1.\uff10p0',
'0x1p0 \n 0x2p0',
'0x1p0\0 0x1p0', # embedded null byte is not end of string
]
for x in invalid_inputs:
try:
result = fromHex(x)
except ValueError:
pass
else:
self.fail('Expected float.fromhex(%r) to raise ValueError; '
'got %r instead' % (x, result))
def test_whitespace(self):
value_pairs = [
('inf', INF),
('-Infinity', -INF),
('nan', NAN),
('1.0', 1.0),
('-0x.2', -0.125),
('-0.0', -0.0)
]
whitespace = [
'',
' ',
'\t',
'\n',
'\n \t',
'\f',
'\v',
'\r'
]
for inp, expected in value_pairs:
for lead in whitespace:
for trail in whitespace:
got = fromHex(lead + inp + trail)
self.identical(got, expected)
def test_from_hex(self):
        MIN = self.MIN
        MAX = self.MAX
        TINY = self.TINY
        EPS = self.EPS
# two spellings of infinity, with optional signs; case-insensitive
self.identical(fromHex('inf'), INF)
self.identical(fromHex('+Inf'), INF)
self.identical(fromHex('-INF'), -INF)
self.identical(fromHex('iNf'), INF)
self.identical(fromHex('Infinity'), INF)
self.identical(fromHex('+INFINITY'), INF)
self.identical(fromHex('-infinity'), -INF)
self.identical(fromHex('-iNFiNitY'), -INF)
# nans with optional sign; case insensitive
self.identical(fromHex('nan'), NAN)
self.identical(fromHex('+NaN'), NAN)
self.identical(fromHex('-NaN'), NAN)
self.identical(fromHex('-nAN'), NAN)
# variations in input format
self.identical(fromHex('1'), 1.0)
self.identical(fromHex('+1'), 1.0)
self.identical(fromHex('1.'), 1.0)
self.identical(fromHex('1.0'), 1.0)
self.identical(fromHex('1.0p0'), 1.0)
self.identical(fromHex('01'), 1.0)
self.identical(fromHex('01.'), 1.0)
self.identical(fromHex('0x1'), 1.0)
self.identical(fromHex('0x1.'), 1.0)
self.identical(fromHex('0x1.0'), 1.0)
self.identical(fromHex('+0x1.0'), 1.0)
self.identical(fromHex('0x1p0'), 1.0)
self.identical(fromHex('0X1p0'), 1.0)
self.identical(fromHex('0X1P0'), 1.0)
self.identical(fromHex('0x1P0'), 1.0)
self.identical(fromHex('0x1.p0'), 1.0)
self.identical(fromHex('0x1.0p0'), 1.0)
self.identical(fromHex('0x.1p4'), 1.0)
self.identical(fromHex('0x.1p04'), 1.0)
self.identical(fromHex('0x.1p004'), 1.0)
self.identical(fromHex('0x1p+0'), 1.0)
self.identical(fromHex('0x1P-0'), 1.0)
self.identical(fromHex('+0x1p0'), 1.0)
self.identical(fromHex('0x01p0'), 1.0)
self.identical(fromHex('0x1p00'), 1.0)
self.identical(fromHex(u'0x1p0'), 1.0)
self.identical(fromHex(' 0x1p0 '), 1.0)
self.identical(fromHex('\n 0x1p0'), 1.0)
self.identical(fromHex('0x1p0 \t'), 1.0)
self.identical(fromHex('0xap0'), 10.0)
self.identical(fromHex('0xAp0'), 10.0)
self.identical(fromHex('0xaP0'), 10.0)
self.identical(fromHex('0xAP0'), 10.0)
self.identical(fromHex('0xbep0'), 190.0)
self.identical(fromHex('0xBep0'), 190.0)
self.identical(fromHex('0xbEp0'), 190.0)
self.identical(fromHex('0XBE0P-4'), 190.0)
self.identical(fromHex('0xBEp0'), 190.0)
self.identical(fromHex('0xB.Ep4'), 190.0)
self.identical(fromHex('0x.BEp8'), 190.0)
self.identical(fromHex('0x.0BEp12'), 190.0)
# moving the point around
pi = fromHex('0x1.921fb54442d18p1')
self.identical(fromHex('0x.006487ed5110b46p11'), pi)
self.identical(fromHex('0x.00c90fdaa22168cp10'), pi)
self.identical(fromHex('0x.01921fb54442d18p9'), pi)
self.identical(fromHex('0x.03243f6a8885a3p8'), pi)
self.identical(fromHex('0x.06487ed5110b46p7'), pi)
self.identical(fromHex('0x.0c90fdaa22168cp6'), pi)
self.identical(fromHex('0x.1921fb54442d18p5'), pi)
self.identical(fromHex('0x.3243f6a8885a3p4'), pi)
self.identical(fromHex('0x.6487ed5110b46p3'), pi)
self.identical(fromHex('0x.c90fdaa22168cp2'), pi)
self.identical(fromHex('0x1.921fb54442d18p1'), pi)
self.identical(fromHex('0x3.243f6a8885a3p0'), pi)
self.identical(fromHex('0x6.487ed5110b46p-1'), pi)
self.identical(fromHex('0xc.90fdaa22168cp-2'), pi)
self.identical(fromHex('0x19.21fb54442d18p-3'), pi)
self.identical(fromHex('0x32.43f6a8885a3p-4'), pi)
self.identical(fromHex('0x64.87ed5110b46p-5'), pi)
self.identical(fromHex('0xc9.0fdaa22168cp-6'), pi)
self.identical(fromHex('0x192.1fb54442d18p-7'), pi)
self.identical(fromHex('0x324.3f6a8885a3p-8'), pi)
self.identical(fromHex('0x648.7ed5110b46p-9'), pi)
self.identical(fromHex('0xc90.fdaa22168cp-10'), pi)
self.identical(fromHex('0x1921.fb54442d18p-11'), pi)
# ...
self.identical(fromHex('0x1921fb54442d1.8p-47'), pi)
self.identical(fromHex('0x3243f6a8885a3p-48'), pi)
self.identical(fromHex('0x6487ed5110b46p-49'), pi)
self.identical(fromHex('0xc90fdaa22168cp-50'), pi)
self.identical(fromHex('0x1921fb54442d18p-51'), pi)
self.identical(fromHex('0x3243f6a8885a30p-52'), pi)
self.identical(fromHex('0x6487ed5110b460p-53'), pi)
self.identical(fromHex('0xc90fdaa22168c0p-54'), pi)
self.identical(fromHex('0x1921fb54442d180p-55'), pi)
# results that should overflow...
self.assertRaises(OverflowError, fromHex, '-0x1p1024')
self.assertRaises(OverflowError, fromHex, '0x1p+1025')
self.assertRaises(OverflowError, fromHex, '+0X1p1030')
self.assertRaises(OverflowError, fromHex, '-0x1p+1100')
self.assertRaises(OverflowError, fromHex, '0X1p123456789123456789')
self.assertRaises(OverflowError, fromHex, '+0X.8p+1025')
self.assertRaises(OverflowError, fromHex, '+0x0.8p1025')
self.assertRaises(OverflowError, fromHex, '-0x0.4p1026')
self.assertRaises(OverflowError, fromHex, '0X2p+1023')
self.assertRaises(OverflowError, fromHex, '0x2.p1023')
self.assertRaises(OverflowError, fromHex, '-0x2.0p+1023')
self.assertRaises(OverflowError, fromHex, '+0X4p+1022')
self.assertRaises(OverflowError, fromHex, '0x1.ffffffffffffffp+1023')
self.assertRaises(OverflowError, fromHex, '-0X1.fffffffffffff9p1023')
self.assertRaises(OverflowError, fromHex, '0X1.fffffffffffff8p1023')
self.assertRaises(OverflowError, fromHex, '+0x3.fffffffffffffp1022')
self.assertRaises(OverflowError, fromHex, '0x3fffffffffffffp+970')
self.assertRaises(OverflowError, fromHex, '0x10000000000000000p960')
self.assertRaises(OverflowError, fromHex, '-0Xffffffffffffffffp960')
# ...and those that round to +-max float
self.identical(fromHex('+0x1.fffffffffffffp+1023'), MAX)
self.identical(fromHex('-0X1.fffffffffffff7p1023'), -MAX)
self.identical(fromHex('0X1.fffffffffffff7fffffffffffffp1023'), MAX)
# zeros
self.identical(fromHex('0x0p0'), 0.0)
self.identical(fromHex('0x0p1000'), 0.0)
self.identical(fromHex('-0x0p1023'), -0.0)
self.identical(fromHex('0X0p1024'), 0.0)
self.identical(fromHex('-0x0p1025'), -0.0)
self.identical(fromHex('0X0p2000'), 0.0)
self.identical(fromHex('0x0p123456789123456789'), 0.0)
self.identical(fromHex('-0X0p-0'), -0.0)
self.identical(fromHex('-0X0p-1000'), -0.0)
self.identical(fromHex('0x0p-1023'), 0.0)
self.identical(fromHex('-0X0p-1024'), -0.0)
self.identical(fromHex('-0x0p-1025'), -0.0)
self.identical(fromHex('-0x0p-1072'), -0.0)
self.identical(fromHex('0X0p-1073'), 0.0)
self.identical(fromHex('-0x0p-1074'), -0.0)
self.identical(fromHex('0x0p-1075'), 0.0)
self.identical(fromHex('0X0p-1076'), 0.0)
self.identical(fromHex('-0X0p-2000'), -0.0)
self.identical(fromHex('-0x0p-123456789123456789'), -0.0)
# values that should underflow to 0
self.identical(fromHex('0X1p-1075'), 0.0)
self.identical(fromHex('-0X1p-1075'), -0.0)
self.identical(fromHex('-0x1p-123456789123456789'), -0.0)
self.identical(fromHex('0x1.00000000000000001p-1075'), TINY)
self.identical(fromHex('-0x1.1p-1075'), -TINY)
self.identical(fromHex('0x1.fffffffffffffffffp-1075'), TINY)
# check round-half-even is working correctly near 0 ...
self.identical(fromHex('0x1p-1076'), 0.0)
self.identical(fromHex('0X2p-1076'), 0.0)
self.identical(fromHex('0X3p-1076'), TINY)
self.identical(fromHex('0x4p-1076'), TINY)
self.identical(fromHex('0X5p-1076'), TINY)
self.identical(fromHex('0X6p-1076'), 2*TINY)
self.identical(fromHex('0x7p-1076'), 2*TINY)
self.identical(fromHex('0X8p-1076'), 2*TINY)
self.identical(fromHex('0X9p-1076'), 2*TINY)
self.identical(fromHex('0xap-1076'), 2*TINY)
self.identical(fromHex('0Xbp-1076'), 3*TINY)
self.identical(fromHex('0xcp-1076'), 3*TINY)
self.identical(fromHex('0Xdp-1076'), 3*TINY)
self.identical(fromHex('0Xep-1076'), 4*TINY)
self.identical(fromHex('0xfp-1076'), 4*TINY)
self.identical(fromHex('0x10p-1076'), 4*TINY)
self.identical(fromHex('-0x1p-1076'), -0.0)
self.identical(fromHex('-0X2p-1076'), -0.0)
self.identical(fromHex('-0x3p-1076'), -TINY)
self.identical(fromHex('-0X4p-1076'), -TINY)
self.identical(fromHex('-0x5p-1076'), -TINY)
self.identical(fromHex('-0x6p-1076'), -2*TINY)
self.identical(fromHex('-0X7p-1076'), -2*TINY)
self.identical(fromHex('-0X8p-1076'), -2*TINY)
self.identical(fromHex('-0X9p-1076'), -2*TINY)
self.identical(fromHex('-0Xap-1076'), -2*TINY)
self.identical(fromHex('-0xbp-1076'), -3*TINY)
self.identical(fromHex('-0xcp-1076'), -3*TINY)
self.identical(fromHex('-0Xdp-1076'), -3*TINY)
self.identical(fromHex('-0xep-1076'), -4*TINY)
self.identical(fromHex('-0Xfp-1076'), -4*TINY)
self.identical(fromHex('-0X10p-1076'), -4*TINY)
# ... and near MIN ...
self.identical(fromHex('0x0.ffffffffffffd6p-1022'), MIN-3*TINY)
self.identical(fromHex('0x0.ffffffffffffd8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdap-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdcp-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdep-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe0p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe2p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe4p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe6p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffeap-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffecp-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffeep-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff0p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff2p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff4p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff6p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff8p-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffap-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffcp-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffep-1022'), MIN)
self.identical(fromHex('0x1.00000000000000p-1022'), MIN)
self.identical(fromHex('0x1.00000000000002p-1022'), MIN)
self.identical(fromHex('0x1.00000000000004p-1022'), MIN)
self.identical(fromHex('0x1.00000000000006p-1022'), MIN)
self.identical(fromHex('0x1.00000000000008p-1022'), MIN)
self.identical(fromHex('0x1.0000000000000ap-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000cp-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000ep-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000010p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000012p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000014p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000016p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000018p-1022'), MIN+2*TINY)
# ... and near 1.0.
self.identical(fromHex('0x0.fffffffffffff0p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff1p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff2p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff3p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff4p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff5p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff6p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff7p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff8p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff9p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffap0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffffbp0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffcp0'), 1.0)
self.identical(fromHex('0x0.fffffffffffffdp0'), 1.0)
self.identical(fromHex('0X0.fffffffffffffep0'), 1.0)
self.identical(fromHex('0x0.ffffffffffffffp0'), 1.0)
self.identical(fromHex('0X1.00000000000000p0'), 1.0)
self.identical(fromHex('0X1.00000000000001p0'), 1.0)
self.identical(fromHex('0x1.00000000000002p0'), 1.0)
self.identical(fromHex('0X1.00000000000003p0'), 1.0)
self.identical(fromHex('0x1.00000000000004p0'), 1.0)
self.identical(fromHex('0X1.00000000000005p0'), 1.0)
self.identical(fromHex('0X1.00000000000006p0'), 1.0)
self.identical(fromHex('0X1.00000000000007p0'), 1.0)
self.identical(fromHex('0x1.00000000000007ffffffffffffffffffffp0'),
1.0)
self.identical(fromHex('0x1.00000000000008p0'), 1.0)
self.identical(fromHex('0x1.00000000000008000000000000000001p0'),
1+EPS)
self.identical(fromHex('0X1.00000000000009p0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ap0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000bp0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000cp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000dp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ep0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000fp0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000010p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000011p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000012p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000013p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000014p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000015p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000016p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000017p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000017ffffffffffffffffffffp0'),
1.0+EPS)
self.identical(fromHex('0x1.00000000000018p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.00000000000018000000000000000001p0'),
1.0+2*EPS)
self.identical(fromHex('0x1.00000000000019p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001ap0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001bp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001cp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001dp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001ep0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001fp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.00000000000020p0'), 1.0+2*EPS)
def test_roundtrip(self):
def roundtrip(x):
return fromHex(toHex(x))
for x in [NAN, INF, self.MAX, self.MIN, self.MIN-self.TINY, self.TINY, 0.0]:
self.identical(x, roundtrip(x))
self.identical(-x, roundtrip(-x))
# fromHex(toHex(x)) should exactly recover x, for any non-NaN float x.
import random
for i in xrange(10000):
e = random.randrange(-1200, 1200)
m = random.random()
s = random.choice([1.0, -1.0])
try:
x = s*ldexp(m, e)
except OverflowError:
pass
else:
self.identical(x, fromHex(toHex(x)))
def test_main():
test_support.run_unittest(
GeneralFloatCases,
FormatFunctionsTestCase,
UnknownFormatTestCase,
IEEEFormatTestCase,
ReprTestCase,
RoundTestCase,
InfNanTest,
HexFloatTestCase,
)
if __name__ == '__main__':
test_main()
| gpl-2.0 | -1,488,209,148,920,339,700 | 43.442225 | 98 | 0.577117 | false |
gavmain/django_demo | demo/users/views.py | 95 | 1459 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = 'username'
slug_url_kwarg = 'username'
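    # Hypothetical matching urlpattern (an assumption, not part of this file):
    #
    #     url(r'^(?P<username>[\w.@+-]+)/$',
    #         UserDetailView.as_view(), name='detail'),
    #
    # slug_url_kwarg must match the named group above, and slug_field names
    # the model field the captured value is compared against.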
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse('users:detail',
kwargs={'username': self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
fields = ['name', ]
# we already imported User in the view code above, remember?
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse('users:detail',
kwargs={'username': self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = 'username'
slug_url_kwarg = 'username'
| mit | 4,437,241,735,609,583,600 | 29.395833 | 79 | 0.701851 | false |
mkieszek/odoo | openerp/addons/test_inherit/models.py | 11 | 2179 | # -*- coding: utf-8 -*-
from openerp import models, fields, api, osv
# We just create a new model
class mother(models.Model):
_name = 'test.inherit.mother'
_columns = {
# check interoperability of field inheritance with old-style fields
'name': osv.fields.char('Name'),
'state': osv.fields.selection([('a', 'A'), ('b', 'B')], string='State'),
}
_defaults = {
'name': 'Foo',
}
surname = fields.Char(compute='_compute_surname')
@api.one
@api.depends('name')
def _compute_surname(self):
self.surname = self.name or ''
# We want to inherit from the parent model and add some fields
# to the child object
class daughter(models.Model):
_name = 'test.inherit.daughter'
template_id = fields.Many2one('test.inherit.mother', 'Template',
delegate=True, required=True, ondelete='cascade')
field_in_daughter = fields.Char('Field1')
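    # ``delegate=True`` sets up _inherits-style delegation: reading or
    # writing an inherited field such as ``name`` on a daughter record
    # transparently goes through ``template_id`` to the mother record.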
# We add a new field to the parent object. A recent refactoring broke this
# feature; this test and these models reproduce the bug and verify the fix.
class mother(models.Model):
_inherit = 'test.inherit.mother'
field_in_mother = fields.Char()
# extend the name field: make it required and change its default value
name = fields.Char(required=True, default='Bar')
# extend the selection of the state field
state = fields.Selection(selection_add=[('c', 'C')])
# override the computed field, and extend its dependencies
@api.one
@api.depends('field_in_mother')
def _compute_surname(self):
if self.field_in_mother:
self.surname = self.field_in_mother
else:
super(mother, self)._compute_surname()
class mother(models.Model):
_inherit = 'test.inherit.mother'
# extend again the selection of the state field
state = fields.Selection(selection_add=[('d', 'D')])
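    # With both extensions applied, the effective selection on ``state`` is
    # [('a', 'A'), ('b', 'B'), ('c', 'C'), ('d', 'D')].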
class daughter(models.Model):
_inherit = 'test.inherit.daughter'
# simply redeclare the field without adding any option
template_id = fields.Many2one()
# change the default value of an inherited field
name = fields.Char(default='Baz')
| agpl-3.0 | -6,770,268,864,479,548,000 | 29.263889 | 83 | 0.647086 | false |
PredictiveScienceLab/py-mcmc | pymcmc/_mala_proposal.py | 2 | 1960 | """
This is a Metropolis Adjusted Langevin Algorithm (MALA) proposal.
Author:
Ilias Bilionis
"""
__all__ = ['MALAProposal']
import numpy as np
from scipy.stats import norm
from . import GradProposal
from . import SingleParameterTunableProposalConcept
class MALAProposal(GradProposal, SingleParameterTunableProposalConcept):
"""
A MALA proposal.
    :param dt: The time step. The larger it is, the bigger the proposed
               steps and the lower the acceptance rate.
    :type dt: float
    The rest of the keyword arguments are the ones you would find in:
+ :class:`pymcmc.GradProposal`
+ :class:`pymcmc.SingleParameterTunableProposal`
"""
def __init__(self, dt=1., **kwargs):
"""
Initialize the object.
"""
self.dt = dt
        if 'name' not in kwargs:
kwargs['name'] = 'MALA Proposal'
kwargs['param_name'] = 'dt'
GradProposal.__init__(self, **kwargs)
SingleParameterTunableProposalConcept.__init__(self, **kwargs)
def _sample(self, old_params, old_grad_params):
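        # One Euler-Maruyama step of the overdamped Langevin dynamics:
        #   x' = x + (dt ** 2 / 2) * grad_log_p(x) + dt * N(0, I)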
return (old_params +
0.5 * self.dt ** 2 * old_grad_params +
self.dt * np.random.randn(old_params.shape[0]))
def __call__(self, new_params, old_params, old_grad_params):
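        # Log-density of the Gaussian proposal q(new | old); MALA is not
        # symmetric, so this term is needed in the Metropolis-Hastings ratio.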
return np.sum(norm.logpdf(new_params,
loc=(old_params + 0.5 * self.dt ** 2 * old_grad_params),
scale=self.dt))
def __getstate__(self):
state = GradProposal.__getstate__(self)
state['dt'] = self.dt
tuner_state = SingleParameterTunableProposalConcept.__getstate__(self)
return dict(state.items() + tuner_state.items())
def __setstate__(self, state):
GradProposal.__setstate__(self, state)
self.dt = state['dt']
SingleParameterTunableProposalConcept.__setstate__(self, state['tuner'])
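# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; not part of the original module).
# ``grad_log_p`` is a hypothetical stand-in for a gradient supplied by the
# model; only methods defined above are used:
#
#     import numpy as np
#     from pymcmc import MALAProposal
#
#     def grad_log_p(x):        # gradient of log N(0, 1): -x
#         return -x
#
#     prop = MALAProposal(dt=0.5)
#     x = np.zeros(1)
#     x_new = prop._sample(x, grad_log_p(x))     # propose a Langevin step
#     log_q = prop(x_new, x, grad_log_p(x))      # log q(x_new | x)
# ---------------------------------------------------------------------------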
| lgpl-3.0 | -804,289,330,180,064,100 | 30.111111 | 90 | 0.595408 | false |