repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, may be ⌀/null)
---|---|---|---|---
foundit/Piped | refs/heads/develop | contrib/status_testing/piped_status_testing/test/test_processors.py | 2 | # Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
from StringIO import StringIO
from twisted.internet import defer
from twisted.trial import unittest
from piped import processing
from piped_status_testing import processors, statustest
class TestProcessor(processors.StatusTestProcessor):
name = 'test-processor'
class Test(statustest.StatusTestCase):
def statustest_success(self):
pass
def statustest_failed(self):
self.assertFalse(True)
def statustest_error(self):
raise Exception()
def statustest_skip(self):
pass
statustest_skip.skip = 'Skip this'
def statustest_todo(self):
raise Exception()
statustest_todo.todo = 'This should raise an Exception.'
def statustest_todo_success(self):
pass
statustest_todo_success.todo = 'This should pass "unexpectedly".'
class TestProcessorSuccess(processors.StatusTestProcessor):
name = 'test-processor-success'
class Test(statustest.StatusTestCase):
def setUp(self, processor):
self.processor = processor
def statustest_success(self):
self.assertIsInstance(self.processor, TestProcessorSuccess)
def get_namespace(self, baton):
return dict(processor=self)
class TestSimpleStatus(unittest.TestCase):
def setUp(self):
self.runtime_environment = processing.RuntimeEnvironment()
self.package_name = __name__.rsplit('.', 1)[0]
@defer.inlineCallbacks
def test_that_test_results_are_being_processed(self):
self.runtime_environment.configuration_manager.set('pipelines', dict(
collect=['collect-batons'],
status=[
{'call-named-any': dict(name='StringIO.StringIO', output_path='stream')},
{'create-statustest-reporter': dict(arguments=dict(stream_path='stream'), processor='pipeline.collect')},
'test-processor',
'test-processor-success',
'wait-for-statustest-reporter'
]
))
self.runtime_environment.configure()
pgf = processing.ProcessorGraphFactory()
pgf.configure(self.runtime_environment)
# register our test processor before making the pipeline
pgf.plugin_manager._register_plugin(TestProcessor)
pgf.plugin_manager._register_plugin(TestProcessorSuccess)
evaluators = pgf.make_all_pipelines()
for evaluator in evaluators.values():
evaluator.configure_processors(self.runtime_environment)
# get the report creator processor
report_creator = list(evaluators['status'])[1]
report_creator.processor_dependency.on_resource_ready(evaluators['collect'].process)
report_creator.processor_dependency.fire_on_ready()
processed = yield evaluators['status'].process(dict())
# the collector processor should have collected all the test results
collector = list(evaluators['collect'])[-1]
self.assertEquals(len(collector.list), 7)
# .. and the reporter should contain the test results
reporter = processed[0]['reporter']
self.assertEquals(reporter.testsRun, 7)
self.assertEquals(reporter.successes, 2)
self.assertEquals(len(reporter.failures), 1)
self.assertEquals(len(reporter.errors), 1)
self.assertEquals(len(reporter.skips), 1)
self.assertEquals(len(reporter.expectedFailures), 1)
self.assertEquals(len(reporter.unexpectedSuccesses), 1)
@defer.inlineCallbacks
def test_without_reporter_processor(self):
self.runtime_environment.configuration_manager.set('pipelines', dict(
status=[
{'call-named-any': dict(name='StringIO.StringIO', output_path='stream')},
{'create-statustest-reporter': dict(arguments=dict(stream_path='stream'))},
'test-processor',
'test-processor-success',
{'wait-for-statustest-reporter': dict(done=True)}
]
))
self.runtime_environment.configure()
pgf = processing.ProcessorGraphFactory()
pgf.configure(self.runtime_environment)
# register our test processor before making the pipeline
pgf.plugin_manager._register_plugin(TestProcessor)
pgf.plugin_manager._register_plugin(TestProcessorSuccess)
evaluators = pgf.make_all_pipelines()
for evaluator in evaluators.values():
evaluator.configure_processors(self.runtime_environment)
processed = yield evaluators['status'].process(dict())
# the reporter should contain the test results
reporter = processed[0]['reporter']
self.assertEquals(reporter.testsRun, 7)
self.assertEquals(reporter.successes, 2)
self.assertEquals(len(reporter.failures), 1)
self.assertEquals(len(reporter.errors), 1)
self.assertEquals(len(reporter.skips), 1)
self.assertEquals(len(reporter.expectedFailures), 1)
self.assertEquals(len(reporter.unexpectedSuccesses), 1) |
drexly/openhgsenti | refs/heads/master | lib/django/contrib/gis/serializers/geojson.py | 275 | from __future__ import unicode_literals
from django.contrib.gis.gdal import HAS_GDAL
from django.core.serializers.base import (
SerializationError, SerializerDoesNotExist,
)
from django.core.serializers.json import Serializer as JSONSerializer
if HAS_GDAL:
from django.contrib.gis.gdal import CoordTransform, SpatialReference
class Serializer(JSONSerializer):
"""
Convert a queryset to GeoJSON, http://geojson.org/
"""
def _init_options(self):
super(Serializer, self)._init_options()
self.geometry_field = self.json_kwargs.pop('geometry_field', None)
self.srid = self.json_kwargs.pop('srid', 4326)
def start_serialization(self):
self._init_options()
self._cts = {} # cache of CoordTransform's
self.stream.write(
'{"type": "FeatureCollection", "crs": {"type": "name", "properties": {"name": "EPSG:%d"}},'
' "features": [' % self.srid)
def end_serialization(self):
self.stream.write(']}')
def start_object(self, obj):
super(Serializer, self).start_object(obj)
self._geometry = None
if self.geometry_field is None:
# Find the first declared geometry field
for field in obj._meta.fields:
if hasattr(field, 'geom_type'):
self.geometry_field = field.name
break
def get_dump_object(self, obj):
data = {
"type": "Feature",
"properties": self._current,
}
if self._geometry:
if self._geometry.srid != self.srid:
# If needed, transform the geometry in the srid of the global geojson srid
if not HAS_GDAL:
raise SerializationError(
'Unable to convert geometry to SRID %s when GDAL is not installed.' % self.srid
)
if self._geometry.srid not in self._cts:
srs = SpatialReference(self.srid)
self._cts[self._geometry.srid] = CoordTransform(self._geometry.srs, srs)
self._geometry.transform(self._cts[self._geometry.srid])
data["geometry"] = eval(self._geometry.geojson)
else:
data["geometry"] = None
return data
def handle_field(self, obj, field):
if field.name == self.geometry_field:
self._geometry = field.value_from_object(obj)
else:
super(Serializer, self).handle_field(obj, field)
class Deserializer(object):
def __init__(self, *args, **kwargs):
raise SerializerDoesNotExist("geojson is a serialization-only serializer")
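# Usage sketch (not part of the original module): the serializer above is
# normally reached through Django's serializers registry; the model and field
# names below are illustrative assumptions.
#   from django.core import serializers
#   geojson = serializers.serialize(
#       "geojson", City.objects.all(),
#       geometry_field="point", srid=4326)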
|
breakhearts/wallstreet | refs/heads/master | wallstreet/notification/notifier.py | 1 | from mailthon import postman, email
from wallstreet import config
class Notifier(object):
def send_text(self, title, msg):
"""
:param title: message title
:param msg: raw text message
"""
raise NotImplementedError
class EmailNotifier(Notifier):
def __init__(self, host, port, username, password, sender, receivers):
self.host = host
self.port = port
self.username = username
self.password = password
self.smtp = postman(host=self.host, port=self.port, auth=(self.username, self.password))
self.sender = sender
self.receivers = receivers
def add_receivers(self, receiver):
try:
self.receivers.index(receiver)
except:
self.receivers.append(receiver)
def send_text(self, title, msg):
self.smtp.send(email(
sender=self.sender,
subject=title,
receivers=self.receivers,
content=msg
))
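# Usage sketch (host, credentials and addresses are assumptions; the module
# also builds a ready-made instance from config right below):
#   notifier = EmailNotifier("smtp.example.com", 25, "user", "secret",
#                            "[email protected]", ["[email protected]"])
#   notifier.send_text("Price alert", "AAPL moved more than 5% today.")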
email_notifier = EmailNotifier(config.get("notifier", "host"), config.get_int("notifier", "port"),
config.get("notifier", "username"), config.get("notifier", "password"),
config.get("notifier", "sender"), config.get("notifier", "receivers")) |
lumig242/Hue-Integration-with-CDAP | refs/heads/pull3 | desktop/core/src/desktop/lib/test_export_csvxls.py | 3 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import StringIO
from nose.tools import assert_equal
from openpyxl import load_workbook
from desktop.lib.export_csvxls import create_generator, make_response
def content_generator(header, data):
yield header, data
def test_export_csv():
headers = ["x", "y"]
data = [ ["1", "2"], ["3", "4"], ["5,6", "7"], [None, None] ]
# Check CSV
generator = create_generator(content_generator(headers, data), "csv")
response = make_response(generator, "csv", "foo")
assert_equal("application/csv", response["content-type"])
content = ''.join(response.streaming_content)
assert_equal('x,y\r\n1,2\r\n3,4\r\n"5,6",7\r\nNULL,NULL\r\n', content)
assert_equal("attachment; filename=foo.csv", response["content-disposition"])
def test_export_xls():
headers = ["x", "y"]
data = [ ["1", "2"], ["3", "4"], ["5,6", "7"], [None, None] ]
sheet = [headers] + data
# Check XLS
generator = create_generator(content_generator(headers, data), "xls")
response = make_response(generator, "xls", "foo")
assert_equal("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", response["content-type"])
expected_data = [[cell is not None and cell or "NULL" for cell in row] for row in sheet]
sheet_data = _read_xls_sheet_data(response)
assert_equal(expected_data, sheet_data)
assert_equal("attachment; filename=foo.xlsx", response["content-disposition"])
def _read_xls_sheet_data(response):
content = ''.join(response.streaming_content)
data = StringIO.StringIO()
data.write(content)
wb = load_workbook(filename=data, read_only=True)
ws = wb.active
return [[cell.value if cell else cell for cell in row] for row in ws.rows]
|
tgavankar/PlaydohSlideSync | refs/heads/master | bin/update_site.py | 32 | #!/usr/bin/env python
"""
Usage: update_site.py [options]
Updates a server's sources, vendor libraries, packages CSS/JS
assets, migrates the database, and other nifty deployment tasks.
Options:
-h, --help show this help message and exit
-e ENVIRONMENT, --environment=ENVIRONMENT
Type of environment. One of (prod|dev|stage) Example:
update_site.py -e stage
-v, --verbose Echo actions before taking them.
"""
import os
import sys
from textwrap import dedent
from optparse import OptionParser
from hashlib import md5
# Constants
PROJECT = 0
VENDOR = 1
ENV_BRANCH = {
# 'environment': [PROJECT_BRANCH, VENDOR_BRANCH],
'dev': ['base', 'master'],
'stage': ['master', 'master'],
'prod': ['prod', 'master'],
}
# The URL of the SVN repository with the localization files (*.po). If you set
# it to a non-empty value, remember to `git rm --cached -r locale` in the root
# of the project. Example:
# LOCALE_REPO_URL = 'https://svn.mozilla.org/projects/l10n-misc/trunk/playdoh/locale'
LOCALE_REPO_URL = ''
GIT_PULL = "git pull -q origin %(branch)s"
GIT_SUBMODULE = "git submodule update --init"
SVN_CO = "svn checkout --force %(url)s locale"
SVN_UP = "svn update"
COMPILE_MO = "./bin/compile-mo.sh %(localedir)s %(unique)s"
EXEC = 'exec'
CHDIR = 'chdir'
def update_site(env, debug):
"""Run through commands to update this site."""
error_updating = False
here = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
locale = os.path.join(here, 'locale')
unique = md5(locale).hexdigest()
project_branch = {'branch': ENV_BRANCH[env][PROJECT]}
vendor_branch = {'branch': ENV_BRANCH[env][VENDOR]}
commands = [
(CHDIR, here),
(EXEC, GIT_PULL % project_branch),
(EXEC, GIT_SUBMODULE),
]
# Checkout the locale repo into locale/ if the URL is known
if LOCALE_REPO_URL and not os.path.exists(os.path.join(locale, '.svn')):
commands += [
(EXEC, SVN_CO % {'url': LOCALE_REPO_URL}),
(EXEC, COMPILE_MO % {'localedir': locale, 'unique': unique}),
]
# Update locale dir if applicable
if os.path.exists(os.path.join(locale, '.svn')):
commands += [
(CHDIR, locale),
(EXEC, SVN_UP),
(CHDIR, here),
(EXEC, COMPILE_MO % {'localedir': locale, 'unique': unique}),
]
elif os.path.exists(os.path.join(locale, '.git')):
commands += [
(CHDIR, locale),
(EXEC, GIT_PULL % 'master'),
(CHDIR, here),
]
commands += [
(CHDIR, os.path.join(here, 'vendor')),
(EXEC, GIT_PULL % vendor_branch),
(EXEC, GIT_SUBMODULE),
(CHDIR, os.path.join(here)),
(EXEC, 'python2.6 vendor/src/schematic/schematic migrations/'),
(EXEC, 'python2.6 manage.py collectstatic --noinput'),
# un-comment if you haven't moved to django-compressor yet
#(EXEC, 'python2.6 manage.py compress_assets'),
]
for cmd, cmd_args in commands:
if CHDIR == cmd:
if debug:
sys.stdout.write("cd %s\n" % cmd_args)
os.chdir(cmd_args)
elif EXEC == cmd:
if debug:
sys.stdout.write("%s\n" % cmd_args)
if not 0 == os.system(cmd_args):
error_updating = True
break
else:
raise Exception("Unknown type of command %s" % cmd)
if error_updating:
sys.stderr.write("There was an error while updating. Please try again "
"later. Aborting.\n")
def main():
""" Handels command line args. """
debug = False
usage = dedent("""\
%prog [options]
Updates a server's sources, vendor libraries, packages CSS/JS
assets, migrates the database, and other nifty deployment tasks.
""".rstrip())
options = OptionParser(usage=usage)
e_help = "Type of environment. One of (%s) Example: update_site.py \
-e stage" % '|'.join(ENV_BRANCH.keys())
options.add_option("-e", "--environment", help=e_help)
options.add_option("-v", "--verbose",
help="Echo actions before taking them.",
action="store_true", dest="verbose")
(opts, _) = options.parse_args()
if opts.verbose:
debug = True
if opts.environment in ENV_BRANCH.keys():
update_site(opts.environment, debug)
else:
sys.stderr.write("Invalid environment!\n")
options.print_help(sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()
|
dnozay/lettuce | refs/heads/master | lettuce/django/steps/mail.py | 20 | """
Step definitions for working with Django email.
"""
from smtplib import SMTPException
from django.core import mail
from lettuce import step
STEP_PREFIX = r'(?:Given|And|Then|When) '
CHECK_PREFIX = r'(?:And|Then) '
EMAIL_PARTS = ('subject', 'body', 'from_email', 'to', 'bcc', 'cc')
GOOD_MAIL = mail.EmailMessage.send
@step(CHECK_PREFIX + r'I have sent (\d+) emails?')
def mail_sent_count(step, count):
"""
Then I have sent 2 emails
"""
count = int(count)
assert len(mail.outbox) == count, "Length of outbox is {0}, expected {1}".format(len(mail.outbox), count)
@step(r'I have not sent any emails')
def mail_not_sent(step):
"""
I have not sent any emails
"""
return mail_sent_count(step, 0)
@step(CHECK_PREFIX + (r'I have sent an email with "([^"]*)" in the ({0})'
'').format('|'.join(EMAIL_PARTS)))
def mail_sent_content(step, text, part):
"""
Then I have sent an email with "pandas" in the body
"""
assert any(text in getattr(email, part)
for email
in mail.outbox
), "An email contained expected text in the {0}".format(part)
@step(CHECK_PREFIX + r'I have sent an email with the following in the body:')
def mail_sent_content_multiline(step):
"""
I have sent an email with the following in the body:
\"""
Name: Mr. Panda
\"""
"""
return mail_sent_content(step, step.multiline, 'body')
@step(STEP_PREFIX + r'I clear my email outbox')
def mail_clear(step):
"""
I clear my email outbox
"""
mail.EmailMessage.send = GOOD_MAIL
mail.outbox = []
def broken_send(*args, **kwargs):
"""
Broken send function for email_broken step
"""
raise SMTPException("Failure mocked by lettuce")
@step(STEP_PREFIX + r'sending email does not work')
def email_broken(step):
"""
Break email sending
"""
mail.EmailMessage.send = broken_send
|
boberfly/gaffer | refs/heads/master | python/GafferSceneUI/ShaderAssignmentUI.py | 11 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
Gaffer.Metadata.registerNode(
GafferScene.ShaderAssignment,
"description",
"""
Assigns shaders to objects.
""",
plugs = {
"shader" : [
"description",
"""
The shader to be assigned.
""",
"noduleLayout:section", "left",
"nodule:type", "GafferUI::StandardNodule",
]
}
)
|
nubbel/swift-tensorflow | refs/heads/master | PythonGenerated/tensorflow/core/example/example_pb2.py | 1 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/example/example.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.example import feature_pb2 as tensorflow_dot_core_dot_example_dot_feature__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/example/example.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n%tensorflow/core/example/example.proto\x12\ntensorflow\x1a%tensorflow/core/example/feature.proto\"1\n\x07\x45xample\x12&\n\x08\x66\x65\x61tures\x18\x01 \x01(\x0b\x32\x14.tensorflow.Features\"i\n\x0fSequenceExample\x12%\n\x07\x63ontext\x18\x01 \x01(\x0b\x32\x14.tensorflow.Features\x12/\n\rfeature_lists\x18\x02 \x01(\x0b\x32\x18.tensorflow.FeatureListsB,\n\x16org.tensorflow.exampleB\rExampleProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_example_dot_feature__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EXAMPLE = _descriptor.Descriptor(
name='Example',
full_name='tensorflow.Example',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='features', full_name='tensorflow.Example.features', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=92,
serialized_end=141,
)
_SEQUENCEEXAMPLE = _descriptor.Descriptor(
name='SequenceExample',
full_name='tensorflow.SequenceExample',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='context', full_name='tensorflow.SequenceExample.context', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='feature_lists', full_name='tensorflow.SequenceExample.feature_lists', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=143,
serialized_end=248,
)
_EXAMPLE.fields_by_name['features'].message_type = tensorflow_dot_core_dot_example_dot_feature__pb2._FEATURES
_SEQUENCEEXAMPLE.fields_by_name['context'].message_type = tensorflow_dot_core_dot_example_dot_feature__pb2._FEATURES
_SEQUENCEEXAMPLE.fields_by_name['feature_lists'].message_type = tensorflow_dot_core_dot_example_dot_feature__pb2._FEATURELISTS
DESCRIPTOR.message_types_by_name['Example'] = _EXAMPLE
DESCRIPTOR.message_types_by_name['SequenceExample'] = _SEQUENCEEXAMPLE
Example = _reflection.GeneratedProtocolMessageType('Example', (_message.Message,), dict(
DESCRIPTOR = _EXAMPLE,
__module__ = 'tensorflow.core.example.example_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.Example)
))
_sym_db.RegisterMessage(Example)
SequenceExample = _reflection.GeneratedProtocolMessageType('SequenceExample', (_message.Message,), dict(
DESCRIPTOR = _SEQUENCEEXAMPLE,
__module__ = 'tensorflow.core.example.example_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.SequenceExample)
))
_sym_db.RegisterMessage(SequenceExample)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026org.tensorflow.exampleB\rExampleProtosP\001\370\001\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
|
chugunovyar/factoryForBuild | refs/heads/master | env/lib/python2.7/site-packages/scipy/weave/numpy_scalar_spec.py | 100 | """ Converters for all of NumPy's scalar types such as
int32, float32, complex128, etc.
"""
from __future__ import absolute_import, print_function
import numpy
from . import c_spec
class numpy_complex_scalar_converter(c_spec.complex_converter):
""" Handles conversion of all the NumPy complex types.
This uses the same machinery as the standard python
complex converter.
"""
def init_info(self):
# First, set up all the same specifications the normal
# complex converter uses.
c_spec.complex_converter.init_info(self)
# But set this converter up to match the numpy complex
# types.
self.matching_types = numpy.sctypes['complex']
|
xpol/gyp | refs/heads/master | test/subdirectory/gyptest-SYMROOT-all.py | 102 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target and a subsidiary dependent target from a
.gyp file in a subdirectory, without specifying an explicit output build
directory, and using the generated solution or project file at the top
of the tree as the entry point.
The configuration sets the Xcode SYMROOT variable and uses --depth=
to make Xcode behave like the other build tools--that is, put all
built targets in a single output build directory at the top of the tree.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('prog1.gyp', '-Dset_symroot=1', '--depth=.', chdir='src')
test.relocate('src', 'relocate/src')
# Suppress the test infrastructure's setting SYMROOT on the command line.
test.build('prog1.gyp', test.ALL, SYMROOT=None, chdir='relocate/src')
test.run_built_executable('prog1',
stdout="Hello from prog1.c\n",
chdir='relocate/src')
test.run_built_executable('prog2',
stdout="Hello from prog2.c\n",
chdir='relocate/src')
test.pass_test()
|
xaroth8088/tournament-of-lulz | refs/heads/master | servers/python/tournament_of_lulz/modules/top_images/model_top_images.py | 1 | from tournament_of_lulz.database.database import fetchall
from tournament_of_lulz.modules.image.model_image import ModelImage
class ModelTopImages():
def __init__(self, db_connection):
self.db_connection = db_connection
self.top_images = []
def load_top_images(self, start, limit):
self.top_images = []
sql = (
"SELECT image_id, image_url_hash, image_url, page_url, thumbnail_url, title, rating, rd, volatility "
"FROM images "
"ORDER BY rating DESC "
"LIMIT %(start)s, %(limit)s"
)
params = {
'start': start,
'limit': limit
}
data = fetchall(self.db_connection, sql, params)
for row in data:
image = ModelImage()
image.init_with_db_row(row)
self.top_images.append(image)
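# Usage sketch (db_connection is assumed to be an existing DB-API connection
# obtained elsewhere in the project):
#   model = ModelTopImages(db_connection)
#   model.load_top_images(start=0, limit=10)
#   top_ten = model.top_images  # ModelImage instances, highest rating first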
|
ycaihua/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/test/test_json/test_fail.py | 84 | from test.test_json import PyTest, CTest
import re
# 2007-10-05
JSONDOCS = [
# http://json.org/JSON_checker/test/fail1.json
'"A JSON payload should be an object or array, not a string."',
# http://json.org/JSON_checker/test/fail2.json
'["Unclosed array"',
# http://json.org/JSON_checker/test/fail3.json
'{unquoted_key: "keys must be quoted"}',
# http://json.org/JSON_checker/test/fail4.json
'["extra comma",]',
# http://json.org/JSON_checker/test/fail5.json
'["double extra comma",,]',
# http://json.org/JSON_checker/test/fail6.json
'[ , "<-- missing value"]',
# http://json.org/JSON_checker/test/fail7.json
'["Comma after the close"],',
# http://json.org/JSON_checker/test/fail8.json
'["Extra close"]]',
# http://json.org/JSON_checker/test/fail9.json
'{"Extra comma": true,}',
# http://json.org/JSON_checker/test/fail10.json
'{"Extra value after close": true} "misplaced quoted value"',
# http://json.org/JSON_checker/test/fail11.json
'{"Illegal expression": 1 + 2}',
# http://json.org/JSON_checker/test/fail12.json
'{"Illegal invocation": alert()}',
# http://json.org/JSON_checker/test/fail13.json
'{"Numbers cannot have leading zeroes": 013}',
# http://json.org/JSON_checker/test/fail14.json
'{"Numbers cannot be hex": 0x14}',
# http://json.org/JSON_checker/test/fail15.json
'["Illegal backslash escape: \\x15"]',
# http://json.org/JSON_checker/test/fail16.json
'[\\naked]',
# http://json.org/JSON_checker/test/fail17.json
'["Illegal backslash escape: \\017"]',
# http://json.org/JSON_checker/test/fail18.json
'[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]',
# http://json.org/JSON_checker/test/fail19.json
'{"Missing colon" null}',
# http://json.org/JSON_checker/test/fail20.json
'{"Double colon":: null}',
# http://json.org/JSON_checker/test/fail21.json
'{"Comma instead of colon", null}',
# http://json.org/JSON_checker/test/fail22.json
'["Colon instead of comma": false]',
# http://json.org/JSON_checker/test/fail23.json
'["Bad value", truth]',
# http://json.org/JSON_checker/test/fail24.json
"['single quote']",
# http://json.org/JSON_checker/test/fail25.json
'["\ttab\tcharacter\tin\tstring\t"]',
# http://json.org/JSON_checker/test/fail26.json
'["tab\\ character\\ in\\ string\\ "]',
# http://json.org/JSON_checker/test/fail27.json
'["line\nbreak"]',
# http://json.org/JSON_checker/test/fail28.json
'["line\\\nbreak"]',
# http://json.org/JSON_checker/test/fail29.json
'[0e]',
# http://json.org/JSON_checker/test/fail30.json
'[0e+]',
# http://json.org/JSON_checker/test/fail31.json
'[0e+-1]',
# http://json.org/JSON_checker/test/fail32.json
'{"Comma instead if closing brace": true,',
# http://json.org/JSON_checker/test/fail33.json
'["mismatch"}',
# http://code.google.com/p/simplejson/issues/detail?id=3
'["A\u001FZ control characters in string"]',
]
SKIPS = {
1: "why not have a string payload?",
18: "spec doesn't specify any nesting limitations",
}
class TestFail:
def test_failures(self):
for idx, doc in enumerate(JSONDOCS):
idx = idx + 1
if idx in SKIPS:
self.loads(doc)
continue
try:
self.loads(doc)
except ValueError:
pass
else:
self.fail("Expected failure for fail{0}.json: {1!r}".format(idx, doc))
def test_non_string_keys_dict(self):
data = {'a' : 1, (1, 2) : 2}
#This is for c encoder
self.assertRaises(TypeError, self.dumps, data)
#This is for python encoder
self.assertRaises(TypeError, self.dumps, data, indent=True)
def test_truncated_input(self):
test_cases = [
('', 'Expecting value', 0),
('[', 'Expecting value', 1),
('[42', "Expecting ',' delimiter", 3),
('[42,', 'Expecting value', 4),
('["', 'Unterminated string starting at', 1),
('["spam', 'Unterminated string starting at', 1),
('["spam"', "Expecting ',' delimiter", 7),
('["spam",', 'Expecting value', 8),
('{', 'Expecting property name enclosed in double quotes', 1),
('{"', 'Unterminated string starting at', 1),
('{"spam', 'Unterminated string starting at', 1),
('{"spam"', "Expecting ':' delimiter", 7),
('{"spam":', 'Expecting value', 8),
('{"spam":42', "Expecting ',' delimiter", 10),
('{"spam":42,', 'Expecting property name enclosed in double quotes', 11),
]
test_cases += [
('"', 'Unterminated string starting at', 0),
('"spam', 'Unterminated string starting at', 0),
]
for data, msg, idx in test_cases:
self.assertRaisesRegex(ValueError,
r'^{0}: line 1 column {1} \(char {2}\)'.format(
re.escape(msg), idx + 1, idx),
self.loads, data)
def test_unexpected_data(self):
test_cases = [
('[,', 'Expecting value', 1),
('{"spam":[}', 'Expecting value', 9),
('[42:', "Expecting ',' delimiter", 3),
('[42 "spam"', "Expecting ',' delimiter", 4),
('[42,]', 'Expecting value', 4),
('{"spam":[42}', "Expecting ',' delimiter", 11),
('["]', 'Unterminated string starting at', 1),
('["spam":', "Expecting ',' delimiter", 7),
('["spam",]', 'Expecting value', 8),
('{:', 'Expecting property name enclosed in double quotes', 1),
('{,', 'Expecting property name enclosed in double quotes', 1),
('{42', 'Expecting property name enclosed in double quotes', 1),
('[{]', 'Expecting property name enclosed in double quotes', 2),
('{"spam",', "Expecting ':' delimiter", 7),
('{"spam"}', "Expecting ':' delimiter", 7),
('[{"spam"]', "Expecting ':' delimiter", 8),
('{"spam":}', 'Expecting value', 8),
('[{"spam":]', 'Expecting value', 9),
('{"spam":42 "ham"', "Expecting ',' delimiter", 11),
('[{"spam":42]', "Expecting ',' delimiter", 11),
('{"spam":42,}', 'Expecting property name enclosed in double quotes', 11),
]
for data, msg, idx in test_cases:
self.assertRaisesRegex(ValueError,
r'^{0}: line 1 column {1} \(char {2}\)'.format(
re.escape(msg), idx + 1, idx),
self.loads, data)
def test_extra_data(self):
test_cases = [
('[]]', 'Extra data', 2),
('{}}', 'Extra data', 2),
('[],[]', 'Extra data', 2),
('{},{}', 'Extra data', 2),
]
test_cases += [
('42,"spam"', 'Extra data', 2),
('"spam",42', 'Extra data', 6),
]
for data, msg, idx in test_cases:
self.assertRaisesRegex(ValueError,
r'^{0}: line 1 column {1} - line 1 column {2}'
r' \(char {3} - {4}\)'.format(
re.escape(msg), idx + 1, len(data) + 1, idx, len(data)),
self.loads, data)
def test_linecol(self):
test_cases = [
('!', 1, 1, 0),
(' !', 1, 2, 1),
('\n!', 2, 1, 1),
('\n \n\n !', 4, 6, 10),
]
for data, line, col, idx in test_cases:
self.assertRaisesRegex(ValueError,
r'^Expecting value: line {0} column {1}'
r' \(char {2}\)$'.format(line, col, idx),
self.loads, data)
class TestPyFail(TestFail, PyTest): pass
class TestCFail(TestFail, CTest): pass
|
Narcolapser/PyGameLearningByDoing | refs/heads/master | Old PyGame stuff/pong/pong.py | 1 | ####################################################################################################
# Name: Pygame Pong Experiment
# Purpose: Make a simple pygame game to get a handle on PyGame
# Date: 2014/02/26
# Programmer: Toben "Littlefoo" "Narcolapser" Archer
# Version: 0.1
####################################################################################################
import sys, pygame
from pygame.locals import *
from random import randint
import math
class Paddle:
def __init__(self,x,y):
self.x = x
self.y = y
self.rect = pygame.Rect(x-8,y-30,16,60)
def draw(self,surface):
white = pygame.Color(255,255,255)
pygame.draw.rect(surface,white,self.rect,0)
def move(self,direction):
if self.y > 30 and direction < 0:
self.rect.move_ip(0,direction)
self.y += direction
elif self.y < 450 and direction > 0:
self.rect.move_ip(0,direction)
self.y += direction
class Puck:
def __init__(self,x,y,sx,sy):
self.size = 5
self.x = x
self.y = y
self.sx = sx
self.sy = sy
def draw(self,surface):
white = pygame.Color(255,255,255)
pygame.draw.circle(surface,white,(int(self.x),int(self.y)),self.size,0)
def move(self):
if self.y > 475:
self.y = 475 - (self.y - 475)
self.sy *= -1
if self.y < 5:
self.y = self.y + 5
self.sy *= -1
if self.x > 635:
return 1
if self.x < 5:
return -1
self.x += self.sx
self.y += self.sy
return 0
def puckCollidesPaddle(self,paddle):
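# On contact, reverse and slightly speed up the puck's horizontal velocity,
# and tilt its vertical velocity by how far from the paddle's centre it hit.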
if abs(self.x - paddle.x) < 5:
if abs(self.y - paddle.y) < 55:
self.sx *= -1.1
self.sy += (self.y - paddle.y)/50.0
self.x += self.sx
pygame.init()
fps = pygame.time.Clock()
pygame.key.set_repeat(5,5)
window = pygame.display.set_mode((640,480))
pygame.display.set_caption('Beep boop use w and s and up arrow and down arrow')
leftPaddle = Paddle(10,240)
rightPaddle = Paddle(630,240)
puck = Puck(320,240,2,2)
speed = 120
quit = False
while not quit:
# events = pygame.event.get()
pygame.event.pump()
keys = pygame.key.get_pressed()
window.fill((0,0,0))
if keys[K_q]:
quit = True
if keys[K_s]:
leftPaddle.move(5)
if keys[K_w]:
leftPaddle.move(-5)
if keys[K_DOWN]:
rightPaddle.move(5)
if keys[K_UP]:
rightPaddle.move(-5)
scored = puck.move()
if scored != 0:
if scored == 1:
print "Left wins."
if scored == -1:
print "AWH YEA RIGHT! YOU ARE AWESOME!"
quit = True
puck.puckCollidesPaddle(leftPaddle)
puck.puckCollidesPaddle(rightPaddle)
leftPaddle.draw(window)
rightPaddle.draw(window)
puck.draw(window)
pygame.display.update()
fps.tick(speed)
# quit = snakeBitten or snakeCrashed or quit
#print "you ate:",snakeLength-3,"apples!"
#if randint(0,100)>95:
# print "big question here: do snakes eat apples?"
|
nawarian/PHPBot | refs/heads/master | ext/pyautogui/bin/build/bdist.win-amd64/winexe/temp/_hashlib.py | 29 |
def __load():
import imp, os, sys
try:
dirname = os.path.dirname(__loader__.archive)
except NameError:
dirname = sys.prefix
path = os.path.join(dirname, '_hashlib.pyd')
#print "py2exe extension module", __name__, "->", path
mod = imp.load_dynamic(__name__, path)
## mod.frozen = 1
__load()
del __load
|
blakerouse/python-libmaas | refs/heads/master | maas/client/bones/tests/test_helpers.py | 3 | """Tests for `maas.client.bones.helpers`."""
import json
from unittest.mock import Mock
from urllib.parse import urlparse, urlsplit
import aiohttp.web
from macaroonbakery.httpbakery import Client
from testtools import ExpectedException
from testtools.matchers import Equals, Is, IsInstance, MatchesDict
from .. import helpers, testing
from ...testing import AsyncCallableMock, make_name, make_name_without_spaces, TestCase
from ...utils import api_url, profiles
from ...utils.testing import make_Credentials
from ..testing import api_descriptions
from ..testing.server import ApplicationBuilder
class TestFetchAPIDescription(TestCase):
"""Tests for `fetch_api_description`."""
def test__raises_RemoteError_when_request_fails(self):
fixture = self.useFixture(testing.DescriptionServer(b"bogus"))
error = self.assertRaises(
helpers.RemoteError,
self.loop.run_until_complete,
helpers.fetch_api_description(fixture.url + "bogus/"),
)
self.assertEqual(fixture.url + "bogus/ -> 404 Not Found", str(error))
def test__raises_RemoteError_when_content_not_json(self):
fixture = self.useFixture(testing.DescriptionServer())
fixture.handler.content_type = "text/json"
error = self.assertRaises(
helpers.RemoteError,
self.loop.run_until_complete,
helpers.fetch_api_description(fixture.url),
)
self.assertEqual("Expected application/json, got: text/json", str(error))
class TestFetchAPIDescriptionURLs(TestCase):
"""Tests for URL types accepted by `fetch_api_description`."""
scenarios = (
("string", dict(prepare=str)),
("split", dict(prepare=urlsplit)),
("parsed", dict(prepare=urlparse)),
)
def test__accepts_prepared_url(self):
description = {"foo": make_name_without_spaces("bar")}
description_json = json.dumps(description).encode("ascii")
fixture = self.useFixture(testing.DescriptionServer(description_json))
description_url = self.prepare(fixture.url) # Parse, perhaps.
description_fetched = self.loop.run_until_complete(
helpers.fetch_api_description(description_url)
)
self.assertThat(description_fetched, Equals(description))
class TestFetchAPIDescription_APIVersions(TestCase):
"""Tests for `fetch_api_description` with multiple API versions."""
scenarios = tuple(
(name, dict(version=version, path=path))
for name, version, path in testing.list_api_descriptions()
)
def test__downloads_description(self):
description = self.path.read_bytes()
fixture = self.useFixture(testing.DescriptionServer(description))
description_fetched = self.loop.run_until_complete(
helpers.fetch_api_description(fixture.url)
)
self.assertThat(
description_fetched, Equals(json.loads(description.decode("utf-8")))
)
class TestConnect(TestCase):
"""Tests for `maas.client.utils.connect.connect`."""
def setUp(self):
super(TestConnect, self).setUp()
self.patch(helpers, "fetch_api_description", AsyncCallableMock(return_value={}))
def test__anonymous(self):
# Connect without an apikey.
profile = helpers.connect("http://example.org:5240/MAAS/")
helpers.fetch_api_description.assert_called_once_with(
urlparse("http://example.org:5240/MAAS/api/2.0/"), False
)
# A Profile instance was returned with no credentials.
self.assertThat(profile, IsInstance(profiles.Profile))
self.assertThat(profile.credentials, Is(None))
def test__connected_when_apikey_provided(self):
credentials = make_Credentials()
# Connect with an apikey.
profile = helpers.connect(
"http://example.org:5240/MAAS/", apikey=str(credentials)
)
# The description was fetched.
helpers.fetch_api_description.assert_called_once_with(
urlparse("http://example.org:5240/MAAS/api/2.0/"), False
)
# A Profile instance was returned with the expected credentials.
self.assertThat(profile, IsInstance(profiles.Profile))
self.assertThat(profile.credentials, Equals(credentials))
def test__complains_when_username_in_URL(self):
self.assertRaises(
helpers.ConnectError,
helpers.connect,
"http://foo:[email protected]:5240/MAAS/",
)
def test__complains_when_password_in_URL(self):
self.assertRaises(
helpers.ConnectError, helpers.connect, "http://:[email protected]:5240/MAAS/"
)
def test__URL_is_normalised_to_point_at_API_endpoint(self):
profile = helpers.connect("http://example.org:5240/MAAS/")
self.assertThat(profile.url, Equals(api_url("http://example.org:5240/MAAS/")))
def test__profile_is_given_default_name_based_on_URL(self):
domain = make_name_without_spaces("domain")
profile = helpers.connect("http://%s/MAAS/" % domain)
self.assertThat(profile.name, Equals(domain))
def test__API_description_is_saved_in_profile(self):
description = helpers.fetch_api_description.return_value = {"foo": "bar"}
profile = helpers.connect("http://example.org:5240/MAAS/")
self.assertThat(profile.description, Equals(description))
def test__API_description_is_fetched_insecurely_if_requested(self):
profile = helpers.connect("http://example.org:5240/MAAS/", insecure=True)
helpers.fetch_api_description.assert_called_once_with(
urlparse("http://example.org:5240/MAAS/api/2.0/"), True
)
self.assertTrue(profile.other["insecure"])
class TestLogin(TestCase):
"""Tests for `maas.client.utils.login.login`."""
def setUp(self):
super(TestLogin, self).setUp()
self.patch(helpers, "authenticate", AsyncCallableMock(return_value=None))
self.patch(helpers, "fetch_api_description", AsyncCallableMock(return_value={}))
def test__anonymous(self):
# Log-in anonymously.
profile = helpers.login("http://example.org:5240/MAAS/", anonymous=True)
# No token was obtained, but the description was fetched.
helpers.authenticate.assert_not_called()
# A Profile instance was returned with no credentials.
self.assertThat(profile, IsInstance(profiles.Profile))
self.assertThat(profile.credentials, Is(None))
def test__macaroon_auth_with_no_username_and_password(self):
credentials = make_Credentials()
self.patch(
helpers,
"authenticate_with_macaroon",
AsyncCallableMock(return_value=credentials),
)
# Log-in without a user-name or a password.
profile = helpers.login("http://example.org:5240/MAAS/")
# A token is obtained via macaroons, but the description was fetched.
# The description was fetched.
helpers.fetch_api_description.assert_called_once_with(
urlparse("http://example.org:5240/MAAS/api/2.0/"), False
)
# The returned profile uses credentials obtained from the
# authentication
self.assertThat(profile, IsInstance(profiles.Profile))
self.assertThat(profile.credentials, Is(credentials))
def test__authenticated_when_username_and_password_provided(self):
credentials = make_Credentials()
helpers.authenticate.return_value = credentials
# Log-in with a user-name and a password.
profile = helpers.login("http://foo:[email protected]:5240/MAAS/")
# A token was obtained, and the description was fetched.
helpers.authenticate.assert_called_once_with(
"http://example.org:5240/MAAS/api/2.0/", "foo", "bar", insecure=False
)
# A Profile instance was returned with the expected credentials.
self.assertThat(profile, IsInstance(profiles.Profile))
self.assertThat(profile.credentials, Is(credentials))
def test__complains_when_username_but_not_password(self):
self.assertRaises(
helpers.UsernameWithoutPassword,
helpers.login,
"http://example.org:5240/MAAS/",
username="alice",
)
def test__complains_when_password_but_not_username(self):
self.assertRaises(
helpers.PasswordWithoutUsername,
helpers.login,
"http://example.org:5240/MAAS/",
password="wonderland",
)
def test__complains_when_username_in_URL_and_passed_explicitly(self):
self.assertRaises(
helpers.LoginError,
helpers.login,
"http://foo:[email protected]:5240/MAAS/",
username="alice",
)
def test__complains_when_empty_username_in_URL_and_passed_explicitly(self):
self.assertRaises(
helpers.LoginError,
helpers.login,
"http://:[email protected]:5240/MAAS/",
username="alice",
)
def test__complains_when_password_in_URL_and_passed_explicitly(self):
self.assertRaises(
helpers.LoginError,
helpers.login,
"http://foo:[email protected]:5240/MAAS/",
password="wonderland",
)
def test__complains_when_empty_password_in_URL_and_passed_explicitly(self):
self.assertRaises(
helpers.LoginError,
helpers.login,
"http://foo:@example.org:5240/MAAS/",
password="wonderland",
)
def test__URL_is_normalised_to_point_at_API_endpoint(self):
profile = helpers.login("http://example.org:5240/MAAS/", anonymous=True)
self.assertThat(profile.url, Equals(api_url("http://example.org:5240/MAAS/")))
def test__profile_is_given_default_name_based_on_URL(self):
domain = make_name_without_spaces("domain")
profile = helpers.login("http://%s/MAAS/" % domain, anonymous=True)
self.assertThat(profile.name, Equals(domain))
def test__API_description_is_saved_in_profile(self):
description = {make_name("key"): make_name("value")}
helpers.fetch_api_description.return_value = description
profile = helpers.login("http://example.org:5240/MAAS/", anonymous=True)
self.assertThat(profile.description, Equals(description))
def test__API_token_is_fetched_insecurely_if_requested(self):
profile = helpers.login("http://foo:[email protected]:5240/MAAS/", insecure=True)
helpers.authenticate.assert_called_once_with(
"http://example.org:5240/MAAS/api/2.0/", "foo", "bar", insecure=True
)
self.assertTrue(profile.other["insecure"])
def test__API_description_is_fetched_insecurely_if_requested(self):
helpers.login("http://example.org:5240/MAAS/", anonymous=True, insecure=True)
helpers.fetch_api_description.assert_called_once_with(
urlparse("http://example.org:5240/MAAS/api/2.0/"), True
)
def test__uses_username_from_URL_if_set(self):
helpers.login("http://[email protected]/", password="bar")
helpers.authenticate.assert_called_once_with(
"http://maas.io/api/2.0/", "foo", "bar", insecure=False
)
def test__uses_username_and_password_from_URL_if_set(self):
helpers.login("http://foo:[email protected]/")
helpers.authenticate.assert_called_once_with(
"http://maas.io/api/2.0/", "foo", "bar", insecure=False
)
def test__uses_empty_username_and_password_in_URL_if_set(self):
helpers.login("http://:@maas.io/")
helpers.authenticate.assert_called_once_with(
"http://maas.io/api/2.0/", "", "", insecure=False
)
class TestAuthenticate(TestCase):
"""Tests for `authenticate`."""
scenarios = tuple(
(name, dict(version=version, description=description))
for name, version, description in api_descriptions
)
async def test__obtains_credentials_from_server(self):
builder = ApplicationBuilder(self.description)
@builder.handle("anon:Version.read")
async def version(request):
return {"capabilities": ["authenticate-api"]}
credentials = make_Credentials()
parameters = None
@builder.route("POST", "/accounts/authenticate/")
async def deploy(request):
nonlocal parameters
parameters = await request.post()
return {
"consumer_key": credentials.consumer_key,
"token_key": credentials.token_key,
"token_secret": credentials.token_secret,
}
username = make_name_without_spaces("username")
password = make_name_without_spaces("password")
async with builder.serve() as baseurl:
credentials_observed = await helpers.authenticate(
baseurl, username, password
)
self.assertThat(credentials_observed, Equals(credentials))
self.assertThat(
parameters,
MatchesDict(
{
"username": Equals(username),
"password": Equals(password),
"consumer": IsInstance(str),
}
),
)
async def test__raises_error_when_server_does_not_support_authn(self):
builder = ApplicationBuilder(self.description)
@builder.handle("anon:Version.read")
async def version(request):
return {"capabilities": []}
async with builder.serve() as baseurl:
with ExpectedException(helpers.LoginNotSupported):
await helpers.authenticate(baseurl, "username", "password")
async def test__raises_error_when_server_rejects_credentials(self):
builder = ApplicationBuilder(self.description)
@builder.handle("anon:Version.read")
async def version(request):
return {"capabilities": ["authenticate-api"]}
@builder.route("POST", "/accounts/authenticate/")
async def deploy(request):
raise aiohttp.web.HTTPForbidden()
async with builder.serve() as baseurl:
with ExpectedException(helpers.RemoteError):
await helpers.authenticate(baseurl, "username", "password")
class TestAuthenticateWithMacaroon(TestCase):
def setUp(self):
super().setUp()
self.mock_client_request = self.patch(Client, "request")
self.token_result = {
"consumer_key": "abc",
"token_key": "123",
"token_secret": "xyz",
}
self.mock_response = Mock()
self.mock_response.status_code = 200
self.mock_response.json.return_value = self.token_result
self.mock_client_request.return_value = self.mock_response
async def test__authenticate_with_bakery_creates_token(self):
credentials = await helpers.authenticate_with_macaroon("http://example.com")
self.assertEqual(credentials, "abc:123:xyz")
# a call to create an API token is made
self.mock_client_request.assert_called_once_with(
"POST",
"http://example.com/account/?op=create_authorisation_token",
verify=True,
)
async def test__authenticate_failed_request(self):
self.mock_response.status_code = 500
self.mock_response.text = "error!"
try:
await helpers.authenticate_with_macaroon("http://example.com")
except helpers.LoginError as e:
self.assertEqual(str(e), "Login failed: error!")
else:
self.fail("LoginError not raised")
async def test__authenticate_macaroon_not_supported(self):
self.mock_response.status_code = 401
try:
await helpers.authenticate_with_macaroon("http://example.com")
except helpers.MacaroonLoginNotSupported as e:
self.assertEqual(str(e), "Macaroon authentication not supported")
else:
self.fail("MacaroonLoginNotSupported not raised")
class TestDeriveResourceName(TestCase):
"""Tests for `derive_resource_name`."""
def test__removes_Anon_prefix(self):
self.assertThat(helpers.derive_resource_name("AnonFooBar"), Equals("FooBar"))
def test__removes_Handler_suffix(self):
self.assertThat(helpers.derive_resource_name("FooBarHandler"), Equals("FooBar"))
def test__normalises_Maas_to_MAAS(self):
self.assertThat(helpers.derive_resource_name("Maas"), Equals("MAAS"))
def test__does_all_the_above(self):
self.assertThat(helpers.derive_resource_name("AnonMaasHandler"), Equals("MAAS"))
|
lyndon160/REF | refs/heads/master | openflow_bandwidth/report_throughput.py | 1 | #!/usr/bin/python
# coding: utf-8
# This is the command line interface to the JSON-RPC service for the services report_all_ports, report_port and report_switch_ports
# implemented in the server enforce_bandwodth_simple_switch
# if called with -a (for all) then report_all_ports is invoked
# if called with -s (for switch) then report_switch_ports is invoked
# unless -p (ports) is also given, in which case report_port is called
#
# In every case, the output from the RPC call is simply printed as a python object, decoded from the JSON response
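# Example invocations (the JSON-RPC endpoint is hard-coded to localhost:4000
# in main() below; switch and port numbers are illustrative):
#   ./report_throughput.py -a           # all ports on all switches
#   ./report_throughput.py -s 1         # all ports on switch 1
#   ./report_throughput.py -s 1 -p 2    # a single port on switch 1
#   ./report_throughput.py -a -m        # max stats instead of current stats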
import json
import pyjsonrpc
import sys, getopt
from pprint import pprint
def __init__(self):
http_client = None
def main(argv):
http_client = pyjsonrpc.HttpClient(url = "http://localhost:4000/jsonrpc")
if http_client is None:
print 'Could not connect to rpc server'
sys.exit()
usage = "\nusage: report_throughput.py <url> [options]\n"\
"\nOptions:\n-a\t\tall ports all switchs\n"\
"-s <switch_id>\tall ports on <switch_id>\n"\
"-p <port_no>\tport <port_no>. To be used with -s.\n"\
"-m request max stats not current stats\n"
al = False
max_wanted = False
flows_wanted = False
switch = None
port = None
try:
opts, args = getopt.getopt(argv,"fmas:p:",[])
except getopt.GetoptError:
print usage
sys.exit(2)
for opt, arg in opts:
if opt == '-f':
flows_wanted = True
elif opt == '-m':
max_wanted = True
elif opt == '-a':
al = True
elif opt == '-s':
switch = arg
elif opt == '-p':
port = arg
else:
print usage
sys.exit(2)
if al == True:
pprint(http_client.call("report_all_ports", flows_wanted, max_wanted))
elif switch is not None and port is not None:
pprint(http_client.call("report_port", flows_wanted, max_wanted, switch, port))
elif switch is not None:
pprint(http_client.call("report_switch_ports", flows_wanted, max_wanted, switch))
else:
print usage
if __name__== "__main__":
main(sys.argv[1:])
|
gizmachi/sorting_students | refs/heads/master | inri.py | 1 | #!/usr/bin/python
# coding=utf-8
from random import randint
import openpyxl
import copy
import os
import sys
import datetime
data_filename = "enkater.xlsx"
mission_filename = "missions.txt"
classes_filename = "klasslista.txt"
class_sorting_filename = "klasssortering.txt"
questions_filename = "questions.txt"
sorting_filename = "sorting.txt"
nollan_filename = "nollan.txt"
columns_about_nollan = [
u"Tidstämpel",
u"Förnamn",
u"Efternamn",
u"Personnummer",
u"Adress",
u"Telefonnr",
u"Kön",
u"Program",
]
columns_one2five = [
u"1. Hur bra är Ø:an egentligen på att... [...snickra/bygga]",
u"1. Hur bra är Ø:an egentligen på att... [...konstruera]",
u"1. Hur bra är Ø:an egentligen på att... [...måla/teckna]",
u"1. Hur bra är Ø:an egentligen på att... [...sy]",
u"1. Hur bra är Ø:an egentligen på att... [...designa]",
u"1. Hur bra är Ø:an egentligen på att... [...pyssla]",
u"1. Hur bra är Ø:an egentligen på att... [...heja]",
u"1. Hur bra är Ø:an egentligen på att... [...samla saker]",
u"1. Hur bra är Ø:an egentligen på att... [...lösa gåtor]",
u"1. Hur bra är Ø:an egentligen på att... [...smyga]",
u"1. Hur bra är Ø:an egentligen på att... [...spionera]",
u"1. Hur bra är Ø:an egentligen på att... [...uppträda]",
u"1. Hur bra är Ø:an egentligen på att... [...sjunga]",
u"1. Hur bra är Ø:an egentligen på att... [...skriva låtar]",
u"1. Hur bra är Ø:an egentligen på att... [...dansa]",
u"1. Hur bra är Ø:an egentligen på att... [...hålla tal]",
u"1. Hur bra är Ø:an egentligen på att... [...leka/busa]",
u"1. Hur bra är Ø:an egentligen på att... [...peppa folk]",
u"1. Hur bra är Ø:an egentligen på att... [...tävla]",
u"1. Hur bra är Ø:an egentligen på att... [...ge smicker]",
u"1. Hur bra är Ø:an egentligen på att... [...samla poäng]",
u"1. Hur bra är Ø:an egentligen på att... [...laga mat]",
u"1. Hur bra är Ø:an egentligen på att... [...göra egna recept]",
u"1. Hur bra är Ø:an egentligen på att... [...baka]",
u"1. Hur bra är Ø:an egentligen på att... [...äta kakor]",
u"1. Hur bra är Ø:an egentligen på att... [...servera]",
u"1. Hur bra är Ø:an egentligen på att... [...arrangera fester]",
u"1. Hur bra är Ø:an egentligen på att... [...använda sociala nätverk]",
u"1. Hur bra är Ø:an egentligen på att... [...videofilma]",
u"1. Hur bra är Ø:an egentligen på att... [...skriva artiklar]",
u"1. Hur bra är Ø:an egentligen på att... [...redigera film]",
u"1. Hur bra är Ø:an egentligen på att... [...blogga]",
u"1. Hur bra är Ø:an egentligen på att... [...fotografera]",
u"1. Hur bra är Ø:an egentligen på att... [...ta selfies]",
u"1. Hur bra är Ø:an egentligen på att... [...snapchatta]",
u"1. Hur bra är Ø:an egentligen på att... [...skrika]",
u"1. Hur bra är Ø:an egentligen på att... [...spela fotboll]",
u"1. Hur bra är Ø:an egentligen på att... [...hålla uppvärmning]",
u"1. Hur bra är Ø:an egentligen på att... [...köra bil]",
u"1. Hur bra är Ø:an egentligen på att... [...pricka rätt]",
]
columns_attributes = [
u"2. Spelar Ø:an gitarr, maracas, banjo, orgel eller något annat instrument kanske?",
u"3. Isåfall, vad ∅:an?",
u"4. Kommer Ø:an ta med det till Linköping?",
u"5. Känner Ø:an någon eller några som blivit upphöjd till etta på Linköpings Tekniska Högskola?",
u"6. Isåfall, Vem/vilka? Klass? Program? Massa frågor ∅:an!",
u"7. Har Ø:an någon slags allergi eller specialkost?",
u"8. Vilka egenskapsord passa in på Ø:an?",
u"9. Vad har Ø:an i Linköping?",
u"10. Har Ø:an erfarenhet av något av följande yrken?",
u"Annat yrke:",
u"11. Vilka sporter utövar Ø:an?",
u"Annan sport:",
u"12. Kan Ø:an tänka sig att uppträda inför publik?",
]
columns_extra = [
u"13. Har Ø:an varit på gymnasiebesök på Maskinteknologsektionen på LiTH?",
u"14. Har ∅:an studerat på universitet/högskola tidigare?",
u"15. Vad tycker Ø:an att det allsmäktiga Phadderiet mer bör veta om Ø:an?",
u"16. Vad är ∅:ans visdomsord?",
]
class Nollan:
def __init__(self, firstname, familyname, sex, id_nr, program, one2five, attr):
self.firstname = firstname
self.familyname = familyname
self.name = firstname + ' ' + familyname
self.sex = sex
self.program = program
self.school_class = "N/A"
self.id_nr = id_nr
self.match = {}
self.match_relative = {}
self.assigned = None
self.one2five = one2five
self.attr = attr
self.dealbreaker = []
self.question_post = "N/A"
self.question_random = "N/A"
def __str__(self):
s = "Nollan: "
if self.name is not None:
s += self.name
# if self.id_nr is not None:
# s += ", " + unicode(self.id_nr)
if self.program is not None:
s += ", " + self.program
s += '\n'
return s
def print_all(self):
s = "Nollan: "
if self.name is not None:
s += self.name + ", "
else:
s += "N/A, "
if self.assigned is not None:
s += self.assigned + ", "
else:
s += "N/A, "
return s
def set_match(self, mission, score):
self.match[mission] = score
def set_match_relative(self, mission, score):
self.match_relative[mission] = score
class Mission:
def __init__(self, name):
self.name = name
self.priority = 2
self.count = 0
self.boys = 0
self.girls = 0
self.instrument = 0
self.dpu = 0
self.dpu_a = 0
self.dpu_b = 0
self.m = 0
self.m_a = 0
self.m_b = 0
self.m_c = 0
self.m_d = 0
self.emm = 0
self.emm_a = 0
self.emm_b = 0
self.one2five = ""
self.attr = ""
self.dealbreaker = []
self.assigned = []
self.questions = []
self.sex = '-'
self.school_class = '-'
self.program = '-'
self.id = id(self)
def is_valid(self):
if self.name is None:
print "ERROR: Could not create mission without name."
return False
if self.count == 0:
print "ERROR: Could not create mission without any nollan."
return False
if self.one2five == "" and self.attr == "":
print "ERROR: Could not create mission without any attributes or 1 to 5 questions."
return False
if self.questions == []:
print "ERROR: Could not create mission without any questions."
return False
return True
def __str__(self):
return "Name: " + str(self.name) \
+ "\nPriority: " + str(self.priority) \
+ "\nNollan: " + str(self.count)
def read_missions(path):
missions = []
current_mission = None
if not os.path.exists(path):
print "ERROR: Could not find " + path
sys.exit(0)
print "Reading missions from", mission_filename
with open(path) as f:
try:
for line in f:
if line != "\n" and line[0] != '#':
line = line.translate(None, '\n')
line = line.replace(': ', ':')
if line[0] == '%':
if current_mission is not None:
add_missions(current_mission, missions)
current_mission = Mission(line[2:])
else:
key = line.split(':')[0].lower()
arg = line.split(':')[1]
klasser = ["m_a", "m_b", "m_c", "m_d", "emm_a", "emm_b", "dpu_a", "dpu_b"]
tot_klasser = 0
if key == "prio":
current_mission.priority = int(arg)
elif key == "fraga" or key == "fråga":
current_mission.questions.append(arg)
elif key == "totalt antal" or key == "antal":
if int(arg) > current_mission.boys + current_mission.girls:
current_mission.count = int(arg)
# instrument, gyckel
elif key == "instrument":
current_mission.instrument = int(arg)
if int(arg) > current_mission.count:
current_mission.count = int(arg)
# Kon
elif key == "killar" or key == "pojkar":
current_mission.boys = int(arg)
if current_mission.boys + current_mission.girls > current_mission.count:
current_mission.count = current_mission.boys + current_mission.girls
elif key == "flickor" or key == "tjejer":
current_mission.girls = int(arg)
if current_mission.boys + current_mission.girls > current_mission.count:
current_mission.count = current_mission.boys + current_mission.girls
# Program
elif key == "m":
current_mission.m = int(arg)
if current_mission.m + current_mission.dpu + current_mission.emm > current_mission.count:
current_mission.count = current_mission.m + current_mission.dpu + current_mission.emm
elif key == "emm":
current_mission.emm = int(arg)
if current_mission.m + current_mission.dpu + current_mission.emm > current_mission.count:
current_mission.count = current_mission.m + current_mission.dpu + current_mission.emm
elif key == "dpu":
current_mission.dpu = int(arg)
if current_mission.m + current_mission.dpu + current_mission.emm > current_mission.count:
current_mission.count = current_mission.m + current_mission.dpu + current_mission.emm
# Klasser
elif key in klasser:
if key == "m_a":
current_mission.m_a = int(arg)
elif key == "m_b":
current_mission.m_b = int(arg)
elif key == "m_c":
current_mission.m_c = int(arg)
elif key == "m_d":
current_mission.m_d = int(arg)
elif key == "emm_a":
current_mission.emm_a = int(arg)
elif key == "emm_b":
current_mission.emm_b = int(arg)
elif key == "dpu_a":
current_mission.dpu_a = int(arg)
elif key == "dpu_b":
current_mission.dpu_b = int(arg)
tot_klasser = tot_klasser + int(arg)
if tot_klasser > current_mission.count:
current_mission.count = tot_klasser
# Enkatsvar
elif key == "fragor" or key == "frågor" or key == "1-5":
tmp = arg.replace(', ', ',')
if tmp[-1] == ',':
tmp = tmp[:-1]
tmp = tmp.split(',')
current_mission.one2five = tmp
elif key == "egenskaper":
if tmp[-1] == ',':
tmp = tmp[:-1]
tmp = arg.replace(', ', ',')
tmp = tmp.split(',')
current_mission.attr = tmp
elif key == "deal-breaker":
current_mission.dealbreaker = arg.split()
else:
print "ERROR: Unknown keyword " + key
except:
print "ERROR: Failed to parse " + line
# Add last one to list
if current_mission is not None:
add_missions(current_mission, missions)
f.close()
return missions
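# Illustrative sketch of a mission file that read_missions() accepts
# (hypothetical mission name and numbers, not taken from any real file):
#
#   # lines starting with '#' are ignored
#   % Fotografering
#   prio: 1
#   antal: 3
#   killar: 1
#   fraga: Vilken kamera gillar nollan mest?
#   1-5: fotografera, redigera film
#   egenskaper: Uppträda
#   deal-breaker: instrument
#
# Each '%' line starts a new mission; the 'key: value' lines that follow feed
# the elif chain above.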
def add_missions(new, missions):
if new.is_valid():
if new.m > 0:
new.program = 'M'
elif new.dpu > 0:
new.program = 'DPU'
elif new.emm > 0:
new.program = 'EMM'
# Instrument
if new.instrument > 0:
tmp = copy.deepcopy(new)
tmp.count = tmp.instrument
tmp.dealbreaker.append("instrument")
tmp.id = id(tmp)
missions.append(tmp)
new.count -= new.instrument
# Gender
if new.boys > 0:
tmp = copy.deepcopy(new)
tmp.count = tmp.boys
tmp.sex = 'M'
tmp.id = id(tmp)
missions.append(tmp)
new.count -= new.boys
if new.girls > 0:
tmp = copy.deepcopy(new)
            tmp.count = tmp.girls
tmp.sex = 'F'
tmp.id = id(tmp)
missions.append(tmp)
new.count -= new.girls
# Class
if new.m_a > 0:
tmp = copy.deepcopy(new)
tmp.count = tmp.m_a
tmp.school_class = 'M_A'
tmp.id = id(tmp)
missions.append(tmp)
new.count -= new.m_a
if new.m_b > 0:
tmp = copy.deepcopy(new)
tmp.count = tmp.m_b
tmp.school_class = 'M_B'
tmp.id = id(tmp)
missions.append(tmp)
new.count -= new.m_b
if new.m_c > 0:
tmp = copy.deepcopy(new)
tmp.count = tmp.m_c
tmp.school_class = 'M_C'
tmp.id = id(tmp)
missions.append(tmp)
new.count -= new.m_c
if new.m_d > 0:
tmp = copy.deepcopy(new)
tmp.count = tmp.m_d
tmp.school_class = 'M_D'
tmp.id = id(tmp)
missions.append(tmp)
new.count -= new.m_d
if new.emm_a > 0:
tmp = copy.deepcopy(new)
tmp.count = tmp.emm_a
tmp.school_class = 'EMM_A'
tmp.id = id(tmp)
missions.append(tmp)
new.count -= new.emm_a
if new.emm_b > 0:
tmp = copy.deepcopy(new)
tmp.count = tmp.emm_b
tmp.school_class = 'EMM_B'
tmp.id = id(tmp)
missions.append(tmp)
new.count -= new.emm_b
if new.dpu_a > 0:
tmp = copy.deepcopy(new)
tmp.count = tmp.dpu_a
tmp.school_class = 'DPU_A'
tmp.id = id(tmp)
missions.append(tmp)
new.count -= new.dpu_a
if new.dpu_b > 0:
tmp = copy.deepcopy(new)
tmp.count = tmp.dpu_b
tmp.school_class = 'DPU_B'
tmp.id = id(tmp)
missions.append(tmp)
new.count -= new.dpu_b
if new.count > 0:
missions.append(new)
else:
print "ERROR: Failed to add incomplete mission " + new.name
def read_questions(path):
if not os.path.exists(path):
print "ERROR: Could not find " + path
sys.exit(0)
questions = []
with open(path) as f:
for line in f:
if line != "\n" and line[0] != '#':
line = line.translate(None, '\n')
questions.append(line)
f.close()
return questions
def set_classes(path, nollan):
    # Should be run AFTER setting program.
# Program is used as sanity check as there can be people with the same name.
# Undefined if there are people in the same program with the same name.
if not os.path.exists(path):
print "ERROR: Could not find " + path
sys.exit(0)
classes = {}
current_class = None
only_in_klasslista = []
only_in_nollan = []
with open(path) as f:
for line in f:
if line != "\n" and line[0] != '#':
line = line.translate(None, '\n')
if line[0] == '%':
line = line.translate(None, ' ')
current_class = line[1:]
classes[current_class] = []
elif current_class is not None:
tmp = line.split()
tmp = tmp[1] + ' ' + tmp[0]
classes[current_class].append(tmp)
else:
print "WARNING: Trying to read nollan from " + path + " before setting class!"
f.close()
for n in nollan:
# Some nollan are confused about what their first/last names are...
found = False
for c in classes:
for name in classes[c]:
tmp_name = name.split()[1] + ' ' + name.split()[0]
if name.upper().decode('utf-8') == n.name.upper() or tmp_name.upper().decode('utf-8') == n.name.upper():
n.school_class = c
found = True
break
if found:
break
if not found:
only_in_nollan.append(n)
if len(only_in_nollan) > 0:
print "WARNING: " + str(len(only_in_nollan)) + " of " + str(len(nollan)) + " nollan not found in " + path
for n in only_in_nollan:
print unicode(n.name)
def read_nollan(path):
global columns_one2five
global columns_attributes
global columns_dealbreakers
global columns_about_nollan
ans_to_int = [
u"zero-not-used!",
u"va?!?",
u"sissodär...",
u"lagom..!",
u"helt ok!",
u"braaa!",
]
if not os.path.exists(path):
print "ERROR: Could not find " + path
sys.exit(0)
print "Reading Nollan from", data_filename
nollan = []
incorrect_entries = []
ids = []
names = []
workbook = openpyxl.load_workbook(filename = path, use_iterators = True)
worksheet = workbook.get_sheet_by_name(workbook.get_sheet_names()[0])
first_row = True
for row in worksheet.iter_rows():
# Sanity check, same number of columns in document and lists
if first_row:
first_row = False
total_columns = len(columns_one2five) + len(columns_about_nollan) + \
len(columns_attributes) + len(columns_extra)
actual_columns = 0
while True:
cont = row[actual_columns].value
if cont is None:
break
actual_columns += 1
if actual_columns != total_columns:
print "WARNING: Document contains " + str(actual_columns) + " columns, expected " + str(total_columns) +'!'
print "Total number of columns: " + str(actual_columns)
else:
try:
one2five_attributes = []
for item in range(len(columns_one2five)):
one2five_attributes.append( ans_to_int.index(row[item + len(columns_about_nollan)].value))
attributes = []
tmp = row[columns_attributes.index(u"8. Vilka egenskapsord passa in på Ø:an?") \
+ len(columns_about_nollan) + len(columns_one2five)].value
if tmp is not None:
attributes += tmp.split(', ')
tmp = row[columns_attributes.index(u"9. Vad har Ø:an i Linköping?") \
+ len(columns_about_nollan) + len(columns_one2five)].value
if tmp is not None:
attributes += tmp.split(', ')
tmp = row[columns_attributes.index(u"10. Har Ø:an erfarenhet av något av följande yrken?") \
+ len(columns_about_nollan) + len(columns_one2five)].value
if tmp is not None:
attributes += tmp.split(', ')
tmp = row[columns_attributes.index(u"11. Vilka sporter utövar Ø:an?") \
+ len(columns_about_nollan) + len(columns_one2five)].value
if tmp is not None:
attributes += tmp.split(', ')
kon_val = row[columns_about_nollan.index(u"Kön")].value
if kon_val == "Man":
kon = 'M'
elif kon_val == "Kvinna":
kon = 'F'
else:
kon = '-'
new_nollan = Nollan(row[columns_about_nollan.index(u"Förnamn")].value,
row[columns_about_nollan.index(u"Efternamn")].value,
# row[columns_about_nollan.index(u"Kön")].value,
# "-",
kon,
row[columns_about_nollan.index(u"Personnummer")].value,
row[columns_about_nollan.index(u"Program")].value,
one2five_attributes, attributes)
# instrument
val1 = row[columns_attributes.index(u"2. Spelar Ø:an gitarr, maracas, banjo, orgel eller något annat instrument kanske?") \
+ len(columns_about_nollan) + len(columns_one2five)].value
val2 = row[columns_attributes.index(u"4. Kommer Ø:an ta med det till Linköping?") \
+ len(columns_about_nollan) + len(columns_one2five)].value
if val1 == u"Ja!" and val2 == u"Ja!":
new_nollan.attr.append(u"instrument")
# studied before
val = row[columns_extra.index(u"14. Har ∅:an studerat på universitet/högskola tidigare?") + len(columns_attributes) \
+ len(columns_about_nollan) + len(columns_one2five)].value
if val == u"Ja!":
new_nollan.attr.append(u"pluggat tidigare")
# can perform on stage
val = row[columns_attributes.index(u"12. Kan Ø:an tänka sig att uppträda inför publik?") \
+ len(columns_about_nollan) + len(columns_one2five)].value
if val == u"Ja!":
new_nollan.attr.append(u"Uppträda")
# Has to be run AFTER "studied before"
# For any mission at vikinga, make sure nollan is suitable
if is_viking(new_nollan):
new_nollan.attr.append(u"Viking")
if not new_nollan.id_nr in ids:
nollan.append(new_nollan)
ids.append(new_nollan.id_nr)
except:
incorrect_entries.append(row[0].row)
print "WARNING: Failed to read line " + unicode(row[0].row) + " in " + path
return nollan
def is_viking(nollan):
# Nollan participating at vikinga should be more "experienced"
# Nollan is suitable if nollan either has studied before or is old enough
age = 21
try:
# Studied before
if "pluggat tidigare" in nollan.attr:
return True
# Age
t = datetime.date.today().strftime("%Y%m%d")
latest_born = int(str(datetime.date.today() - datetime.timedelta(days=age*365)).translate(None, '-'))
if int(str(nollan.id_nr)[:2]) > int(t[2:4]):
born = int(str('19' + nollan.id_nr)[:8])
else:
born = int(str('20' + nollan.id_nr)[:8])
if born < latest_born:
return True
except:
print "ERROR: could not determine if nollan is a true Viking :( "
return False
return False
def write_nollan(nollan, missions, path):
print "Writing nollans attributes to ", path
nollan.sort(key=lambda x: x.name)
mission_map = {}
for m in missions:
mission_map[m.id] = unicode(m.name, "utf-8")
f = open(path,'w')
for item in nollan:
printed = []
try:
s = item.name + ', ' + item.sex + ', ' + item.program
if item.assigned is not None:
s += ' (' + unicode(item.assigned.name, "utf-8") + ')\n'
f.write(s.encode('utf8'))
f.write(item.question_post + '\n')
f.write(item.question_random + '\n')
for a in item.match:
if item.match_relative[a] != 0 and mission_map[a] not in printed:
s = mission_map[a] + ": " + unicode(item.match_relative[a]) + u'%\n'
f.write(s.encode('utf8'))
printed.append(mission_map[a])
f.write('\n')
except:
print "ERROR: Failed to write " + unicode(item)[:-1]
f.close()
def write_sorting(sorting, path):
print "Writing sorting to", path
f = open(path,'w')
prev_item = None
sorting.sort(key=lambda x: x.name)
for item in sorting:
try:
if prev_item != item.name:
f.write('\n' + item.name.upper() + '\n')
item.assigned.sort(key=lambda x: x.name)
for a in item.assigned:
s = a.name + ": " + str(a.school_class) + '\n'
f.write(s.encode('utf8'))
prev_item = item.name
except:
print "ERROR: failed to write " + item.name + " to " + path
f.close()
def write_class_sorting(nollan, path):
print "Writing sorting to", path
# Sort nollan by class and by name
nollan.sort(key=lambda x: x.name)
nollan.sort(key=lambda x: x.school_class)
f = open(path,'w')
prev_class = None
for item in nollan:
try:
if prev_class != item.school_class:
f.write('\n' + item.school_class.upper() + '\n')
s = item.name + " (" + unicode(item.assigned.name, "utf-8") + ') ' + unicode(item.question_post, "utf-8") \
+ ', ' + unicode(item.question_random, "utf-8") + '\n'
f.write(s.encode('utf8'))
prev_class = item.school_class
except:
print "ERROR: failed to write " + item.name + " to " + path
f.close()
def match(nollan, mission):
global columns_one2five
# set 0 for wrong sex/class/program
if mission.sex != "-":
if mission.sex != nollan.sex:
return 0
if mission.program != "-":
if mission.program != nollan.program:
return 0
if mission.school_class != "-":
if mission.school_class.upper() != nollan.school_class.upper():
return 0
# match 1-5 questions
res_1 = 0
for f in mission.one2five:
try:
tmp = u"1. Hur bra är Ø:an egentligen på att... [..." + unicode(f, "utf-8").lower() + u']'
res_1 += nollan.one2five[columns_one2five.index(tmp)]
except:
print "ERROR: Could not find question " + tmp
# match attribute
res_2 = 0
for a in mission.attr:
if unicode(a, "utf-8") in nollan.attr:
res_2 += 1
# dealbreakers
res_3 = 1
for d in mission.dealbreaker:
if not unicode(d, "utf-8") in nollan.attr:
res_3 = 0
# Math for determining how suitable a nollan is
return (res_1 + 5*res_2) * res_3
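# Illustrative sketch (hypothetical values): for a mission with
# one2five = ["fotografera"], attr = ["Uppträda"] and no deal-breakers, a nollan
# who answered "helt ok!" (4) on the matching 1-5 question and listed "Uppträda"
# scores (4 + 5*1) * 1 = 9; failing any deal-breaker multiplies the score by 0.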
def normalize(nollan, mission):
for m in mission:
tot = 0
maximum = 0
for n in nollan:
tot += n.match[m.id]
maximum = max(n.match[m.id], maximum)
for n in nollan:
if maximum == 0:
print "ERROR: Maximum match is 0 for " + m.name
n.set_match_relative(m.id, 0)
else:
n.set_match_relative(m.id, n.match[m.id]*100/maximum)
def most_urgent(mission):
# Priority groups
# 1 = high, 2 = low, 3 = optional, 9 = unset
top_prio = 9
high_prio_missions = []
for m in mission:
if m.count > len(m.assigned):
if m.priority < top_prio:
top_prio = m.priority
high_prio_missions = []
if m.priority <= top_prio:
high_prio_missions.append(m)
most_empty = None
emptyness = float(2)
for hpm in high_prio_missions:
tmp_empty = float(1 + len(hpm.assigned))/float(hpm.count)
if tmp_empty < emptyness:
emptyness = tmp_empty
most_empty = hpm
return most_empty
def best_nollan(nollan, mission):
bn = None
maximum = 0
for n in nollan:
if nollan[n].match_relative[mission.id] > maximum:
maximum = nollan[n].match_relative[mission.id]
bn = nollan[n]
if bn is None:
print "ERROR: No matching nollan found for " + mission.name
return bn
def select(nollan, mission):
unassigned = {}
total_empty = 0
for n in nollan:
unassigned[n.id_nr] = n
while len(unassigned) != 0:
mamma_mu = most_urgent(mission)
if mamma_mu is None:
print "WARNIGN: No posts left to fill! " + str(len(unassigned)) + " Nollan with no misson assigned."
break
bn = best_nollan(unassigned, mamma_mu)
if bn is not None:
mamma_mu.assigned.append(bn)
bn.assigned = mamma_mu
del unassigned[bn.id_nr]
else:
print "ERROR: No suitable nollan found for mission!"
break
# Count the number of empty spots
total_empty = []
for m in mission:
while m.priority >= len(total_empty):
total_empty.append(0)
total_empty[m.priority] += m.count - len(m.assigned)
    if sum(total_empty) > 0:
s = "WARNING: Total " + str(sum(total_empty)) + " unfilled positions. ("
for i in range(len(total_empty)):
if total_empty[i] != 0:
s += " prio " + str(i) + ': ' + str(total_empty[i]) + '. '
s += ')'
print s
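# The selection above is a greedy loop: most_urgent() picks the emptiest mission
# in the highest remaining priority group, best_nollan() hands it the unassigned
# nollan with the highest relative match, and the loop repeats until every
# nollan is placed or no posts are left to fill.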
def set_questions(nollan):
random_questions = read_questions(questions_filename)
for n in nollan:
        if n.assigned is None:
            print "WARNING: Could not set questions. No mission assigned to " + unicode(n.print_all())
        else:
            try:
                l = n.assigned.questions
                n.question_post = l[randint(0,len(l)-1)]
                n.question_random = random_questions[randint(0,len(random_questions)-1)]
            except:
                print "ERROR: Failed to set questions for " + unicode(n)[:-1]
if __name__ == "__main__":
missions = read_missions(mission_filename)
nollan = read_nollan(data_filename)
set_classes(classes_filename, nollan)
for n in nollan:
for m in missions:
n.set_match(m.id, match(n,m))
normalize(nollan, missions)
select(nollan, missions)
set_questions(nollan)
write_sorting(missions, sorting_filename)
write_nollan(nollan, missions, nollan_filename)
write_class_sorting(nollan, class_sorting_filename)
print "\n I am the Lord, thy God:\n \
1: Thou shalt remember DVBF. \n \
2: Thou shalt honor thy elders.\n \
3: Thou shalt understand and be in awe of the meaning of Inri.\n \
4: Thou shalt turn water into wine (and give to your elders)\n \
5: Thou shalt worship no false Idols (beside Barbara). \n \
6: Thou shalt show thankfullness [sv: Tackfestfullhet, red. anm.]\n \
7: Thou shalt look at my horse, my horse is amazing. \n \
8: Thou shalt not covet thy neighbors ass (the animal, stupid!)\n \
9: Thou shalt covet thy neighbor (the one on the left).\n \
10: Thou shalt show respect when thou calleth tech support.\n \
"
|
idegtiarov/gnocchi-rep | refs/heads/master | gnocchi/ceilometer/dispatcher.py | 1 | #
# Copyright 2014 eNovance
#
# Authors: Julien Danjou <[email protected]>
# Mehdi Abaakouk <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import fnmatch
import threading
import itertools
import json
import operator
import os
import yaml
from ceilometer import dispatcher
from ceilometer.i18n import _
from oslo_config import cfg
from oslo_log import log
import requests
import six
import stevedore.dispatch
from gnocchi.ceilometer import utils
LOG = log.getLogger(__name__)
dispatcher_opts = [
cfg.BoolOpt('filter_service_activity',
default=True,
help='Filter out samples generated by Gnocchi '
'service activity'),
cfg.StrOpt('filter_project',
default='gnocchi',
help='Gnocchi project used to filter out samples '
'generated by Gnocchi service activity'),
cfg.StrOpt('url',
default="http://localhost:8041",
help='URL to Gnocchi.'),
cfg.StrOpt('archive_policy',
default="low",
help='The archive policy to use when the dispatcher '
'create a new metric.'),
cfg.StrOpt('archive_policy_file',
default='/etc/ceilometer/gnocchi_archive_policy_map.yaml',
help=_('The Yaml file that defines per metric archive '
'policies.')),
]
cfg.CONF.register_opts(dispatcher_opts, group="dispatcher_gnocchi")
class UnexpectedWorkflowError(Exception):
pass
class NoSuchMetric(Exception):
pass
class MetricAlreadyExists(Exception):
pass
class NoSuchResource(Exception):
pass
class ResourceAlreadyExists(Exception):
pass
def log_and_ignore_unexpected_workflow_error(func):
def log_and_ignore(self, *args, **kwargs):
try:
func(self, *args, **kwargs)
except requests.ConnectionError as e:
with self._gnocchi_api_lock:
self._gnocchi_api = None
LOG.warn("Connection error, reconnecting...")
except UnexpectedWorkflowError as e:
LOG.error(six.text_type(e))
return log_and_ignore
class GnocchiDispatcher(dispatcher.Base):
def __init__(self, conf):
super(GnocchiDispatcher, self).__init__(conf)
self.conf = conf
self.filter_service_activity = (
conf.dispatcher_gnocchi.filter_service_activity)
self._ks_client = utils.get_keystone_client()
self.gnocchi_url = conf.dispatcher_gnocchi.url
self.gnocchi_archive_policy_default = (
conf.dispatcher_gnocchi.archive_policy)
self.gnocchi_archive_policy_data = self._load_archive_policy(conf)
self.mgmr = stevedore.dispatch.DispatchExtensionManager(
'gnocchi.ceilometer.resource', lambda x: True,
invoke_on_load=True)
self._gnocchi_project_id = None
self._gnocchi_project_id_lock = threading.Lock()
self._gnocchi_api = None
self._gnocchi_api_lock = threading.Lock()
def _get_headers(self, content_type="application/json"):
return {
'Content-Type': content_type,
'X-Auth-Token': self._ks_client.auth_token,
}
def _load_archive_policy(self, conf):
policy_config_file = self._get_config_file(conf)
data = {}
if policy_config_file is not None:
with open(policy_config_file) as data_file:
try:
data = yaml.safe_load(data_file)
except ValueError:
data = {}
return data
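    # Illustrative sketch of what the archive policy mapping file could contain
    # (hypothetical content; the actual gnocchi_archive_policy_map.yaml of a
    # deployment may differ):
    #
    #   cpu_util: medium
    #   disk.*: low
    #
    # i.e. a mapping from metric name pattern to archive policy name, consumed
    # by _match_metric() below.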
def get_archive_policy(self, metric_name):
archive_policy = {}
if self.gnocchi_archive_policy_data is not None:
policy_match = self._match_metric(metric_name)
archive_policy['archive_policy_name'] = (
policy_match or self.gnocchi_archive_policy_default)
else:
LOG.debug(_("No archive policy file found!"
" Using default config."))
archive_policy['archive_policy_name'] = (
self.gnocchi_archive_policy_default)
return archive_policy
@staticmethod
def _get_config_file(conf):
config_file = conf.dispatcher_gnocchi.archive_policy_file
if not os.path.exists(config_file):
config_file = cfg.CONF.find_file(config_file)
return config_file
def _match_metric(self, metric_name):
        for metric, policy in self.gnocchi_archive_policy_data.items():
# Support wild cards such as disk.*
if fnmatch.fnmatch(metric_name, metric):
return policy
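    # Example of the wildcard matching used above:
    # fnmatch.fnmatch('disk.device.read.bytes', 'disk.*') is True, so a single
    # 'disk.*' entry can map every disk metric to one archive policy.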
@property
def gnocchi_project_id(self):
if self._gnocchi_project_id is not None:
return self._gnocchi_project_id
with self._gnocchi_project_id_lock:
if self._gnocchi_project_id is None:
try:
project = self._ks_client.tenants.find(
name=self.conf.dispatcher_gnocchi.filter_project)
except Exception:
                    LOG.exception('failed to retrieve the Gnocchi service project')
raise
self._gnocchi_project_id = project.id
LOG.debug("gnocchi project found: %s" %
self.gnocchi_project_id)
return self._gnocchi_project_id
@property
def gnocchi_api(self):
"""return a working requests session object"""
if self._gnocchi_api is not None:
return self._gnocchi_api
with self._gnocchi_api_lock:
if self._gnocchi_api is None:
self._gnocchi_api = requests.session()
# NOTE(sileht): wait when the pool is empty
# instead of raising errors.
adapter = requests.adapters.HTTPAdapter(pool_block=True)
self._gnocchi_api.mount("http://", adapter)
self._gnocchi_api.mount("https://", adapter)
return self._gnocchi_api
def _is_gnocchi_activity(self, sample):
return (self.filter_service_activity and (
# avoid anything from the user used by gnocchi
sample['project_id'] == self.gnocchi_project_id or
# avoid anything in the swift account used by gnocchi
(sample['resource_id'] == self.gnocchi_project_id and
sample['counter_name'] in
self.mgmr['swift_account'].obj.get_metrics_names())
))
def record_metering_data(self, data):
# NOTE(sileht): skip sample generated by gnocchi itself
data = [s for s in data if not self._is_gnocchi_activity(s)]
        # FIXME(sileht): This method batches the processing of samples
        # grouped by resource_id and metric_name, but this is not
        # efficient yet because the data received here doesn't often
        # contain many different kinds of samples.
        # So perhaps the next step will be to pool the data received from
        # the message bus.
resource_grouped_samples = itertools.groupby(
data, key=operator.itemgetter('resource_id'))
for resource_id, samples_of_resource in resource_grouped_samples:
resource_need_to_be_updated = True
metric_grouped_samples = itertools.groupby(
list(samples_of_resource),
key=operator.itemgetter('counter_name'))
for metric_name, samples in metric_grouped_samples:
for ext in self.mgmr:
if metric_name in ext.obj.get_metrics_names():
self._process_samples(
ext, resource_id, metric_name, list(samples),
resource_need_to_be_updated)
                    # FIXME(sileht): Is it reasonable to skip the resource
                    # update here ? Can different kinds of counter_name
                    # have different metadata sets ?
                    # (ie: one has only flavor_id, and another one has only
                    # image_ref ?)
                    #
                    # resource_need_to_be_updated = False
@log_and_ignore_unexpected_workflow_error
def _process_samples(self, ext, resource_id, metric_name, samples,
resource_need_to_be_updated):
resource_type = ext.name
measure_attributes = [{'timestamp': sample['timestamp'],
'value': sample['counter_volume']}
for sample in samples]
try:
self._post_measure(resource_type, resource_id, metric_name,
measure_attributes)
except NoSuchMetric:
            # NOTE(sileht): we try to create the resource first, because it is
            # more likely that the resource doesn't exist than that the metric
            # is missing; this should reduce the number of resource API calls
resource_attributes = self._get_resource_attributes(
ext, resource_id, metric_name, samples)
try:
self._create_resource(resource_type, resource_id,
resource_attributes)
except ResourceAlreadyExists:
try:
self._create_metric(resource_type, resource_id,
metric_name)
except MetricAlreadyExists:
                    # NOTE(sileht): Just ignore it; the metric has been created
                    # in the meantime.
pass
else:
# No need to update it we just created it
# with everything we need
resource_need_to_be_updated = False
            # NOTE(sileht): we retry posting the measure, but if it fails we
            # don't catch the exception; it is just logged and we continue to
            # process other samples
self._post_measure(resource_type, resource_id, metric_name,
measure_attributes)
if resource_need_to_be_updated:
resource_attributes = self._get_resource_attributes(
ext, resource_id, metric_name, samples, for_update=True)
self._update_resource(resource_type, resource_id,
resource_attributes)
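    # In short, _process_samples() first posts the measures; a 404 (NoSuchMetric)
    # triggers creation of the resource, a 409 (ResourceAlreadyExists) falls back
    # to creating just the metric, and the measures are then posted again before
    # the optional resource update.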
def _get_resource_attributes(self, ext, resource_id, metric_name, samples,
for_update=False):
        # FIXME(sileht): Should I merge attributes of all samples ?
        # Or is keeping only the last one sufficient ?
attributes = ext.obj.get_resource_extra_attributes(
samples[-1])
if not for_update:
attributes["id"] = resource_id
attributes["user_id"] = samples[-1]['user_id']
attributes["project_id"] = samples[-1]['project_id']
attributes["metrics"] = dict(
(metric_name, self.get_archive_policy(metric_name))
for metric_name in ext.obj.get_metrics_names()
)
return attributes
def _post_measure(self, resource_type, resource_id, metric_name,
measure_attributes):
r = self.gnocchi_api.post("%s/v1/resource/%s/%s/metric/%s/measures"
% (self.gnocchi_url, resource_type,
resource_id, metric_name),
headers=self._get_headers(),
data=json.dumps(measure_attributes))
if r.status_code == 404:
LOG.debug(_("The metric %(metric_name)s of "
"resource %(resource_id)s doesn't exists: "
"%(status_code)d"),
{'metric_name': metric_name,
'resource_id': resource_id,
'status_code': r.status_code})
raise NoSuchMetric
elif int(r.status_code / 100) != 2:
raise UnexpectedWorkflowError(
_("Fail to post measure on metric %(metric_name)s of "
"resource %(resource_id)s with status: "
"%(status_code)d: %(msg)s") %
{'metric_name': metric_name,
'resource_id': resource_id,
'status_code': r.status_code,
'msg': r.text})
else:
LOG.debug("Measure posted on metric %s of resource %s",
metric_name, resource_id)
def _create_resource(self, resource_type, resource_id,
resource_attributes):
r = self.gnocchi_api.post("%s/v1/resource/%s"
% (self.gnocchi_url, resource_type),
headers=self._get_headers(),
data=json.dumps(resource_attributes))
if r.status_code == 409:
LOG.debug("Resource %s already exists", resource_id)
raise ResourceAlreadyExists
elif int(r.status_code / 100) != 2:
raise UnexpectedWorkflowError(
_("Resource %(resource_id)s creation failed with "
"status: %(status_code)d: %(msg)s") %
{'resource_id': resource_id,
'status_code': r.status_code,
'msg': r.text})
else:
LOG.debug("Resource %s created", resource_id)
def _update_resource(self, resource_type, resource_id,
resource_attributes):
r = self.gnocchi_api.patch(
"%s/v1/resource/%s/%s"
% (self.gnocchi_url, resource_type, resource_id),
headers=self._get_headers(),
data=json.dumps(resource_attributes))
if int(r.status_code / 100) != 2:
raise UnexpectedWorkflowError(
_("Resource %(resource_id)s update failed with "
"status: %(status_code)d: %(msg)s") %
{'resource_id': resource_id,
'status_code': r.status_code,
'msg': r.text})
else:
LOG.debug("Resource %s updated", resource_id)
def _create_metric(self, resource_type, resource_id, metric_name):
params = {metric_name: self.get_archive_policy(metric_name)}
r = self.gnocchi_api.post("%s/v1/resource/%s/%s/metric"
% (self.gnocchi_url, resource_type,
resource_id),
headers=self._get_headers(),
data=json.dumps(params))
if r.status_code == 409:
LOG.debug("Metric %s of resource %s already exists",
metric_name, resource_id)
raise MetricAlreadyExists
elif int(r.status_code / 100) != 2:
raise UnexpectedWorkflowError(
_("Fail to create metric %(metric_name)s of "
"resource %(resource_id)s with status: "
"%(status_code)d: %(msg)s") %
{'metric_name': metric_name,
'resource_id': resource_id,
'status_code': r.status_code,
'msg': r.text})
else:
LOG.debug("Metric %s of resource %s created",
metric_name, resource_id)
@staticmethod
def record_events(events):
raise NotImplementedError
|
yunstanford/sanic | refs/heads/master | tests/test_custom_protocol.py | 3 | from sanic.response import text
from sanic.server import HttpProtocol
class CustomHttpProtocol(HttpProtocol):
def write_response(self, response):
if isinstance(response, str):
response = text(response)
self.transport.write(response.output(self.request.version))
self.transport.close()
def test_use_custom_protocol(app):
@app.route("/1")
async def handler_1(request):
return "OK"
server_kwargs = {"protocol": CustomHttpProtocol}
request, response = app.test_client.get("/1", server_kwargs=server_kwargs)
assert response.status == 200
assert response.text == "OK"
|
shrimpboyho/git.js | refs/heads/master | emscript/python/2.7.5.1_32bit/Lib/test/test_exception_variations.py | 214 |
from test.test_support import run_unittest
import unittest
class ExceptionTestCase(unittest.TestCase):
def test_try_except_else_finally(self):
hit_except = False
hit_else = False
hit_finally = False
try:
raise Exception, 'nyaa!'
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertTrue(hit_except)
self.assertTrue(hit_finally)
self.assertFalse(hit_else)
def test_try_except_else_finally_no_exception(self):
hit_except = False
hit_else = False
hit_finally = False
try:
pass
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertFalse(hit_except)
self.assertTrue(hit_finally)
self.assertTrue(hit_else)
def test_try_except_finally(self):
hit_except = False
hit_finally = False
try:
raise Exception, 'yarr!'
except:
hit_except = True
finally:
hit_finally = True
self.assertTrue(hit_except)
self.assertTrue(hit_finally)
def test_try_except_finally_no_exception(self):
hit_except = False
hit_finally = False
try:
pass
except:
hit_except = True
finally:
hit_finally = True
self.assertFalse(hit_except)
self.assertTrue(hit_finally)
def test_try_except(self):
hit_except = False
try:
raise Exception, 'ahoy!'
except:
hit_except = True
self.assertTrue(hit_except)
def test_try_except_no_exception(self):
hit_except = False
try:
pass
except:
hit_except = True
self.assertFalse(hit_except)
def test_try_except_else(self):
hit_except = False
hit_else = False
try:
raise Exception, 'foo!'
except:
hit_except = True
else:
hit_else = True
self.assertFalse(hit_else)
self.assertTrue(hit_except)
def test_try_except_else_no_exception(self):
hit_except = False
hit_else = False
try:
pass
except:
hit_except = True
else:
hit_else = True
self.assertFalse(hit_except)
self.assertTrue(hit_else)
def test_try_finally_no_exception(self):
hit_finally = False
try:
pass
finally:
hit_finally = True
self.assertTrue(hit_finally)
def test_nested(self):
hit_finally = False
hit_inner_except = False
hit_inner_finally = False
try:
try:
raise Exception, 'inner exception'
except:
hit_inner_except = True
finally:
hit_inner_finally = True
finally:
hit_finally = True
self.assertTrue(hit_inner_except)
self.assertTrue(hit_inner_finally)
self.assertTrue(hit_finally)
def test_nested_else(self):
hit_else = False
hit_finally = False
hit_except = False
hit_inner_except = False
hit_inner_else = False
try:
try:
pass
except:
hit_inner_except = True
else:
hit_inner_else = True
raise Exception, 'outer exception'
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertFalse(hit_inner_except)
self.assertTrue(hit_inner_else)
self.assertFalse(hit_else)
self.assertTrue(hit_finally)
self.assertTrue(hit_except)
def test_main():
run_unittest(ExceptionTestCase)
if __name__ == '__main__':
test_main()
|
siemens/rpyc | refs/heads/master | tests/test_refcount.py | 9 | import rpyc
import gc
import unittest
class TestRefcount(unittest.TestCase):
def setUp(self):
self.conn = rpyc.classic.connect_thread()
def tearDown(self):
self.conn.close()
def test_refcount(self):
self.conn.execute("""
deleted_objects = []
class DummyObject(object):
def __init__(self, name):
self.name = name
def __del__(self):
deleted_objects.append(self.name)""")
rDummyObject = self.conn.namespace["DummyObject"]
d1 = rDummyObject("d1")
d2 = rDummyObject("d2")
d3 = rDummyObject("d3")
d4 = rDummyObject("d4") #@UnusedVariable
d2_copy = d2
del d1
del d3
gc.collect()
self.assertEqual(set(self.conn.namespace["deleted_objects"]), set(["d1", "d3"]))
del d2
gc.collect()
self.assertEqual(set(self.conn.namespace["deleted_objects"]), set(["d1", "d3"]))
del d2_copy
gc.collect()
self.assertEqual(set(self.conn.namespace["deleted_objects"]), set(["d1", "d2", "d3"]))
if __name__ == "__main__":
unittest.main()
|
mariosky/evo-drawings | refs/heads/master | venv/lib/python2.7/site-packages/django/contrib/gis/db/backends/spatialite/operations.py | 63 | import re
import sys
from decimal import Decimal
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.util import SpatialOperation, SpatialFunction
from django.contrib.gis.db.backends.spatialite.adapter import SpatiaLiteAdapter
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import DatabaseOperations
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.functional import cached_property
class SpatiaLiteOperator(SpatialOperation):
"For SpatiaLite operators (e.g. `&&`, `~`)."
def __init__(self, operator):
super(SpatiaLiteOperator, self).__init__(operator=operator)
class SpatiaLiteFunction(SpatialFunction):
"For SpatiaLite function calls."
def __init__(self, function, **kwargs):
super(SpatiaLiteFunction, self).__init__(function, **kwargs)
class SpatiaLiteFunctionParam(SpatiaLiteFunction):
"For SpatiaLite functions that take another parameter."
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s)'
class SpatiaLiteDistance(SpatiaLiteFunction):
"For SpatiaLite distance operations."
dist_func = 'Distance'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s) %(operator)s %%s'
def __init__(self, operator):
super(SpatiaLiteDistance, self).__init__(self.dist_func,
operator=operator)
class SpatiaLiteRelate(SpatiaLiteFunctionParam):
"For SpatiaLite Relate(<geom>, <pattern>) calls."
pattern_regex = re.compile(r'^[012TF\*]{9}$')
def __init__(self, pattern):
if not self.pattern_regex.match(pattern):
raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
super(SpatiaLiteRelate, self).__init__('Relate')
# Valid distance types and substitutions
dtypes = (Decimal, Distance, float) + six.integer_types
def get_dist_ops(operator):
"Returns operations for regular distances; spherical distances are not currently supported."
return (SpatiaLiteDistance(operator),)
class SpatiaLiteOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = 'django.contrib.gis.db.models.sql.compiler'
name = 'spatialite'
spatialite = True
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
valid_aggregates = dict([(k, None) for k in ('Extent', 'Union')])
Adapter = SpatiaLiteAdapter
Adaptor = Adapter # Backwards-compatibility alias.
area = 'Area'
centroid = 'Centroid'
contained = 'MbrWithin'
difference = 'Difference'
distance = 'Distance'
envelope = 'Envelope'
intersection = 'Intersection'
length = 'GLength' # OpenGis defines Length, but this conflicts with an SQLite reserved keyword
num_geom = 'NumGeometries'
num_points = 'NumPoints'
point_on_surface = 'PointOnSurface'
scale = 'ScaleCoords'
svg = 'AsSVG'
sym_difference = 'SymDifference'
transform = 'Transform'
translate = 'ShiftCoords'
union = 'GUnion' # OpenGis defines Union, but this conflicts with an SQLite reserved keyword
unionagg = 'GUnion'
from_text = 'GeomFromText'
from_wkb = 'GeomFromWKB'
select = 'AsText(%s)'
geometry_functions = {
'equals' : SpatiaLiteFunction('Equals'),
'disjoint' : SpatiaLiteFunction('Disjoint'),
'touches' : SpatiaLiteFunction('Touches'),
'crosses' : SpatiaLiteFunction('Crosses'),
'within' : SpatiaLiteFunction('Within'),
'overlaps' : SpatiaLiteFunction('Overlaps'),
'contains' : SpatiaLiteFunction('Contains'),
'intersects' : SpatiaLiteFunction('Intersects'),
'relate' : (SpatiaLiteRelate, six.string_types),
# Returns true if B's bounding box completely contains A's bounding box.
'contained' : SpatiaLiteFunction('MbrWithin'),
# Returns true if A's bounding box completely contains B's bounding box.
'bbcontains' : SpatiaLiteFunction('MbrContains'),
# Returns true if A's bounding box overlaps B's bounding box.
'bboverlaps' : SpatiaLiteFunction('MbrOverlaps'),
# These are implemented here as synonyms for Equals
'same_as' : SpatiaLiteFunction('Equals'),
'exact' : SpatiaLiteFunction('Equals'),
}
distance_functions = {
'distance_gt' : (get_dist_ops('>'), dtypes),
'distance_gte' : (get_dist_ops('>='), dtypes),
'distance_lt' : (get_dist_ops('<'), dtypes),
'distance_lte' : (get_dist_ops('<='), dtypes),
}
geometry_functions.update(distance_functions)
def __init__(self, connection):
super(DatabaseOperations, self).__init__(connection)
# Creating the GIS terms dictionary.
self.gis_terms = set(['isnull'])
self.gis_terms.update(self.geometry_functions)
@cached_property
def spatial_version(self):
"""Determine the version of the SpatiaLite library."""
try:
version = self.spatialite_version_tuple()[1:]
except Exception as msg:
new_msg = (
'Cannot determine the SpatiaLite version for the "%s" '
'database (error was "%s"). Was the SpatiaLite initialization '
'SQL loaded on this database?') % (self.connection.settings_dict['NAME'], msg)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2])
if version < (2, 3, 0):
raise ImproperlyConfigured('GeoDjango only supports SpatiaLite versions '
'2.3.0 and above')
return version
@property
def _version_greater_2_4_0_rc4(self):
if self.spatial_version >= (2, 4, 1):
return True
elif self.spatial_version < (2, 4, 0):
return False
else:
# Spatialite 2.4.0-RC4 added AsGML and AsKML, however both
# RC2 (shipped in popular Debian/Ubuntu packages) and RC4
# report version as '2.4.0', so we fall back to feature detection
try:
self._get_spatialite_func("AsGML(GeomFromText('POINT(1 1)'))")
except DatabaseError:
return False
return True
@cached_property
def gml(self):
return 'AsGML' if self._version_greater_2_4_0_rc4 else None
@cached_property
def kml(self):
return 'AsKML' if self._version_greater_2_4_0_rc4 else None
@cached_property
def geojson(self):
return 'AsGeoJSON' if self.spatial_version >= (3, 0, 0) else None
def check_aggregate_support(self, aggregate):
"""
Checks if the given aggregate name is supported (that is, if it's
in `self.valid_aggregates`).
"""
agg_name = aggregate.__class__.__name__
return agg_name in self.valid_aggregates
def convert_geom(self, wkt, geo_field):
"""
Converts geometry WKT returned from a SpatiaLite aggregate.
"""
if wkt:
return Geometry(wkt, geo_field.srid)
else:
return None
def geo_db_type(self, f):
"""
        Returns None because geometry columns are added via the
`AddGeometryColumn` stored procedure on SpatiaLite.
"""
return None
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type. SpatiaLite only supports regular
cartesian-based queries (no spheroid/sphere calculations for point
geometries like PostGIS).
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
raise ValueError('SpatiaLite does not support distance queries on '
'geometry fields with a geodetic coordinate system. '
                                 'Distance objects are not supported; use a numeric value of your '
'distance in degrees instead.')
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
return [dist_param]
def get_geom_placeholder(self, f, value):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
Transform() and GeomFromText() function call(s).
"""
def transform_value(value, srid):
return not (value is None or value.srid == srid)
if hasattr(value, 'expression'):
if transform_value(value, f.srid):
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
else:
placeholder = '%s'
            # No geometry value used for F expression, substitute in
# the column name instead.
return placeholder % self.get_expression_column(value)
else:
if transform_value(value, f.srid):
# Adding Transform() to the SQL placeholder.
return '%s(%s(%%s,%s), %s)' % (self.transform, self.from_text, value.srid, f.srid)
else:
return '%s(%%s,%s)' % (self.from_text, f.srid)
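    # Illustrative sketch (hypothetical SRIDs): for a field with srid=4326 and a
    # geometry value carrying srid=3857, the placeholder above expands to
    # "Transform(GeomFromText(%s,3857), 4326)"; when the SRIDs already match it
    # is simply "GeomFromText(%s,4326)".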
def _get_spatialite_func(self, func):
"""
Helper routine for calling SpatiaLite functions and returning
their result.
"""
cursor = self.connection._cursor()
try:
try:
cursor.execute('SELECT %s' % func)
row = cursor.fetchone()
except:
# Responsibility of caller to perform error handling.
raise
finally:
cursor.close()
return row[0]
def geos_version(self):
"Returns the version of GEOS used by SpatiaLite as a string."
return self._get_spatialite_func('geos_version()')
def proj4_version(self):
"Returns the version of the PROJ.4 library used by SpatiaLite."
return self._get_spatialite_func('proj4_version()')
def spatialite_version(self):
"Returns the SpatiaLite library version as a string."
return self._get_spatialite_func('spatialite_version()')
def spatialite_version_tuple(self):
"""
Returns the SpatiaLite version as a tuple (version string, major,
minor, subminor).
"""
# Getting the SpatiaLite version.
try:
version = self.spatialite_version()
except DatabaseError:
# The `spatialite_version` function first appeared in version 2.3.1
# of SpatiaLite, so doing a fallback test for 2.3.0 (which is
# used by popular Debian/Ubuntu packages).
version = None
try:
tmp = self._get_spatialite_func("X(GeomFromText('POINT(1 1)'))")
if tmp == 1.0: version = '2.3.0'
except DatabaseError:
pass
# If no version string defined, then just re-raise the original
# exception.
if version is None: raise
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse SpatiaLite version string: %s' % version)
return (version, major, minor1, minor2)
def spatial_aggregate_sql(self, agg):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = agg.__class__.__name__
if not self.check_aggregate_support(agg):
            raise NotImplementedError('%s spatial aggregate is not implemented for this backend.' % agg_name)
agg_name = agg_name.lower()
if agg_name == 'union': agg_name += 'agg'
sql_template = self.select % '%(function)s(%(field)s)'
sql_function = getattr(self, agg_name)
return sql_template, sql_function
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
"""
Returns the SpatiaLite-specific SQL for the given lookup value
[a tuple of (alias, column, db_type)], lookup type, lookup
value, the model field, and the quoting function.
"""
alias, col, db_type = lvalue
# Getting the quoted field as `geo_col`.
geo_col = '%s.%s' % (qn(alias), qn(col))
if lookup_type in self.geometry_functions:
# See if a SpatiaLite geometry function matches the lookup type.
tmp = self.geometry_functions[lookup_type]
# Lookup types that are tuples take tuple arguments, e.g., 'relate' and
# distance lookups.
if isinstance(tmp, tuple):
# First element of tuple is the SpatiaLiteOperation instance, and the
# second element is either the type or a tuple of acceptable types
# that may passed in as further parameters for the lookup type.
op, arg_type = tmp
# Ensuring that a tuple _value_ was passed in from the user
if not isinstance(value, (tuple, list)):
raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
# Geometry is first element of lookup tuple.
geom = value[0]
# Number of valid tuple parameters depends on the lookup type.
if len(value) != 2:
raise ValueError('Incorrect number of parameters given for `%s` lookup type.' % lookup_type)
# Ensuring the argument type matches what we expect.
if not isinstance(value[1], arg_type):
raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
# For lookup type `relate`, the op instance is not yet created (has
# to be instantiated here to check the pattern parameter).
if lookup_type == 'relate':
op = op(value[1])
elif lookup_type in self.distance_functions:
op = op[0]
else:
op = tmp
geom = value
# Calling the `as_sql` function on the operation instance.
return op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
elif lookup_type == 'isnull':
# Handling 'isnull' lookup type
return "%s IS %sNULL" % (geo_col, ('' if value else 'NOT ')), []
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.spatialite.models import GeometryColumns
return GeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys
return SpatialRefSys
|
DirkdeDraak/easybuild-easyblocks | refs/heads/master | easybuild/easyblocks/w/wrf.py | 10 | ##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing WRF, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import fileinput
import os
import re
import sys
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.netcdf import set_netcdf_env_vars # @UnresolvedImport
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import patch_perl_script_autoflush
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd, run_cmd_qa
class EB_WRF(EasyBlock):
"""Support for building/installing WRF."""
def __init__(self, *args, **kwargs):
"""Add extra config options specific to WRF."""
super(EB_WRF, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.wrfsubdir = None
self.comp_fam = None
@staticmethod
def extra_options():
extra_vars = {
'buildtype': [None, "Specify the type of build (serial, smpar (OpenMP), " \
"dmpar (MPI), dm+sm (hybrid OpenMP/MPI)).", MANDATORY],
'rewriteopts': [True, "Replace -O3 with CFLAGS/FFLAGS", CUSTOM],
'runtest': [True, "Build and run WRF tests", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def configure_step(self):
"""Configure build:
- set some magic environment variables
- run configure script
- adjust configure.wrf file if needed
"""
# netCDF dependency
set_netcdf_env_vars(self.log)
# HDF5 (optional) dependency
hdf5 = get_software_root('HDF5')
if hdf5:
# check if this is parallel HDF5
phdf5_bins = ['h5pcc', 'ph5diff']
parallel_hdf5 = True
for f in phdf5_bins:
if not os.path.exists(os.path.join(hdf5, 'bin', f)):
parallel_hdf5 = False
break
            if not (hdf5 and parallel_hdf5):
raise EasyBuildError("Parallel HDF5 module not loaded?")
else:
env.setvar('PHDF5', hdf5)
else:
self.log.info("HDF5 module not loaded, assuming that's OK...")
# JasPer dependency check + setting env vars
jasper = get_software_root('JasPer')
if jasper:
jasperlibdir = os.path.join(jasper, "lib")
env.setvar('JASPERINC', os.path.join(jasper, "include"))
env.setvar('JASPERLIB', jasperlibdir)
else:
if os.getenv('JASPERINC') or os.getenv('JASPERLIB'):
raise EasyBuildError("JasPer module not loaded, but JASPERINC and/or JASPERLIB still set?")
else:
self.log.info("JasPer module not loaded, assuming that's OK...")
# enable support for large file support in netCDF
env.setvar('WRFIO_NCD_LARGE_FILE_SUPPORT', '1')
# patch arch/Config_new.pl script, so that run_cmd_qa receives all output to answer questions
patch_perl_script_autoflush(os.path.join("arch", "Config_new.pl"))
# determine build type option to look for
build_type_option = None
self.comp_fam = self.toolchain.comp_family()
if self.comp_fam == toolchain.INTELCOMP: #@UndefinedVariable
build_type_option = "Linux x86_64 i486 i586 i686, ifort compiler with icc"
elif self.comp_fam == toolchain.GCC: #@UndefinedVariable
build_type_option = "x86_64 Linux, gfortran compiler with gcc"
else:
raise EasyBuildError("Don't know how to figure out build type to select.")
# fetch selected build type (and make sure it makes sense)
known_build_types = ['serial', 'smpar', 'dmpar', 'dm+sm']
self.parallel_build_types = ["dmpar", "smpar", "dm+sm"]
bt = self.cfg['buildtype']
if not bt in known_build_types:
raise EasyBuildError("Unknown build type: '%s'. Supported build types: %s", bt, known_build_types)
# fetch option number based on build type option and selected build type
build_type_question = "\s*(?P<nr>[0-9]+).\s*%s\s*\(%s\)" % (build_type_option, bt)
# run configure script
cmd = "./configure"
qa = {
# named group in match will be used to construct answer
"Compile for nesting? (1=basic, 2=preset moves, 3=vortex following) [default 1]:": "1",
"Compile for nesting? (0=no nesting, 1=basic, 2=preset moves, 3=vortex following) [default 0]:": "0"
}
no_qa = [
"testing for fseeko and fseeko64",
r"If you wish to change the default options, edit the file:[\s\n]*arch/configure_new.defaults"
]
std_qa = {
# named group in match will be used to construct answer
r"%s.*\n(.*\n)*Enter selection\s*\[[0-9]+-[0-9]+\]\s*:" % build_type_question: "%(nr)s",
}
run_cmd_qa(cmd, qa, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True)
cfgfile = 'configure.wrf'
# make sure correct compilers are being used
comps = {
'SCC': os.getenv('CC'),
'SFC': os.getenv('F90'),
'CCOMP': os.getenv('CC'),
'DM_FC': os.getenv('MPIF90'),
'DM_CC': "%s -DMPI2_SUPPORT" % os.getenv('MPICC'),
}
for line in fileinput.input(cfgfile, inplace=1, backup='.orig.comps'):
for (k, v) in comps.items():
line = re.sub(r"^(%s\s*=\s*).*$" % k, r"\1 %s" % v, line)
sys.stdout.write(line)
# rewrite optimization options if desired
if self.cfg['rewriteopts']:
# replace default -O3 option in configure.wrf with CFLAGS/FFLAGS from environment
self.log.info("Rewriting optimization options in %s" % cfgfile)
# set extra flags for Intel compilers
# see http://software.intel.com/en-us/forums/showthread.php?t=72109&p=1#146748
if self.comp_fam == toolchain.INTELCOMP: #@UndefinedVariable
# -O3 -heap-arrays is required to resolve compilation error
for envvar in ['CFLAGS', 'FFLAGS']:
val = os.getenv(envvar)
if '-O3' in val:
env.setvar(envvar, '%s -heap-arrays' % val)
self.log.info("Updated %s to '%s'" % (envvar, os.getenv(envvar)))
# replace -O3 with desired optimization options
for line in fileinput.input(cfgfile, inplace=1, backup='.orig.rewriteopts'):
line = re.sub(r"^(FCOPTIM.*)(\s-O3)(\s.*)$", r"\1 %s \3" % os.getenv('FFLAGS'), line)
line = re.sub(r"^(CFLAGS_LOCAL.*)(\s-O3)(\s.*)$", r"\1 %s \3" % os.getenv('CFLAGS'), line)
sys.stdout.write(line)
def build_step(self):
"""Build and install WRF and testcases using provided compile script."""
# enable parallel build
p = self.cfg['parallel']
self.par = ""
if p:
self.par = "-j %s" % p
# build wrf (compile script uses /bin/csh )
cmd = "tcsh ./compile %s wrf" % self.par
run_cmd(cmd, log_all=True, simple=True, log_output=True)
# build two testcases to produce ideal.exe and real.exe
for test in ["em_real", "em_b_wave"]:
cmd = "tcsh ./compile %s %s" % (self.par, test)
run_cmd(cmd, log_all=True, simple=True, log_output=True)
def test_step(self):
"""Build and run tests included in the WRF distribution."""
if self.cfg['runtest']:
# get list of WRF test cases
self.testcases = []
try:
self.testcases = os.listdir('test')
except OSError, err:
raise EasyBuildError("Failed to determine list of test cases: %s", err)
            # exclude 2d testcases in parallel WRF builds
if self.cfg['buildtype'] in self.parallel_build_types:
self.testcases = [test for test in self.testcases if not "2d_" in test]
# exclude real testcases
self.testcases = [test for test in self.testcases if not test.endswith("_real")]
self.log.debug("intermediate list of testcases: %s" % self.testcases)
# exclude tests that should not be run
for test in ["em_esmf_exp", "em_scm_xy", "nmm_tropical_cyclone"]:
if test in self.testcases:
self.testcases.remove(test)
# some tests hang when WRF is built with Intel compilers
if self.comp_fam == toolchain.INTELCOMP: #@UndefinedVariable
for test in ["em_heldsuarez"]:
if test in self.testcases:
self.testcases.remove(test)
# determine parallel setting (1/2 of available processors + 1)
n = self.cfg['parallel'] / 2 + 1
# prepare run command
# stack limit needs to be set to unlimited for WRF to work well
if self.cfg['buildtype'] in self.parallel_build_types:
test_cmd = "ulimit -s unlimited && %s && %s" % (self.toolchain.mpi_cmd_for("./ideal.exe", 1),
self.toolchain.mpi_cmd_for("./wrf.exe", n))
else:
test_cmd = "ulimit -s unlimited && ./ideal.exe && ./wrf.exe" % n
def run_test():
"""Run a single test and check for success."""
# regex to check for successful test run
re_success = re.compile("SUCCESS COMPLETE WRF")
# run test
run_cmd(test_cmd, log_all=True, simple=True)
# check for success
fn = "rsl.error.0000"
try:
f = open(fn, "r")
txt = f.read()
f.close()
except IOError, err:
raise EasyBuildError("Failed to read output file %s: %s", fn, err)
if re_success.search(txt):
self.log.info("Test %s ran successfully." % test)
else:
raise EasyBuildError("Test %s failed, pattern '%s' not found.", test, re_success.pattern)
# clean up stuff that gets in the way
fn_prefs = ["wrfinput_", "namelist.output", "wrfout_", "rsl.out.", "rsl.error."]
for f in os.listdir('.'):
for p in fn_prefs:
if f.startswith(p):
os.remove(f)
self.log.debug("Cleaned up file %s." % f)
            # build and run each test case individually
for test in self.testcases:
self.log.debug("Building and running test %s" % test)
                # build and install
cmd = "tcsh ./compile %s %s" % (self.par, test)
run_cmd(cmd, log_all=True, simple=True)
# run test
try:
os.chdir('run')
if test in ["em_fire"]:
                        # handle tests with subtests separately
testdir = os.path.join("..", "test", test)
                        for subtest in [x for x in os.listdir(testdir) if os.path.isdir(os.path.join(testdir, x))]:
subtestdir = os.path.join(testdir, subtest)
# link required files
for f in os.listdir(subtestdir):
if os.path.exists(f):
os.remove(f)
os.symlink(os.path.join(subtestdir, f), f)
# run test
run_test()
else:
# run test
run_test()
os.chdir('..')
except OSError, err:
raise EasyBuildError("An error occured when running test %s: %s", test, err)
# building/installing is done in build_step, so we can run tests
def install_step(self):
"""Building was done in install dir, so nothing to do in install_step."""
pass
def sanity_check_step(self):
"""Custom sanity check for WRF."""
mainver = self.version.split('.')[0]
self.wrfsubdir = "WRFV%s" % mainver
fs = ["libwrflib.a", "wrf.exe", "ideal.exe", "real.exe", "ndown.exe", "nup.exe", "tc.exe"]
ds = ["main", "run"]
custom_paths = {
'files': [os.path.join(self.wrfsubdir, "main", x) for x in fs],
'dirs': [os.path.join(self.wrfsubdir, x) for x in ds]
}
super(EB_WRF, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
mainver = self.version.split('.')[0]
self.wrfsubdir = "WRFV%s"%mainver
maindir = os.path.join(self.wrfsubdir, "main")
return {
'PATH': [maindir],
'LD_LIBRARY_PATH': [maindir],
'MANPATH': [],
}
def make_module_extra(self):
"""Add netCDF environment variables to module file."""
txt = super(EB_WRF, self).make_module_extra()
txt += self.module_generator.set_environment('NETCDF', os.getenv('NETCDF'))
if os.getenv('NETCDFF', None) is not None:
txt += self.module_generator.set_environment('NETCDFF', os.getenv('NETCDFF'))
return txt
|
7agner/Python3-Curso-em-Video | refs/heads/master | Mundo 01 Fundamentos/Aulas e Desafios/Aula 09/Codigo 05 - Print de Texto Longo.py | 1 | """
Using triple quotes inside the parentheses of the "print" command, instead of the text becoming merely
a comment ignored by Python (like this one), it is possible to display an entire text spanning several lines
without having to repeat "print" on each of them.
"""
print("""Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Cras aliquam sapien non augue pharetra finibus. Integer ac justo quam.
Pellentesque at eros vel tortor pharetra mollis sit amet convallis sem.
Cras vel lobortis metus. Donec metus magna, fermentum eget dolor at, egestas dapibus sem.
Etiam varius, enim ut tincidunt tristique, dui risus tristique quam, id iaculis ipsum sem nec elit.
Nunc iaculis sit amet diam id lobortis. Nam egestas congue lectus vitae maximus.""")
|
zasdfgbnm/tensorflow | refs/heads/master | tensorflow/tools/quantization/quantize_graph_test.py | 64 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph quantization script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.platform import flags as flags_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.tools.quantization import quantize_graph
flags = flags_lib
FLAGS = flags.FLAGS
def run_graph_def(graph_def, input_map, outputs):
graph = ops_lib.Graph()
with graph.as_default():
importer.import_graph_def(graph_def, input_map={}, name="")
with session.Session(graph=graph) as sess:
results = sess.run(outputs, feed_dict=input_map)
return results
def test_mat_mul(m, n, k, a, b):
"""Tests a MatMul replacement."""
a_constant_name = "a_constant"
b_constant_name = "b_constant"
mat_mul_name = "mat_mul"
float_graph_def = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=a, dtype=dtypes.float32, shape=[m, k])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=b, dtype=dtypes.float32, shape=[k, n])
float_graph_def.node.extend([b_constant])
mat_mul_node = quantize_graph.create_node("MatMul", mat_mul_name,
[a_constant_name, b_constant_name])
quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_a", False)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_b", False)
float_graph_def.node.extend([mat_mul_node])
test_graph(float_graph_def, {}, [mat_mul_name])
def test_conv(depth, image_width, image_height, image_batch_count, filter_size,
filter_count, stride, padding, input_values, filter_values):
"""Tests a Conv replacement."""
input_constant_name = "input_constant"
filter_constant_name = "filter_constant"
conv_name = "conv"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=input_values,
dtype=dtypes.float32,
shape=[image_batch_count, image_height, image_width, depth])
float_graph_def.node.extend([input_constant])
filter_constant = quantize_graph.create_constant_node(
filter_constant_name,
value=filter_values,
dtype=dtypes.float32,
shape=[filter_size, filter_size, depth, filter_count])
float_graph_def.node.extend([filter_constant])
conv_node = quantize_graph.create_node(
"Conv2D", conv_name, [input_constant_name, filter_constant_name])
quantize_graph.set_attr_dtype(conv_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(conv_node, "strides", [1, stride, stride, 1])
quantize_graph.set_attr_string(conv_node, "padding", padding)
float_graph_def.node.extend([conv_node])
test_graph(float_graph_def, {}, [conv_name])
def are_tensors_near(a, b, tolerance):
"""Tests whether two tensors are nearly identical.
This is a specialized comparison function designed to help debug problems with
quantization. It prints out information about the differences between tensors
on failure, paying special attention to possible biases by looking at the mean
and absolute average errors.
Args:
a: First comparison tensor.
b: Second comparison tensor.
tolerance: Float value indicating how large an error between values is ok.
Returns:
Boolean indicating whether the two inputs were close enough.
"""
flat_a = a.flatten()
flat_b = b.flatten()
if len(flat_a) != len(flat_b):
print("Tensors are different sizes: " + str(len(flat_a)) + " vs " + str(
len(flat_b)))
return False
value_count = len(flat_a)
how_many_different = 0
total_difference = 0
total_abs_difference = 0
for index in range(value_count):
a_value = flat_a[index]
b_value = flat_b[index]
difference = a_value - b_value
total_difference += difference
total_abs_difference += abs(difference)
if abs(difference) > tolerance:
how_many_different += 1
mean_difference = total_difference / value_count
mean_abs_difference = total_abs_difference / value_count
proportion_different = (how_many_different * 1.0) / value_count
if how_many_different == 0:
return True
else:
print("Tensors have {0} different values ({1}%), with mean difference"
" {2} and mean absolute difference {3}".format(
how_many_different, proportion_different * 100, mean_difference,
mean_abs_difference))
return False
def get_top_value(input_values):
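"""Return (max_index, max_value) for the largest entry in the flattened input."""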
max_value = None
max_index = None
for index, value in enumerate(input_values.flatten()):
if max_value is None or value > max_value:
max_value = value
max_index = index
return max_index, max_value
def test_graph(float_graph_def, input_map, output_names, log_graph=False):
"""Runs the float graph through the rewriter and tests the results."""
float_results = run_graph_def(
float_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
# TODO(petewarden): round test is currently failing because there is no
# RoundToSteps op available.
# round_rewriter = quantize_graph.GraphRewriter(float_graph_def, "round")
# round_graph_def = round_rewriter.rewrite(output_name)
# round_results = run_graph_def(round_graph_def, input_map,
# [output_name + ":0"])
# assert are_tensors_near(expected, round_results[0], 1.0)
#
# TODO(petewarden): Add test for "quantize" mode.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite(output_names)
eightbit_results = run_graph_def(
eightbit_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
for expected, result in zip(float_results, eightbit_results):
assert are_tensors_near(expected, result, 1.0)
if log_graph:
tf_logging.info("8bit:\n%s", str(eightbit_graph_def))
# Test the weights_rounded mode. This uses the default bit_depth.
weights_rounded_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "weights_rounded", quantized_input_range=None)
weights_rounded_graph_def = weights_rounded_rewriter.rewrite(output_names)
weights_rounded_results = run_graph_def(
weights_rounded_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
for expected, result in zip(float_results, weights_rounded_results):
assert are_tensors_near(expected, result, 1.0)
class QuantizeGraphTest(test.TestCase):
def test_negative_const_problem(self):
shape_constant_name = "shape_constant"
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=-0.8, dtype=dtypes.float32, shape=[1])
quantization_result = quantize_graph.quantize_weight_eightbit(
shape_constant, b"MIN_COMBINED")
self.assertEqual(4, len(quantization_result))
def test_odd_padding_problem(self):
"""Tests one error case we ran into in a real graph."""
test_conv(1, 4, 4, 1, 3, 1, 2, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
[1, 2, 3, 4, 5, 6, 7, 8, 9])
def test_mat_mul_tiny(self):
# These tests are added to test the degenerate case where
# min(matrix) == max(matrix), which used to cause problems.
test_mat_mul(1, 1, 1, [2], [3])
test_mat_mul(1, 2, 1, [1], [2, 3])
test_mat_mul(1, 1, 2, [1, 1], [1, 1])
test_mat_mul(1, 1, 2, [0, 0], [1, 1])
# The general case.
test_mat_mul(1, 1, 2, [1, 2], [1, 2])
def test_mat_mul_small(self):
test_mat_mul(2, 4, 3, [1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
def test_conv(self):
test_conv(1, 4, 3, 1, 3, 1, 1, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[1, 4, 7, 2, 5, 8, 3, 6, 9])
def test_reshape(self):
"""Tests that MatMul->Reshape->MatMul avoids extra quantize/dequantize."""
def make_matmul(name, a, b):
n = quantize_graph.create_node("MatMul", name, [a.name, b.name])
quantize_graph.set_attr_dtype(n, "T", dtypes.float32)
quantize_graph.set_attr_bool(n, "transpose_a", False)
quantize_graph.set_attr_bool(n, "transpose_b", False)
return n
# matmul_1 = input*weight_1
input_node = quantize_graph.create_constant_node(
"input", value=[0, 1, 2, 3], dtype=dtypes.float32, shape=[4, 1])
weight_1_node = quantize_graph.create_constant_node(
"weight_1",
value=[.5, .6, .7, .8, .9],
dtype=dtypes.float32,
shape=[1, 5])
matmul_1_node = make_matmul("matmul_1", input_node, weight_1_node)
# Reshape 4x5 to 10x2.
new_shape_node = quantize_graph.create_constant_node(
"new_shape_node", value=[10, 2], dtype=dtypes.int32, shape=[2])
reshape_node = quantize_graph.create_node(
"Reshape", "reshape", [matmul_1_node.name, new_shape_node.name])
quantize_graph.set_attr_dtype(reshape_node, "T", dtypes.float32)
# matmul_2_node = reshape*weight_2
weight_2_node = quantize_graph.create_constant_node(
"weight_2", value=[1.5, 2.5], dtype=dtypes.float32, shape=[2, 1])
matmul_2_node = make_matmul("matmul_2", reshape_node, weight_2_node)
g = graph_pb2.GraphDef()
g.node.extend([
input_node, weight_1_node, matmul_1_node, new_shape_node, reshape_node,
weight_2_node, matmul_2_node
])
# Test the graph
test_graph(g, {}, ["matmul_2"])
# Verify there is only one Quantize and one Requantize op.
eightbit_rewriter = quantize_graph.GraphRewriter(
g, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite(["matmul_2"])
ops = [node.op for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(1, ops.count("QuantizedReshape"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
def test_quantize_array(self):
# Test invalid parameters (empty array, or 0 buckets).
self.assertRaises(ValueError, quantize_graph.quantize_array, np.array([]),
2)
self.assertRaises(ValueError, quantize_graph.quantize_array,
np.array([1, 2]), 0)
# Test input array of length 1.
arr = np.array([1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertEqual(arr, qarr)
qarr = quantize_graph.quantize_array(arr, 2)
self.assertEqual(arr, qarr)
# Test input array with all elements equal.
arr = np.array([1, 1, 1])
qarr = quantize_graph.quantize_array(arr, 10)
self.assertTrue((np.array([1, 1, 1]) == qarr).all())
# Test "normal" input arrays.
arr = np.array([0, 0.3, 0.6, 1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertTrue((np.array([0.5, 0.5, 0.5, 0.5]) == qarr).all())
qarr = quantize_graph.quantize_array(arr, 2)
self.assertTrue((np.array([0.25, 0.25, 0.75, 0.75]) == qarr).all())
qarr = quantize_graph.quantize_array(arr.reshape((2, 2)), 2)
self.assertTrue((np.array([[0.25, 0.25], [0.75, 0.75]]) == qarr).all())
def test_non_float_concat(self):
concat_dim = quantize_graph.create_constant_node(
"concat_dim", value=0, dtype=dtypes.int32, shape=[])
a = quantize_graph.create_constant_node(
"a",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.int32,
shape=[2, 2, 3])
b = quantize_graph.create_constant_node(
"b",
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.int32,
shape=[2, 2, 3])
concat = quantize_graph.create_node("Concat", "concat",
[concat_dim.name, a.name, b.name])
quantize_graph.set_attr_int(concat, "N", 2)
quantize_graph.set_attr_dtype(concat, "T", dtypes.int32)
g = graph_pb2.GraphDef()
g.node.extend([concat_dim, a, b, concat])
test_graph(g, {}, [concat.name])
def test_non_float_reshape(self):
a = quantize_graph.create_constant_node(
"a",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.int32,
shape=[2, 2, 3])
shape = quantize_graph.create_constant_node(
"shape", value=[12], dtype=dtypes.int32, shape=[1])
reshape = quantize_graph.create_node("Reshape", "reshape",
[a.name, shape.name])
quantize_graph.set_attr_dtype(reshape, "T", dtypes.int32)
g = graph_pb2.GraphDef()
g.node.extend([a, shape, reshape])
test_graph(g, {}, [reshape.name])
def test_concat(self):
shape_constant_name = "shape_constant"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
concat_name = "concat"
float_graph_def = graph_pb2.GraphDef()
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=0, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([shape_constant])
a_constant = quantize_graph.create_constant_node(
a_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name,
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([b_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[shape_constant_name, a_constant_name, b_constant_name])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
# Verify the concat is quantized.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([concat_name])
ops = [node.op for node in eightbit_graph_def.node]
self.assertEqual(1, ops.count("QuantizedConcat"))
def test_multiple_outputs(self):
input_constant_name = "input_constant"
split_constant_name = "split_constant"
split_name = "split"
concat_constant_name = "concat_constant"
concat_name = "concat"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
split_constant = quantize_graph.create_constant_node(
split_constant_name, value=1, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([split_constant])
split_node = quantize_graph.create_node(
"Split", split_name, [split_constant_name, input_constant_name])
quantize_graph.set_attr_int(split_node, "num_split", 2)
quantize_graph.set_attr_dtype(split_node, "T", dtypes.float32)
float_graph_def.node.extend([split_node])
concat_constant = quantize_graph.create_constant_node(
concat_constant_name, value=1, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([concat_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[concat_constant_name, split_name + ":0", split_name + ":1"])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
def test_node_name_from_input(self):
self.assertEqual("SomeName",
quantize_graph.node_name_from_input("^SomeName:2"))
def test_unique_node_name_from_input(self):
self.assertEqual("__hat__SomeName__port__2",
quantize_graph.unique_node_name_from_input("^SomeName:2"))
def test_identity(self):
input_constant_name = "input_constant"
identity_name = "identity"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
identity_node = quantize_graph.create_node("Identity", identity_name,
[input_constant_name])
quantize_graph.set_attr_dtype(identity_node, "T", dtypes.float32)
float_graph_def.node.extend([identity_node])
mul_name = "mul"
mul_node = quantize_graph.create_node("Mul", mul_name,
[identity_name, identity_name])
quantize_graph.set_attr_dtype(mul_node, "T", dtypes.float32)
float_graph_def.node.extend([mul_node])
test_graph(float_graph_def, {}, [mul_name])
def test_keep_control_edges(self):
no_op_name = "no_op"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = graph_pb2.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
graph_def.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = quantize_graph.create_node("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = quantize_graph.create_node(
"Identity", a_identity_name,
[a_constant_name, "^" + a_check_name, "^" + no_op_name])
graph_def.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = quantize_graph.create_node("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = quantize_graph.create_node(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name, b_identity_name])
quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
expected_output = graph_pb2.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
expected_output.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
a_identity_node = quantize_graph.create_node(
"Identity", a_identity_name, [a_constant_name, "^" + no_op_name])
expected_output.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name, b_constant_name])
quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
expected_output.versions.CopyFrom(graph_def.versions)
expected_output.library.CopyFrom(graph_def.library)
output = graph_util.remove_training_nodes(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [add_name])
self.assertProtoEquals(expected_output, stripped_output)
def test_batch_norm(self):
input_constant_name = "input_constant"
mean_constant_name = "mean_constant"
variance_constant_name = "variance_constant"
beta_constant_name = "beta_constant"
gamma_constant_name = "gamma_constant"
batch_norm_name = "batch_norm"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6],
dtype=dtypes.float32,
shape=[1, 1, 6, 2])
float_graph_def.node.extend([input_constant])
mean_constant = quantize_graph.create_constant_node(
mean_constant_name, value=[10, 20], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([mean_constant])
variance_constant = quantize_graph.create_constant_node(
variance_constant_name,
value=[0.25, 0.5],
dtype=dtypes.float32,
shape=[2])
float_graph_def.node.extend([variance_constant])
beta_constant = quantize_graph.create_constant_node(
beta_constant_name, value=[0.1, 0.6], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([beta_constant])
gamma_constant = quantize_graph.create_constant_node(
gamma_constant_name, value=[0, 0], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([gamma_constant])
batch_norm_node = quantize_graph.create_node(
"BatchNormWithGlobalNormalization", batch_norm_name, [
input_constant_name, mean_constant_name, variance_constant_name,
beta_constant_name, gamma_constant_name
])
quantize_graph.set_attr_dtype(batch_norm_node, "T", dtypes.float32)
quantize_graph.set_attr_bool(batch_norm_node, "scale_after_normalization",
False)
quantize_graph.set_attr_float(batch_norm_node, "variance_epsilon", 0.001)
float_graph_def.node.extend([batch_norm_node])
test_graph(float_graph_def, {}, [batch_norm_name])
def test_max_pool(self):
input_constant_name = "input_constant"
max_pool_name = "max_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
max_pool_node = quantize_graph.create_node("MaxPool", max_pool_name,
[input_constant_name])
quantize_graph.set_attr_int_list(max_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(max_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(max_pool_node, "padding", b"SAME")
float_graph_def.node.extend([max_pool_node])
test_graph(float_graph_def, {}, [max_pool_name])
def test_avg_pool(self):
input_constant_name = "input_constant"
avg_pool_name = "avg_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
avg_pool_node = quantize_graph.create_node("AvgPool", avg_pool_name,
[input_constant_name])
quantize_graph.set_attr_dtype(avg_pool_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(avg_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(avg_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(avg_pool_node, "padding", b"SAME")
float_graph_def.node.extend([avg_pool_node])
test_graph(float_graph_def, {}, [avg_pool_name])
def test_relu(self):
input_constant_name = "input_constant"
relu_name = "relu"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu_node = quantize_graph.create_node("Relu", relu_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
float_graph_def.node.extend([relu_node])
test_graph(float_graph_def, {}, [relu_name])
def test_relu_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
relu_node = quantize_graph.create_node("Relu", "relu", [input_node.name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=0, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=12, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
[relu_node.name, min_node.name, max_node.name])
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend(
[input_node, relu_node, min_node, max_node, fake_quant_node])
test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
# Verify there is only one Quantize and one Requantize op.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
ops = [node.op for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
def test_relu6(self):
input_constant_name = "input_constant"
relu6_name = "relu6"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu6_node = quantize_graph.create_node("Relu6", relu6_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu6_node, "T", dtypes.float32)
float_graph_def.node.extend([relu6_node])
test_graph(float_graph_def, {}, [relu6_name])
def test_bias_add(self):
input_constant_name = "input_constant"
offset_constant_name = "offset_constant"
bias_add_name = "bias_add"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 1, 2, 6])
float_graph_def.node.extend([input_constant])
offset_constant = quantize_graph.create_constant_node(
offset_constant_name,
value=[1, 2, 3, 4, 5, 6],
dtype=dtypes.float32,
shape=[6])
float_graph_def.node.extend([offset_constant])
bias_add_node = quantize_graph.create_node(
"BiasAdd", bias_add_name, [input_constant_name, offset_constant_name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def.node.extend([bias_add_node])
test_graph(float_graph_def, {}, [bias_add_name])
def test_quantized_input_range_errors(self):
with self.assertRaises(ValueError):
# Invalid mode.
quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "weights_rounded",
[0, 1])
with self.assertRaises(ValueError):
# Invalid range.
quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "eightbit", [0, -1])
def test_quantized_input_range_bias_add(self):
input_shape = [1, 1, 2, 6]
input_n = quantize_graph.create_node("Placeholder", "input", [])
quantize_graph.set_attr_dtype(input_n, "dtype", dtypes.float32)
quantize_graph.set_attr_shape(input_n, "shape", input_shape)
offset_n = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6])
bias_add_n = quantize_graph.create_node("BiasAdd", "bias_add",
[input_n.name, offset_n.name])
quantize_graph.set_attr_dtype(bias_add_n, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([input_n, offset_n, bias_add_n])
input_map = {
input_n.name + ":0":
np.reshape([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], input_shape)
}
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[bias_add_n.name], [-1, 20.])
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[bias_add_n.name], [0, 12.])
def test_quantized_input_range_mat_mul(self):
shapes = [[3, 2], [2, 4]]
inputs = []
for i, shape in enumerate(shapes):
node = quantize_graph.create_node("Placeholder", "input_%s" % i, [])
quantize_graph.set_attr_dtype(node, "dtype", dtypes.float32)
quantize_graph.set_attr_shape(node, "shape", shape)
inputs.append(node)
mat_mul_node = quantize_graph.create_node("MatMul", "mat_mul",
[n.name for n in inputs])
quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend(inputs + [mat_mul_node])
input_map = {
inputs[0].name + ":0":
np.reshape([1, 2, 3, 4, 5, 6], shapes[0]),
inputs[1].name + ":0":
np.reshape([.8, .7, .6, .5, .4, .3, .2, .1], shapes[1])
}
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[mat_mul_node.name], [-1, 20.])
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[mat_mul_node.name], [0, 6.])
def _RunTestsForQuantizedInputRange(self, float_graph_def, input_map,
output_names, input_range):
if sys.version_info[0] == 3:
# uint8->quint8 conversion for numpy is not working currently.
return
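# Illustrative mapping (not part of the original test): with input_range=[0, 12.],
# a float input value of 6.0 becomes round((6.0 - 0) * 255 / (12. - 0)) = 128 when
# converted to the quantized quint8 input below.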
quantized_input_map = {}
for k, v in input_map.items():
arr = [
int(
round((n - input_range[0]) * 255 / (input_range[1] - input_range[
0]))) for n in v.flat
]
arr = np.array(arr, np.uint8)
arr = arr.reshape(v.shape)
arr = arr.astype(dtypes.quint8.as_numpy_dtype)
quantized_input_map[k] = arr
output_tensors = [output_name + ":0" for output_name in output_names]
float_results = run_graph_def(float_graph_def, input_map, output_tensors)
# Quantize treating the input as quantized in range <input_range>.
rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit",
input_range)
graph_def = rewriter.rewrite(output_names)
results = run_graph_def(graph_def, quantized_input_map, output_tensors)
for expected, result in zip(float_results, results):
assert are_tensors_near(expected, result, .5)
ops = [node.op for node in graph_def.node]
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(len(output_names), ops.count("Dequantize"))
# Quantize without treating input as quantized.
rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
graph_def = rewriter.rewrite(output_names)
results = run_graph_def(graph_def, input_map, output_tensors)
for expected, result in zip(float_results, results):
assert are_tensors_near(expected, result, .5)
ops = [node.op for node in graph_def.node]
self.assertEqual(
len(input_map), ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(len(output_names), ops.count("Dequantize"))
def test_bias_add_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=-.5, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=15.5, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
[bias_add_node.name, min_node.name, max_node.name])
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([
input_node, offset_node, bias_add_node, min_node, max_node,
fake_quant_node
])
test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
# Verify there is only one Quantize and one Requantize op.
# Pass in fallback_quantization_range, although it will have no effect
# because the FakeQuantWithMinMaxVars are used instead.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def,
"eightbit",
quantized_input_range=None,
fallback_quantization_range=[-100, 100])
eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
ops = [node.op for node in eightbit_graph_def.node]
node_names = [node.name for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
# The fallback constants are not in the graph.
self.assertEqual(0, node_names.count("fallback_quantization_min_value"))
self.assertEqual(0, node_names.count("fallback_quantization_max_value"))
def test_bias_add_w_fallback_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([input_node, offset_node, bias_add_node])
test_graph(float_graph_def, {}, [bias_add_node.name], log_graph=True)
# Verify there is only one Quantize, one Requantize op, and no
# RequantizationRange op.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def,
"eightbit",
quantized_input_range=None,
fallback_quantization_range=[-.5, 15.5])
eightbit_graph_def = eightbit_rewriter.rewrite([bias_add_node.name])
ops = [node.op for node in eightbit_graph_def.node]
node_names = [node.name for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
# No RequantizationRange
self.assertEqual(0, ops.count("RequantizationRange"))
# The fallback constants are in the graph.
self.assertEqual(1, node_names.count("fallback_quantization_min_value"))
self.assertEqual(1, node_names.count("fallback_quantization_max_value"))
def test_remove_redundant_quantization(self):
a_constant_name = "a_constant"
a_constant_min_name = "a_constant_min"
a_constant_max_name = "a_constant_max"
a_dequantize_name = "a_dequantize"
a_quantize_name = "a_quantize"
b_constant_name = "b_constant"
b_constant_min_name = "b_constant_min"
b_constant_max_name = "b_constant_max"
b_dequantize_name = "b_dequantize"
b_quantize_name = "b_quantize"
mat_mul_name = "mat_mul"
graph_def = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
graph_def.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(
a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(
a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant_max])
a_dequantize_node = quantize_graph.create_node(
"Dequantize", a_dequantize_name,
[a_constant_name, a_constant_min_name, a_constant_max_name])
quantize_graph.set_attr_dtype(a_dequantize_node, "T", dtypes.uint8)
graph_def.node.extend([a_dequantize_node])
a_quantize_node = quantize_graph.create_node(
"QuantizeV2", a_quantize_name,
[a_dequantize_name, a_dequantize_name + ":1", a_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(a_quantize_node, "T", dtypes.uint8)
graph_def.node.extend([a_quantize_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
graph_def.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(
b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(
b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant_max])
b_dequantize_node = quantize_graph.create_node(
"Dequantize", b_dequantize_name,
[b_constant_name, b_constant_min_name, b_constant_max_name])
quantize_graph.set_attr_dtype(b_dequantize_node, "T", dtypes.uint8)
graph_def.node.extend([b_dequantize_node])
b_quantize_node = quantize_graph.create_node(
"QuantizeV2", b_quantize_name,
[b_dequantize_name, b_dequantize_name + ":1", b_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(b_quantize_node, "T", dtypes.uint8)
graph_def.node.extend([b_quantize_node])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
a_quantize_name, b_quantize_name, a_quantize_name + ":1",
a_quantize_name + ":2", b_quantize_name + ":1", b_quantize_name + ":2"
])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
graph_def.node.extend([mat_mul_node])
expected_output = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
expected_output.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(
a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(
a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant_max])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
expected_output.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(
b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(
b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant_max])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
a_constant_name, b_constant_name, a_constant_min_name,
a_constant_max_name, b_constant_min_name, b_constant_max_name
])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
expected_output.node.extend([mat_mul_node])
expected_output.versions.CopyFrom(graph_def.versions)
expected_output.library.CopyFrom(graph_def.library)
rewriter = quantize_graph.GraphRewriter(
graph_def, [mat_mul_name], quantized_input_range=None)
output = rewriter.remove_redundant_quantization(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [mat_mul_name])
self.assertProtoEquals(expected_output, stripped_output)
if __name__ == "__main__":
test.main()
|
coolsnowwolf/lede | refs/heads/master | scripts/cfe-partition-tag.py | 27 | #!/usr/bin/env python3
"""
CFE Partition Tag
{
u32 part_id;
u32 part_size;
u16 flags;
char part_name[33];
char part_version[21];
u32 part_crc32;
}
"""
import argparse
import os
import struct
PART_NAME_SIZE = 33
PART_VERSION_SIZE = 21
CRC32_INIT = 0xFFFFFFFF
CRC32_TABLE = [
0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
]
def auto_int(x):
return int(x, 0)
def crc32(bytes, size, crc):
i = 0
while (i < size):
crc = (crc >> 8) ^ CRC32_TABLE[(crc ^ bytes[i]) & 0xff]
i += 1
return crc
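# Note: this is the standard reflected, table-driven CRC-32, but without the final
# XOR with 0xFFFFFFFF that e.g. zlib.crc32 applies; create_tag() stores this raw
# register value directly.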
def str_to_bytes_pad(string, size):
str_bytes = string.encode()
num_bytes = len(str_bytes)
if (num_bytes >= size):
str_bytes = str_bytes[:size - 1] + '\0'.encode()
else:
str_bytes += '\0'.encode() * (size - num_bytes)
return str_bytes
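# Illustrative: str_to_bytes_pad("boot", 8) gives b"boot\x00\x00\x00\x00", while a
# string longer than the field is truncated to size - 1 bytes plus a trailing NUL.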
def create_tag(args, in_bytes, size):
crc = crc32(in_bytes, size, CRC32_INIT)
tag = bytearray()
tag += struct.pack('>I', args.part_id)
tag += struct.pack('>I', size)
tag += struct.pack('>H', args.part_flags)
tag += str_to_bytes_pad(args.part_name, PART_NAME_SIZE)
tag += str_to_bytes_pad(args.part_version, PART_VERSION_SIZE)
tag += struct.pack('>I', crc)
return tag
def create_output(args):
in_st = os.stat(args.input_file)
in_size = in_st.st_size
in_f = open(args.input_file, 'r+b')
in_bytes = in_f.read(in_size)
in_f.close()
tag = create_tag(args, in_bytes, in_size)
out_f = open(args.output_file, 'w+b')
out_f.write(tag)
out_f.close()
def main():
global args
parser = argparse.ArgumentParser(description='')
parser.add_argument('--flags',
dest='part_flags',
action='store',
type=auto_int,
help='Partition Flags')
parser.add_argument('--id',
dest='part_id',
action='store',
type=auto_int,
help='Partition ID')
parser.add_argument('--input-file',
dest='input_file',
action='store',
type=str,
help='Input file')
parser.add_argument('--output-file',
dest='output_file',
action='store',
type=str,
help='Output file')
parser.add_argument('--name',
dest='part_name',
action='store',
type=str,
help='Partition Name')
parser.add_argument('--version',
dest='part_version',
action='store',
type=str,
help='Partition Version')
args = parser.parse_args()
if ((not args.part_flags) or
(not args.part_id) or
(not args.input_file) or
(not args.output_file) or
(not args.part_name) or
(not args.part_version)):
parser.print_help()
else:
create_output(args)
main()
|
pjdelport/Django-facebook | refs/heads/master | docs/docs_env/Lib/ntpath.py | 32 | # Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import stat
import sys
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
# OS/2 w/ VACPP
altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
"""Normalize case of pathname.
Makes all characters lowercase and all slashes into backslashes."""
return s.replace("/", "\\").lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
"""Test whether a path is absolute"""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
# Join two (or more) paths.
def join(a, *p):
"""Join two or more pathname components, inserting "\\" as needed"""
path = a
for b in p:
b_wins = 0 # set to 1 iff b makes path irrelevant
if path == "":
b_wins = 1
elif isabs(b):
# This probably wipes out path so far. However, it's more
# complicated if path begins with a drive letter:
# 1. join('c:', '/a') == 'c:/a'
# 2. join('c:/', '/a') == 'c:/a'
# But
# 3. join('c:/a', '/b') == '/b'
# 4. join('c:', 'd:/') = 'd:/'
# 5. join('c:/', 'd:/') = 'd:/'
if path[1:2] != ":" or b[1:2] == ":":
# Path doesn't start with a drive letter, or cases 4 and 5.
b_wins = 1
# Else path has a drive letter, and b doesn't but is absolute.
elif len(path) > 3 or (len(path) == 3 and
path[-1] not in "/\\"):
# case 3
b_wins = 1
if b_wins:
path = b
else:
# Join, and ensure there's a separator.
assert len(path) > 0
if path[-1] in "/\\":
if b and b[0] in "/\\":
path += b[1:]
else:
path += b
elif path[-1] == ":":
path += b
elif b:
if b[0] in "/\\":
path += b
else:
path += "\\" + b
else:
# path is not empty and does not end with a backslash,
# but b is empty; since, e.g., split('a/') produces
# ('a', ''), it's best if join() adds a backslash in
# this case.
path += '\\'
return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty"""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
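# Example: splitdrive("c:/dir/file") == ('c:', '/dir/file'), while a path without a
# drive letter is returned unchanged as ('', path).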
# Parse UNC paths
def splitunc(p):
"""Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
if p[1:2] == ':':
return '', p # Drive letter present
firstTwo = p[0:2]
if firstTwo == '//' or firstTwo == '\\\\':
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
# \\machine\mountpoint\directories...
# directory ^^^^^^^^^^^^^^^
normp = normcase(p)
index = normp.find('\\', 2)
if index == -1:
##raise RuntimeError, 'illegal UNC path: "' + p + '"'
return ("", p)
index = normp.find('\\', index + 1)
if index == -1:
index = len(p)
return p[:index], p[index:]
return '', p
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
"""Split a pathname.
Return tuple (head, tail) where tail is everything after the final slash.
Either part may be empty."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end.
Return (root, ext), either part may be empty."""
i = p.rfind('.')
if i<=max(p.rfind('/'), p.rfind('\\')):
return p, ''
else:
return p[:i], p[i:]
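# Example: splitext(r"c:\dir\name.txt") == (r"c:\dir\name", ".txt"); a dot that only
# appears in a directory component yields an empty extension.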
# Return the tail (basename) part of a path.
def basename(p):
"""Returns the final component of a pathname"""
return split(p)[1]
# Return the head (dirname) part of a path.
def dirname(p):
"""Returns the directory component of a pathname"""
return split(p)[0]
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
s1 = min(m)
s2 = max(m)
n = min(len(s1), len(s2))
for i in xrange(n):
if s1[i] != s2[i]:
return s1[:i]
return s1[:n]
# Get size, mtime, atime of files.
def getsize(filename):
"""Return the size of a file, reported by os.stat()"""
return os.stat(filename).st_size
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()"""
return os.stat(filename).st_mtime
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()"""
return os.stat(filename).st_atime
def getctime(filename):
"""Return the creation time of a file, reported by os.stat()."""
return os.stat(filename).st_ctime
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
"""Test for symbolic link. On WindowsNT/95 always returns false"""
return False
# Does a path exist?
def exists(path):
"""Test whether a path exists"""
try:
st = os.stat(path)
except os.error:
return False
return True
lexists = exists
# Is a path a dos directory?
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path.
def isdir(path):
"""Test whether a path is a directory"""
try:
st = os.stat(path)
except os.error:
return False
return stat.S_ISDIR(st.st_mode)
# Is a path a regular file?
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path.
def isfile(path):
"""Test whether a path is a regular file"""
try:
st = os.stat(path)
except os.error:
return False
return stat.S_ISREG(st.st_mode)
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
"""Test whether a path is a mount point (defined as root of drive)"""
unc, rest = splitunc(path)
if unc:
return rest in ("", "/", "\\")
p = splitdrive(path)[1]
return len(p) == 1 and p[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
exceptions = ('.', '..')
for name in names:
if name not in exceptions:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if i == 1:
if 'HOME' in os.environ:
userhome = os.environ['HOME']
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = os.environ['HOMEDRIVE']
except KeyError:
drive = ''
userhome = join(drive, os.environ['HOMEPATH'])
else:
return path
return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - no escape character, except for '$$' which is translated into '$'
# - ${varname} is accepted.
# - varnames can be made out of letters, digits and the character '_'
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
"""Expand shell variables of form $var and ${var}.
Unknown variables are left unchanged."""
if '$' not in path:
return path
import string
varchars = string.ascii_letters + string.digits + '_-'
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen - 1
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
except ValueError:
res = res + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
if var in os.environ:
res = res + os.environ[var]
if c != '':
res = res + c
else:
res = res + c
index = index + 1
return res
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
path = path.replace("/", "\\")
prefix, path = splitdrive(path)
# We need to be careful here. If the prefix is empty, and the path starts
# with a backslash, it could either be an absolute path on the current
# drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
# is therefore imperative NOT to collapse multiple backslashes blindly in
# that case.
# The code below preserves multiple backslashes when there is no drive
# letter. This means that the invalid filename \\\a\b is preserved
# unchanged, where a\\\b is normalised to a\b. It's not clear that there
# is any better behaviour for such edge cases.
if prefix == '':
# No drive letter - preserve initial backslashes
while path[:1] == "\\":
prefix = prefix + "\\"
path = path[1:]
else:
# We have a drive letter - collapse initial backslashes
if path.startswith("\\"):
prefix = prefix + "\\"
path = path.lstrip("\\")
comps = path.split("\\")
i = 0
while i < len(comps):
if comps[i] in ('.', ''):
del comps[i]
elif comps[i] == '..':
if i > 0 and comps[i-1] != '..':
del comps[i-1:i+1]
i -= 1
elif i == 0 and prefix.endswith("\\"):
del comps[i]
else:
i += 1
else:
i += 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append('.')
return prefix + "\\".join(comps)
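# Illustrative behaviour sketch (not part of the original module):
#   normpath('A//B') == normpath('A/./B') == normpath('A/foo/../B') == 'A\\B'
#   normpath('C:////dir//file')        -> 'C:\\dir\\file'  (drive present: collapse)
#   normpath('\\\\server\\mount\\dir') -> '\\\\server\\mount\\dir'  (no drive letter:
#       leading backslashes are kept, so UNC names survive as described above)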
# Return an absolute path.
try:
from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
def abspath(path):
"""Return the absolute version of a path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
else: # use native Windows method on Windows
def abspath(path):
"""Return the absolute version of a path."""
if path: # Empty path must return current working directory.
try:
path = _getfullpathname(path)
except WindowsError:
pass # Bad path - return unchanged.
else:
path = os.getcwd()
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
sys.getwindowsversion()[3] >= 2)
|
giojavi04/Sais | refs/heads/master | apps/usersProfiles/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
ctripcorp/tars | refs/heads/master | tars/__init__.py | 1 | from tars.engine.celery_app import app
|
n0m4dz/odoo | refs/heads/8.0 | addons/portal_sale/res_config.py | 445 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_portal_config_settings(osv.TransientModel):
_inherit = 'account.config.settings'
_columns = {
'group_payment_options': fields.boolean('Show payment buttons to employees too',
implied_group='portal_sale.group_payment_options',
help="Show online payment options on Sale Orders and Customer Invoices to employees. "
"If not checked, these options are only visible to portal users."),
} |
cloudify-cosmo/cloudify-manager | refs/heads/master | rest-service/manager_rest/rest/resources_v3_1/summary.py | 1 | #########
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from .. import rest_utils
from flask import request
from dateutil import rrule
from manager_rest.rest import rest_decorators
from manager_rest.security import SecuredResource
from manager_rest.security.authorization import authorize
from manager_rest.storage import (get_storage_manager,
models)
from manager_rest import manager_exceptions
from cloudify_rest_client.responses import ListResponse
from functools import wraps
class BaseSummary(SecuredResource):
summary_fields = []
auth_req = None
model = None
def get(self, pagination=None, all_tenants=None, filters=None):
target_field = request.args.get('_target_field')
subfield = request.args.get('_sub_field')
get_all_results = rest_utils.verify_and_convert_bool(
'_get_all_results',
request.args.get('_get_all_results', False)
)
if target_field not in self.summary_fields:
raise manager_exceptions.BadParametersError(
'Field {target} is not available for summary. Valid fields '
'are: {valid}'.format(
target=target_field,
valid=', '.join(self.summary_fields),
)
)
if subfield and subfield not in self.summary_fields:
raise manager_exceptions.BadParametersError(
'Field {target} is not available for summary. Valid fields '
'are: {valid}'.format(
target=subfield,
valid=', '.join(self.summary_fields),
)
)
return get_storage_manager().summarize(
target_field=target_field,
sub_field=subfield,
model_class=self.model,
pagination=pagination,
all_tenants=all_tenants,
get_all_results=get_all_results,
filters=filters,
)
def marshal_summary(summary_type):
def build_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
target_field = request.args.get('_target_field')
subfield = request.args.get('_sub_field')
marshalled_items = {}
for item in result.items:
if item[0] not in marshalled_items:
marshalled_items[item[0]] = {
target_field: item[0],
summary_type: 0,
}
if subfield:
subfield_key = 'by {0}'.format(subfield)
if subfield_key not in marshalled_items[item[0]]:
marshalled_items[item[0]][subfield_key] = []
marshalled_items[item[0]][subfield_key].append({
subfield: item[1],
summary_type: item[-1],
})
marshalled_items[item[0]][summary_type] += item[-1]
return {"items": list(marshalled_items.values()),
"metadata": result.metadata}
return wrapper
return build_wrapper
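# Illustrative sketch (hypothetical field values): with _target_field=blueprint_id
# and no _sub_field, summarize() rows such as ('bp1', 3) and ('bp2', 1) are turned
# by marshal_summary('deployments') into
#   {"items": [{"blueprint_id": "bp1", "deployments": 3},
#              {"blueprint_id": "bp2", "deployments": 1}],
#    "metadata": <unchanged result.metadata>}
# With _sub_field=site_name, rows like ('bp1', 'siteA', 2) additionally build a
# "by site_name" list per item while the per-blueprint count is accumulated.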
class SummarizeDeployments(BaseSummary):
summary_fields = [
'blueprint_id',
'tenant_name',
'visibility',
'site_name',
'deployment_status',
]
auth_req = 'deployment_list'
model = models.Deployment
@authorize(auth_req, allow_all_tenants=True)
@marshal_summary('deployments')
@rest_decorators.all_tenants
@rest_decorators.create_filters(models.Deployment)
@rest_decorators.paginate
def get(self, *args, **kwargs):
return super(SummarizeDeployments, self).get(*args, **kwargs)
class SummarizeNodes(BaseSummary):
summary_fields = [
'deployment_id',
'tenant_name',
'visibility',
]
auth_req = 'node_list'
model = models.Node
@authorize(auth_req, allow_all_tenants=True)
@marshal_summary('nodes')
@rest_decorators.create_filters(models.Node)
@rest_decorators.paginate
@rest_decorators.all_tenants
def get(self, *args, **kwargs):
return super(SummarizeNodes, self).get(*args, **kwargs)
class SummarizeNodeInstances(BaseSummary):
summary_fields = [
'deployment_id',
'index',
'node_id',
'state',
'host_id',
'tenant_name',
'visibility',
]
auth_req = 'node_instance_list'
model = models.NodeInstance
@authorize(auth_req, allow_all_tenants=True)
@marshal_summary('node_instances')
@rest_decorators.create_filters(models.NodeInstance)
@rest_decorators.paginate
@rest_decorators.all_tenants
def get(self, *args, **kwargs):
return super(SummarizeNodeInstances, self).get(*args, **kwargs)
class SummarizeExecutions(BaseSummary):
summary_fields = [
'status',
'status_display',
'blueprint_id',
'deployment_id',
'workflow_id',
'tenant_name',
'visibility',
]
auth_req = 'execution_list'
model = models.Execution
@authorize(auth_req, allow_all_tenants=True)
@marshal_summary('executions')
@rest_decorators.create_filters(models.Execution)
@rest_decorators.paginate
@rest_decorators.all_tenants
def get(self, *args, **kwargs):
return super(SummarizeExecutions, self).get(*args, **kwargs)
class SummarizeBlueprints(BaseSummary):
summary_fields = [
'tenant_name',
'visibility',
]
auth_req = 'blueprint_list'
model = models.Blueprint
@authorize(auth_req, allow_all_tenants=True)
@marshal_summary('blueprints')
@rest_decorators.all_tenants
@rest_decorators.create_filters(models.Blueprint)
@rest_decorators.paginate
def get(self, *args, **kwargs):
return super(SummarizeBlueprints, self).get(*args, **kwargs)
class SummarizeExecutionSchedules(BaseSummary):
summary_fields = [
'deployment_id',
'workflow_id',
'tenant_name',
'visibility',
]
auth_req = 'execution_schedule_list'
model = models.ExecutionSchedule
@authorize(auth_req, allow_all_tenants=True)
@marshal_summary('execution_schedules')
@rest_decorators.all_tenants
@rest_decorators.create_filters(models.ExecutionSchedule)
@rest_decorators.paginate
def get(self, *args, **kwargs):
target_field = request.args.get('_target_field')
get_all_results = rest_utils.verify_and_convert_bool(
'_get_all_results',
request.args.get('_get_all_results', False)
)
if target_field not in self.summary_fields:
raise manager_exceptions.BadParametersError(
'Field {target} is not available for summary. Valid fields '
'are: {valid}'.format(
target=target_field,
valid=', '.join(self.summary_fields),
)
)
schedules_list = get_storage_manager().list(
models.ExecutionSchedule,
pagination=kwargs.get('pagination'),
all_tenants=kwargs.get('all_tenants'),
get_all_results=get_all_results,
filters=kwargs.get('filters'),
)
summary_dict = {}
for schedule in schedules_list:
recurring = self.is_recurring(schedule.rule)
key = (getattr(schedule, target_field),
'recurring' if recurring else 'single')
summary_dict[key] = summary_dict.get(key, 0) + 1
summary_list = []
for k, v in summary_dict.items():
summary_list.append(k + (v,))
metadata = schedules_list.metadata
schedules_list.metadata['pagination']['total'] = len(summary_dict)
return ListResponse(summary_list, metadata)
@staticmethod
def is_recurring(rule):
if 'recurrence' in rule and rule.get('count') != 1:
return True
if 'rrule' in rule:
rrule_dates = rrule.rrulestr(rule['rrule'])
try:
if rrule_dates[1]:
return True
except IndexError:
return False
return False
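    # Illustrative classification sketch (hypothetical rule values):
    #   {'recurrence': '1 days'}               -> recurring (no count limit)
    #   {'recurrence': '1 days', 'count': 1}   -> single occurrence
    #   {'rrule': 'RRULE:FREQ=DAILY;COUNT=5'}  -> recurring (a second date exists)
    #   {'rrule': 'RRULE:FREQ=DAILY;COUNT=1'}  -> single occurrence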
|
AnasGhrab/scikit-learn | refs/heads/master | sklearn/cluster/tests/common.py | 416 | """
Common utilities for testing clustering.
"""
import numpy as np
###############################################################################
# Generate sample data
def generate_clustered_data(seed=0, n_clusters=3, n_features=2,
n_samples_per_cluster=20, std=.4):
prng = np.random.RandomState(seed)
    # the data is voluntarily shifted away from zero to check clustering
    # algorithm robustness with regard to non-centered data
means = np.array([[1, 1, 1, 0],
[-1, -1, 0, 1],
[1, -1, 1, 1],
[-1, 1, 1, 0],
]) + 10
X = np.empty((0, n_features))
for i in range(n_clusters):
X = np.r_[X, means[i][:n_features]
+ std * prng.randn(n_samples_per_cluster, n_features)]
return X
|
rspavel/spack | refs/heads/develop | var/spack/repos/builtin/packages/popt/package.py | 3 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Popt(AutotoolsPackage):
"""The popt library parses command line options."""
homepage = "https://launchpad.net/popt"
url = "https://launchpad.net/popt/head/1.16/+download/popt-1.16.tar.gz"
version('1.16', sha256='e728ed296fe9f069a0e005003c3d6b2dde3d9cad453422a10d6558616d304cc8')
|
xuru/pyvisdk | refs/heads/master | pyvisdk/do/user_upgrade_event.py | 1 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def UserUpgradeEvent(vim, *args, **kwargs):
'''This event is a general user event from upgrade.'''
obj = vim.client.factory.create('ns0:UserUpgradeEvent')
# do some validation checking...
if (len(args) + len(kwargs)) < 5:
        raise IndexError('Expected at least 5 arguments got: %d' % (len(args) + len(kwargs)))
required = [ 'message', 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
|
jmhsi/justin_tinker | refs/heads/master | data_science/lendingclub_bak/investing/count_loans.py | 2 | #import requests
from datetime import datetime
import json
import lendingclub.account_info as acc_info
import re
import lendingclub.dataprep_and_modeling.modeling_utils.data_prep_new as data_prep
import lendingclub.investing.investing_utils as investing_utils
import pandas as pd
# constants
token = acc_info.token
inv_acc_id = acc_info.investor_id
portfolio_id = acc_info.portfolio_id
header = {
'Authorization': token,
'Content-Type': 'application/json',
'X-LC-LISTING-VERSION': '1.2'
}
acc_summary_url = 'https://api.lendingclub.com/api/investor/v1/accounts/' + \
str(inv_acc_id) + '/summary'
print(datetime.utcnow().strftime('%Y-%m-%d:%H-%M-%S.%f')[:-3])
api_loans, api_ids = investing_utils.get_loans_and_ids(
header, exclude_already=True)
print(len(api_ids))
print(datetime.utcnow().strftime('%Y-%m-%d:%H-%M-%S.%f')[:-3])
|
takeflight/wagtail | refs/heads/master | wagtail/core/migrations/0011_page_first_published_at.py | 24 | # -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0010_change_page_owner_to_null_on_delete'),
]
operations = [
migrations.AddField(
model_name='page',
name='first_published_at',
field=models.DateTimeField(editable=False, null=True),
preserve_default=True,
),
]
|
bioinfinio/seqmagick | refs/heads/master | seqmagick/test/test_subcommands_backtrans_align.py | 3 | import unittest
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Data import CodonTable
from seqmagick.subcommands import backtrans_align
class BatchTestCase(unittest.TestCase):
def test_no_input(self):
i = []
b = backtrans_align.batch(i, 1)
self.assertRaises(StopIteration, next, b)
def test_singletons(self):
i = range(3)
b = backtrans_align.batch(i, 1)
self.assertEquals([[0], [1], [2]], list(b))
def test_doubles(self):
i = range(6)
b = backtrans_align.batch(i, 2)
self.assertEquals([[0, 1], [2, 3], [4, 5]], list(b))
def test_partial(self):
i = range(5)
b = backtrans_align.batch(i, 2)
self.assertEquals([[0, 1], [2, 3], [4]], list(b))
class AlignmentMapperTestCase(unittest.TestCase):
def setUp(self):
self.instance = backtrans_align.AlignmentMapper(CodonTable.unambiguous_dna_by_name['Standard'])
def test_validate_valid(self):
nucl = 'TTTAAG'
prot = 'FK'
self.assertTrue(self.instance._validate_translation(prot, nucl))
def test_validate_invalid(self):
nucl = 'AAGTTT'
prot = 'KK'
self.assertRaisesRegexp(ValueError, r'Codon TTT translates to F, not K',
self.instance._validate_translation, prot, nucl)
def test_map_alignment(self):
nucl = [SeqRecord(Seq('AAGTTT'), id='1'), # KF
SeqRecord(Seq('AAGGTCTTC'), id='2'), # KVF
SeqRecord(Seq('GGGGTTTTT'), id='3')] # GVF
prot = [SeqRecord(Seq('-K-F'), id='1'),
SeqRecord(Seq('-KVF'), id='2'),
SeqRecord(Seq('G-VF'), id='3')]
result = self.instance.map_all(prot, nucl)
result = [(s.id, str(s.seq)) for s in result]
self.assertEqual([('1', '---AAG---TTT'),
('2', '---AAGGTCTTC'),
('3', 'GGG---GTTTTT')], result)
def test_map_alignment_insufficient_codons(self):
nucl = [SeqRecord(Seq('AAGTTT'), id='1'), # KF
SeqRecord(Seq('AAGGTC'), id='2')] # KV
prot = [SeqRecord(Seq('K-F'), id='1'),
SeqRecord(Seq('KVF'), id='2')]
mapped = self.instance.map_all(prot, nucl)
self.assertRaises(ValueError, list, mapped)
def test_map_alignment_excess_codons(self):
nucl = [SeqRecord(Seq('AAGTTT'), id='1'), # KF
SeqRecord(Seq('AAGGTCTTC'), id='2')] # KVF
prot = [SeqRecord(Seq('K-F'), id='1'),
SeqRecord(Seq('KV-'), id='2')]
mapped = self.instance.map_all(prot, nucl)
self.assertRaises(ValueError, list, mapped)
|
alexschiller/osf.io | refs/heads/develop | api_tests/registrations/views/test_registration_forks.py | 2 | import mock
import pytest
from nose.tools import * # flake8: noqa
from framework.auth.core import Auth
from website.models import Node
from website.util import permissions
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from osf_tests.factories import (
NodeFactory,
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
WithdrawnRegistrationFactory,
ForkFactory
)
class TestRegistrationForksList(ApiTestCase):
def setUp(self):
super(TestRegistrationForksList, self).setUp()
self.user = AuthUserFactory()
self.private_project = ProjectFactory(creator=self.user)
self.private_project.save()
self.component = NodeFactory(parent=self.private_project, creator=self.user)
self.pointer = ProjectFactory(creator=self.user)
self.private_project.add_pointer(self.pointer, auth=Auth(self.user), save=True)
self.private_registration = RegistrationFactory(project=self.private_project, creator=self.user)
self.private_fork = ForkFactory(project=self.private_registration, user=self.user)
self.private_registration_url = '/{}registrations/{}/forks/'.format(API_BASE, self.private_registration._id)
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_project.save()
self.public_component = NodeFactory(parent=self.public_project, creator=self.user, is_public=True)
self.public_registration = RegistrationFactory(project = self.public_project, creator=self.user, is_public=True)
self.public_registration_url = '/{}registrations/{}/forks/'.format(API_BASE, self.public_registration._id)
self.public_fork = ForkFactory(project=self.public_registration, user=self.user)
self.user_two = AuthUserFactory()
def test_can_access_public_registration_forks_list_when_unauthenticated(self):
res = self.app.get(self.public_registration_url)
assert_equal(len(res.json['data']), 0)
# Fork defaults to private
assert_equal(self.public_fork.is_public, False)
self.public_fork.is_public = True
self.public_fork.save()
res = self.app.get(self.public_registration_url)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1)
assert_equal(self.public_fork.is_public, True)
data = res.json['data'][0]
assert_equal(data['attributes']['title'], 'Fork of ' + self.public_registration.title)
assert_equal(data['id'], self.public_fork._id)
assert_equal(data['attributes']['registration'], False)
assert_equal(data['attributes']['fork'], True)
def test_can_access_public_registration_forks_list_authenticated_contributor(self):
res = self.app.get(self.public_registration_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(self.public_fork.is_public, False)
assert_equal(len(res.json['data']), 1)
data = res.json['data'][0]
assert_equal(data['attributes']['title'], 'Fork of ' + self.public_project.title)
assert_equal(data['id'], self.public_fork._id)
assert_equal(data['attributes']['registration'], False)
assert_equal(data['attributes']['fork'], True)
def test_can_access_public_registration_forks_list_authenticated_non_contributor(self):
res = self.app.get(self.public_registration_url, auth=self.user_two.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 0)
# Fork defaults to private
assert_equal(self.public_fork.is_public, False)
self.public_fork.is_public = True
self.public_fork.save()
res = self.app.get(self.public_registration_url)
assert_equal(len(res.json['data']), 1)
assert_equal(self.public_fork.is_public, True)
data = res.json['data'][0]
assert_equal(data['attributes']['title'], 'Fork of ' + self.public_project.title)
assert_equal(data['id'], self.public_fork._id)
assert_equal(data['attributes']['registration'], False)
assert_equal(data['attributes']['fork'], True)
def test_cannot_access_private_registration_forks_list_unauthenticated(self):
res = self.app.get(self.private_registration_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
def test_authenticated_contributor_can_access_private_registration_forks_list(self):
res = self.app.get(self.private_registration_url + '?embed=children&embed=node_links&embed=logs&embed=contributors&embed=forked_from', auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1)
data = res.json['data'][0]
assert_equal(data['attributes']['title'], 'Fork of ' + self.private_project.title)
assert_equal(data['id'], self.private_fork._id)
fork_contributors = data['embeds']['contributors']['data'][0]['embeds']['users']['data']
assert_equal(fork_contributors['attributes']['family_name'], self.user.family_name)
assert_equal(fork_contributors['id'], self.user._id)
forked_children = data['embeds']['children']['data'][0]
assert_equal(forked_children['id'], self.private_registration.forks.first().nodes.first()._id)
assert_equal(forked_children['attributes']['title'], self.component.title)
forked_node_links = data['embeds']['node_links']['data'][0]['embeds']['target_node']['data']
assert_equal(forked_node_links['id'], self.pointer._id)
assert_equal(forked_node_links['attributes']['title'], self.pointer.title)
assert_equal(data['attributes']['registration'], False)
assert_equal(data['attributes']['fork'], True)
expected_logs = list(self.private_registration.logs.values_list('action', flat=True))
expected_logs.append(self.private_registration.nodes.first().logs.latest().action)
expected_logs.append('node_forked')
expected_logs.append('node_forked')
forked_logs = data['embeds']['logs']['data']
assert_equal(set(expected_logs), set(log['attributes']['action'] for log in forked_logs))
assert_equal(len(forked_logs), 6)
forked_from = data['embeds']['forked_from']['data']
assert_equal(forked_from['id'], self.private_registration._id)
def test_authenticated_non_contributor_cannot_access_private_registration_forks_list(self):
res = self.app.get(self.private_registration_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
class TestRegistrationForkCreate(ApiTestCase):
def setUp(self):
super(TestRegistrationForkCreate, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.user_three = AuthUserFactory()
self.private_project = ProjectFactory(creator=self.user)
private_pointer = ProjectFactory(creator=self.user_two)
actual_pointer = self.private_project.add_pointer(private_pointer, auth=Auth(self.user_two), save=True)
self.private_registration = RegistrationFactory(creator=self.user, project=self.private_project)
self.fork_data = {
'data': {
'type': 'nodes'
}
}
self.fork_data_with_title = {
'data': {
'type': 'nodes',
'attributes':
{'title': 'My Forked Project'}
}
}
self.private_registration_url = '/{}registrations/{}/forks/'.format(API_BASE, self.private_registration._id)
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_registration = RegistrationFactory(creator=self.user, project=self.public_project, is_public=True)
self.public_registration_url = '/{}registrations/{}/forks/'.format(API_BASE, self.public_registration._id)
def test_create_fork_from_public_registration_with_new_title(self):
res = self.app.post_json_api(self.public_registration_url, self.fork_data_with_title, auth=self.user.auth)
assert_equal(res.status_code, 201)
data = res.json['data']
assert_equal(data['id'], self.public_registration.forks.first()._id)
assert_equal(data['attributes']['title'], self.fork_data_with_title['data']['attributes']['title'])
assert_equal(data['attributes']['registration'], False)
assert_equal(data['attributes']['fork'], True)
def test_create_fork_from_private_registration_with_new_title(self):
res = self.app.post_json_api(self.private_registration_url, self.fork_data_with_title, auth=self.user.auth)
assert_equal(res.status_code, 201)
data = res.json['data']
assert_equal(data['id'], self.private_registration.forks.first()._id)
assert_equal(data['attributes']['title'], self.fork_data_with_title['data']['attributes']['title'])
assert_equal(data['attributes']['registration'], False)
assert_equal(data['attributes']['fork'], True)
def test_can_fork_public_registration_logged_in(self):
res = self.app.post_json_api(self.public_registration_url, self.fork_data, auth=self.user_two.auth)
assert_equal(res.status_code, 201)
data = res.json['data']
assert_equal(data['id'], self.public_registration.forks.first()._id)
assert_equal(data['attributes']['title'], 'Fork of ' + self.public_registration.title)
assert_equal(data['attributes']['registration'], False)
assert_equal(data['attributes']['fork'], True)
def test_cannot_fork_public_registration_logged_out(self):
res = self.app.post_json_api(self.public_registration_url, self.fork_data, expect_errors=True)
assert_equal(res.status_code, 401)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
def test_can_fork_public_registration_logged_in_contributor(self):
res = self.app.post_json_api(self.public_registration_url, self.fork_data, auth=self.user.auth)
assert_equal(res.status_code, 201)
data = res.json['data']
assert_equal(data['id'], self.public_registration.forks.first()._id)
assert_equal(data['attributes']['title'], 'Fork of ' + self.public_registration.title)
assert_equal(data['attributes']['registration'], False)
assert_equal(data['attributes']['fork'], True)
def test_cannot_fork_private_registration_logged_out(self):
res = self.app.post_json_api(self.private_registration_url, self.fork_data, expect_errors=True)
assert_equal(res.status_code, 401)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
def test_cannot_fork_private_registration_logged_in_non_contributor(self):
res = self.app.post_json_api(self.private_registration_url, self.fork_data, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_can_fork_private_registration_logged_in_contributor(self):
res = self.app.post_json_api(self.private_registration_url + '?embed=children&embed=node_links&embed=logs&embed=contributors&embed=forked_from', self.fork_data, auth=self.user.auth)
assert_equal(res.status_code, 201)
data = res.json['data']
assert_equal(data['attributes']['title'], 'Fork of ' + self.private_registration.title)
assert_equal(data['attributes']['registration'], False)
assert_equal(data['attributes']['fork'], True)
fork_contributors = data['embeds']['contributors']['data'][0]['embeds']['users']['data']
assert_equal(fork_contributors['attributes']['family_name'], self.user.family_name)
assert_equal(fork_contributors['id'], self.user._id)
forked_from = data['embeds']['forked_from']['data']
assert_equal(forked_from['id'], self.private_registration._id)
def test_fork_private_components_no_access(self):
url = self.public_registration_url + '?embed=children'
private_component = NodeFactory(parent=self.public_registration, creator=self.user_two, is_public=False)
res = self.app.post_json_api(url, self.fork_data, auth=self.user_three.auth)
assert_equal(res.status_code, 201)
# Private components that you do not have access to are not forked
assert_equal(res.json['data']['embeds']['children']['links']['meta']['total'], 0)
def test_fork_components_you_can_access(self):
url = self.private_registration_url + '?embed=children'
new_component = NodeFactory(parent=self.private_registration, creator=self.user)
res = self.app.post_json_api(url, self.fork_data, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['embeds']['children']['links']['meta']['total'], 1)
assert_equal(res.json['data']['embeds']['children']['data'][0]['id'], new_component.forks.first()._id)
def test_fork_private_node_links(self):
url = self.private_registration_url + '?embed=node_links'
# Node link is forked, but shows up as a private node link
res = self.app.post_json_api(url, self.fork_data, auth=self.user.auth)
assert_equal(res.json['data']['embeds']['node_links']['data'][0]['embeds']['target_node']['errors'][0]['detail'],
'You do not have permission to perform this action.')
assert_equal(res.json['data']['embeds']['node_links']['links']['meta']['total'], 1)
def test_fork_node_links_you_can_access(self):
pointer = ProjectFactory(creator=self.user)
self.private_project.add_pointer(pointer, auth=Auth(self.user), save=True)
new_registration = RegistrationFactory(project = self.private_project, creator=self.user)
url = '/{}registrations/{}/forks/'.format(API_BASE, new_registration._id) + '?embed=node_links'
res = self.app.post_json_api(url, self.fork_data, auth=self.user.auth)
assert_equal(res.json['data']['embeds']['node_links']['data'][1]['embeds']['target_node']['data']['id'], pointer._id)
assert_equal(res.json['data']['embeds']['node_links']['links']['meta']['total'], 2)
def test_cannot_fork_retractions(self):
with mock.patch('osf.models.AbstractNode.update_search'):
retraction = WithdrawnRegistrationFactory(registration=self.private_registration, user=self.user)
url = '/{}registrations/{}/forks/'.format(API_BASE, self.private_registration._id) + '?embed=forked_from'
res = self.app.post_json_api(url, self.fork_data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
|
GustavoHennig/ansible | refs/heads/devel | lib/ansible/plugins/connection/__init__.py | 21 | # (c) 2015 Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fcntl
import gettext
import os
import shlex
from abc import ABCMeta, abstractmethod, abstractproperty
from functools import wraps
from ansible.compat.six import with_metaclass
from ansible import constants as C
from ansible.compat.six import string_types
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins import shell_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['ConnectionBase', 'ensure_connect']
BUFSIZE = 65536
def ensure_connect(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
if not self._connected:
self._connect()
return func(self, *args, **kwargs)
return wrapped
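# Note: the abstract transport methods below (exec_command, put_file, fetch_file)
# are wrapped with @ensure_connect, so invoking them on a not-yet-connected
# instance transparently calls _connect() first.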
class ConnectionBase(with_metaclass(ABCMeta, object)):
'''
A base class for connections to contain common code.
'''
has_pipelining = False
has_native_async = False # eg, winrm
always_pipeline_modules = False # eg, winrm
become_methods = C.BECOME_METHODS
# When running over this connection type, prefer modules written in a certain language
# as discovered by the specified file extension. An empty string as the
# language means any language.
module_implementation_preferences = ('',)
allow_executable = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
# All these hasattrs allow subclasses to override these parameters
if not hasattr(self, '_play_context'):
self._play_context = play_context
if not hasattr(self, '_new_stdin'):
self._new_stdin = new_stdin
# Backwards compat: self._display isn't really needed, just import the global display and use that.
if not hasattr(self, '_display'):
self._display = display
if not hasattr(self, '_connected'):
self._connected = False
self.success_key = None
self.prompt = None
self._connected = False
# load the shell plugin for this action/connection
if play_context.shell:
shell_type = play_context.shell
elif hasattr(self, '_shell_type'):
shell_type = getattr(self, '_shell_type')
else:
shell_type = 'sh'
shell_filename = os.path.basename(self._play_context.executable)
for shell in shell_loader.all():
if shell_filename in shell.COMPATIBLE_SHELLS:
shell_type = shell.SHELL_FAMILY
break
self._shell = shell_loader.get(shell_type)
if not self._shell:
raise AnsibleError("Invalid shell type specified (%s), or the plugin for that shell type is missing." % shell_type)
@property
def connected(self):
'''Read-only property holding whether the connection to the remote host is active or closed.'''
return self._connected
def _become_method_supported(self):
''' Checks if the current class supports this privilege escalation method '''
if self._play_context.become_method in self.become_methods:
return True
raise AnsibleError("Internal Error: this connection module does not support running commands via %s" % self._play_context.become_method)
def set_host_overrides(self, host, hostvars=None):
'''
An optional method, which can be used to set connection plugin parameters
from variables set on the host (or groups to which the host belongs)
Any connection plugin using this should first initialize its attributes in
an overridden `def __init__(self):`, and then use `host.get_vars()` to find
variables which may be used to set those attributes in this method.
'''
pass
@staticmethod
def _split_ssh_args(argstring):
"""
Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a
list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to
the argument list. The list will not contain any empty elements.
"""
try:
# Python 2.6.x shlex doesn't handle unicode type so we have to
# convert args to byte string for that case. More efficient to
# try without conversion first but python2.6 doesn't throw an
# exception, it merely mangles the output:
# >>> shlex.split(u't e')
# ['t\x00\x00\x00', '\x00\x00\x00e\x00\x00\x00']
return [to_text(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()]
except AttributeError:
# In Python3, shlex.split doesn't work on a byte string.
return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()]
@abstractproperty
def transport(self):
"""String used to identify this Connection class from other classes"""
pass
@abstractmethod
def _connect(self):
"""Connect to the host we've been initialized with"""
# Check if PE is supported
if self._play_context.become:
self._become_method_supported()
@ensure_connect
@abstractmethod
def exec_command(self, cmd, in_data=None, sudoable=True):
"""Run a command on the remote host.
:arg cmd: byte string containing the command
:kwarg in_data: If set, this data is passed to the command's stdin.
This is used to implement pipelining. Currently not all
connection plugins implement pipelining.
:kwarg sudoable: Tell the connection plugin if we're executing
a command via a privilege escalation mechanism. This may affect
how the connection plugin returns data. Note that not all
connections can handle privilege escalation.
:returns: a tuple of (return code, stdout, stderr) The return code is
an int while stdout and stderr are both byte strings.
When a command is executed, it goes through multiple commands to get
there. It looks approximately like this::
[LocalShell] ConnectionCommand [UsersLoginShell (*)] ANSIBLE_SHELL_EXECUTABLE [(BecomeCommand ANSIBLE_SHELL_EXECUTABLE)] Command
:LocalShell: Is optional. It is run locally to invoke the
            ``ConnectionCommand``. In most instances, the
            ``ConnectionCommand`` can be invoked directly instead. The ssh
            connection plugin, whose ssh_args values may need local expansion,
            is the sole known exception to this. Shell metacharacters in the
            command itself should be
processed on the remote machine, not on the local machine so no
shell is needed on the local machine. (Example, ``/bin/sh``)
:ConnectionCommand: This is the command that connects us to the remote
machine to run the rest of the command. ``ansible_ssh_user``,
``ansible_ssh_host`` and so forth are fed to this piece of the
command to connect to the correct host (Examples ``ssh``,
``chroot``)
:UsersLoginShell: This shell may or may not be created depending on
the ConnectionCommand used by the connection plugin. This is the
shell that the ``ansible_ssh_user`` has configured as their login
shell. In traditional UNIX parlance, this is the last field of
            a user's ``/etc/passwd`` entry. We do not specifically try to run
the ``UsersLoginShell`` when we connect. Instead it is implicit
in the actions that the ``ConnectionCommand`` takes when it
connects to a remote machine. ``ansible_shell_type`` may be set
to inform ansible of differences in how the ``UsersLoginShell``
handles things like quoting if a shell has different semantics
than the Bourne shell.
:ANSIBLE_SHELL_EXECUTABLE: This is the shell set via the inventory var
``ansible_shell_executable`` or via
``constants.DEFAULT_EXECUTABLE`` if the inventory var is not set.
We explicitly invoke this shell so that we have predictable
quoting rules at this point. ``ANSIBLE_SHELL_EXECUTABLE`` is only
settable by the user because some sudo setups may only allow
invoking a specific shell. (For instance, ``/bin/bash`` may be
allowed but ``/bin/sh``, our default, may not). We invoke this
twice, once after the ``ConnectionCommand`` and once after the
``BecomeCommand``. After the ConnectionCommand, this is run by
the ``UsersLoginShell``. After the ``BecomeCommand`` we specify
that the ``ANSIBLE_SHELL_EXECUTABLE`` is being invoked directly.
        :BecomeCommand ANSIBLE_SHELL_EXECUTABLE: Is the command that performs
privilege escalation. Setting this up is performed by the action
plugin prior to running ``exec_command``. So we just get passed
:param:`cmd` which has the BecomeCommand already added.
(Examples: sudo, su) If we have a BecomeCommand then we will
invoke a ANSIBLE_SHELL_EXECUTABLE shell inside of it so that we
have a consistent view of quoting.
:Command: Is the command we're actually trying to run remotely.
(Examples: mkdir -p $HOME/.ansible, python $HOME/.ansible/tmp-script-file)
"""
pass
@ensure_connect
@abstractmethod
def put_file(self, in_path, out_path):
"""Transfer a file from local to remote"""
pass
@ensure_connect
@abstractmethod
def fetch_file(self, in_path, out_path):
"""Fetch a file from remote to local"""
pass
@abstractmethod
def close(self):
"""Terminate the connection"""
pass
def check_become_success(self, b_output):
b_success_key = to_bytes(self._play_context.success_key)
for b_line in b_output.splitlines(True):
if b_success_key == b_line.rstrip():
return True
return False
def check_password_prompt(self, b_output):
if self._play_context.prompt is None:
return False
elif isinstance(self._play_context.prompt, string_types):
b_prompt = to_bytes(self._play_context.prompt)
return b_prompt in b_output
else:
return self._play_context.prompt(b_output)
def check_incorrect_password(self, b_output):
b_incorrect_password = to_bytes(gettext.dgettext(self._play_context.become_method, C.BECOME_ERROR_STRINGS[self._play_context.become_method]))
return b_incorrect_password and b_incorrect_password in b_output
def check_missing_password(self, b_output):
b_missing_password = to_bytes(gettext.dgettext(self._play_context.become_method, C.BECOME_MISSING_STRINGS[self._play_context.become_method]))
return b_missing_password and b_missing_password in b_output
def connection_lock(self):
f = self._play_context.connection_lockfd
display.vvvv('CONNECTION: pid %d waiting for lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
fcntl.lockf(f, fcntl.LOCK_EX)
display.vvvv('CONNECTION: pid %d acquired lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
def connection_unlock(self):
f = self._play_context.connection_lockfd
fcntl.lockf(f, fcntl.LOCK_UN)
display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
|
eloquence/unisubs | refs/heads/staging | apps/videos/migrations/0101_fix_subtitle_language_is_complete.py | 5 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
if db.dry_run:
return
count = orm.SubtitleLanguage.objects.count()
i = 0
for sl in orm.SubtitleLanguage.objects.select_related('video'):
if i % 100 == 0:
print "{0}/{1}".format(i, count)
try:
#fix SubtitleLanguage.last_version
#I don't think it is broken, juet to be sure
last_version = sl.subtitleversion_set.order_by('-version_no')[:1].get()
sl.last_version = last_version
sl.save()
except models.ObjectDoesNotExist:
pass
if (not sl.last_version or not sl.last_version.subtitle_set.exists()) and sl.is_complete:
sl.is_complete = False
sl.save()
if not sl.video.subtitlelanguage_set.exclude(is_complete=False).exists() and sl.video.complete_date:
sl.video.complete_date = None
sl.video.save()
i += 1
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'object_name': 'Comment'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'videos.action': {
'Meta': {'object_name': 'Action'},
'action_type': ('django.db.models.fields.IntegerField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'new_video_title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.subtitle': {
'Meta': {'unique_together': "(('version', 'subtitle_id'),)", 'object_name': 'Subtitle'},
'draft': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleDraft']", 'null': 'True'}),
'end_time': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'subtitle_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'subtitle_order': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'subtitle_text': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True'})
},
'videos.subtitledraft': {
'Meta': {'object_name': 'SubtitleDraft'},
'browser_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']"}),
'last_saved_packet': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language', 'standard_language'),)", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'had_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'last_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True', 'blank': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'standard_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.subtitleversion': {
'Meta': {'unique_together': "(('language', 'version_no'),)", 'object_name': 'SubtitleVersion'},
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'}),
'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'videos.usertestresult': {
'Meta': {'object_name': 'UserTestResult'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'get_updates': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task1': ('django.db.models.fields.TextField', [], {}),
'task2': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'task3': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.videofeed': {
'Meta': {'object_name': 'VideoFeed'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'})
},
'videos.videourl': {
'Meta': {'object_name': 'VideoUrl'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'videoid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['videos']
|
mzie/RATRACTION | refs/heads/master | graphical_programming_v5.py | 1 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 3 00:53:41 2016
@author: Matt
"""
from PyQt4 import Qt
from collections import OrderedDict
import sys
class MainWindow(Qt.QMainWindow):
def __init__(self, pins_assigned, arduino_sp, parent=None):
'''
Constructor
'''
Qt.QMainWindow.__init__(self, parent)
self.central_widget = Qt.QStackedWidget()
self.setCentralWidget(self.central_widget)
self.start_screen = Start(pins_assigned)
self.second_screen = Second(arduino_sp)
self.central_widget.addWidget(self.start_screen)
self.central_widget.addWidget(self.second_screen)
self.central_widget.setCurrentWidget(self.start_screen)
self.start_screen.click.connect(lambda: self.central_widget.setCurrentWidget(self.second_screen))
self.second_screen.click.connect(lambda: self.central_widget.setCurrentWidget(self.start_screen))
global ainputs, dinputs, outputs, other
ainputs = []
dinputs = []
outputs = []
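        # Sort the assigned pins into analog inputs, digital inputs and outputs
        # so the block widgets can offer only valid pin choices.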
for key in pins_assigned.keys():
if pins_assigned[key][1] == "INPUT":
if pins_assigned[key][2] == "ANALOG":
ainputs.append(pins_assigned[key][0])
elif pins_assigned[key][2] == "DIGITAL":
dinputs.append(pins_assigned[key][0])
elif pins_assigned[key][1] == "OUTPUT":
outputs.append(pins_assigned[key][0])
other = ["vidTrack_time", "ard_loop_time", "pos", "self.actButton1_on", "self.actButton2_on", "self.actButton3_on",
"self.actButton4_on","self.actButton5_on", "self.actButton6_on", "self.actButton7_on", "self.actButton8_on"]
class Start(Qt.QWidget):
click = Qt.pyqtSignal()
def __init__(self, pins_assigned, parent=None):
super(Start, self).__init__(parent)
self.pins = pins_assigned
self.tabs2 = Qt.QTabWidget()
self.tab_2_1 = Qt.QWidget()
self.tab_2_2 = Qt.QWidget()
self.tab_2_3 = Qt.QWidget()
self.tab_2_4 = Qt.QWidget()
self.tabs2.addTab(self.tab_2_4, "Description")
self.tabs2.addTab(self.tab_2_1, "Naming")
self.tabs2.addTab(self.tab_2_2, "Setup")
self.tabs2.addTab(self.tab_2_3, "Looping")
self.tab_2_1UI()
self.tab_2_2UI()
self.tab_2_3UI()
self.tab_2_4UI()
self.tabs2.setCurrentIndex(3)
self.run = Qt.QPushButton('Run')
self.run.clicked.connect(self.runcode)
self.tp = Qt.QPushButton("To Text Programming")
self.tp.clicked.connect(self._build)
self.tp.clicked.connect(self.click.emit)
self.upBtn = Qt.QPushButton()
self.upBtn.setIcon(Qt.QIcon("up_icon.png"))
self.upBtn.clicked.connect(self.moveCurrentRowUp)
self.dwnBtn = Qt.QPushButton()
self.dwnBtn.setIcon(Qt.QIcon("down_icon.png"))
self.dwnBtn.clicked.connect(self.moveCurrentRowDown)
self.delBtn = Qt.QPushButton()
self.delBtn.setIcon(Qt.QIcon("close_x.png"))
self.delBtn.clicked.connect(self.delCurrentRow)
self.tabs = Qt.QTabWidget()
self.tab1 = Qt.QWidget()
self.tab2 = Qt.QWidget()
self.tab3 = Qt.QWidget()
self.tab4 = Qt.QWidget()
self.tabs.addTab(self.tab1, "Tasks")
self.tabs.addTab(self.tab2, "Decisions")
self.tabs.addTab(self.tab3, "Repetition")
self.tabs.addTab(self.tab4, "Subprograms")
self.tab1UI()
self.tab2UI()
self.tab3UI()
self.tab4UI()
# policy = self.tabs.sizePolicy()
# policy.setHorizontalStretch()
# self.tabs.setSizePolicy(policy)
self.tabs.setFixedWidth(300)
self.layout1 = Qt.QHBoxLayout()
self.layout1.addWidget(self.tp)
self.layout1.addWidget(self.run)
self.layout1.addStretch()
self.layout2 = Qt.QHBoxLayout()
self.layout2.addWidget(self.upBtn)
self.layout2.addWidget(self.dwnBtn)
self.layout2.addWidget(self.delBtn)
self.layout2.addStretch()
self.layout3 = Qt.QVBoxLayout()
self.layout3.addLayout(self.layout1)
self.layout3.addLayout(self.layout2)
self.layout3.addWidget(self.tabs2)
self.layout4 = Qt.QHBoxLayout()
self.layout4.addWidget(self.tabs)
self.layout4.addLayout(self.layout3)
self.setLayout(self.layout4)
self.connect(self.tabs2, Qt.SIGNAL('currentChanged(int)'), self.selector)
self.setMinimumSize(1200,600)
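    # Only the "Looping" tab (index 3) exposes the block palette and the
    # row-editing buttons; the other tabs are read-only views.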
def selector(self, selected_index):
if selected_index == 0:
self.tabs.setTabEnabled(0, False)
self.tabs.setTabEnabled(1, False)
self.tabs.setTabEnabled(2, False)
self.tabs.setTabEnabled(3, False)
self.upBtn.setEnabled(False)
self.dwnBtn.setEnabled(False)
self.delBtn.setEnabled(False)
elif selected_index == 1:
self.tabs.setTabEnabled(0, False)
self.tabs.setTabEnabled(1, False)
self.tabs.setTabEnabled(2, False)
self.tabs.setTabEnabled(3, False)
self.upBtn.setEnabled(False)
self.dwnBtn.setEnabled(False)
self.delBtn.setEnabled(False)
elif selected_index == 2:
self.tabs.setTabEnabled(0, False)
self.tabs.setTabEnabled(1, False)
self.tabs.setTabEnabled(2, False)
self.tabs.setTabEnabled(3, False)
self.upBtn.setEnabled(False)
self.dwnBtn.setEnabled(False)
self.delBtn.setEnabled(False)
elif selected_index == 3:
self.tabs.setTabEnabled(0, True)
self.tabs.setTabEnabled(1, True)
self.tabs.setTabEnabled(2, True)
self.tabs.setTabEnabled(3, True)
self.upBtn.setEnabled(True)
self.dwnBtn.setEnabled(True)
self.delBtn.setEnabled(True)
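    # Gather the naming rows, setup rows, looping blocks and the description
    # text into the global instructions_list shared with the text screen.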
def _build(self):
global instructions_list
instructions_list = {}
naming_list = []
setup_list = []
loop_list = []
naming_items = (self.table1.cellWidget(i, 0) for i in list(range(self.table1.rowCount())))
for item in naming_items:
naming_list.append(item.text())
setup_items = (self.table2.cellWidget(i, 0) for i in list(range(self.table2.rowCount())))
for item in setup_items:
setup_list.append(item.text())
list_items = (self.table.cellWidget(i, 0) for i in list(range(self.table.rowCount())))
for item in list_items:
loop_list.append(item.get_instructions())
description = self.txtEdt1.toPlainText()
instructions_list = {'description':description,'naming_list':naming_list, 'setup_list':setup_list, 'loop_list':loop_list}
def runcode(self):
self._build()
def moveCurrentRowUp(self):
row = self.table.currentRow()
if row > 0:
self.table.insertRow(row-1)
self.table.setCellWidget(row-1,0,self.table.cellWidget(row+1,0))
self.table.setCurrentCell(row-1,0)
self.table.removeRow(row+1)
def moveCurrentRowDown(self):
row = self.table.currentRow()
if row < self.table.rowCount()-1:
self.table.insertRow(row+2)
self.table.setCellWidget(row+2,0,self.table.cellWidget(row,0))
self.table.setCurrentCell(row+2,0)
self.table.removeRow(row)
def delCurrentRow(self):
row = self.table.currentRow()
self.table.removeRow(row)
self.table_row_count -= 1
def update_table_rows(self):
self.table_row_count += 1
self.table.setRowCount(self.table_row_count)
def tab1UI(self):
self.addButton1 = Qt.QPushButton("Analog Read")
self.addButton1.clicked.connect(self.addWidget1)
self.addButton2 = Qt.QPushButton("Digital Read")
self.addButton2.clicked.connect(self.addWidget2)
self.addButton3 = Qt.QPushButton("Digital Write")
self.addButton3.clicked.connect(self.addWidget3)
self.addButton4 = Qt.QPushButton("Sleep")
self.addButton4.clicked.connect(self.addWidget4)
layout = Qt.QVBoxLayout()
layout.addWidget(self.addButton1)
layout.addWidget(self.addButton2)
layout.addWidget(self.addButton3)
layout.addWidget(self.addButton4)
layout.addStretch(True)
self.tab1.setLayout(layout)
def tab2UI(self):
self.addButton5 = Qt.QPushButton("do Y if X something W else do Z")
self.addButton5.clicked.connect(self.addWidget5)
layout = Qt.QVBoxLayout()
layout.addWidget(self.addButton5)
layout.addStretch(True)
self.tab2.setLayout(layout)
def tab3UI(self):
self.addButton6 = Qt.QPushButton("for i in X do Y")
self.addButton6.clicked.connect(self.addWidget6)
self.addButton7 = Qt.QPushButton("while X do Y")
self.addButton7.clicked.connect(self.addWidget7)
layout = Qt.QVBoxLayout()
layout.addWidget(self.addButton6)
layout.addWidget(self.addButton7)
layout.addStretch(True)
self.tab3.setLayout(layout)
def tab4UI(self):
pass
def addWidget1(self):
self.update_table_rows()
self.table.setCellWidget(self.table_row_count-1, 0, TestButton1())
def addWidget2(self):
self.update_table_rows()
self.table.setCellWidget(self.table_row_count-1, 0, TestButton2())
def addWidget3(self):
self.update_table_rows()
self.table.setCellWidget(self.table_row_count-1, 0, TestButton3())
def addWidget4(self):
self.update_table_rows()
self.table.setCellWidget(self.table_row_count-1, 0, TestButton4())
def addWidget5(self):
self.update_table_rows()
self.table.setCellWidget(self.table_row_count-1, 0, TestButton5())
def addWidget6(self):
self.update_table_rows()
self.table.setCellWidget(self.table_row_count-1, 0, TestButton6())
def addWidget7(self):
self.update_table_rows()
self.table.setCellWidget(self.table_row_count-1, 0, TestButton7())
def tab_2_1UI(self):
self.table1 = Qt.QTableWidget(self)
self.table1_row_count = 0
self.table1.setRowCount(self.table1_row_count)
self.table1.setColumnCount(1)
#self.table1.setShowGrid(False)
self.table1.horizontalHeader().setResizeMode(Qt.QHeaderView.Stretch)
self.table1.horizontalHeader().setVisible(False)
self.table1.verticalHeader().setDefaultSectionSize(40)
self.table1.setSelectionBehavior(Qt.QTableWidget.SelectRows)
self.table1.setSelectionMode(Qt.QAbstractItemView.SingleSelection)
self.table1_row_count = len(self.pins)
self.table1.setRowCount(self.table1_row_count)
for i, key in enumerate(self.pins.keys()):
temp_label = Qt.QLabel("%s = %s" %(self.pins[key][0], key))
temp_label.setIndent(10)
self.table1.setCellWidget(i, 0, temp_label)
layout = Qt.QVBoxLayout()
layout.addWidget(self.table1)
self.tab_2_1.setLayout(layout)
def tab_2_2UI(self):
self.table2 = Qt.QTableWidget(self)
self.table2_row_count = 0
self.table2.setRowCount(self.table2_row_count)
self.table2.setColumnCount(1)
#self.table2.setShowGrid(False)
self.table2.horizontalHeader().setResizeMode(Qt.QHeaderView.Stretch)
self.table2.horizontalHeader().setVisible(False)
self.table2.verticalHeader().setDefaultSectionSize(40)
self.table2.setSelectionBehavior(Qt.QTableWidget.SelectRows)
self.table2.setSelectionMode(Qt.QAbstractItemView.SingleSelection)
self.table2_row_count = len(self.pins)
self.table2.setRowCount(self.table2_row_count)
for i, key in enumerate(self.pins.keys()):
if self.pins[key][1] == 'INPUT':
temp_label = Qt.QLabel('a.pinMode(%s, a.INPUT)' % (self.pins[key][0]))
elif self.pins[key][1] == 'OUTPUT':
temp_label = Qt.QLabel('a.pinMode(%s, a.OUTPUT)' % (self.pins[key][0]))
temp_label.setIndent(10)
self.table2.setCellWidget(i, 0, temp_label)
layout = Qt.QVBoxLayout()
layout.addWidget(self.table2)
self.tab_2_2.setLayout(layout)
def tab_2_3UI(self):
self.table = Qt.QTableWidget(self)
self.table_row_count = 0
self.table.setRowCount(self.table_row_count)
self.table.setColumnCount(1)
#self.table.setShowGrid(False)
self.table.horizontalHeader().setResizeMode(Qt.QHeaderView.Stretch)
self.table.horizontalHeader().setVisible(False)
self.table.verticalHeader().setDefaultSectionSize(40)
self.table.setSelectionBehavior(Qt.QTableWidget.SelectRows)
self.table.setSelectionMode(Qt.QAbstractItemView.SingleSelection)
layout = Qt.QVBoxLayout()
layout.addWidget(self.table)
self.tab_2_3.setLayout(layout)
def tab_2_4UI(self):
self.txtEdt1 = Qt.QTextEdit()
self.txtEdt1.setText("Replace this text with a description of what the Arduino method should do")
layout = Qt.QVBoxLayout()
layout.addWidget(self.txtEdt1)
self.tab_2_4.setLayout(layout)
class Second(Qt.QWidget):
click = Qt.pyqtSignal()
def __init__(self, arduino_sp, parent=None):
super(Second, self).__init__(parent)
self.ard_setup_parameters = arduino_sp
self.gp = Qt.QPushButton("To Graphical Programming")
self.gp.clicked.connect(self.click.emit)
self.run = Qt.QPushButton('Run')
self.run.clicked.connect(self.runcode)
self.build = Qt.QPushButton('Build')
self.build.clicked.connect(self._build)
self.tabs = Qt.QTabWidget()
self.tab1 = Qt.QWidget()
self.tab2 = Qt.QWidget()
self.tab3 = Qt.QWidget()
self.tab4 = Qt.QWidget()
self.tabs.addTab(self.tab4, "Description")
self.tabs.addTab(self.tab1, "Naming")
self.tabs.addTab(self.tab2, "Setup")
self.tabs.addTab(self.tab3, "Looping")
self.tab1UI()
self.tab2UI()
self.tab3UI()
self.tab4UI()
self.tabs.setCurrentIndex(3)
layout1 = Qt.QHBoxLayout()
layout1.addWidget(self.gp)
layout1.addWidget(self.build)
layout1.addWidget(self.run)
layout1.addStretch(True)
        layout2 = Qt.QVBoxLayout()
        layout2.addLayout(layout1)
        layout2.addWidget(self.tabs)
self.setLayout(layout2)
self.setMinimumSize(1200,600)
def tab1UI(self):
self.txtEdt = Qt.QTextEdit()
try:
for name in self.ard_setup_parameters['naming_list']:
self.txtEdt.append(name)
except:
pass
layout = Qt.QVBoxLayout()
layout.addWidget(self.txtEdt)
self.tab1.setLayout(layout)
def tab2UI(self):
self.txtEdt1 = Qt.QTextEdit()
try:
for line in self.ard_setup_parameters['setup_list']:
self.txtEdt1.append(line)
except:
pass
layout = Qt.QVBoxLayout()
layout.addWidget(self.txtEdt1)
self.tab2.setLayout(layout)
def tab3UI(self):
self.txtEdt2 = Qt.QTextEdit()
try:
for line in self.ard_setup_parameters['loop_list']:
self.txtEdt2.append(line)
except:
pass
layout = Qt.QVBoxLayout()
layout.addWidget(self.txtEdt2)
self.tab3.setLayout(layout)
def tab4UI(self):
self.txtEdt3 = Qt.QTextEdit()
try:
self.txtEdt3.setText(self.ard_setup_parameters['description'])
except:
pass
layout = Qt.QVBoxLayout()
layout.addWidget(self.txtEdt3)
self.tab4.setLayout(layout)
def _build(self):
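        # Refresh the four text views from the instructions_list built on the
        # graphical-programming screen.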
self.txtEdt.clear()
self.txtEdt1.clear()
self.txtEdt2.clear()
self.txtEdt3.clear()
global instructions_list
for name in instructions_list['naming_list']:
self.txtEdt.append(name)
for line in instructions_list['setup_list']:
self.txtEdt1.append(line)
for line in instructions_list['loop_list']:
self.txtEdt2.append(line)
self.txtEdt3.setText(instructions_list['description'])
def runcode(self):
global instructions_list
instructions_list = {}
naming_list = []
setup_list = []
loop_list = []
for name in self.txtEdt.toPlainText().split('\n'):
naming_list.append(name)
for line in self.txtEdt1.toPlainText().split('\n'):
setup_list.append(line)
for line in self.txtEdt2.toPlainText().split('\n'):
loop_list.append(line)
description = self.txtEdt3.toPlainText()
instructions_list = {'description':description, 'naming_list':naming_list, 'setup_list':setup_list, 'loop_list':loop_list}
class TestButton1(Qt.QWidget):
'''
analogRead(analog_input_pin)
'''
def __init__(self, parent=None):
super(TestButton1, self).__init__(parent)
self.label = Qt.QLabel("Analog Read")
self.cmBox = Qt.QComboBox()
self.cmBox.addItems(ainputs)
layout = Qt.QHBoxLayout()
layout.addWidget(self.label)
layout.addWidget(self.cmBox)
layout.addStretch(True)
self.setLayout(layout)
def get_instructions(self):
instructions = 'ard_results["%s"].append(a.analogRead(%s))' %(self.cmBox.currentText(), self.cmBox.currentText())
return instructions
class TestButton2(Qt.QWidget):
'''
digitalRead(digital_input_pin)
'''
def __init__(self, parent=None):
super(TestButton2, self).__init__(parent)
self.label = Qt.QLabel("Digital Read")
self.cmBox = Qt.QComboBox()
self.cmBox.addItems(dinputs)
layout = Qt.QHBoxLayout()
layout.addWidget(self.label)
layout.addWidget(self.cmBox)
layout.addStretch(True)
self.setLayout(layout)
def get_instructions(self):
instructions = 'ard_results["%s"].append(a.digitalRead(%s))' %(self.cmBox.currentText(), self.cmBox.currentText())
return instructions
class TestButton3(Qt.QWidget):
'''
digitalWrite(output_pin, HIGH/LOW)
'''
def __init__(self, parent=None):
super(TestButton3, self).__init__(parent)
self.label = Qt.QLabel("Digital Write")
self.cmBox1 = Qt.QComboBox()
self.cmBox1.addItems(outputs)
self.cmBox2 = Qt.QComboBox()
self.cmBox2.addItems(["HIGH", "LOW"])
layout = Qt.QHBoxLayout()
layout.addWidget(self.label)
layout.addWidget(self.cmBox1)
layout.addWidget(self.cmBox2)
layout.addStretch(True)
self.setLayout(layout)
def get_instructions(self):
instructions = 'a.digitalWrite(%s,a.%s)\nard_results["%s"].append("%s")' %(self.cmBox1.currentText(),self.cmBox2.currentText(),self.cmBox1.currentText(),self.cmBox2.currentText())
return instructions
class TestButton4(Qt.QWidget):
'''
sleep(#_of_seconds)
'''
def __init__(self, parent=None):
super(TestButton4, self).__init__(parent)
self.label = Qt.QLabel("Sleep")
self.spnBox = Qt.QSpinBox()
layout = Qt.QHBoxLayout()
layout.addWidget(self.label)
layout.addWidget(self.spnBox)
layout.addStretch(True)
self.setLayout(layout)
def get_instructions(self):
instructions = 'sleep(%s)' %(str(self.spnBox.value()))
return instructions
class TestButton5(Qt.QWidget):
'''
do Y if X something W else do Z
'''
def __init__(self, parent=None):
super(TestButton5, self).__init__(parent)
self.label1 = Qt.QLabel("do")
self.cmBox1 = Qt.QComboBox()
self.cmBox1.addItems([("a.analogRead(%s)" %(item)) for item in ainputs])
self.cmBox1.addItems([("a.digitalRead(%s)" %(item)) for item in dinputs])
self.cmBox1.addItems([("a.digitalWrite(%s, a.HIGH)" %(item)) for item in outputs])
self.cmBox1.addItems([("a.digitalWrite(%s, a.LOW)" %(item)) for item in outputs])
self.label2 = Qt.QLabel("if")
self.cmBox2 = Qt.QComboBox()
self.cmBox2.addItems([("%s" %(item)) for item in outputs])
self.cmBox2.addItems([("%s" %(item)) for item in ainputs])
self.cmBox2.addItems([("%s" %(item)) for item in dinputs])
self.cmBox2.addItems(other)
self.cmBox3 = Qt.QComboBox()
self.cmBox3.addItems([">", "<", "==", "!=", "in", "not in"])
self.cmBox3.setCurrentIndex(3)
self.lneEdt2 = Qt.QLineEdit()
self.cmBox4 = Qt.QComboBox()
self.cmBox4.setLineEdit(self.lneEdt2)
self.cmBox4.addItems(["True", "False", "1", "0", "HIGH", "LOW", "None", "(TL_x,TL_y,BR_x,BR_y)"])
self.label3 = Qt.QLabel("else do")
self.cmBox5 = Qt.QComboBox()
self.cmBox5.addItem("None")
self.cmBox5.addItems([("a.analogRead(%s)" %(item)) for item in ainputs])
self.cmBox5.addItems([("a.digitalRead(%s)" %(item)) for item in dinputs])
self.cmBox5.addItems([("a.digitalWrite(%s, a.HIGH)" %(item)) for item in outputs])
self.cmBox5.addItems([("a.digitalWrite(%s, a.LOW)" %(item)) for item in outputs])
layout = Qt.QHBoxLayout()
layout.addWidget(self.label1)
layout.addWidget(self.cmBox1)
layout.addWidget(self.label2)
layout.addWidget(self.cmBox2)
layout.addWidget(self.cmBox3)
layout.addWidget(self.cmBox4)
layout.addWidget(self.label3)
layout.addWidget(self.cmBox5)
layout.addStretch(True)
self.setLayout(layout)
def get_instructions(self):
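        # Special cases: "pos" with "in"/"not in" expands to a bounding-box test
        # on the tracked point, the timing variables compare against integer
        # thresholds, and output pins compare against the last value recorded
        # in ard_results.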
if (self.cmBox2.currentText() == "pos") and (self.cmBox3.currentText() == "in"):
instructions = '%s if ((vid_tracking_methods.mod_pt[0] >= eval(str(%s))[0]) and (vid_tracking_methods.mod_pt[1] >= eval(str(%s))[1]) and (vid_tracking_methods.mod_pt[0] <= eval(str(%s))[2]) and (vid_tracking_methods.mod_pt[1] <= eval(str(%s))[3])) else %s' %(self.cmBox1.currentText(), self.lneEdt2.text(), self.lneEdt2.text(), self.lneEdt2.text(), self.lneEdt2.text(), self.cmBox5.currentText())
elif (self.cmBox2.currentText() == "pos") and (self.cmBox3.currentText() == "not in"):
instructions = '%s if (((vid_tracking_methods.mod_pt[0] <= eval(str(%s))[0]) or (vid_tracking_methods.mod_pt[0] >= eval(str(%s))[2])) or ((vid_tracking_methods.mod_pt[1] <= eval(str(%s))[1]) or (vid_tracking_methods.mod_pt[1] >= eval(str(%s))[3]))) else %s' %(self.cmBox1.currentText(), self.lneEdt2.text(), self.lneEdt2.text(), self.lneEdt2.text(), self.lneEdt2.text(), self.cmBox5.currentText())
elif self.cmBox2.currentText() == "vidTrack_time":
instructions = '%s if vid_tracking_methods.run_tme_ %s int(%s) else %s' %(self.cmBox1.currentText(), self.cmBox3.currentText(), self.lneEdt2.text(), self.cmBox5.currentText())
elif self.cmBox2.currentText() == "ard_loop_time":
instructions = '%s if current_loop_time %s int(%s) else %s' %(self.cmBox1.currentText(), self.cmBox3.currentText(), self.lneEdt2.text(), self.cmBox5.currentText())
elif self.cmBox2.currentText() in outputs:
instructions = '%s if ard_results["%s"][-1] %s "%s" else %s' %(self.cmBox1.currentText(), self.cmBox2.currentText(), self.cmBox3.currentText(), self.lneEdt2.text(), self.cmBox5.currentText())
else:
instructions = '%s if %s %s %s else %s' %(self.cmBox1.currentText(), self.cmBox2.currentText(), self.cmBox3.currentText(), self.lneEdt2.text(), self.cmBox5.currentText())
return instructions
class TestButton6(Qt.QWidget):
'''
for i in X do Y
'''
def __init__(self, parent = None):
super(TestButton6, self).__init__(parent)
self.label1 = Qt.QLabel("for")
self.lneEdt1 = Qt.QLineEdit()
self.cmBox1 = Qt.QComboBox()
self.cmBox1.setLineEdit(self.lneEdt1)
self.label2 = Qt.QLabel("in")
self.lneEdt2 = Qt.QLineEdit()
self.cmBox2 = Qt.QComboBox()
self.cmBox2.setLineEdit(self.lneEdt2)
self.label3 = Qt.QLabel("do")
self.lneEdt3 = Qt.QLineEdit()
self.cmBox3 = Qt.QComboBox()
self.cmBox3.setLineEdit(self.lneEdt3)
layout = Qt.QHBoxLayout()
layout.addWidget(self.label1)
layout.addWidget(self.cmBox1)
layout.addWidget(self.label2)
layout.addWidget(self.cmBox2)
layout.addWidget(self.label3)
layout.addWidget(self.cmBox3)
layout.addStretch(True)
self.setLayout(layout)
def get_instructions(self):
instructions = 'for %s in %s: %s' %(self.lneEdt1.text(),self.lneEdt2.text(),self.lneEdt3.text())
return instructions
class TestButton7(Qt.QWidget):
'''
while X do Y
'''
def __init__(self, parent = None):
super(TestButton7, self).__init__(parent)
self.label1 = Qt.QLabel("while")
self.lneEdt1 = Qt.QLineEdit()
self.cmBox1 = Qt.QComboBox()
self.cmBox1.addItems([("a.digitalWrite(%s, a.HIGH)" %(item)) for item in outputs])
self.cmBox1.addItems([("a.digitalWrite(%s, a.LOW)" %(item)) for item in outputs])
self.cmBox1.addItems(other)
self.cmBox1.setLineEdit(self.lneEdt1)
self.label2 = Qt.QLabel("do")
self.lneEdt2 = Qt.QLineEdit()
self.cmBox2 = Qt.QComboBox()
self.cmBox2.addItems([("a.analogRead(%s)" %(item)) for item in ainputs])
self.cmBox2.addItems([("a.digitalRead(%s)" %(item)) for item in dinputs])
self.cmBox2.addItems([("a.digitalWrite(%s, a.HIGH)" %(item)) for item in outputs])
self.cmBox2.addItems([("a.digitalWrite(%s, a.LOW)" %(item)) for item in outputs])
self.cmBox2.setLineEdit(self.lneEdt2)
layout = Qt.QHBoxLayout()
layout.addWidget(self.label1)
layout.addWidget(self.cmBox1)
layout.addWidget(self.label2)
layout.addWidget(self.cmBox2)
layout.addStretch(True)
self.setLayout(layout)
def get_instructions(self):
instructions = 'while %s: %s' %(self.lneEdt1.text(), self.lneEdt2.text())
return instructions
# main ==============================================
def main():
    app = Qt.QApplication(sys.argv)
    # MainWindow requires the pin assignments and Arduino setup parameters;
    # empty placeholders (an assumption) let the module be launched standalone.
    myWindow = MainWindow(OrderedDict(), {})
    myWindow.show()
    sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
mlperf/inference_results_v0.5 | refs/heads/master | closed/Habana/code/resnet/Habana_benchmark/preprocess/ssd_mx_symbol/common.py | 4 | '''
Parts were adapted from https://github.com/zhreshold/mxnet-ssd/blob/master/symbol/common.py
This is a mathematically equivalent mxnet implementation - weights are imported from the MLPerf resnet34-ssd1200.onnx model
'''
import mxnet as mx
import numpy as np
import math
import sys
import importlib
from mxnet.symbol import FullyConnected
from mxnet.symbol import Pooling
from mxnet.symbol import Convolution
from mxnet.symbol import Activation
from mxnet.symbol import broadcast_mul
from mxnet.symbol import L2Normalization
from mxnet.symbol import concat as Concat
from mxnet.symbol import softmax
from mxnet.symbol import Flatten
name_generator = mx.name.NameManager()
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu",no_bias=False):
"""
wrapper for a small Convolution group
Parameters:
----------
from_layer : mx.symbol
continue on which layer
name : str
base name of the new layers
num_filter : int
how many filters to use in Convolution layer
kernel : tuple (int, int)
kernel size (h, w)
pad : tuple (int, int)
padding size (h, w)
stride : tuple (int, int)
stride size (h, w)
act_type : str
activation type, can be relu...
    no_bias : bool
        whether to omit the convolution bias term
Returns:
----------
(conv, relu) mx.Symbols
"""
bias = mx.symbol.Variable(name="{}_bias".format(name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
conv = Convolution(data=from_layer, kernel=kernel, pad=pad, \
stride=stride, num_filter=num_filter, name=name, bias=None if no_bias else bias,no_bias=no_bias)
act = mx.symbol.Activation(data=conv, act_type=act_type, name="{}_{}".format(name, act_type))
return act
def conv_act_layer_old(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False,tf_pad=False,in_shape=None,no_bias=False):
"""
wrapper for a small Convolution group
Parameters:
----------
from_layer : mx.symbol
continue on which layer
name : str
base name of the new layers
num_filter : int
how many filters to use in Convolution layer
kernel : tuple (int, int)
kernel size (h, w)
pad : tuple (int, int)
padding size (h, w)
stride : tuple (int, int)
stride size (h, w)
act_type : str
activation type, can be relu...
use_batchnorm : bool
whether to use batch normalization
Returns:
----------
(conv, relu) mx.Symbols
"""
bias = mx.symbol.Variable(name="{}_conv_bias".format(name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
if tf_pad:
assert in_shape, 'must provide input shape to simulate tensorflow SAME padding'
from_layer,out_shape,pad = same_pad(from_layer,in_shape,kernel,stride)
conv = Convolution(data=from_layer, kernel=kernel, pad=pad, \
stride=stride, num_filter=num_filter, name="{}_conv".format(name), bias=None if no_bias else bias,no_bias=no_bias)
if use_batchnorm:
conv = mx.symbol.BatchNorm(data=conv,fix_gamma=False, name="{}_bn".format(name))
if act_type == 'relu6':
act = mx.symbol.clip(conv,0,6,"{}_relu6".format(name))
else:
act = mx.symbol.Activation(data=conv, act_type=act_type, \
name="{}_{}".format(name, act_type))
if in_shape:
return act,out_shape
else:
return act
def multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=128,tf_pad=False,in_shapes=None,
act_type='relu',absorb_bn=False,use_batchnorm=False,multi_feat_no_bias=False,reshape_like_tf=False,**kwargs):
"""Wrapper function to extract features from base network, attaching extra
layers and SSD specific layers
Parameters
----------
from_layers : list of str
feature extraction layers, use '' for add extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
        however, if normalization and scale are applied, the number of filters for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
min_filter : int
minimum number of filters used in 1x1 convolution
Returns
-------
list of mx.Symbols
"""
# arguments check
assert len(from_layers) > 0
assert isinstance(from_layers[0], str) and len(from_layers[0].strip()) > 0
assert len(from_layers) == len(num_filters) == len(strides) == len(pads)
internals = body.get_internals()
layers = []
no_bias = False if absorb_bn else multi_feat_no_bias
use_batchnorm = not absorb_bn and use_batchnorm
for k, params in enumerate(zip(from_layers, num_filters, strides, pads)):
from_layer, num_filter, s, p = params
if from_layer.strip():
# extract from base network
layer = internals[from_layer.strip() + '_output']
layers.append(layer)
else:
# attach from last feature layer
assert len(layers) > 0
assert num_filter > 0
layer = layers[-1]
num_1x1 = max(min_filter, num_filter // 2)
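            # Layer names follow the MLPerf resnet34-ssd1200.onnx naming
            # ('backbone.additional_blocks.*') so the pretrained weights can be
            # matched by parameter name.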
name='backbone.additional_blocks.%d.%d'%(k-1,0)
conv_1x1 = conv_act_layer(layer, name,
num_1x1, kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type=act_type,
no_bias=no_bias)
name='backbone.additional_blocks.%d.%d'%(k-1,2)
conv_3x3 = conv_act_layer(conv_1x1, name,
num_filter, kernel=(3, 3), pad=(p, p), stride=(s, s), act_type=act_type,
no_bias=no_bias)
layers.append(conv_3x3)
return layers
def multibox_layer(from_layers, num_classes, sizes=[.2, .95],
ratios=[1], normalization=-1, num_channels=[],
clip=False, interm_layer=0, steps=[],
transpose_cat=True, ext_anchors=None, anchors_per_scale=None,prob_type='softmax',
detector_kernel=(3,3),detector_padding=(1,1),detector_stride=(1,1),
no_bias=False,**kwargs):
"""
the basic aggregation module for SSD detection. Takes in multiple layers,
    generates multiple object detection targets by customized layers
Parameters:
----------
from_layers : list of mx.symbol
generate multibox detection from layers
num_classes : int
number of classes excluding background, will automatically handle
background in this function
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
    normalization : int or list of int
use normalizations value for all layers or [...] for specific layers,
-1 indicate no normalizations and scales
num_channels : list of int
number of input layer channels, used when normalization is enabled, the
length of list should equals to number of normalization layers
clip : bool
whether to clip out-of-image boxes
interm_layer : int
        if > 0, will add an intermediate Convolution layer
steps : list
specify steps for each MultiBoxPrior layer, leave empty, it will calculate
according to layer dimensions
Returns:
----------
list of outputs, as [loc_preds, cls_preds, anchor_boxes]
loc_preds : localization regression prediction
cls_preds : classification prediction
anchor_boxes : generated anchor boxes
"""
assert len(from_layers) > 0, "from_layers must not be empty list"
assert num_classes > 0, \
"num_classes {} must be larger than 0".format(num_classes)
assert len(ratios) > 0, "aspect ratios must not be empty list"
if not isinstance(ratios[0], list):
# provided only one ratio list, broadcast to all from_layers
ratios = [ratios] * len(from_layers)
assert len(ratios) == len(from_layers), \
"ratios and from_layers must have same length"
assert len(sizes) > 0, "sizes must not be empty list"
if len(sizes) == 2 and not isinstance(sizes[0], list):
# provided size range, we need to compute the sizes for each layer
assert sizes[0] > 0 and sizes[0] < 1
assert sizes[1] > 0 and sizes[1] < 1 and sizes[1] > sizes[0]
        # start_offset is assumed to be 0.1, the value used by the original
        # mxnet-ssd implementation this file was adapted from.
        start_offset = 0.1
        tmp = np.linspace(sizes[0], sizes[1], num=(len(from_layers)-1))
        min_sizes = [start_offset] + tmp.tolist()
        max_sizes = tmp.tolist() + [tmp[-1]+start_offset]
        sizes = list(zip(min_sizes, max_sizes))
assert len(sizes) == len(from_layers), \
"sizes and from_layers must have same length"
if not isinstance(normalization, list):
normalization = [normalization] * len(from_layers)
assert len(normalization) == len(from_layers)
assert sum(x > 0 for x in normalization) <= len(num_channels), \
"must provide number of channels for each normalized layer"
if steps:
assert len(steps) == len(from_layers), "provide steps for all layers or leave empty"
loc_pred_layers = []
cls_pred_layers = []
anchor_layers = []
num_classes += 1 # always use background as label 0
for k, from_layer in enumerate(from_layers):
from_name = from_layer.name
# normalize
if normalization[k] > 0:
from_layer = L2Normalization(data=from_layer, \
mode="channel", name="{}_norm".format(from_name))
scale = mx.symbol.Variable(name="{}_scale".format(from_name),
shape=(1, num_channels.pop(0), 1, 1),
init=mx.init.Constant(normalization[k]),
attr={'__wd_mult__': '0.1'})
from_layer = mx.symbol.broadcast_mul(lhs=scale, rhs=from_layer)
if interm_layer > 0:
from_layer = Convolution(data=from_layer, kernel=(3,3), \
stride=(1,1), pad=(1,1), num_filter=interm_layer, \
name="{}_inter_conv".format(from_name))
from_layer = Activation(data=from_layer, act_type="relu", \
name="{}_inter_relu".format(from_name))
# estimate number of anchors per location
# here I follow the original version in caffe
# TODO: better way to shape the anchors??
size = sizes[k]
assert len(size) > 0, "must provide at least one size"
size_str = "(" + ",".join([str(x) for x in size]) + ")"
ratio = ratios[k]
assert len(ratio) > 0, "must provide at least one ratio"
ratio_str = "(" + ",".join([str(x) for x in ratio]) + ")"
if not anchors_per_scale:
num_anchors = len(size) -1 + len(ratio)
else:
num_anchors = anchors_per_scale[k]
# create location prediction layer
num_loc_pred = num_anchors * 4
name = 'backbone.loc.%d'%(k)
bias = mx.symbol.Variable(name=name+'_bias',
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
loc_pred = Convolution(data=from_layer, bias=None if no_bias else bias,no_bias=no_bias, kernel=detector_kernel,\
stride=detector_stride, pad=detector_padding, num_filter=num_loc_pred, \
name=name)
if transpose_cat:
loc_pred = mx.symbol.transpose(loc_pred, axes=(0,2,3,1))
loc_pred = Flatten(data=loc_pred,name='flatten_loc_preds_{}'.format(k))
else:
loc_pred = loc_pred.reshape((0, 4, -1),name='reshape_{}'.format(loc_pred.name))
loc_pred_layers.append(loc_pred)
# create class prediction layer
num_cls_pred = num_anchors * num_classes
name='backbone.conf.%d'%(k)
bias = mx.symbol.Variable(name=name+'_bias',
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
cls_pred = Convolution(data=from_layer, bias=None if no_bias else bias,no_bias=no_bias, kernel=detector_kernel, \
stride=detector_stride, pad=detector_padding, num_filter=num_cls_pred, \
name=name)
if transpose_cat:
# usual mxnet-ssd case, channels are in the fast changing dim
cls_pred = mx.symbol.transpose(cls_pred, axes=(0, 2, 3, 1))
cls_pred = mx.symbol.Reshape(data=cls_pred, shape=(0, -1, num_classes),
name='reshape_{}'.format(cls_pred.name))
else:
# mlperf onnx model replaces the nhwc transpose with simple reshape,
# class predictions should be [B,#class,#anchors], but apx softmax expect
# the classes in the last dimension, thus we always transpose for softmax on last dim then transpose again
cls_pred = mx.symbol.Reshape(data=cls_pred, shape=(0, num_classes, -1),
name='reshape_{}'.format(cls_pred.name))
assert prob_type in ['softmax','sigmoid'], 'prob type can only be in [softmax,sigmoid] got {}'.format(prob_type)
# float case
if transpose_cat:
if prob_type == 'softmax':
cls_prob = softmax(data=cls_pred, name='{}_cls_prob'.format(from_name), axis=-1)
elif prob_type == 'sigmoid':
cls_prob = Activation(data=cls_pred, act_type='sigmoid', name='{}_cls_prob'.format(from_name))
cls_prob = mx.symbol.transpose(cls_prob, axes=(0, 2, 1), name='{}_transpose_out'.format(from_name))
else:
            if prob_type == 'softmax':
                cls_prob = softmax(data=cls_pred, name='{}_cls_prob'.format(from_name), axis=1)
elif prob_type == 'sigmoid':
cls_prob = Activation(data=cls_pred, act_type='sigmoid', name='{}_cls_prob'.format(from_name))
# prob concat now on dim 2
cls_pred_layers.append(cls_prob)
if ext_anchors is None:
# create anchor generation layer
if steps:
step = (steps[k], steps[k])
else:
step = '(-1.0, -1.0)'
anchors = mx.contrib.symbol.MultiBoxPrior(from_layer, sizes=size_str, ratios=ratio_str, \
clip=clip, name="{}_anchors".format(from_name), steps=step)
anchors = Flatten(data=anchors)
anchor_layers.append(anchors)
if ext_anchors is None:
anchor_boxes = Concat(*anchor_layers, dim=1)
anchor_boxes = mx.symbol.Reshape(data=anchor_boxes, shape=(0, -1, 4), name="multibox_anchors")
else:
# overwrite with external anchors
anchor_boxes = mx.symbol.Variable('multibox_anchors', shape=ext_anchors.shape,
init=mx.init.Constant(ext_anchors.tolist()))
# this is how the float model will look without the additional nodes for i16 softmax
loc_preds = Concat(*loc_pred_layers, dim=1 if transpose_cat else 2, name="multibox_loc_pred")
cls_preds = Concat(*cls_pred_layers, dim=2, name='cls_pred_concat')
return [loc_preds, cls_preds, anchor_boxes]
|
noppanit/airfare-recommendation | refs/heads/master | scripts/import_data.py | 1 | import sys
import os.path
parent = os.path.abspath(os.path.join(os.path.dirname(__file__),'..'))
sys.path.append(parent)
from airfare.atc import create_unique_city, create_flight_details
import csv
with open('data/domestic-flights.csv', 'rt') as csv_file:
routes = csv.reader(csv_file)
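    # Column indices assumed from the usage below: [2]=year, [3]=quarter,
    # [4]/[5]=origin/destination city ids, [6]/[7]=city names, [8]=miles,
    # [10]=fare; row 0 is treated as the header and skipped.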
for idx, route in enumerate(routes):
if idx != 0:
year = route[2]
quarter = route[3]
from_city_id_1 = route[4]
to_city_id_1 = route[5]
from_city = route[6]
to_city = route[7]
miles = route[8]
fare = route[10]
from_node = create_unique_city(from_city_id_1, from_city)
to_node = create_unique_city(to_city_id_1, to_city)
create_flight_details(fm=from_node, to=to_node, miles=miles, fare=fare, year=year, quarter=quarter)
|
codeAshu/cgt | refs/heads/master | cgt/tests/test_conv.py | 3 | import numpy as np
import cgt
from cgt import nn
def test_conv():
np.random.seed(0)
x = np.random.randn(2,2,5,17)
filt = np.random.randn(3,2,4,7)
filtrows = filt.shape[2]
filtcols = filt.shape[3]
batchsize = x.shape[0]
outchans = filt.shape[0]
try:
import scipy.signal
except ImportError:
print "skipping because we don't have ndimage"
return
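    # Reference output: scipy.signal.convolve2d with flipped filters in 'full'
    # mode, accumulated over input channels for each batch/output-channel pair.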
out = np.zeros((batchsize,outchans,x.shape[2]+filtrows-1,x.shape[3]+filtcols-1))
for b in xrange(x.shape[0]):
for inchan in xrange(x.shape[1]):
for outchan in xrange(outchans):
out[b,outchan] += scipy.signal.convolve2d(x[b,inchan],filt[outchan,inchan][::-1,::-1],mode='full')
def check_conv(precision):
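        # Build the same convolution with cgt's conv2d (padding of kernel-1
        # reproduces the 'full' output) and compare against the SciPy reference
        # at a precision-dependent tolerance.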
cgt.reset_config()
cgt.set_precision(precision)
f = cgt.function([], nn.conv2d(cgt.constant(x), cgt.constant(filt), kernelshape=(filtrows,filtcols), pad=(filtrows-1, filtcols-1)))
out1 = f()
# out1 = cgt.numeric_eval1(nn.conv2d(cgt.constant(x), cgt.constant(f), kersize=(filtrows,filtcols)), {})
np.testing.assert_allclose(out, out1, atol={"single":1e-3,"double":1e-6}[precision])
yield check_conv,"single"
yield check_conv,"double"
if __name__ == "__main__":
for (fn,arg) in test_conv():
fn(arg) |
ejpbruel/servo | refs/heads/master | tests/power/PowerMeasure.py | 216 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# ---------Power measurement ------------------------------#
# This script will run the servo with the given benchmark and
# get the power usage using Powermetrics. Results will be put
# in separate files with that name.
# Do not forget to run the script in servo/tests/power folder
# --------------------------------------------------------#
from __future__ import print_function, unicode_literals
import os
from os import path
import time
import argparse
# ------------------------PowerCollector----------------------------#
# Collecting all the power data and put them into files
TOP_DIR = path.join("..", "..")
def PowerCollector(OutputDir, Benchmarks, LayoutThreads, Renderer):
print("Running the power collector")
power_dir = path.join(OutputDir, "power")
time_dir = path.join(OutputDir, "time")
etc_dir = path.join(OutputDir, "etc")
for d in [power_dir, time_dir, etc_dir]:
os.mkdir(d)
SleepTime = 20
GuardTime = 0.5
powerTiming = 1
ExperimentNum = 21
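    # The loops below use range(1, ExperimentNum), i.e. 20 measurement passes
    # for each layout-thread count.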
for ExpNum in range(1, ExperimentNum):
for layoutT in range(1, LayoutThreads + 1):
print(" layoutT=%d ExpNum=%d" % (layoutT, ExpNum))
PowerFiles = path.join(
power_dir, "power-Layout%d-set%d.csv" % (layoutT, ExpNum))
TimeFiles = path.join(
time_dir, "time-Layout%d-set%d.csv" % (layoutT, ExpNum))
ServoCmd = "(time ../../target/release/servo -x -y %d %s %s) 2> %s" % \
(layoutT, Renderer, Benchmarks, TimeFiles)
Metrics = path.join(
etc_dir, "metrics-Layout%d-set%d-css.csv" % (layoutT, ExpNum))
cmd = "(sudo powermetrics -i %d | " \
"grep \"energy\\|elapsed\\|servo\" > %s &_) 2> %s" % \
(powerTiming, PowerFiles, Metrics)
time.sleep(SleepTime)
os.system(cmd)
time.sleep(GuardTime)
os.system(ServoCmd)
time.sleep(GuardTime)
os.system('sudo pkill -9 powermetrics')
time.sleep(SleepTime)
# -------------------PowerParser ---------------------------------#
# Parsing the power data collected by the PowerCollector function
def PowerParser(OutputDir, LayoutThreads):
print("Running the PowerParser")
ExperimentNum = 21
ResultTable = OutputDir + "ResultTable.csv"
ResultFile = open(ResultTable, "w")
ResultFile.write("LayoutThreads, MeanPower, MaxPower , MinPower, MeanTime , MaxTime, "
"MinTime \n")
for layoutT in range(1, LayoutThreads + 1):
MaxTime = 0
MinTime = 1000000
MaxPower = 0
MinPower = 1000000
TotalPower = 0
TotalTime = 0
TimeGen = 0
PowerGen = 0
for ExpNum in range(1, ExperimentNum):
print(" layoutT=%d ExpNum=%d" % (layoutT, ExpNum))
Files = path.join(
OutputDir, "power", "power-Layout%d-set%d.csv" %
(layoutT, ExpNum))
NewFile = path.join(OutputDir, "power", "Servo-Layout%d-set%d.csv" %
(layoutT, ExpNum))
File = open(Files, 'r')
PowerFile = open(NewFile, 'w')
TimeFiles = path.join(OutputDir, "time", "time-Layout%d-set%d.csv" %
(layoutT, ExpNum))
            # ----Putting the power and its time into a table---- #
for line in File:
words = line.split()
if words[0] == "***":
insertingWord = words[10][1:-2] + " "
elif words[0] == "Intel":
insertingWord += words[7][:-1]
insertingWord += "\n"
PowerFile.write(insertingWord)
File.close()
PowerFile.close()
            # ---------------getting the total power of experiments-------- #
TempFile = open(NewFile, 'r')
Power = 0
for line in TempFile:
words2 = line.split()
Power += float(words2[0]) * float(words2[1])
TotalPower = float(Power / 1000.0)
if TotalPower > MaxPower:
MaxPower = TotalPower
if TotalPower < MinPower:
MinPower = TotalPower
# -------------getting the total time of execution---------- #
TempFile2 = open(TimeFiles, "r")
for line in TempFile2:
words3 = line.split()
if line != "\n" and words3[0] == "real":
TotalTime = (float(words3[1][0]) * 60) + \
float(words3[1][2:-1])
if TotalTime > MaxTime:
MaxTime = TotalTime
if TotalTime < MinTime:
MinTime = TotalTime
TimeGen = TimeGen + TotalTime
PowerGen = PowerGen + TotalPower
TotalPower = PowerGen / float(ExperimentNum - 1)
TotalTime = TimeGen / float(ExperimentNum - 1)
ResultFile.write(str(layoutT) + " , " + str(TotalPower) + " , " +
str(MaxPower) + " , " + str(MinPower) + " , " +
str(TotalTime) + " , " + str(MaxTime) + " , " +
str(MinTime) + "\n")
ResultFile.close()
    Opener = open(ResultTable, "r")
for line in Opener:
print(line)
print("Also you can find all the numbers for Power "
"and Performance in : ", ResultTable)
# ----------------------------------------------------#
def main():
LayoutThreads = 8 # Maximum number of threads considered for Layout
Benchmarks = path.join(TOP_DIR, "tests", "html", "perf-rainbow.html")
OutputDir = "Experiments"
os.mkdir(OutputDir)
Renderer = ""
# Parsing the input of the script
parser = argparse.ArgumentParser(description="Measuring \
power and performance of your Servo runs")
parser.add_argument("-b", "--benchmark", help="Gets the \
benchmark, for example \"-B perf-rainbow.html\"")
parser.add_argument("-c", "--CPU", help="Rendering with \
CPU instead of GPU, for example -C")
parser.add_argument("-l", "--LayoutThreads", help="Specify \
the maximum number of threads for layout, for example \" -L 5\"")
parser.add_argument("-o", "--Output", help="Specify \
the output directory")
args = parser.parse_args()
if args.benchmark:
Benchmarks = args.benchmark
if args.CPU:
Renderer = "-c"
if args.LayoutThreads:
LayoutThreads = int(args.LayoutThreads)
if args.Output:
OutputDir = args.Output
PowerCollector(OutputDir, Benchmarks, LayoutThreads, Renderer)
PowerParser(OutputDir, LayoutThreads)
if __name__ == "__main__":
main()
|
VincentGong/chess | refs/heads/master | cocos2d-x/tools/bindings-generator/clang/cindex.py | 21 | #===- cindex.py - Python Indexing Library Bindings -----------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
Clang Indexing Library Bindings
===============================
This module provides an interface to the Clang indexing library. It is a
low-level interface to the indexing library which attempts to match the Clang
API directly while also being "pythonic". Notable differences from the C API
are:
* string results are returned as Python strings, not CXString objects.
* null cursors are translated to None.
* access to child cursors is done via iteration, not visitation.
The major indexing objects are:
Index
The top-level object which manages some global library state.
TranslationUnit
High-level object encapsulating the AST for a single translation unit. These
can be loaded from .ast files or parsed on the fly.
Cursor
Generic object for representing a node in the AST.
SourceRange, SourceLocation, and File
Objects representing information about the input source.
Most object information is exposed using properties, when the underlying API
call is efficient.
"""
# TODO
# ====
#
# o API support for invalid translation units. Currently we can't even get the
# diagnostics on failure because they refer to locations in an object that
# will have been invalidated.
#
# o fix memory management issues (currently client must hold on to index and
# translation unit, or risk crashes).
#
# o expose code completion APIs.
#
# o cleanup ctypes wrapping, would be nice to separate the ctypes details more
# clearly, and hide from the external interface (i.e., help(cindex)).
#
# o implement additional SourceLocation, SourceRange, and File methods.
from ctypes import *
import collections
import clang.enumerations
# ctypes doesn't implicitly convert c_void_p to the appropriate wrapper
# object. This is a problem, because it means that from_parameter will see an
# integer and pass the wrong value on platforms where int != void*. Work around
# this by marshalling object arguments as void**.
c_object_p = POINTER(c_void_p)
callbacks = {}
### Exception Classes ###
class TranslationUnitLoadError(Exception):
"""Represents an error that occurred when loading a TranslationUnit.
This is raised in the case where a TranslationUnit could not be
instantiated due to failure in the libclang library.
FIXME: Make libclang expose additional error information in this scenario.
"""
pass
class TranslationUnitSaveError(Exception):
"""Represents an error that occurred when saving a TranslationUnit.
Each error has associated with it an enumerated value, accessible under
e.save_error. Consumers can compare the value with one of the ERROR_
constants in this class.
"""
# Indicates that an unknown error occurred. This typically indicates that
# I/O failed during save.
ERROR_UNKNOWN = 1
# Indicates that errors during translation prevented saving. The errors
# should be available via the TranslationUnit's diagnostics.
ERROR_TRANSLATION_ERRORS = 2
# Indicates that the translation unit was somehow invalid.
ERROR_INVALID_TU = 3
def __init__(self, enumeration, message):
assert isinstance(enumeration, int)
if enumeration < 1 or enumeration > 3:
raise Exception("Encountered undefined TranslationUnit save error "
"constant: %d. Please file a bug to have this "
"value supported." % enumeration)
self.save_error = enumeration
Exception.__init__(self, 'Error %d: %s' % (enumeration, message))
### Structures and Utility Classes ###
class CachedProperty(object):
"""Decorator that lazy-loads the value of a property.
The first time the property is accessed, the original property function is
executed. The value it returns is set as the new value of that instance's
property, replacing the original method.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
try:
self.__doc__ = wrapped.__doc__
except:
pass
def __get__(self, instance, instance_type=None):
if instance is None:
return self
value = self.wrapped(instance)
setattr(instance, self.wrapped.__name__, value)
return value
class _CXString(Structure):
"""Helper for transforming CXString results."""
_fields_ = [("spelling", c_char_p), ("free", c_int)]
def __del__(self):
conf.lib.clang_disposeString(self)
@staticmethod
def from_result(res, fn, args):
assert isinstance(res, _CXString)
return conf.lib.clang_getCString(res)
class SourceLocation(Structure):
"""
A SourceLocation represents a particular location within a source file.
"""
_fields_ = [("ptr_data", c_void_p * 2), ("int_data", c_uint)]
_data = None
def _get_instantiation(self):
if self._data is None:
f, l, c, o = c_object_p(), c_uint(), c_uint(), c_uint()
conf.lib.clang_getInstantiationLocation(self, byref(f), byref(l),
byref(c), byref(o))
if f:
f = File(f)
else:
f = None
self._data = (f, int(l.value), int(c.value), int(o.value))
return self._data
@staticmethod
def from_position(tu, file, line, column):
"""
Retrieve the source location associated with a given file/line/column in
a particular translation unit.
"""
return conf.lib.clang_getLocation(tu, file, line, column)
@staticmethod
def from_offset(tu, file, offset):
"""Retrieve a SourceLocation from a given character offset.
tu -- TranslationUnit file belongs to
file -- File instance to obtain offset from
offset -- Integer character offset within file
"""
return conf.lib.clang_getLocationForOffset(tu, file, offset)
@property
def file(self):
"""Get the file represented by this source location."""
return self._get_instantiation()[0]
@property
def line(self):
"""Get the line represented by this source location."""
return self._get_instantiation()[1]
@property
def column(self):
"""Get the column represented by this source location."""
return self._get_instantiation()[2]
@property
def offset(self):
"""Get the file offset represented by this source location."""
return self._get_instantiation()[3]
def __eq__(self, other):
return conf.lib.clang_equalLocations(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
if self.file:
filename = self.file.name
else:
filename = None
return "<SourceLocation file %r, line %r, column %r>" % (
filename, self.line, self.column)
class SourceRange(Structure):
"""
A SourceRange describes a range of source locations within the source
code.
"""
_fields_ = [
("ptr_data", c_void_p * 2),
("begin_int_data", c_uint),
("end_int_data", c_uint)]
# FIXME: Eliminate this and make normal constructor? Requires hiding ctypes
# object.
@staticmethod
def from_locations(start, end):
return conf.lib.clang_getRange(start, end)
@property
def start(self):
"""
Return a SourceLocation representing the first character within a
source range.
"""
return conf.lib.clang_getRangeStart(self)
@property
def end(self):
"""
Return a SourceLocation representing the last character within a
source range.
"""
return conf.lib.clang_getRangeEnd(self)
def __eq__(self, other):
return conf.lib.clang_equalRanges(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<SourceRange start %r, end %r>" % (self.start, self.end)
class Diagnostic(object):
"""
A Diagnostic is a single instance of a Clang diagnostic. It includes the
diagnostic severity, the message, the location the diagnostic occurred, as
well as additional source ranges and associated fix-it hints.
"""
Ignored = 0
Note = 1
Warning = 2
Error = 3
Fatal = 4
def __init__(self, ptr):
self.ptr = ptr
def __del__(self):
conf.lib.clang_disposeDiagnostic(self)
@property
def severity(self):
return conf.lib.clang_getDiagnosticSeverity(self)
@property
def location(self):
return conf.lib.clang_getDiagnosticLocation(self)
@property
def spelling(self):
return conf.lib.clang_getDiagnosticSpelling(self)
@property
def ranges(self):
class RangeIterator:
def __init__(self, diag):
self.diag = diag
def __len__(self):
return int(conf.lib.clang_getDiagnosticNumRanges(self.diag))
def __getitem__(self, key):
if (key >= len(self)):
raise IndexError
return conf.lib.clang_getDiagnosticRange(self.diag, key)
return RangeIterator(self)
@property
def fixits(self):
class FixItIterator:
def __init__(self, diag):
self.diag = diag
def __len__(self):
return int(conf.lib.clang_getDiagnosticNumFixIts(self.diag))
def __getitem__(self, key):
range = SourceRange()
value = conf.lib.clang_getDiagnosticFixIt(self.diag, key,
byref(range))
if len(value) == 0:
raise IndexError
return FixIt(range, value)
return FixItIterator(self)
@property
def category_number(self):
"""The category number for this diagnostic."""
return conf.lib.clang_getDiagnosticCategory(self)
@property
def category_name(self):
"""The string name of the category for this diagnostic."""
return conf.lib.clang_getDiagnosticCategoryName(self.category_number)
@property
def option(self):
"""The command-line option that enables this diagnostic."""
return conf.lib.clang_getDiagnosticOption(self, None)
@property
def disable_option(self):
"""The command-line option that disables this diagnostic."""
disable = _CXString()
conf.lib.clang_getDiagnosticOption(self, byref(disable))
return conf.lib.clang_getCString(disable)
def __repr__(self):
return "<Diagnostic severity %r, location %r, spelling %r>" % (
self.severity, self.location, self.spelling)
def from_param(self):
return self.ptr
class FixIt(object):
"""
A FixIt represents a transformation to be applied to the source to
"fix-it". The fix-it shouldbe applied by replacing the given source range
with the given value.
"""
def __init__(self, range, value):
self.range = range
self.value = value
def __repr__(self):
return "<FixIt range %r, value %r>" % (self.range, self.value)
### Access Specifier Kinds ###
class AccessSpecifierKind(object):
"""
An AccessSpecifierKind describes the kind of access specifier a cursor
points to.
"""
_kinds = []
_name_map = None
def __init__(self, value):
if value >= len(AccessSpecifierKind._kinds):
AccessSpecifierKind._kinds += [None] * (value - len(AccessSpecifierKind._kinds) + 1)
if AccessSpecifierKind._kinds[value] is not None:
raise ValueError,'AccessSpecifierKind already loaded'
self.value = value
AccessSpecifierKind._kinds[value] = self
AccessSpecifierKind._name_map = None
def from_param(self):
return self.value
@property
def name(self):
"""Get the enumeration name of this access specifier kind"""
if self._name_map is None:
self._name_map = {}
for key,value in AccessSpecifierKind.__dict__.items():
if isinstance(value,AccessSpecifierKind):
self._name_map[value] = key
return self._name_map[self]
@staticmethod
def from_id(id):
if id >= len(AccessSpecifierKind._kinds) or AccessSpecifierKind._kinds[id] is None:
raise ValueError,'Unknown access specifier kind'
return AccessSpecifierKind._kinds[id]
@staticmethod
def get_all_kinds():
"""Return all AccessSpecifierKind enumeration instances."""
return filter(None, AccessSpecifierKind._kinds)
def __repr__(self):
return 'AccessSpecifierKind.%s' % (self.name,)
###
# Access Specifier Kinds
AccessSpecifierKind.INVALID = AccessSpecifierKind(0)
AccessSpecifierKind.PUBLIC = AccessSpecifierKind(1)
AccessSpecifierKind.PROTECTED = AccessSpecifierKind(2)
AccessSpecifierKind.PRIVATE = AccessSpecifierKind(3)
### Availability Kinds ###
class AvailabilityKind(object):
"""
An AvailabilityKind describes the kind of availability a cursor
points to.
"""
_kinds = []
_name_map = None
def __init__(self, value):
if value >= len(AvailabilityKind._kinds):
AvailabilityKind._kinds += [None] * (value - len(AvailabilityKind._kinds) + 1)
if AvailabilityKind._kinds[value] is not None:
raise ValueError,'AvailabilityKind already loaded'
self.value = value
AvailabilityKind._kinds[value] = self
AvailabilityKind._name_map = None
def from_param(self):
return self.value
@property
def name(self):
"""Get the enumeration name of this availability kind"""
if self._name_map is None:
self._name_map = {}
for key,value in AvailabilityKind.__dict__.items():
if isinstance(value,AvailabilityKind):
self._name_map[value] = key
return self._name_map[self]
@staticmethod
def from_id(id):
if id >= len(AvailabilityKind._kinds) or AvailabilityKind._kinds[id] is None:
raise ValueError,'Unknown availability kind'
return AvailabilityKind._kinds[id]
@staticmethod
def get_all_kinds():
"""Return all AvailabilityKind enumeration instances."""
return filter(None, AvailabilityKind._kinds)
def __repr__(self):
return 'AvailabilityKind.%s' % (self.name,)
###
# Availability Kinds
AvailabilityKind.AVAILABLE = AvailabilityKind(0)
AvailabilityKind.DEPRECATED = AvailabilityKind(1)
AvailabilityKind.NOTAVAILABLE = AvailabilityKind(2)
AvailabilityKind.NOTACCESSIBLE = AvailabilityKind(3)
class TokenGroup(object):
"""Helper class to facilitate token management.
Tokens are allocated from libclang in chunks. They must be disposed of as a
collective group.
One purpose of this class is for instances to represent groups of allocated
tokens. Each token in a group contains a reference back to an instance of
this class. When all tokens from a group are garbage collected, it allows
this class to be garbage collected. When this class is garbage collected,
it calls the libclang destructor which invalidates all tokens in the group.
You should not instantiate this class outside of this module.
"""
def __init__(self, tu, memory, count):
self._tu = tu
self._memory = memory
self._count = count
def __del__(self):
conf.lib.clang_disposeTokens(self._tu, self._memory, self._count)
@staticmethod
def get_tokens(tu, extent):
"""Helper method to return all tokens in an extent.
This functionality is needed multiple places in this module. We define
it here because it seems like a logical place.
"""
tokens_memory = POINTER(Token)()
tokens_count = c_uint()
conf.lib.clang_tokenize(tu, extent, byref(tokens_memory),
byref(tokens_count))
count = int(tokens_count.value)
# If we get no tokens, no memory was allocated. Be sure not to return
# anything and potentially call a destructor on nothing.
if count < 1:
return
tokens_array = cast(tokens_memory, POINTER(Token * count)).contents
token_group = TokenGroup(tu, tokens_memory, tokens_count)
for i in xrange(0, count):
token = Token()
token.int_data = tokens_array[i].int_data
token.ptr_data = tokens_array[i].ptr_data
token._tu = tu
token._group = token_group
yield token
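# Illustrative usage sketch (kept as a comment so importing this module has no
# side effects): assuming `tu` is an already-parsed TranslationUnit, the tokens
# covering its root cursor's extent can be iterated like so:
#
#   for token in TokenGroup.get_tokens(tu, tu.cursor.extent):
#       print token.spelling, token.kind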
class TokenKind(object):
"""Describes a specific type of a Token."""
_value_map = {} # int -> TokenKind
def __init__(self, value, name):
"""Create a new TokenKind instance from a numeric value and a name."""
self.value = value
self.name = name
def __repr__(self):
return 'TokenKind.%s' % (self.name,)
@staticmethod
def from_value(value):
"""Obtain a registered TokenKind instance from its value."""
result = TokenKind._value_map.get(value, None)
if result is None:
raise ValueError('Unknown TokenKind: %d' % value)
return result
@staticmethod
def register(value, name):
"""Register a new TokenKind enumeration.
This should only be called at module load time by code within this
package.
"""
if value in TokenKind._value_map:
raise ValueError('TokenKind already registered: %d' % value)
kind = TokenKind(value, name)
TokenKind._value_map[value] = kind
setattr(TokenKind, name, kind)
### Cursor Kinds ###
class CursorKind(object):
"""
A CursorKind describes the kind of entity that a cursor points to.
"""
# The unique kind objects, indexed by id.
_kinds = []
_name_map = None
def __init__(self, value):
if value >= len(CursorKind._kinds):
CursorKind._kinds += [None] * (value - len(CursorKind._kinds) + 1)
if CursorKind._kinds[value] is not None:
raise ValueError('CursorKind already loaded')
self.value = value
CursorKind._kinds[value] = self
CursorKind._name_map = None
def from_param(self):
return self.value
@property
def name(self):
"""Get the enumeration name of this cursor kind."""
if self._name_map is None:
self._name_map = {}
for key,value in CursorKind.__dict__.items():
if isinstance(value,CursorKind):
self._name_map[value] = key
return self._name_map[self]
@staticmethod
def from_id(id):
if id >= len(CursorKind._kinds) or CursorKind._kinds[id] is None:
raise ValueError('Unknown cursor kind')
return CursorKind._kinds[id]
@staticmethod
def get_all_kinds():
"""Return all CursorKind enumeration instances."""
return filter(None, CursorKind._kinds)
def is_declaration(self):
"""Test if this is a declaration kind."""
return conf.lib.clang_isDeclaration(self)
def is_reference(self):
"""Test if this is a reference kind."""
return conf.lib.clang_isReference(self)
def is_expression(self):
"""Test if this is an expression kind."""
return conf.lib.clang_isExpression(self)
def is_statement(self):
"""Test if this is a statement kind."""
return conf.lib.clang_isStatement(self)
def is_attribute(self):
"""Test if this is an attribute kind."""
return conf.lib.clang_isAttribute(self)
def is_invalid(self):
"""Test if this is an invalid kind."""
return conf.lib.clang_isInvalid(self)
def is_translation_unit(self):
"""Test if this is a translation unit kind."""
return conf.lib.clang_isTranslationUnit(self)
def is_preprocessing(self):
"""Test if this is a preprocessing kind."""
return conf.lib.clang_isPreprocessing(self)
def is_unexposed(self):
"""Test if this is an unexposed kind."""
return conf.lib.clang_isUnexposed(self)
def __repr__(self):
return 'CursorKind.%s' % (self.name,)
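# Illustrative sketch (comment only): the predicates above map onto the
# clang_is* entry points, e.g. for a cursor `c` from a parsed translation unit:
#
#   if c.kind.is_declaration():
#       print 'declaration:', c.spelling
#   elif c.kind.is_expression():
#       print 'expression at', c.location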
# FIXME: Is there a nicer way to expose this enumeration? We could potentially
# represent the nested structure, or even build a class hierarchy. The main
# things we want for sure are (a) simple external access to kinds, (b) a place
# to hang a description and name, (c) easy to keep in sync with Index.h.
###
# Declaration Kinds
# A declaration whose specific kind is not exposed via this interface.
#
# Unexposed declarations have the same operations as any other kind of
# declaration; one can extract their location information, spelling, find their
# definitions, etc. However, the specific kind of the declaration is not
# reported.
CursorKind.UNEXPOSED_DECL = CursorKind(1)
# A C or C++ struct.
CursorKind.STRUCT_DECL = CursorKind(2)
# A C or C++ union.
CursorKind.UNION_DECL = CursorKind(3)
# A C++ class.
CursorKind.CLASS_DECL = CursorKind(4)
# An enumeration.
CursorKind.ENUM_DECL = CursorKind(5)
# A field (in C) or non-static data member (in C++) in a struct, union, or C++
# class.
CursorKind.FIELD_DECL = CursorKind(6)
# An enumerator constant.
CursorKind.ENUM_CONSTANT_DECL = CursorKind(7)
# A function.
CursorKind.FUNCTION_DECL = CursorKind(8)
# A variable.
CursorKind.VAR_DECL = CursorKind(9)
# A function or method parameter.
CursorKind.PARM_DECL = CursorKind(10)
# An Objective-C @interface.
CursorKind.OBJC_INTERFACE_DECL = CursorKind(11)
# An Objective-C @interface for a category.
CursorKind.OBJC_CATEGORY_DECL = CursorKind(12)
# An Objective-C @protocol declaration.
CursorKind.OBJC_PROTOCOL_DECL = CursorKind(13)
# An Objective-C @property declaration.
CursorKind.OBJC_PROPERTY_DECL = CursorKind(14)
# An Objective-C instance variable.
CursorKind.OBJC_IVAR_DECL = CursorKind(15)
# An Objective-C instance method.
CursorKind.OBJC_INSTANCE_METHOD_DECL = CursorKind(16)
# An Objective-C class method.
CursorKind.OBJC_CLASS_METHOD_DECL = CursorKind(17)
# An Objective-C @implementation.
CursorKind.OBJC_IMPLEMENTATION_DECL = CursorKind(18)
# An Objective-C @implementation for a category.
CursorKind.OBJC_CATEGORY_IMPL_DECL = CursorKind(19)
# A typedef.
CursorKind.TYPEDEF_DECL = CursorKind(20)
# A C++ class method.
CursorKind.CXX_METHOD = CursorKind(21)
# A C++ namespace.
CursorKind.NAMESPACE = CursorKind(22)
# A linkage specification, e.g. 'extern "C"'.
CursorKind.LINKAGE_SPEC = CursorKind(23)
# A C++ constructor.
CursorKind.CONSTRUCTOR = CursorKind(24)
# A C++ destructor.
CursorKind.DESTRUCTOR = CursorKind(25)
# A C++ conversion function.
CursorKind.CONVERSION_FUNCTION = CursorKind(26)
# A C++ template type parameter
CursorKind.TEMPLATE_TYPE_PARAMETER = CursorKind(27)
# A C++ non-type template parameter.
CursorKind.TEMPLATE_NON_TYPE_PARAMETER = CursorKind(28)
# A C++ template template parameter.
CursorKind.TEMPLATE_TEMPLATE_PARAMETER = CursorKind(29)
# A C++ function template.
CursorKind.FUNCTION_TEMPLATE = CursorKind(30)
# A C++ class template.
CursorKind.CLASS_TEMPLATE = CursorKind(31)
# A C++ class template partial specialization.
CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION = CursorKind(32)
# A C++ namespace alias declaration.
CursorKind.NAMESPACE_ALIAS = CursorKind(33)
# A C++ using directive
CursorKind.USING_DIRECTIVE = CursorKind(34)
# A C++ using declaration
CursorKind.USING_DECLARATION = CursorKind(35)
# A Type alias decl.
CursorKind.TYPE_ALIAS_DECL = CursorKind(36)
# An Objective-C @synthesize declaration.
CursorKind.OBJC_SYNTHESIZE_DECL = CursorKind(37)
# An Objective-C @dynamic declaration.
CursorKind.OBJC_DYNAMIC_DECL = CursorKind(38)
# A C++ access specifier decl.
CursorKind.CXX_ACCESS_SPEC_DECL = CursorKind(39)
###
# Reference Kinds
CursorKind.OBJC_SUPER_CLASS_REF = CursorKind(40)
CursorKind.OBJC_PROTOCOL_REF = CursorKind(41)
CursorKind.OBJC_CLASS_REF = CursorKind(42)
# A reference to a type declaration.
#
# A type reference occurs anywhere where a type is named but not
# declared. For example, given:
# typedef unsigned size_type;
# size_type size;
#
# The typedef is a declaration of size_type (CXCursor_TypedefDecl),
# while the type of the variable "size" is referenced. The cursor
# referenced by the type of size is the typedef for size_type.
CursorKind.TYPE_REF = CursorKind(43)
CursorKind.CXX_BASE_SPECIFIER = CursorKind(44)
# A reference to a class template, function template, template
# template parameter, or class template partial specialization.
CursorKind.TEMPLATE_REF = CursorKind(45)
# A reference to a namespace or namespace alias.
CursorKind.NAMESPACE_REF = CursorKind(46)
# A reference to a member of a struct, union, or class that occurs in
# some non-expression context, e.g., a designated initializer.
CursorKind.MEMBER_REF = CursorKind(47)
# A reference to a labeled statement.
CursorKind.LABEL_REF = CursorKind(48)
# A reference to a set of overloaded functions or function templates
# that has not yet been resolved to a specific function or function template.
CursorKind.OVERLOADED_DECL_REF = CursorKind(49)
# A reference to a variable that occurs in some non-expression
# context, e.g., a C++ lambda capture list.
CursorKind.VARIABLE_REF = CursorKind(50)
###
# Invalid/Error Kinds
CursorKind.INVALID_FILE = CursorKind(70)
CursorKind.NO_DECL_FOUND = CursorKind(71)
CursorKind.NOT_IMPLEMENTED = CursorKind(72)
CursorKind.INVALID_CODE = CursorKind(73)
###
# Expression Kinds
# An expression whose specific kind is not exposed via this interface.
#
# Unexposed expressions have the same operations as any other kind of
# expression; one can extract their location information, spelling, children,
# etc. However, the specific kind of the expression is not reported.
CursorKind.UNEXPOSED_EXPR = CursorKind(100)
# An expression that refers to some value declaration, such as a function,
# variable, or enumerator.
CursorKind.DECL_REF_EXPR = CursorKind(101)
# An expression that refers to a member of a struct, union, class, Objective-C
# class, etc.
CursorKind.MEMBER_REF_EXPR = CursorKind(102)
# An expression that calls a function.
CursorKind.CALL_EXPR = CursorKind(103)
# An expression that sends a message to an Objective-C object or class.
CursorKind.OBJC_MESSAGE_EXPR = CursorKind(104)
# An expression that represents a block literal.
CursorKind.BLOCK_EXPR = CursorKind(105)
# An integer literal.
CursorKind.INTEGER_LITERAL = CursorKind(106)
# A floating point number literal.
CursorKind.FLOATING_LITERAL = CursorKind(107)
# An imaginary number literal.
CursorKind.IMAGINARY_LITERAL = CursorKind(108)
# A string literal.
CursorKind.STRING_LITERAL = CursorKind(109)
# A character literal.
CursorKind.CHARACTER_LITERAL = CursorKind(110)
# A parenthesized expression, e.g. "(1)".
#
# This AST node is only formed if full location information is requested.
CursorKind.PAREN_EXPR = CursorKind(111)
# This represents a unary expression (except sizeof and alignof).
CursorKind.UNARY_OPERATOR = CursorKind(112)
# [C99 6.5.2.1] Array Subscripting.
CursorKind.ARRAY_SUBSCRIPT_EXPR = CursorKind(113)
# A builtin binary operation expression such as "x + y" or
# "x <= y".
CursorKind.BINARY_OPERATOR = CursorKind(114)
# Compound assignment such as "+=".
CursorKind.COMPOUND_ASSIGNMENT_OPERATOR = CursorKind(115)
# The ?: ternary operator.
CursorKind.CONDITIONAL_OPERATOR = CursorKind(116)
# An explicit cast in C (C99 6.5.4) or a C-style cast in C++
# (C++ [expr.cast]), which uses the syntax (Type)expr.
#
# For example: (int)f.
CursorKind.CSTYLE_CAST_EXPR = CursorKind(117)
# [C99 6.5.2.5]
CursorKind.COMPOUND_LITERAL_EXPR = CursorKind(118)
# Describes a C or C++ initializer list.
CursorKind.INIT_LIST_EXPR = CursorKind(119)
# The GNU address of label extension, representing &&label.
CursorKind.ADDR_LABEL_EXPR = CursorKind(120)
# This is the GNU Statement Expression extension: ({int X=4; X;})
CursorKind.StmtExpr = CursorKind(121)
# Represents a C11 generic selection.
CursorKind.GENERIC_SELECTION_EXPR = CursorKind(122)
# Implements the GNU __null extension, which is a name for a null
# pointer constant that has integral type (e.g., int or long) and is the same
# size and alignment as a pointer.
#
# The __null extension is typically only used by system headers, which define
# NULL as __null in C++ rather than using 0 (which is an integer that may not
# match the size of a pointer).
CursorKind.GNU_NULL_EXPR = CursorKind(123)
# C++'s static_cast<> expression.
CursorKind.CXX_STATIC_CAST_EXPR = CursorKind(124)
# C++'s dynamic_cast<> expression.
CursorKind.CXX_DYNAMIC_CAST_EXPR = CursorKind(125)
# C++'s reinterpret_cast<> expression.
CursorKind.CXX_REINTERPRET_CAST_EXPR = CursorKind(126)
# C++'s const_cast<> expression.
CursorKind.CXX_CONST_CAST_EXPR = CursorKind(127)
# Represents an explicit C++ type conversion that uses "functional"
# notation (C++ [expr.type.conv]).
#
# Example:
# \code
# x = int(0.5);
# \endcode
CursorKind.CXX_FUNCTIONAL_CAST_EXPR = CursorKind(128)
# A C++ typeid expression (C++ [expr.typeid]).
CursorKind.CXX_TYPEID_EXPR = CursorKind(129)
# [C++ 2.13.5] C++ Boolean Literal.
CursorKind.CXX_BOOL_LITERAL_EXPR = CursorKind(130)
# [C++0x 2.14.7] C++ Pointer Literal.
CursorKind.CXX_NULL_PTR_LITERAL_EXPR = CursorKind(131)
# Represents the "this" expression in C++
CursorKind.CXX_THIS_EXPR = CursorKind(132)
# [C++ 15] C++ Throw Expression.
#
# This handles 'throw' and 'throw' assignment-expression. When
# assignment-expression isn't present, Op will be null.
CursorKind.CXX_THROW_EXPR = CursorKind(133)
# A new expression for memory allocation and constructor calls, e.g:
# "new CXXNewExpr(foo)".
CursorKind.CXX_NEW_EXPR = CursorKind(134)
# A delete expression for memory deallocation and destructor calls,
# e.g. "delete[] pArray".
CursorKind.CXX_DELETE_EXPR = CursorKind(135)
# Represents a unary expression.
CursorKind.CXX_UNARY_EXPR = CursorKind(136)
# An ObjCStringLiteral, used for Objective-C string literals, e.g. @"foo".
CursorKind.OBJC_STRING_LITERAL = CursorKind(137)
# An ObjCEncodeExpr, used for @encode in Objective-C.
CursorKind.OBJC_ENCODE_EXPR = CursorKind(138)
# An ObjCSelectorExpr, used for @selector in Objective-C.
CursorKind.OBJC_SELECTOR_EXPR = CursorKind(139)
# Objective-C's protocol expression.
CursorKind.OBJC_PROTOCOL_EXPR = CursorKind(140)
# An Objective-C "bridged" cast expression, which casts between
# Objective-C pointers and C pointers, transferring ownership in the process.
#
# \code
# NSString *str = (__bridge_transfer NSString *)CFCreateString();
# \endcode
CursorKind.OBJC_BRIDGE_CAST_EXPR = CursorKind(141)
# Represents a C++0x pack expansion that produces a sequence of
# expressions.
#
# A pack expansion expression contains a pattern (which itself is an
# expression) followed by an ellipsis, e.g. "args...".
CursorKind.PACK_EXPANSION_EXPR = CursorKind(142)
# Represents an expression that computes the length of a parameter
# pack.
CursorKind.SIZE_OF_PACK_EXPR = CursorKind(143)
# Represents a C++ lambda expression that produces a local function object.
# void abssort(float *x, unsigned N) {
# std::sort(x, x + N,
# [](float a, float b) {
# return std::abs(a) < std::abs(b);
# });
# }
CursorKind.LAMBDA_EXPR = CursorKind(144)
# An Objective-C Boolean literal.
CursorKind.OBJC_BOOL_LITERAL_EXPR = CursorKind(145)
# Represents the "self" expression in a ObjC method.
CursorKind.OBJC_SELF_EXPR = CursorKind(146)
# A statement whose specific kind is not exposed via this interface.
#
# Unexposed statements have the same operations as any other kind of statement;
# one can extract their location information, spelling, children, etc. However,
# the specific kind of the statement is not reported.
CursorKind.UNEXPOSED_STMT = CursorKind(200)
# A labelled statement in a function.
CursorKind.LABEL_STMT = CursorKind(201)
# A compound statement
CursorKind.COMPOUND_STMT = CursorKind(202)
# A case statement.
CursorKind.CASE_STMT = CursorKind(203)
# A default statement.
CursorKind.DEFAULT_STMT = CursorKind(204)
# An if statement.
CursorKind.IF_STMT = CursorKind(205)
# A switch statement.
CursorKind.SWITCH_STMT = CursorKind(206)
# A while statement.
CursorKind.WHILE_STMT = CursorKind(207)
# A do statement.
CursorKind.DO_STMT = CursorKind(208)
# A for statement.
CursorKind.FOR_STMT = CursorKind(209)
# A goto statement.
CursorKind.GOTO_STMT = CursorKind(210)
# An indirect goto statement.
CursorKind.INDIRECT_GOTO_STMT = CursorKind(211)
# A continue statement.
CursorKind.CONTINUE_STMT = CursorKind(212)
# A break statement.
CursorKind.BREAK_STMT = CursorKind(213)
# A return statement.
CursorKind.RETURN_STMT = CursorKind(214)
# A GNU-style inline assembler statement.
CursorKind.ASM_STMT = CursorKind(215)
# Objective-C's overall @try-@catch-@finally statement.
CursorKind.OBJC_AT_TRY_STMT = CursorKind(216)
# Objective-C's @catch statement.
CursorKind.OBJC_AT_CATCH_STMT = CursorKind(217)
# Objective-C's @finally statement.
CursorKind.OBJC_AT_FINALLY_STMT = CursorKind(218)
# Objective-C's @throw statement.
CursorKind.OBJC_AT_THROW_STMT = CursorKind(219)
# Objective-C's @synchronized statement.
CursorKind.OBJC_AT_SYNCHRONIZED_STMT = CursorKind(220)
# Objective-C's autorelease pool statement.
CursorKind.OBJC_AUTORELEASE_POOL_STMT = CursorKind(221)
# Objective-C's for collection statement.
CursorKind.OBJC_FOR_COLLECTION_STMT = CursorKind(222)
# C++'s catch statement.
CursorKind.CXX_CATCH_STMT = CursorKind(223)
# C++'s try statement.
CursorKind.CXX_TRY_STMT = CursorKind(224)
# C++'s range-based for statement, i.e. for (decl : range).
CursorKind.CXX_FOR_RANGE_STMT = CursorKind(225)
# Windows Structured Exception Handling's try statement.
CursorKind.SEH_TRY_STMT = CursorKind(226)
# Windows Structured Exception Handling's except statement.
CursorKind.SEH_EXCEPT_STMT = CursorKind(227)
# Windows Structured Exception Handling's finally statement.
CursorKind.SEH_FINALLY_STMT = CursorKind(228)
# The null statement.
CursorKind.NULL_STMT = CursorKind(230)
# Adaptor class for mixing declarations with statements and expressions.
CursorKind.DECL_STMT = CursorKind(231)
###
# Other Kinds
# Cursor that represents the translation unit itself.
#
# The translation unit cursor exists primarily to act as the root cursor for
# traversing the contents of a translation unit.
CursorKind.TRANSLATION_UNIT = CursorKind(300)
###
# Attributes
# An attribute whose specific kind is not exposed via this interface.
CursorKind.UNEXPOSED_ATTR = CursorKind(400)
CursorKind.IB_ACTION_ATTR = CursorKind(401)
CursorKind.IB_OUTLET_ATTR = CursorKind(402)
CursorKind.IB_OUTLET_COLLECTION_ATTR = CursorKind(403)
CursorKind.CXX_FINAL_ATTR = CursorKind(404)
CursorKind.CXX_OVERRIDE_ATTR = CursorKind(405)
CursorKind.ANNOTATE_ATTR = CursorKind(406)
CursorKind.ASM_LABEL_ATTR = CursorKind(407)
###
# Preprocessing
CursorKind.PREPROCESSING_DIRECTIVE = CursorKind(500)
CursorKind.MACRO_DEFINITION = CursorKind(501)
CursorKind.MACRO_INSTANTIATION = CursorKind(502)
CursorKind.INCLUSION_DIRECTIVE = CursorKind(503)
# Extra Declarations
# A module import declaration.
CursorKind.MODULE_IMPORT_DECL = CursorKind(600)
### Cursors ###
class Cursor(Structure):
"""
The Cursor class represents a reference to an element within the AST. It
acts as a kind of iterator.
"""
_fields_ = [("_kind_id", c_int), ("xdata", c_int), ("data", c_void_p * 3)]
@staticmethod
def from_location(tu, location):
# We store a reference to the TU in the instance so the TU won't get
# collected before the cursor.
cursor = conf.lib.clang_getCursor(tu, location)
cursor._tu = tu
return cursor
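# Illustrative usage sketch (kept as a comment so importing this module has no
# side effects); `tu` is assumed to be an already-parsed TranslationUnit for
# 'sample.c', a placeholder filename:
#
#   loc = tu.get_location('sample.c', (10, 1))
#   cursor_at_loc = Cursor.from_location(tu, loc)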
def __eq__(self, other):
return conf.lib.clang_equalCursors(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def is_definition(self):
"""
Returns true if the declaration pointed at by the cursor is also a
definition of that entity.
"""
return conf.lib.clang_isCursorDefinition(self)
"""
Determine the availability of the entity that this cursor refers to,
taking the current target platform into account.
returns The availability of the cursor.
"""
def get_availability(self):
res = conf.lib.clang_getCursorAvailability(self)
return res
def is_static_method(self):
"""Returns True if the cursor refers to a C++ member function or member
function template that is declared 'static'.
"""
return conf.lib.clang_CXXMethod_isStatic(self)
def get_definition(self):
"""
If the cursor is a reference to a declaration or a declaration of
some entity, return a cursor that points to the definition of that
entity.
"""
# TODO: Should probably check that this is either a reference or
# declaration prior to issuing the lookup.
return conf.lib.clang_getCursorDefinition(self)
def get_usr(self):
"""Return the Unified Symbol Resultion (USR) for the entity referenced
by the given cursor (or None).
A Unified Symbol Resolution (USR) is a string that identifies a
particular entity (function, class, variable, etc.) within a
program. USRs can be compared across translation units to determine,
e.g., when references in one translation refer to an entity defined in
another translation unit."""
return conf.lib.clang_getCursorUSR(self)
@property
def kind(self):
"""Return the kind of this cursor."""
return CursorKind.from_id(self._kind_id)
@property
def spelling(self):
"""Return the spelling of the entity pointed at by the cursor."""
if not self.kind.is_declaration():
# FIXME: clang_getCursorSpelling should be fixed to not assert on
# this, for consistency with clang_getCursorUSR.
return None
if not hasattr(self, '_spelling'):
self._spelling = conf.lib.clang_getCursorSpelling(self)
return self._spelling
@property
def displayname(self):
"""
Return the display name for the entity referenced by this cursor.
The display name contains extra information that helps identify the cursor,
such as the parameters of a function or template or the arguments of a
class template specialization.
"""
if not hasattr(self, '_displayname'):
self._displayname = conf.lib.clang_getCursorDisplayName(self)
return self._displayname
@property
def location(self):
"""
Return the source location (the starting character) of the entity
pointed at by the cursor.
"""
if not hasattr(self, '_loc'):
self._loc = conf.lib.clang_getCursorLocation(self)
return self._loc
@property
def extent(self):
"""
Return the source range (the range of text) occupied by the entity
pointed at by the cursor.
"""
if not hasattr(self, '_extent'):
self._extent = conf.lib.clang_getCursorExtent(self)
return self._extent
@property
def type(self):
"""
Retrieve the Type (if any) of the entity pointed at by the cursor.
"""
if not hasattr(self, '_type'):
self._type = conf.lib.clang_getCursorType(self)
return self._type
@property
def canonical(self):
"""Return the canonical Cursor corresponding to this Cursor.
The canonical cursor is the cursor which is representative for the
underlying entity. For example, if you have multiple forward
declarations for the same class, the canonical cursor for the forward
declarations will be identical.
"""
if not hasattr(self, '_canonical'):
self._canonical = conf.lib.clang_getCanonicalCursor(self)
return self._canonical
@property
def result_type(self):
"""Retrieve the Type of the result for this Cursor."""
if not hasattr(self, '_result_type'):
self._result_type = conf.lib.clang_getResultType(self.type)
return self._result_type
@property
def underlying_typedef_type(self):
"""Return the underlying type of a typedef declaration.
Returns a Type for the typedef this cursor is a declaration for. If
the current cursor is not a typedef, this raises.
"""
if not hasattr(self, '_underlying_type'):
assert self.kind.is_declaration()
self._underlying_type = \
conf.lib.clang_getTypedefDeclUnderlyingType(self)
return self._underlying_type
@property
def enum_type(self):
"""Return the integer type of an enum declaration.
Returns a Type corresponding to an integer. If the cursor is not for an
enum, this raises.
"""
if not hasattr(self, '_enum_type'):
assert self.kind == CursorKind.ENUM_DECL
self._enum_type = conf.lib.clang_getEnumDeclIntegerType(self)
return self._enum_type
@property
def enum_value(self):
"""Return the value of an enum constant."""
if not hasattr(self, '_enum_value'):
assert self.kind == CursorKind.ENUM_CONSTANT_DECL
# Figure out the underlying type of the enum to know if it
# is a signed or unsigned quantity.
underlying_type = self.type
if underlying_type.kind == TypeKind.ENUM:
underlying_type = underlying_type.get_declaration().enum_type
if underlying_type.kind in (TypeKind.CHAR_U,
TypeKind.UCHAR,
TypeKind.CHAR16,
TypeKind.CHAR32,
TypeKind.USHORT,
TypeKind.UINT,
TypeKind.ULONG,
TypeKind.ULONGLONG,
TypeKind.UINT128):
self._enum_value = \
conf.lib.clang_getEnumConstantDeclUnsignedValue(self)
else:
self._enum_value = conf.lib.clang_getEnumConstantDeclValue(self)
return self._enum_value
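# Illustrative sketch (comment only): for an ENUM_DECL cursor `enum_cursor`,
# the constants and their values can be listed like so:
#
#   for child in enum_cursor.get_children():
#       if child.kind == CursorKind.ENUM_CONSTANT_DECL:
#           print child.spelling, child.enum_value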
@property
def objc_type_encoding(self):
"""Return the Objective-C type encoding as a str."""
if not hasattr(self, '_objc_type_encoding'):
self._objc_type_encoding = \
conf.lib.clang_getDeclObjCTypeEncoding(self)
return self._objc_type_encoding
@property
def hash(self):
"""Returns a hash of the cursor as an int."""
if not hasattr(self, '_hash'):
self._hash = conf.lib.clang_hashCursor(self)
return self._hash
@property
def semantic_parent(self):
"""Return the semantic parent for this cursor."""
if not hasattr(self, '_semantic_parent'):
self._semantic_parent = conf.lib.clang_getCursorSemanticParent(self)
return self._semantic_parent
@property
def lexical_parent(self):
"""Return the lexical parent for this cursor."""
if not hasattr(self, '_lexical_parent'):
self._lexical_parent = conf.lib.clang_getCursorLexicalParent(self)
return self._lexical_parent
@property
def translation_unit(self):
"""Returns the TranslationUnit to which this Cursor belongs."""
# If this triggers an AttributeError, the instance was not properly
# created.
return self._tu
@property
def referenced(self):
"""
For a cursor that is a reference, returns a cursor
representing the entity that it references.
"""
if not hasattr(self, '_referenced'):
self._referenced = conf.lib.clang_getCursorReferenced(self)
return self._referenced
def get_arguments(self):
"""Return an iterator for accessing the arguments of this cursor."""
num_args = conf.lib.clang_Cursor_getNumArguments(self)
for i in range(0, num_args):
yield conf.lib.clang_Cursor_getArgument(self, i)
def get_children(self):
"""Return an iterator for accessing the children of this cursor."""
# FIXME: Expose iteration from CIndex, PR6125.
def visitor(child, parent, children):
# FIXME: Document this assertion in API.
# FIXME: There should just be an isNull method.
assert child != conf.lib.clang_getNullCursor()
# Create reference to TU so it isn't GC'd before Cursor.
child._tu = self._tu
children.append(child)
return 1 # continue
children = []
conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),
children)
return iter(children)
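# Illustrative sketch (comment only): get_children() is the usual way to walk
# the AST recursively, e.g. to print every declaration below a cursor:
#
#   def dump_decls(cursor, depth=0):
#       for child in cursor.get_children():
#           if child.kind.is_declaration():
#               print ' ' * depth + str(child.kind), child.spelling
#           dump_decls(child, depth + 1)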
def get_children_array(self):
"""Return an iterator for accessing the children of this cursor."""
# FIXME: Expose iteration from CIndex, PR6125.
def visitor(child, parent, children):
# FIXME: Document this assertion in API.
# FIXME: There should just be an isNull method.
assert child != conf.lib.clang_getNullCursor()
# Create reference to TU so it isn't GC'd before Cursor.
child._tu = self._tu
children.append(child)
return 1 # continue
children = []
conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),
children)
return children
def get_tokens(self):
"""Obtain Token instances formulating that compose this Cursor.
This is a generator for Token instances. It returns all tokens which
occupy the extent this cursor occupies.
"""
return TokenGroup.get_tokens(self._tu, self.extent)
def is_bitfield(self):
"""
Check if the field is a bitfield.
"""
return conf.lib.clang_Cursor_isBitField(self)
def get_bitfield_width(self):
"""
Retrieve the width of a bitfield.
"""
return conf.lib.clang_getFieldDeclBitWidth(self)
def get_access_specifier(self):
assert self.kind == CursorKind.CXX_ACCESS_SPEC_DECL
return conf.lib.clang_getCXXAccessSpecifier(self)
@staticmethod
def from_result(res, fn, args):
assert isinstance(res, Cursor)
# FIXME: There should just be an isNull method.
if res == conf.lib.clang_getNullCursor():
return None
# Store a reference to the TU in the Python object so it won't get GC'd
# before the Cursor.
tu = None
for arg in args:
if isinstance(arg, TranslationUnit):
tu = arg
break
if hasattr(arg, 'translation_unit'):
tu = arg.translation_unit
break
assert tu is not None
res._tu = tu
return res
@staticmethod
def from_cursor_result(res, fn, args):
assert isinstance(res, Cursor)
if res == conf.lib.clang_getNullCursor():
return None
res._tu = args[0]._tu
return res
### Type Kinds ###
class TypeKind(object):
"""
Describes the kind of type.
"""
# The unique kind objects, indexed by id.
_kinds = []
_name_map = None
def __init__(self, value):
if value >= len(TypeKind._kinds):
TypeKind._kinds += [None] * (value - len(TypeKind._kinds) + 1)
if TypeKind._kinds[value] is not None:
raise ValueError('TypeKind already loaded')
self.value = value
TypeKind._kinds[value] = self
TypeKind._name_map = None
def from_param(self):
return self.value
@property
def name(self):
"""Get the enumeration name of this cursor kind."""
if self._name_map is None:
self._name_map = {}
for key,value in TypeKind.__dict__.items():
if isinstance(value,TypeKind):
self._name_map[value] = key
return self._name_map[self]
@property
def spelling(self):
"""Retrieve the spelling of this TypeKind."""
return conf.lib.clang_getTypeKindSpelling(self.value)
@staticmethod
def from_id(id):
if id >= len(TypeKind._kinds) or TypeKind._kinds[id] is None:
raise ValueError('Unknown type kind %d' % id)
return TypeKind._kinds[id]
def __repr__(self):
return 'TypeKind.%s' % (self.name,)
TypeKind.INVALID = TypeKind(0)
TypeKind.UNEXPOSED = TypeKind(1)
TypeKind.VOID = TypeKind(2)
TypeKind.BOOL = TypeKind(3)
TypeKind.CHAR_U = TypeKind(4)
TypeKind.UCHAR = TypeKind(5)
TypeKind.CHAR16 = TypeKind(6)
TypeKind.CHAR32 = TypeKind(7)
TypeKind.USHORT = TypeKind(8)
TypeKind.UINT = TypeKind(9)
TypeKind.ULONG = TypeKind(10)
TypeKind.ULONGLONG = TypeKind(11)
TypeKind.UINT128 = TypeKind(12)
TypeKind.CHAR_S = TypeKind(13)
TypeKind.SCHAR = TypeKind(14)
TypeKind.WCHAR = TypeKind(15)
TypeKind.SHORT = TypeKind(16)
TypeKind.INT = TypeKind(17)
TypeKind.LONG = TypeKind(18)
TypeKind.LONGLONG = TypeKind(19)
TypeKind.INT128 = TypeKind(20)
TypeKind.FLOAT = TypeKind(21)
TypeKind.DOUBLE = TypeKind(22)
TypeKind.LONGDOUBLE = TypeKind(23)
TypeKind.NULLPTR = TypeKind(24)
TypeKind.OVERLOAD = TypeKind(25)
TypeKind.DEPENDENT = TypeKind(26)
TypeKind.OBJCID = TypeKind(27)
TypeKind.OBJCCLASS = TypeKind(28)
TypeKind.OBJCSEL = TypeKind(29)
TypeKind.COMPLEX = TypeKind(100)
TypeKind.POINTER = TypeKind(101)
TypeKind.BLOCKPOINTER = TypeKind(102)
TypeKind.LVALUEREFERENCE = TypeKind(103)
TypeKind.RVALUEREFERENCE = TypeKind(104)
TypeKind.RECORD = TypeKind(105)
TypeKind.ENUM = TypeKind(106)
TypeKind.TYPEDEF = TypeKind(107)
TypeKind.OBJCINTERFACE = TypeKind(108)
TypeKind.OBJCOBJECTPOINTER = TypeKind(109)
TypeKind.FUNCTIONNOPROTO = TypeKind(110)
TypeKind.FUNCTIONPROTO = TypeKind(111)
TypeKind.CONSTANTARRAY = TypeKind(112)
TypeKind.VECTOR = TypeKind(113)
TypeKind.INCOMPLETEARRAY = TypeKind(114)
TypeKind.MEMBERPOINTER = TypeKind(117)
class Type(Structure):
"""
The type of an element in the abstract syntax tree.
"""
_fields_ = [("_kind_id", c_int), ("data", c_void_p * 2)]
@property
def kind(self):
"""Return the kind of this type."""
return TypeKind.from_id(self._kind_id)
def argument_types(self):
"""Retrieve a container for the non-variadic arguments for this type.
The returned object is iterable and indexable. Each item in the
container is a Type instance.
"""
class ArgumentsIterator(collections.Sequence):
def __init__(self, parent):
self.parent = parent
self.length = None
def __len__(self):
if self.length is None:
self.length = conf.lib.clang_getNumArgTypes(self.parent)
return self.length
def __getitem__(self, key):
# FIXME Support slice objects.
if not isinstance(key, int):
raise TypeError("Must supply a non-negative int.")
if key < 0:
raise IndexError("Only non-negative indexes are accepted.")
if key >= len(self):
raise IndexError("Index greater than container length: "
"%d > %d" % ( key, len(self) ))
result = conf.lib.clang_getArgType(self.parent, key)
if result.kind == TypeKind.INVALID:
raise IndexError("Argument could not be retrieved.")
return result
assert self.kind == TypeKind.FUNCTIONPROTO
return ArgumentsIterator(self)
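# Illustrative sketch (comment only): for a Type `proto` describing a function
# prototype, the non-variadic parameter types can be inspected like so:
#
#   if proto.kind == TypeKind.FUNCTIONPROTO:
#       for arg_type in proto.argument_types():
#           print arg_type.kind, arg_type.get_canonical().kind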
@property
def element_type(self):
"""Retrieve the Type of elements within this Type.
If accessed on a type that is not an array, complex, or vector type, an
exception will be raised.
"""
result = conf.lib.clang_getElementType(self)
if result.kind == TypeKind.INVALID:
raise Exception('Element type not available on this type.')
return result
@property
def element_count(self):
"""Retrieve the number of elements in this type.
Returns an int.
If the Type is not an array or vector, this raises.
"""
result = conf.lib.clang_getNumElements(self)
if result < 0:
raise Exception('Type does not have elements.')
return result
@property
def translation_unit(self):
"""The TranslationUnit to which this Type is associated."""
# If this triggers an AttributeError, the instance was not properly
# instantiated.
return self._tu
@staticmethod
def from_result(res, fn, args):
assert isinstance(res, Type)
tu = None
for arg in args:
if hasattr(arg, 'translation_unit'):
tu = arg.translation_unit
break
assert tu is not None
res._tu = tu
return res
def get_canonical(self):
"""
Return the canonical type for a Type.
Clang's type system explicitly models typedefs and all the
ways a specific type can be represented. The canonical type
is the underlying type with all the "sugar" removed. For
example, if 'T' is a typedef for 'int', the canonical type for
'T' would be 'int'.
"""
return conf.lib.clang_getCanonicalType(self)
def is_const_qualified(self):
"""Determine whether a Type has the "const" qualifier set.
This does not look through typedefs that may have added "const"
at a different level.
"""
return conf.lib.clang_isConstQualifiedType(self)
def is_volatile_qualified(self):
"""Determine whether a Type has the "volatile" qualifier set.
This does not look through typedefs that may have added "volatile"
at a different level.
"""
return conf.lib.clang_isVolatileQualifiedType(self)
def is_restrict_qualified(self):
"""Determine whether a Type has the "restrict" qualifier set.
This does not look through typedefs that may have added "restrict" at
a different level.
"""
return conf.lib.clang_isRestrictQualifiedType(self)
def is_function_variadic(self):
"""Determine whether this function Type is a variadic function type."""
assert self.kind == TypeKind.FUNCTIONPROTO
return conf.lib.clang_isFunctionTypeVariadic(self)
def is_pod(self):
"""Determine whether this Type represents plain old data (POD)."""
return conf.lib.clang_isPODType(self)
def get_pointee(self):
"""
For pointer types, returns the type of the pointee.
"""
return conf.lib.clang_getPointeeType(self)
def get_declaration(self):
"""
Return the cursor for the declaration of the given type.
"""
return conf.lib.clang_getTypeDeclaration(self)
def get_result(self):
"""
Retrieve the result type associated with a function type.
"""
return conf.lib.clang_getResultType(self)
def get_array_element_type(self):
"""
Retrieve the type of the elements of the array type.
"""
return conf.lib.clang_getArrayElementType(self)
def get_array_size(self):
"""
Retrieve the size of the constant array.
"""
return conf.lib.clang_getArraySize(self)
def get_align(self):
"""
Retrieve the alignment of the record.
"""
return conf.lib.clang_Type_getAlignOf(self)
def get_size(self):
"""
Retrieve the size of the record.
"""
return conf.lib.clang_Type_getSizeOf(self)
def get_offset(self, fieldname):
"""
Retrieve the offset of a field in the record.
"""
return conf.lib.clang_Type_getOffsetOf(self, c_char_p(fieldname))
def __eq__(self, other):
if type(other) != type(self):
return False
return conf.lib.clang_equalTypes(self, other)
def __ne__(self, other):
return not self.__eq__(other)
## CIndex Objects ##
# CIndex objects (derived from ClangObject) are essentially lightweight
# wrappers attached to some underlying object, which is exposed via CIndex as
# a void*.
class ClangObject(object):
"""
A helper for Clang objects. This class helps act as an intermediary for
the ctypes library and the Clang CIndex library.
"""
def __init__(self, obj):
assert isinstance(obj, c_object_p) and obj
self.obj = self._as_parameter_ = obj
def from_param(self):
return self._as_parameter_
class _CXUnsavedFile(Structure):
"""Helper for passing unsaved file arguments."""
_fields_ = [("name", c_char_p), ("contents", c_char_p), ('length', c_ulong)]
# Function calls through the Python interface are rather slow. Fortunately,
# for most symbols, we do not need to perform a function call: their spelling
# never changes and is consequently provided by this spelling cache.
SpellingCache = {
# 0: CompletionChunk.Kind("Optional"),
# 1: CompletionChunk.Kind("TypedText"),
# 2: CompletionChunk.Kind("Text"),
# 3: CompletionChunk.Kind("Placeholder"),
# 4: CompletionChunk.Kind("Informative"),
# 5 : CompletionChunk.Kind("CurrentParameter"),
6: '(', # CompletionChunk.Kind("LeftParen"),
7: ')', # CompletionChunk.Kind("RightParen"),
8: '[', # CompletionChunk.Kind("LeftBracket"),
9: ']', # CompletionChunk.Kind("RightBracket"),
10: '{', # CompletionChunk.Kind("LeftBrace"),
11: '}', # CompletionChunk.Kind("RightBrace"),
12: '<', # CompletionChunk.Kind("LeftAngle"),
13: '>', # CompletionChunk.Kind("RightAngle"),
14: ', ', # CompletionChunk.Kind("Comma"),
# 15: CompletionChunk.Kind("ResultType"),
16: ':', # CompletionChunk.Kind("Colon"),
17: ';', # CompletionChunk.Kind("SemiColon"),
18: '=', # CompletionChunk.Kind("Equal"),
19: ' ', # CompletionChunk.Kind("HorizontalSpace"),
# 20: CompletionChunk.Kind("VerticalSpace")
}
class CompletionChunk:
class Kind:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return "<ChunkKind: %s>" % self
def __init__(self, completionString, key):
self.cs = completionString
self.key = key
self.__kindNumberCache = -1
def __repr__(self):
return "{'" + self.spelling + "', " + str(self.kind) + "}"
@CachedProperty
def spelling(self):
if self.__kindNumber in SpellingCache:
return SpellingCache[self.__kindNumber]
return conf.lib.clang_getCompletionChunkText(self.cs, self.key).spelling
# We do not use @CachedProperty here, as the manual implementation is
# apparently still significantly faster. Please profile carefully if you
# would like to add CachedProperty back.
@property
def __kindNumber(self):
if self.__kindNumberCache == -1:
self.__kindNumberCache = \
conf.lib.clang_getCompletionChunkKind(self.cs, self.key)
return self.__kindNumberCache
@CachedProperty
def kind(self):
return completionChunkKindMap[self.__kindNumber]
@CachedProperty
def string(self):
res = conf.lib.clang_getCompletionChunkCompletionString(self.cs,
self.key)
if res:
return CompletionString(res)
else:
return None
def isKindOptional(self):
return self.__kindNumber == 0
def isKindTypedText(self):
return self.__kindNumber == 1
def isKindPlaceHolder(self):
return self.__kindNumber == 3
def isKindInformative(self):
return self.__kindNumber == 4
def isKindResultType(self):
return self.__kindNumber == 15
completionChunkKindMap = {
0: CompletionChunk.Kind("Optional"),
1: CompletionChunk.Kind("TypedText"),
2: CompletionChunk.Kind("Text"),
3: CompletionChunk.Kind("Placeholder"),
4: CompletionChunk.Kind("Informative"),
5: CompletionChunk.Kind("CurrentParameter"),
6: CompletionChunk.Kind("LeftParen"),
7: CompletionChunk.Kind("RightParen"),
8: CompletionChunk.Kind("LeftBracket"),
9: CompletionChunk.Kind("RightBracket"),
10: CompletionChunk.Kind("LeftBrace"),
11: CompletionChunk.Kind("RightBrace"),
12: CompletionChunk.Kind("LeftAngle"),
13: CompletionChunk.Kind("RightAngle"),
14: CompletionChunk.Kind("Comma"),
15: CompletionChunk.Kind("ResultType"),
16: CompletionChunk.Kind("Colon"),
17: CompletionChunk.Kind("SemiColon"),
18: CompletionChunk.Kind("Equal"),
19: CompletionChunk.Kind("HorizontalSpace"),
20: CompletionChunk.Kind("VerticalSpace")}
class CompletionString(ClangObject):
class Availability:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return "<Availability: %s>" % self
def __len__(self):
return self.num_chunks
@CachedProperty
def num_chunks(self):
return conf.lib.clang_getNumCompletionChunks(self.obj)
def __getitem__(self, key):
if self.num_chunks <= key:
raise IndexError
return CompletionChunk(self.obj, key)
@property
def priority(self):
return conf.lib.clang_getCompletionPriority(self.obj)
@property
def availability(self):
res = conf.lib.clang_getCompletionAvailability(self.obj)
return availabilityKinds[res]
@property
def briefComment(self):
if conf.function_exists("clang_getCompletionBriefComment"):
return conf.lib.clang_getCompletionBriefComment(self.obj)
return _CXString()
def __repr__(self):
return " | ".join([str(a) for a in self]) \
+ " || Priority: " + str(self.priority) \
+ " || Availability: " + str(self.availability) \
+ " || Brief comment: " + str(self.briefComment.spelling)
availabilityKinds = {
0: CompletionChunk.Kind("Available"),
1: CompletionChunk.Kind("Deprecated"),
2: CompletionChunk.Kind("NotAvailable"),
3: CompletionChunk.Kind("NotAccessible")}
class CodeCompletionResult(Structure):
_fields_ = [('cursorKind', c_int), ('completionString', c_object_p)]
def __repr__(self):
return str(CompletionString(self.completionString))
@property
def kind(self):
return CursorKind.from_id(self.cursorKind)
@property
def string(self):
return CompletionString(self.completionString)
class CCRStructure(Structure):
_fields_ = [('results', POINTER(CodeCompletionResult)),
('numResults', c_int)]
def __len__(self):
return self.numResults
def __getitem__(self, key):
if len(self) <= key:
raise IndexError
return self.results[key]
class CodeCompletionResults(ClangObject):
def __init__(self, ptr):
assert isinstance(ptr, POINTER(CCRStructure)) and ptr
self.ptr = self._as_parameter_ = ptr
def from_param(self):
return self._as_parameter_
def __del__(self):
conf.lib.clang_disposeCodeCompleteResults(self)
@property
def results(self):
return self.ptr.contents
@property
def diagnostics(self):
class DiagnosticsItr:
def __init__(self, ccr):
self.ccr = ccr
def __len__(self):
return int(\
conf.lib.clang_codeCompleteGetNumDiagnostics(self.ccr))
def __getitem__(self, key):
return conf.lib.clang_codeCompleteGetDiagnostic(self.ccr, key)
return DiagnosticsItr(self)
class Index(ClangObject):
"""
The Index type provides the primary interface to the Clang CIndex library,
primarily by providing an interface for reading and parsing translation
units.
"""
@staticmethod
def create(excludeDecls=False):
"""
Create a new Index.
Parameters:
excludeDecls -- Exclude local declarations from translation units.
"""
return Index(conf.lib.clang_createIndex(excludeDecls, 0))
def __del__(self):
conf.lib.clang_disposeIndex(self)
def read(self, path):
"""Load a TranslationUnit from the given AST file."""
return TranslationUnit.from_ast(path, self)
def parse(self, path, args=None, unsaved_files=None, options = 0):
"""Load the translation unit from the given source code file by running
clang and generating the AST before loading. Additional command line
parameters can be passed to clang via the args parameter.
In-memory contents for files can be provided by passing a list of pairs
as unsaved_files; the first item of each pair should be the filename to
be mapped and the second should be the contents to be substituted for
that file. The contents may be passed as strings or file objects.
If an error was encountered during parsing, a TranslationUnitLoadError
will be raised.
"""
return TranslationUnit.from_source(path, args, unsaved_files, options,
self)
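# Illustrative usage sketch (comment only), assuming a file named 'sample.c'
# exists on disk; the filename and flags are placeholders:
#
#   index = Index.create()
#   tu = index.parse('sample.c', args=['-std=c99'])
#   for diag in tu.diagnostics:
#       print diag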
class TranslationUnit(ClangObject):
"""Represents a source code translation unit.
This is one of the main types in the API. Any time you wish to interact
with Clang's representation of a source file, you typically start with a
translation unit.
"""
# Default parsing mode.
PARSE_NONE = 0
# Instruct the parser to create a detailed processing record containing
# metadata not normally retained.
PARSE_DETAILED_PROCESSING_RECORD = 1
# Indicates that the translation unit is incomplete. This is typically used
# when parsing headers.
PARSE_INCOMPLETE = 2
# Instruct the parser to create a pre-compiled preamble for the translation
# unit. This caches the preamble (included files at top of source file).
# This is useful if the translation unit will be reparsed and you don't
# want to incur the overhead of reparsing the preamble.
PARSE_PRECOMPILED_PREAMBLE = 4
# Cache code completion information on parse. This adds time to parsing but
# speeds up code completion.
PARSE_CACHE_COMPLETION_RESULTS = 8
# Flags with values 16 and 32 are deprecated and intentionally omitted.
# Do not parse function bodies. This is useful if you only care about
# searching for declarations/definitions.
PARSE_SKIP_FUNCTION_BODIES = 64
# Used to indicate that brief documentation comments should be included
# into the set of code completions returned from this translation unit.
PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION = 128
@classmethod
def from_source(cls, filename, args=None, unsaved_files=None, options=0,
index=None):
"""Create a TranslationUnit by parsing source.
This is capable of processing source code both from files on the
filesystem as well as in-memory contents.
Command-line arguments that would be passed to clang are specified as
a list via args. These can be used to specify include paths, warnings,
etc. e.g. ["-Wall", "-I/path/to/include"].
In-memory file content can be provided via unsaved_files. This is an
iterable of 2-tuples. The first element is the str filename. The
second element defines the content. Content can be provided as str
source code or as file objects (anything with a read() method). If
a file object is being used, content will be read until EOF and the
read cursor will not be reset to its original position.
options is a bitwise or of TranslationUnit.PARSE_XXX flags which will
control parsing behavior.
index is an Index instance to utilize. If not provided, a new Index
will be created for this TranslationUnit.
To parse source from the filesystem, the filename of the file to parse
is specified by the filename argument. Or, filename could be None and
the args list would contain the filename(s) to parse.
To parse source from an in-memory buffer, set filename to the virtual
filename you wish to associate with this source (e.g. "test.c"). The
contents of that file are then provided in unsaved_files.
If an error occurs, a TranslationUnitLoadError is raised.
Please note that a TranslationUnit with parser errors may be returned.
It is the caller's responsibility to check tu.diagnostics for errors.
Also note that Clang infers the source language from the extension of
the input filename. If you pass in source code containing a C++ class
declaration with the filename "test.c" parsing will fail.
"""
if args is None:
args = []
if unsaved_files is None:
unsaved_files = []
if index is None:
index = Index.create()
args_array = None
if len(args) > 0:
args_array = (c_char_p * len(args))(* args)
unsaved_array = None
if len(unsaved_files) > 0:
unsaved_array = (_CXUnsavedFile * len(unsaved_files))()
for i, (name, contents) in enumerate(unsaved_files):
if hasattr(contents, "read"):
contents = contents.read()
unsaved_array[i].name = name
unsaved_array[i].contents = contents
unsaved_array[i].length = len(contents)
ptr = conf.lib.clang_parseTranslationUnit(index, filename, args_array,
len(args), unsaved_array,
len(unsaved_files), options)
if not ptr:
raise TranslationUnitLoadError("Error parsing translation unit.")
return cls(ptr, index=index)
@classmethod
def from_ast_file(cls, filename, index=None):
"""Create a TranslationUnit instance from a saved AST file.
A previously-saved AST file (provided with -emit-ast or
TranslationUnit.save()) is loaded from the filename specified.
If the file cannot be loaded, a TranslationUnitLoadError will be
raised.
index is optional and is the Index instance to use. If not provided,
a default Index will be created.
"""
if index is None:
index = Index.create()
ptr = conf.lib.clang_createTranslationUnit(index, filename)
if not ptr:
raise TranslationUnitLoadError(filename)
return cls(ptr=ptr, index=index)
def __init__(self, ptr, index):
"""Create a TranslationUnit instance.
TranslationUnits should be created using one of the from_* @classmethod
functions above. __init__ is only called internally.
"""
assert isinstance(index, Index)
ClangObject.__init__(self, ptr)
def __del__(self):
conf.lib.clang_disposeTranslationUnit(self)
@property
def cursor(self):
"""Retrieve the cursor that represents the given translation unit."""
return conf.lib.clang_getTranslationUnitCursor(self)
@property
def spelling(self):
"""Get the original translation unit source file name."""
return conf.lib.clang_getTranslationUnitSpelling(self)
def get_includes(self):
"""
Return an iterable sequence of FileInclusion objects that describe the
sequence of inclusions in a translation unit. The first object in
this sequence is always the input file. Note that this method will not
recursively iterate over header files included through precompiled
headers.
"""
def visitor(fobj, lptr, depth, includes):
if depth > 0:
loc = lptr.contents
includes.append(FileInclusion(loc.file, File(fobj), loc, depth))
# Automatically adapt CIndex/ctype pointers to python objects
includes = []
conf.lib.clang_getInclusions(self,
callbacks['translation_unit_includes'](visitor), includes)
return iter(includes)
def get_file(self, filename):
"""Obtain a File from this translation unit."""
return File.from_name(self, filename)
def get_location(self, filename, position):
"""Obtain a SourceLocation for a file in this translation unit.
The position can be specified by passing:
- Integer file offset. Initial file offset is 0.
- 2-tuple of (line number, column number). Lines and columns are 1-based,
so the initial file position is (1, 1).
"""
f = self.get_file(filename)
if isinstance(position, int):
return SourceLocation.from_offset(self, f, position)
return SourceLocation.from_position(self, f, position[0], position[1])
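# Illustrative sketch (comment only): both accepted position forms, using a
# placeholder filename that is part of the translation unit:
#
#   by_offset = tu.get_location('sample.c', 42)
#   by_line_col = tu.get_location('sample.c', (3, 5))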
def get_extent(self, filename, locations):
"""Obtain a SourceRange from this translation unit.
The bounds of the SourceRange must ultimately be defined by a start and
end SourceLocation. For the locations argument, you can pass:
- 2 SourceLocation instances in a 2-tuple or list.
- 2 int file offsets via a 2-tuple or list.
- 2 2-tuple or lists of (line, column) pairs in a 2-tuple or list.
e.g.
get_extent('foo.c', (5, 10))
get_extent('foo.c', ((1, 1), (1, 15)))
"""
f = self.get_file(filename)
if len(locations) < 2:
raise Exception('Must pass object with at least 2 elements')
start_location, end_location = locations
if hasattr(start_location, '__len__'):
start_location = SourceLocation.from_position(self, f,
start_location[0], start_location[1])
elif isinstance(start_location, int):
start_location = SourceLocation.from_offset(self, f,
start_location)
if hasattr(end_location, '__len__'):
end_location = SourceLocation.from_position(self, f,
end_location[0], end_location[1])
elif isinstance(end_location, int):
end_location = SourceLocation.from_offset(self, f, end_location)
assert isinstance(start_location, SourceLocation)
assert isinstance(end_location, SourceLocation)
return SourceRange.from_locations(start_location, end_location)
@property
def diagnostics(self):
"""
Return an iterable (and indexable) object containing the diagnostics.
"""
class DiagIterator:
def __init__(self, tu):
self.tu = tu
def __len__(self):
return int(conf.lib.clang_getNumDiagnostics(self.tu))
def __getitem__(self, key):
diag = conf.lib.clang_getDiagnostic(self.tu, key)
if not diag:
raise IndexError
return Diagnostic(diag)
return DiagIterator(self)
def reparse(self, unsaved_files=None, options=0):
"""
Reparse an already parsed translation unit.
In-memory contents for files can be provided by passing a list of pairs
as unsaved_files; the first item of each pair should be the filename to
be mapped and the second should be the contents to be substituted for
that file. The contents may be passed as strings or file objects.
"""
if unsaved_files is None:
unsaved_files = []
unsaved_files_array = 0
if len(unsaved_files):
unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
for i,(name,value) in enumerate(unsaved_files):
if not isinstance(value, str):
# FIXME: It would be great to support an efficient version
# of this, one day.
value = value.read()
if not isinstance(value, str):
raise TypeError('Unexpected unsaved file contents.')
unsaved_files_array[i].name = name
unsaved_files_array[i].contents = value
unsaved_files_array[i].length = len(value)
ptr = conf.lib.clang_reparseTranslationUnit(self, len(unsaved_files),
unsaved_files_array, options)
def save(self, filename):
"""Saves the TranslationUnit to a file.
This is equivalent to passing -emit-ast to the clang frontend. The
saved file can be loaded back into a TranslationUnit. Or, if it
corresponds to a header, it can be used as a pre-compiled header file.
If an error occurs while saving, a TranslationUnitSaveError is raised.
If the error was TranslationUnitSaveError.ERROR_INVALID_TU, this means
the constructed TranslationUnit was not valid at time of save. In this
case, the reason(s) why should be available via
TranslationUnit.diagnostics().
filename -- The path to save the translation unit to.
"""
options = conf.lib.clang_defaultSaveOptions(self)
result = int(conf.lib.clang_saveTranslationUnit(self, filename,
options))
if result != 0:
raise TranslationUnitSaveError(result,
'Error saving TranslationUnit.')
def codeComplete(self, path, line, column, unsaved_files=None,
include_macros=False, include_code_patterns=False,
include_brief_comments=False):
"""
Code complete in this translation unit.
In-memory contents for files can be provided by passing a list of pairs
as unsaved_files; the first item of each pair should be the filename to
be mapped and the second should be the contents to be substituted for
that file. The contents may be passed as strings or file objects.
"""
options = 0
if include_macros:
options += 1
if include_code_patterns:
options += 2
if include_brief_comments:
options += 4
if unsaved_files is None:
unsaved_files = []
unsaved_files_array = 0
if len(unsaved_files):
unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
for i,(name,value) in enumerate(unsaved_files):
if not isinstance(value, str):
# FIXME: It would be great to support an efficient version
# of this, one day.
value = value.read()
if not isinstance(value, str):
raise TypeError('Unexpected unsaved file contents.')
unsaved_files_array[i].name = name
unsaved_files_array[i].contents = value
unsaved_files_array[i].length = len(value)
ptr = conf.lib.clang_codeCompleteAt(self, path, line, column,
unsaved_files_array, len(unsaved_files), options)
if ptr:
return CodeCompletionResults(ptr)
return None
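# Illustrative sketch (comment only): completing inside an unsaved buffer; the
# file name, position, and contents are placeholders:
#
#   results = tu.codeComplete('sample.c', 4, 7,
#                             unsaved_files=[('sample.c', 'int main() { re')])
#   if results is not None:
#       for result in results.results:
#           print result.string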
def get_tokens(self, locations=None, extent=None):
"""Obtain tokens in this translation unit.
This is a generator for Token instances. The caller specifies a range
of source code to obtain tokens for. The range can be specified as a
2-tuple of SourceLocation or as a SourceRange. If both are defined,
behavior is undefined.
"""
if locations is not None:
extent = SourceRange(start=locations[0], end=locations[1])
return TokenGroup.get_tokens(self, extent)
class File(ClangObject):
"""
The File class represents a particular source file that is part of a
translation unit.
"""
@staticmethod
def from_name(translation_unit, file_name):
"""Retrieve a file handle within the given translation unit."""
return File(conf.lib.clang_getFile(translation_unit, file_name))
@property
def name(self):
"""Return the complete file and path name of the file."""
return conf.lib.clang_getCString(conf.lib.clang_getFileName(self))
@property
def time(self):
"""Return the last modification time of the file."""
return conf.lib.clang_getFileTime(self)
def __str__(self):
return self.name
def __repr__(self):
return "<File: %s>" % (self.name)
@staticmethod
def from_cursor_result(res, fn, args):
assert isinstance(res, File)
# Copy a reference to the TranslationUnit to prevent premature GC.
res._tu = args[0]._tu
return res
class FileInclusion(object):
"""
The FileInclusion class represents the inclusion of one source file by
another via a '#include' directive or as the input file for the translation
unit. This class provides information about the included file, the including
file, the location of the '#include' directive and the depth of the included
file in the stack. Note that the input file has depth 0.
"""
def __init__(self, src, tgt, loc, depth):
self.source = src
self.include = tgt
self.location = loc
self.depth = depth
@property
def is_input_file(self):
"""True if the included file is the input file."""
return self.depth == 0
class CompilationDatabaseError(Exception):
"""Represents an error that occurred when working with a CompilationDatabase
Each error is associated to an enumerated value, accessible under
e.cdb_error. Consumers can compare the value with one of the ERROR_
constants in this class.
"""
# An unknown error occurred.
ERROR_UNKNOWN = 0
# The database could not be loaded
ERROR_CANNOTLOADDATABASE = 1
def __init__(self, enumeration, message):
assert isinstance(enumeration, int)
if enumeration > 1:
raise Exception("Encountered undefined CompilationDatabase error "
"constant: %d. Please file a bug to have this "
"value supported." % enumeration)
self.cdb_error = enumeration
Exception.__init__(self, 'Error %d: %s' % (enumeration, message))
class CompileCommand(object):
"""Represents the compile command used to build a file"""
def __init__(self, cmd, ccmds):
self.cmd = cmd
# Keep a reference to the originating CompileCommands
# to prevent garbage collection
self.ccmds = ccmds
@property
def directory(self):
"""Get the working directory for this CompileCommand"""
return conf.lib.clang_CompileCommand_getDirectory(self.cmd)
@property
def arguments(self):
"""
Get an iterable object providing each argument in the
command line for the compiler invocation as a _CXString.
        Invariant: the first argument is the compiler executable.
"""
length = conf.lib.clang_CompileCommand_getNumArgs(self.cmd)
for i in xrange(length):
yield conf.lib.clang_CompileCommand_getArg(self.cmd, i)
class CompileCommands(object):
"""
    CompileCommands is an iterable object containing all CompileCommand
    instances that can be used to build a specific file.
"""
def __init__(self, ccmds):
self.ccmds = ccmds
def __del__(self):
conf.lib.clang_CompileCommands_dispose(self.ccmds)
def __len__(self):
return int(conf.lib.clang_CompileCommands_getSize(self.ccmds))
def __getitem__(self, i):
cc = conf.lib.clang_CompileCommands_getCommand(self.ccmds, i)
if not cc:
raise IndexError
return CompileCommand(cc, self)
@staticmethod
def from_result(res, fn, args):
if not res:
return None
return CompileCommands(res)
class CompilationDatabase(ClangObject):
"""
The CompilationDatabase is a wrapper class around
    clang::tooling::CompilationDatabase.
It enables querying how a specific source file can be built.
"""
def __del__(self):
conf.lib.clang_CompilationDatabase_dispose(self)
@staticmethod
def from_result(res, fn, args):
if not res:
raise CompilationDatabaseError(0,
"CompilationDatabase loading failed")
return CompilationDatabase(res)
@staticmethod
def fromDirectory(buildDir):
"""Builds a CompilationDatabase from the database found in buildDir"""
errorCode = c_uint()
try:
cdb = conf.lib.clang_CompilationDatabase_fromDirectory(buildDir,
byref(errorCode))
except CompilationDatabaseError as e:
raise CompilationDatabaseError(int(errorCode.value),
"CompilationDatabase loading failed")
return cdb
def getCompileCommands(self, filename):
"""
Get an iterable object providing all the CompileCommands available to
build filename. Returns None if filename is not found in the database.
"""
return conf.lib.clang_CompilationDatabase_getCompileCommands(self,
filename)
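# Illustrative sketch (not part of the bindings themselves): the typical
# CompilationDatabase workflow described above -- load the database found in a
# build directory and iterate the compile commands for one source file. The
# paths are placeholders; the helper is never called at import time.
def _example_query_compilation_database():
    try:
        cdb = CompilationDatabase.fromDirectory('/path/to/build')
    except CompilationDatabaseError as e:
        print 'failed to load database (error %d)' % e.cdb_error
        return
    commands = cdb.getCompileCommands('/path/to/file.cpp')
    if commands is None:
        print 'file not found in the database'
        return
    for command in commands:
        print command.directory, list(command.arguments)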
class Token(Structure):
"""Represents a single token from the preprocessor.
Tokens are effectively segments of source code. Source code is first parsed
into tokens before being converted into the AST and Cursors.
Tokens are obtained from parsed TranslationUnit instances. You currently
can't create tokens manually.
"""
_fields_ = [
('int_data', c_uint * 4),
('ptr_data', c_void_p)
]
@property
def spelling(self):
"""The spelling of this token.
This is the textual representation of the token in source.
"""
return conf.lib.clang_getTokenSpelling(self._tu, self)
@property
def kind(self):
"""Obtain the TokenKind of the current token."""
return TokenKind.from_value(conf.lib.clang_getTokenKind(self))
@property
def location(self):
"""The SourceLocation this Token occurs at."""
return conf.lib.clang_getTokenLocation(self._tu, self)
@property
def extent(self):
"""The SourceRange this Token occupies."""
return conf.lib.clang_getTokenExtent(self._tu, self)
@property
def cursor(self):
"""The Cursor this Token corresponds to."""
cursor = Cursor()
conf.lib.clang_annotateTokens(self._tu, byref(self), 1, byref(cursor))
return cursor
# Now comes the plumbing to hook up the C library.
# Register callback types in common container.
callbacks['translation_unit_includes'] = CFUNCTYPE(None, c_object_p,
POINTER(SourceLocation), c_uint, py_object)
callbacks['cursor_visit'] = CFUNCTYPE(c_int, Cursor, Cursor, py_object)
# Functions strictly alphabetical order.
functionList = [
("clang_annotateTokens",
[TranslationUnit, POINTER(Token), c_uint, POINTER(Cursor)]),
("clang_CompilationDatabase_dispose",
[c_object_p]),
("clang_CompilationDatabase_fromDirectory",
[c_char_p, POINTER(c_uint)],
c_object_p,
CompilationDatabase.from_result),
("clang_CompilationDatabase_getCompileCommands",
[c_object_p, c_char_p],
c_object_p,
CompileCommands.from_result),
("clang_CompileCommands_dispose",
[c_object_p]),
("clang_CompileCommands_getCommand",
[c_object_p, c_uint],
c_object_p),
("clang_CompileCommands_getSize",
[c_object_p],
c_uint),
("clang_CompileCommand_getArg",
[c_object_p, c_uint],
_CXString,
_CXString.from_result),
("clang_CompileCommand_getDirectory",
[c_object_p],
_CXString,
_CXString.from_result),
("clang_CompileCommand_getNumArgs",
[c_object_p],
c_uint),
("clang_codeCompleteAt",
[TranslationUnit, c_char_p, c_int, c_int, c_void_p, c_int, c_int],
POINTER(CCRStructure)),
("clang_codeCompleteGetDiagnostic",
[CodeCompletionResults, c_int],
Diagnostic),
("clang_codeCompleteGetNumDiagnostics",
[CodeCompletionResults],
c_int),
("clang_createIndex",
[c_int, c_int],
c_object_p),
("clang_createTranslationUnit",
[Index, c_char_p],
c_object_p),
("clang_CXXMethod_isStatic",
[Cursor],
bool),
("clang_CXXMethod_isVirtual",
[Cursor],
bool),
("clang_defaultSaveOptions",
[TranslationUnit],
c_uint),
("clang_disposeCodeCompleteResults",
[CodeCompletionResults]),
# ("clang_disposeCXTUResourceUsage",
# [CXTUResourceUsage]),
("clang_disposeDiagnostic",
[Diagnostic]),
("clang_disposeIndex",
[Index]),
("clang_disposeString",
[_CXString]),
("clang_disposeTokens",
[TranslationUnit, POINTER(Token), c_uint]),
("clang_disposeTranslationUnit",
[TranslationUnit]),
("clang_equalCursors",
[Cursor, Cursor],
bool),
("clang_equalLocations",
[SourceLocation, SourceLocation],
bool),
("clang_equalRanges",
[SourceRange, SourceRange],
bool),
("clang_equalTypes",
[Type, Type],
bool),
("clang_getArgType",
[Type, c_uint],
Type,
Type.from_result),
("clang_getArrayElementType",
[Type],
Type,
Type.from_result),
("clang_getArraySize",
[Type],
c_longlong),
("clang_getFieldDeclBitWidth",
[Cursor],
c_int),
("clang_getCanonicalCursor",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getCanonicalType",
[Type],
Type,
Type.from_result),
("clang_getCompletionAvailability",
[c_void_p],
c_int),
("clang_getCompletionBriefComment",
[c_void_p],
_CXString),
("clang_getCompletionChunkCompletionString",
[c_void_p, c_int],
c_object_p),
("clang_getCompletionChunkKind",
[c_void_p, c_int],
c_int),
("clang_getCompletionChunkText",
[c_void_p, c_int],
_CXString),
("clang_getCompletionPriority",
[c_void_p],
c_int),
("clang_getCursorAvailability",
[Cursor],
AvailabilityKind.from_id),
("clang_getCString",
[_CXString],
c_char_p),
("clang_getCursor",
[TranslationUnit, SourceLocation],
Cursor),
("clang_getCursorDefinition",
[Cursor],
Cursor,
Cursor.from_result),
("clang_getCursorDisplayName",
[Cursor],
_CXString,
_CXString.from_result),
("clang_getCursorExtent",
[Cursor],
SourceRange),
("clang_getCursorLexicalParent",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getCursorLocation",
[Cursor],
SourceLocation),
("clang_getCursorReferenced",
[Cursor],
Cursor,
Cursor.from_result),
("clang_getCursorReferenceNameRange",
[Cursor, c_uint, c_uint],
SourceRange),
("clang_getCursorSemanticParent",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getCursorSpelling",
[Cursor],
_CXString,
_CXString.from_result),
("clang_getCursorType",
[Cursor],
Type,
Type.from_result),
("clang_getCursorUSR",
[Cursor],
_CXString,
_CXString.from_result),
# ("clang_getCXTUResourceUsage",
# [TranslationUnit],
# CXTUResourceUsage),
("clang_getCXXAccessSpecifier",
[Cursor],
AccessSpecifierKind.from_id),
("clang_getDeclObjCTypeEncoding",
[Cursor],
_CXString,
_CXString.from_result),
("clang_getDiagnostic",
[c_object_p, c_uint],
c_object_p),
("clang_getDiagnosticCategory",
[Diagnostic],
c_uint),
("clang_getDiagnosticCategoryName",
[c_uint],
_CXString,
_CXString.from_result),
("clang_getDiagnosticFixIt",
[Diagnostic, c_uint, POINTER(SourceRange)],
_CXString,
_CXString.from_result),
("clang_getDiagnosticLocation",
[Diagnostic],
SourceLocation),
("clang_getDiagnosticNumFixIts",
[Diagnostic],
c_uint),
("clang_getDiagnosticNumRanges",
[Diagnostic],
c_uint),
("clang_getDiagnosticOption",
[Diagnostic, POINTER(_CXString)],
_CXString,
_CXString.from_result),
("clang_getDiagnosticRange",
[Diagnostic, c_uint],
SourceRange),
("clang_getDiagnosticSeverity",
[Diagnostic],
c_int),
("clang_getDiagnosticSpelling",
[Diagnostic],
_CXString,
_CXString.from_result),
("clang_getElementType",
[Type],
Type,
Type.from_result),
("clang_getEnumConstantDeclUnsignedValue",
[Cursor],
c_ulonglong),
("clang_getEnumConstantDeclValue",
[Cursor],
c_longlong),
("clang_getEnumDeclIntegerType",
[Cursor],
Type,
Type.from_result),
("clang_getFile",
[TranslationUnit, c_char_p],
c_object_p),
("clang_getFileName",
[File],
_CXString), # TODO go through _CXString.from_result?
("clang_getFileTime",
[File],
c_uint),
("clang_getIBOutletCollectionType",
[Cursor],
Type,
Type.from_result),
("clang_getIncludedFile",
[Cursor],
File,
File.from_cursor_result),
("clang_getInclusions",
[TranslationUnit, callbacks['translation_unit_includes'], py_object]),
("clang_getInstantiationLocation",
[SourceLocation, POINTER(c_object_p), POINTER(c_uint), POINTER(c_uint),
POINTER(c_uint)]),
("clang_getLocation",
[TranslationUnit, File, c_uint, c_uint],
SourceLocation),
("clang_getLocationForOffset",
[TranslationUnit, File, c_uint],
SourceLocation),
("clang_getNullCursor",
None,
Cursor),
("clang_getNumArgTypes",
[Type],
c_uint),
("clang_getNumCompletionChunks",
[c_void_p],
c_int),
("clang_getNumDiagnostics",
[c_object_p],
c_uint),
("clang_getNumElements",
[Type],
c_longlong),
("clang_getNumOverloadedDecls",
[Cursor],
c_uint),
("clang_getOverloadedDecl",
[Cursor, c_uint],
Cursor,
Cursor.from_cursor_result),
("clang_getPointeeType",
[Type],
Type,
Type.from_result),
("clang_getRange",
[SourceLocation, SourceLocation],
SourceRange),
("clang_getRangeEnd",
[SourceRange],
SourceLocation),
("clang_getRangeStart",
[SourceRange],
SourceLocation),
("clang_getResultType",
[Type],
Type,
Type.from_result),
("clang_getSpecializedCursorTemplate",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getTemplateCursorKind",
[Cursor],
c_uint),
("clang_getTokenExtent",
[TranslationUnit, Token],
SourceRange),
("clang_getTokenKind",
[Token],
c_uint),
("clang_getTokenLocation",
[TranslationUnit, Token],
SourceLocation),
("clang_getTokenSpelling",
[TranslationUnit, Token],
_CXString,
_CXString.from_result),
("clang_getTranslationUnitCursor",
[TranslationUnit],
Cursor,
Cursor.from_result),
("clang_getTranslationUnitSpelling",
[TranslationUnit],
_CXString,
_CXString.from_result),
("clang_getTUResourceUsageName",
[c_uint],
c_char_p),
("clang_getTypeDeclaration",
[Type],
Cursor,
Cursor.from_result),
("clang_getTypedefDeclUnderlyingType",
[Cursor],
Type,
Type.from_result),
("clang_getTypeKindSpelling",
[c_uint],
_CXString,
_CXString.from_result),
("clang_hashCursor",
[Cursor],
c_uint),
("clang_isAttribute",
[CursorKind],
bool),
("clang_isConstQualifiedType",
[Type],
bool),
("clang_isCursorDefinition",
[Cursor],
bool),
("clang_isDeclaration",
[CursorKind],
bool),
("clang_isExpression",
[CursorKind],
bool),
("clang_isFileMultipleIncludeGuarded",
[TranslationUnit, File],
bool),
("clang_isFunctionTypeVariadic",
[Type],
bool),
("clang_isInvalid",
[CursorKind],
bool),
("clang_isPODType",
[Type],
bool),
("clang_isPreprocessing",
[CursorKind],
bool),
("clang_isReference",
[CursorKind],
bool),
("clang_isRestrictQualifiedType",
[Type],
bool),
("clang_isStatement",
[CursorKind],
bool),
("clang_isTranslationUnit",
[CursorKind],
bool),
("clang_isUnexposed",
[CursorKind],
bool),
("clang_isVirtualBase",
[Cursor],
bool),
("clang_isVolatileQualifiedType",
[Type],
bool),
("clang_parseTranslationUnit",
[Index, c_char_p, c_void_p, c_int, c_void_p, c_int, c_int],
c_object_p),
("clang_reparseTranslationUnit",
[TranslationUnit, c_int, c_void_p, c_int],
c_int),
("clang_saveTranslationUnit",
[TranslationUnit, c_char_p, c_uint],
c_int),
("clang_tokenize",
[TranslationUnit, SourceRange, POINTER(POINTER(Token)), POINTER(c_uint)]),
("clang_visitChildren",
[Cursor, callbacks['cursor_visit'], py_object],
c_uint),
("clang_Cursor_getNumArguments",
[Cursor],
c_int),
("clang_Cursor_getArgument",
[Cursor, c_uint],
Cursor,
Cursor.from_result),
("clang_Cursor_isBitField",
[Cursor],
bool),
("clang_Type_getAlignOf",
[Type],
c_longlong),
("clang_Type_getOffsetOf",
[Type, c_char_p],
c_longlong),
("clang_Type_getSizeOf",
[Type],
c_ulonglong),
]
class LibclangError(Exception):
def __init__(self, message):
self.m = message
def __str__(self):
return self.m
def register_function(lib, item, ignore_errors):
    # A function may not exist if these bindings are used with an older or
# incompatible version of libclang.so.
try:
func = getattr(lib, item[0])
except AttributeError as e:
msg = str(e) + ". Please ensure that your python bindings are "\
"compatible with your libclang.so version."
if ignore_errors:
return
raise LibclangError(msg)
if len(item) >= 2:
func.argtypes = item[1]
if len(item) >= 3:
func.restype = item[2]
if len(item) == 4:
func.errcheck = item[3]
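# Note: each functionList entry above has the shape
# (name, argtypes[, restype[, errcheck]]); register_function() applies as many
# of those pieces as are present, so e.g. ("clang_getCString", [_CXString],
# c_char_p) sets argtypes and restype but installs no errcheck wrapper.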
def register_functions(lib, ignore_errors):
"""Register function prototypes with a libclang library instance.
This must be called as part of library instantiation so Python knows how
to call out to the shared library.
"""
def register(item):
return register_function(lib, item, ignore_errors)
map(register, functionList)
class Config:
library_path = None
library_file = None
compatibility_check = True
loaded = False
@staticmethod
def set_library_path(path):
"""Set the path in which to search for libclang"""
if Config.loaded:
raise Exception("library path must be set before before using " \
"any other functionalities in libclang.")
Config.library_path = path
@staticmethod
def set_library_file(filename):
"""Set the exact location of libclang"""
if Config.loaded:
raise Exception("library file must be set before before using " \
"any other functionalities in libclang.")
Config.library_file = filename
@staticmethod
def set_compatibility_check(check_status):
""" Perform compatibility check when loading libclang
The python bindings are only tested and evaluated with the version of
libclang they are provided with. To ensure correct behavior a (limited)
        compatibility check is performed when loading the bindings. This check
        throws an exception as soon as it fails.
In case these bindings are used with an older version of libclang, parts
that have been stable between releases may still work. Users of the
python bindings can disable the compatibility check. This will cause
the python bindings to load, even though they are written for a newer
        version of libclang. Failures then arise only when unsupported or
        incompatible features are accessed. Users are responsible for verifying
        that the features they rely on are available and compatible across
        libclang versions.
"""
if Config.loaded:
raise Exception("compatibility_check must be set before before " \
"using any other functionalities in libclang.")
Config.compatibility_check = check_status
@CachedProperty
def lib(self):
lib = self.get_cindex_library()
register_functions(lib, not Config.compatibility_check)
Config.loaded = True
return lib
def get_filename(self):
if Config.library_file:
return Config.library_file
import platform
name = platform.system()
if name == 'Darwin':
file = 'libclang.dylib'
elif name == 'Windows':
file = 'libclang.dll'
else:
file = 'libclang.so'
if Config.library_path:
file = Config.library_path + '/' + file
return file
def get_cindex_library(self):
try:
library = cdll.LoadLibrary(self.get_filename())
except OSError as e:
msg = str(e) + ". To provide a path to libclang use " \
"Config.set_library_path() or " \
"Config.set_library_file()."
raise LibclangError(msg)
return library
def function_exists(self, name):
try:
getattr(self.lib, name)
except AttributeError:
return False
return True
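# Illustrative sketch (not part of the bindings themselves): the Config knobs
# above must be used before the first access to conf.lib loads the library.
# The paths below are placeholders; the helper is never called at import time.
def _example_configure_libclang():
    Config.set_library_path('/usr/lib/llvm')                  # directory to search
    # Config.set_library_file('/usr/lib/llvm/libclang.so')    # or the exact file
    Config.set_compatibility_check(False)                     # optional, see above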
def register_enumerations():
for name, value in clang.enumerations.TokenKinds:
TokenKind.register(value, name)
conf = Config()
register_enumerations()
__all__ = [
'Config',
'CodeCompletionResults',
'CompilationDatabase',
'CompileCommands',
'CompileCommand',
'CursorKind',
'Cursor',
'Diagnostic',
'File',
'FixIt',
'Index',
'SourceLocation',
'SourceRange',
'TokenKind',
'Token',
'TranslationUnitLoadError',
'TranslationUnit',
'TypeKind',
'Type',
]
|
tarthy6/dozer-thesis | refs/heads/master | scripts/test-OLD/force-network-video.py | 3 | print 30*'*'+' WARNING '+30*'*'+'\nFor hardware/driver/...? reasons related to 3d, this script might\nsometimes crash when the first snapshot is taken with message such as\n\n\tQGLContext::makeCurrent(): Failed.\n\nor\n\n\tFatal IO error 11 (Resource temporarily unavailable) on X server :0.0.\n\nA workaround is to open the 3d view by hand, rather than having it\nopen by SnapshotEngine automatically when the first snapshot is\nabout to be taken. Sometimes only the message is displayed,\nwithout crash.\n'+25*'*'+' This is a known bug. '+25*'*'
TriaxialTest(noFiles=True).load()
from woo import qt,utils
O.engines=O.engines+[
qt.SnapshotEngine(fileBase=O.tmpFilename(),label='snapshotter',iterPeriod=5,ignoreErrors=False),
PyRunner(iterPeriod=500,command='finito()')
]
rr=qt.Renderer()
rr.shape,rr.intrPhys=False,True
def finito():
"""This function will be called after 500 steps. Since SnapshotEngine waits for a new 3d view to open,
it must run after the script has finished and the command line appears
(see https://bugs.launchpad.net/woo/+bug/622669).
For that reason, O.run() is at the end of the script and this function will be called
once we want to exit really.
"""
utils.makeVideo(snapshotter.snapshots,out='/tmp/video.avi')
print "Video saved in /tmp/video.avi"
import sys
sys.exit(0)
O.run()
|
nealtodd/django | refs/heads/master | tests/builtin_server/tests.py | 368 | from __future__ import unicode_literals
import sys
import traceback
from io import BytesIO
from unittest import TestCase
from wsgiref import simple_server
# If data is too large, socket will choke, so write chunks no larger than 32MB
# at a time. The rationale behind the 32MB can be found on Django's Trac:
# https://code.djangoproject.com/ticket/5596#comment:4
MAX_SOCKET_CHUNK_SIZE = 32 * 1024 * 1024 # 32 MB
class ServerHandler(simple_server.ServerHandler, object):
error_status = str("500 INTERNAL SERVER ERROR")
def write(self, data):
"""'write()' callable as specified by PEP 3333"""
assert isinstance(data, bytes), "write() argument must be bytestring"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
data = BytesIO(data)
for chunk in iter(lambda: data.read(MAX_SOCKET_CHUNK_SIZE), b''):
self._write(chunk)
self._flush()
def error_output(self, environ, start_response):
super(ServerHandler, self).error_output(environ, start_response)
return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
# Backport of http://hg.python.org/cpython/rev/d5af1b235dab. See #16241.
# This can be removed when support for Python <= 2.7.3 is deprecated.
def finish_response(self):
try:
if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
finally:
self.close()
class DummyHandler(object):
def log_request(self, *args, **kwargs):
pass
class FileWrapperHandler(ServerHandler):
def __init__(self, *args, **kwargs):
super(FileWrapperHandler, self).__init__(*args, **kwargs)
self.request_handler = DummyHandler()
self._used_sendfile = False
def sendfile(self):
self._used_sendfile = True
return True
def wsgi_app(environ, start_response):
start_response(str('200 OK'), [(str('Content-Type'), str('text/plain'))])
return [b'Hello World!']
def wsgi_app_file_wrapper(environ, start_response):
start_response(str('200 OK'), [(str('Content-Type'), str('text/plain'))])
return environ['wsgi.file_wrapper'](BytesIO(b'foo'))
class WSGIFileWrapperTests(TestCase):
"""
    Test that the wsgi.file_wrapper works for the builtin server.
    Tests for #9659: wsgi.file_wrapper in the builtin server.
    We need to mock a couple of handlers and keep track of what
    gets called when using a couple of kinds of WSGI apps.
"""
def test_file_wrapper_uses_sendfile(self):
env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
handler = FileWrapperHandler(None, BytesIO(), BytesIO(), env)
handler.run(wsgi_app_file_wrapper)
self.assertTrue(handler._used_sendfile)
self.assertEqual(handler.stdout.getvalue(), b'')
self.assertEqual(handler.stderr.getvalue(), b'')
def test_file_wrapper_no_sendfile(self):
env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
handler = FileWrapperHandler(None, BytesIO(), BytesIO(), env)
handler.run(wsgi_app)
self.assertFalse(handler._used_sendfile)
self.assertEqual(handler.stdout.getvalue().splitlines()[-1], b'Hello World!')
self.assertEqual(handler.stderr.getvalue(), b'')
class WriteChunkCounterHandler(ServerHandler):
"""
Server handler that counts the number of chunks written after headers were
sent. Used to make sure large response body chunking works properly.
"""
def __init__(self, *args, **kwargs):
super(WriteChunkCounterHandler, self).__init__(*args, **kwargs)
self.request_handler = DummyHandler()
self.headers_written = False
self.write_chunk_counter = 0
def send_headers(self):
super(WriteChunkCounterHandler, self).send_headers()
self.headers_written = True
def _write(self, data):
if self.headers_written:
self.write_chunk_counter += 1
self.stdout.write(data)
def send_big_data_app(environ, start_response):
start_response(str('200 OK'), [(str('Content-Type'), str('text/plain'))])
# Return a blob of data that is 1.5 times the maximum chunk size.
return [b'x' * (MAX_SOCKET_CHUNK_SIZE + MAX_SOCKET_CHUNK_SIZE // 2)]
class ServerHandlerChunksProperly(TestCase):
"""
Test that the ServerHandler chunks data properly.
Tests for #18972: The logic that performs the math to break data into
32MB (MAX_SOCKET_CHUNK_SIZE) chunks was flawed, BUT it didn't actually
cause any problems.
"""
def test_chunked_data(self):
env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
handler = WriteChunkCounterHandler(None, BytesIO(), BytesIO(), env)
handler.run(send_big_data_app)
self.assertEqual(handler.write_chunk_counter, 2)
|
xia0pin9/capstone | refs/heads/next | bindings/python/capstone/xcore_const.py | 37 | # For Capstone Engine. AUTO-GENERATED FILE, DO NOT EDIT [xcore_const.py]
# Operand type for instruction's operands
XCORE_OP_INVALID = 0
XCORE_OP_REG = 1
XCORE_OP_IMM = 2
XCORE_OP_MEM = 3
# XCore registers
XCORE_REG_INVALID = 0
XCORE_REG_CP = 1
XCORE_REG_DP = 2
XCORE_REG_LR = 3
XCORE_REG_SP = 4
XCORE_REG_R0 = 5
XCORE_REG_R1 = 6
XCORE_REG_R2 = 7
XCORE_REG_R3 = 8
XCORE_REG_R4 = 9
XCORE_REG_R5 = 10
XCORE_REG_R6 = 11
XCORE_REG_R7 = 12
XCORE_REG_R8 = 13
XCORE_REG_R9 = 14
XCORE_REG_R10 = 15
XCORE_REG_R11 = 16
# pseudo registers
XCORE_REG_PC = 17
XCORE_REG_SCP = 18
XCORE_REG_SSR = 19
XCORE_REG_ET = 20
XCORE_REG_ED = 21
XCORE_REG_SED = 22
XCORE_REG_KEP = 23
XCORE_REG_KSP = 24
XCORE_REG_ID = 25
XCORE_REG_ENDING = 26
# XCore instruction
XCORE_INS_INVALID = 0
XCORE_INS_ADD = 1
XCORE_INS_ANDNOT = 2
XCORE_INS_AND = 3
XCORE_INS_ASHR = 4
XCORE_INS_BAU = 5
XCORE_INS_BITREV = 6
XCORE_INS_BLA = 7
XCORE_INS_BLAT = 8
XCORE_INS_BL = 9
XCORE_INS_BF = 10
XCORE_INS_BT = 11
XCORE_INS_BU = 12
XCORE_INS_BRU = 13
XCORE_INS_BYTEREV = 14
XCORE_INS_CHKCT = 15
XCORE_INS_CLRE = 16
XCORE_INS_CLRPT = 17
XCORE_INS_CLRSR = 18
XCORE_INS_CLZ = 19
XCORE_INS_CRC8 = 20
XCORE_INS_CRC32 = 21
XCORE_INS_DCALL = 22
XCORE_INS_DENTSP = 23
XCORE_INS_DGETREG = 24
XCORE_INS_DIVS = 25
XCORE_INS_DIVU = 26
XCORE_INS_DRESTSP = 27
XCORE_INS_DRET = 28
XCORE_INS_ECALLF = 29
XCORE_INS_ECALLT = 30
XCORE_INS_EDU = 31
XCORE_INS_EEF = 32
XCORE_INS_EET = 33
XCORE_INS_EEU = 34
XCORE_INS_ENDIN = 35
XCORE_INS_ENTSP = 36
XCORE_INS_EQ = 37
XCORE_INS_EXTDP = 38
XCORE_INS_EXTSP = 39
XCORE_INS_FREER = 40
XCORE_INS_FREET = 41
XCORE_INS_GETD = 42
XCORE_INS_GET = 43
XCORE_INS_GETN = 44
XCORE_INS_GETR = 45
XCORE_INS_GETSR = 46
XCORE_INS_GETST = 47
XCORE_INS_GETTS = 48
XCORE_INS_INCT = 49
XCORE_INS_INIT = 50
XCORE_INS_INPW = 51
XCORE_INS_INSHR = 52
XCORE_INS_INT = 53
XCORE_INS_IN = 54
XCORE_INS_KCALL = 55
XCORE_INS_KENTSP = 56
XCORE_INS_KRESTSP = 57
XCORE_INS_KRET = 58
XCORE_INS_LADD = 59
XCORE_INS_LD16S = 60
XCORE_INS_LD8U = 61
XCORE_INS_LDA16 = 62
XCORE_INS_LDAP = 63
XCORE_INS_LDAW = 64
XCORE_INS_LDC = 65
XCORE_INS_LDW = 66
XCORE_INS_LDIVU = 67
XCORE_INS_LMUL = 68
XCORE_INS_LSS = 69
XCORE_INS_LSUB = 70
XCORE_INS_LSU = 71
XCORE_INS_MACCS = 72
XCORE_INS_MACCU = 73
XCORE_INS_MJOIN = 74
XCORE_INS_MKMSK = 75
XCORE_INS_MSYNC = 76
XCORE_INS_MUL = 77
XCORE_INS_NEG = 78
XCORE_INS_NOT = 79
XCORE_INS_OR = 80
XCORE_INS_OUTCT = 81
XCORE_INS_OUTPW = 82
XCORE_INS_OUTSHR = 83
XCORE_INS_OUTT = 84
XCORE_INS_OUT = 85
XCORE_INS_PEEK = 86
XCORE_INS_REMS = 87
XCORE_INS_REMU = 88
XCORE_INS_RETSP = 89
XCORE_INS_SETCLK = 90
XCORE_INS_SET = 91
XCORE_INS_SETC = 92
XCORE_INS_SETD = 93
XCORE_INS_SETEV = 94
XCORE_INS_SETN = 95
XCORE_INS_SETPSC = 96
XCORE_INS_SETPT = 97
XCORE_INS_SETRDY = 98
XCORE_INS_SETSR = 99
XCORE_INS_SETTW = 100
XCORE_INS_SETV = 101
XCORE_INS_SEXT = 102
XCORE_INS_SHL = 103
XCORE_INS_SHR = 104
XCORE_INS_SSYNC = 105
XCORE_INS_ST16 = 106
XCORE_INS_ST8 = 107
XCORE_INS_STW = 108
XCORE_INS_SUB = 109
XCORE_INS_SYNCR = 110
XCORE_INS_TESTCT = 111
XCORE_INS_TESTLCL = 112
XCORE_INS_TESTWCT = 113
XCORE_INS_TSETMR = 114
XCORE_INS_START = 115
XCORE_INS_WAITEF = 116
XCORE_INS_WAITET = 117
XCORE_INS_WAITEU = 118
XCORE_INS_XOR = 119
XCORE_INS_ZEXT = 120
XCORE_INS_ENDING = 121
# Group of XCore instructions
XCORE_GRP_INVALID = 0
# Generic groups
XCORE_GRP_JUMP = 1
XCORE_GRP_ENDING = 2
|
tjsavage/rototutor_djangononrel | refs/heads/master | djangotoolbox/__init__.py | 12133432 | |
ejesse/gobblegobble | refs/heads/master | tests/__init__.py | 12133432 | |
pearsontechnology/st2contrib | refs/heads/master | packs/dimensiondata/actions/create_vm_mcp1.py | 6 | from libcloud.compute.base import NodeAuthPassword
from libcloud.common.dimensiondata import DimensionDataServerCpuSpecification
from lib import actions
__all__ = [
'CreateVMMcp1Action',
]
class CreateVMMcp1Action(actions.BaseAction):
def run(self, region, location, network_id, image_name,
name,
description, is_started, password,
memory_gb, cpu_count, cpu_speed, cores_per_socket):
driver = self._get_compute_driver(region)
root_pw = NodeAuthPassword(password)
location = driver.ex_get_location_by_id(location)
images = driver.list_images(location=location)
image = list(filter(lambda x: x.name == image_name,
images))[0]
networks = driver.list_networks(location)
network = list(filter(lambda x: x.id == network_id,
networks))[0]
cpu = None
if cpu_count is not None:
cpu = DimensionDataServerCpuSpecification(
cpu_count=cpu_count,
cores_per_socket=cores_per_socket,
performance=cpu_speed
)
node = driver.create_node(name=name, image=image,
auth=root_pw,
ex_description=description,
ex_network=network,
ex_cpu_specification=cpu,
ex_memory_gb=memory_gb,
ex_is_started=is_started)
return self.resultsets.formatter(node)
|
shiquanwang/numba | refs/heads/master | numba/minivect/__init__.py | 1 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os
root = os.path.dirname(os.path.abspath(__file__))
def get_include():
return os.path.join(root, 'include')
|
MichaelKohler/bedrock | refs/heads/master | tests/functional/test_styleguide.py | 11 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from pages.styleguide import StyleGuidePage
@pytest.mark.skipif(reason='https://webqa-ci.mozilla.com/job/bedrock.dev.win10.ie/102/')
@pytest.mark.smoke
@pytest.mark.nondestructive
def test_open_close_navigation(base_url, selenium):
page = StyleGuidePage(selenium, base_url).open()
identity = page.menu[0]
identity.expand()
assert identity.is_displayed
mozilla = identity.sub_menu[0]
mozilla.expand()
assert mozilla.is_displayed
firefox_family = identity.sub_menu[1]
firefox_family.expand()
assert not mozilla.is_displayed
assert firefox_family.is_displayed
identity.collapse()
assert not identity.is_displayed
assert not firefox_family.is_displayed
|
WebSpider/SickRage | refs/heads/master | lib/hachoir_parser/misc/chm.py | 74 | """
InfoTech Storage Format (ITSF) parser, used by Microsoft's HTML Help (.chm)
Document:
- Microsoft's HTML Help (.chm) format
http://www.wotsit.org (search "chm")
- chmlib library
http://www.jedrea.com/chmlib/
- Unofficial CHM Spec
http://savannah.nongnu.org/projects/chmspec
- Microsoft's HTML Help (.chm) format
http://www.speakeasy.org/~russotto/chm/chmformat.html
Author: Victor Stinner
Creation date: 2007-03-04
"""
from hachoir_core.field import (Field, FieldSet, ParserError, RootSeekableFieldSet,
Int32, UInt16, UInt32, UInt64,
RawBytes, PaddingBytes,
Enum, String)
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_parser import HachoirParser
from hachoir_parser.common.win32 import GUID
from hachoir_parser.common.win32_lang_id import LANGUAGE_ID
from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
class CWord(Field):
"""
Compressed double-word
"""
def __init__(self, parent, name, description=None):
Field.__init__(self, parent, name, 8, description)
endian = self._parent.endian
stream = self._parent.stream
addr = self.absolute_address
value = 0
byte = stream.readBits(addr, 8, endian)
while byte & 0x80:
value <<= 7
value += (byte & 0x7f)
self._size += 8
if 64 < self._size:
raise ParserError("CHM: CWord is limited to 64 bits")
addr += 8
byte = stream.readBits(addr, 8, endian)
value <<= 7
value += byte
self.createValue = lambda: value
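# Illustrative sketch (not used by the parser itself): the CWord encoding above
# stores 7 value bits per byte and uses the high bit as a continuation flag.
# The standalone helper below mirrors that logic over a plain byte string, so
# for example _decode_cword('\x81\x23') == (1 << 7) + 0x23 == 163.
def _decode_cword(data):
    value = 0
    for char in data:
        byte = ord(char)
        value = (value << 7) + (byte & 0x7f)
        if not byte & 0x80:
            break
    return value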
class Filesize_Header(FieldSet):
def createFields(self):
yield textHandler(UInt32(self, "unknown[]", "0x01FE"), hexadecimal)
yield textHandler(UInt32(self, "unknown[]", "0x0"), hexadecimal)
yield filesizeHandler(UInt64(self, "file_size"))
yield textHandler(UInt32(self, "unknown[]", "0x0"), hexadecimal)
yield textHandler(UInt32(self, "unknown[]", "0x0"), hexadecimal)
class ITSP(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["size"].value * 8
def createFields(self):
yield String(self, "magic", 4, "ITSP", charset="ASCII")
yield UInt32(self, "version", "Version (=1)")
yield filesizeHandler(UInt32(self, "size", "Length (in bytes) of the directory header (84)"))
yield UInt32(self, "unknown[]", "(=10)")
yield filesizeHandler(UInt32(self, "block_size", "Directory block size"))
yield UInt32(self, "density", "Density of quickref section, usually 2")
yield UInt32(self, "index_depth", "Depth of the index tree")
yield Int32(self, "nb_dir", "Chunk number of root index chunk")
yield UInt32(self, "first_pmgl", "Chunk number of first PMGL (listing) chunk")
yield UInt32(self, "last_pmgl", "Chunk number of last PMGL (listing) chunk")
yield Int32(self, "unknown[]", "-1")
yield UInt32(self, "nb_dir_chunk", "Number of directory chunks (total)")
yield Enum(UInt32(self, "lang_id", "Windows language ID"), LANGUAGE_ID)
yield GUID(self, "system_uuid", "{5D02926A-212E-11D0-9DF9-00A0C922E6EC}")
yield filesizeHandler(UInt32(self, "size2", "Same value than size"))
yield Int32(self, "unknown[]", "-1")
yield Int32(self, "unknown[]", "-1")
yield Int32(self, "unknown[]", "-1")
class ITSF(FieldSet):
def createFields(self):
yield String(self, "magic", 4, "ITSF", charset="ASCII")
yield UInt32(self, "version")
yield UInt32(self, "header_size", "Total header length (in bytes)")
yield UInt32(self, "one")
yield UInt32(self, "last_modified", "Lower 32 bits of the time expressed in units of 0.1 us")
yield Enum(UInt32(self, "lang_id", "Windows Language ID"), LANGUAGE_ID)
yield GUID(self, "dir_uuid", "{7C01FD10-7BAA-11D0-9E0C-00A0-C922-E6EC}")
yield GUID(self, "stream_uuid", "{7C01FD11-7BAA-11D0-9E0C-00A0-C922-E6EC}")
yield UInt64(self, "filesize_offset")
yield filesizeHandler(UInt64(self, "filesize_len"))
yield UInt64(self, "dir_offset")
yield filesizeHandler(UInt64(self, "dir_len"))
if 3 <= self["version"].value:
yield UInt64(self, "data_offset")
class PMGL_Entry(FieldSet):
def createFields(self):
yield CWord(self, "name_len")
yield String(self, "name", self["name_len"].value, charset="UTF-8")
yield CWord(self, "section", "Section number that the entry data is in.")
yield CWord(self, "start", "Start offset of the data")
yield filesizeHandler(CWord(self, "length", "Length of the data"))
def createDescription(self):
return "%s (%s)" % (self["name"].value, self["length"].display)
class PMGL(FieldSet):
def createFields(self):
# Header
yield String(self, "magic", 4, "PMGL", charset="ASCII")
yield filesizeHandler(Int32(self, "free_space",
"Length of free space and/or quickref area at end of directory chunk"))
yield Int32(self, "unknown")
yield Int32(self, "previous", "Chunk number of previous listing chunk")
yield Int32(self, "next", "Chunk number of previous listing chunk")
# Entries
stop = self.size - self["free_space"].value * 8
entry_count = 0
while self.current_size < stop:
yield PMGL_Entry(self, "entry[]")
entry_count+=1
# Padding
quickref_frequency = 1 + (1 << self["/dir/itsp/density"].value)
num_quickref = (entry_count // quickref_frequency)
if entry_count % quickref_frequency == 0:
num_quickref -= 1
print self.current_size//8, quickref_frequency, num_quickref
padding = (self["free_space"].value - (num_quickref*2+2))
if padding:
yield PaddingBytes(self, "padding", padding)
for i in range(num_quickref*quickref_frequency, 0, -quickref_frequency):
yield UInt16(self, "quickref[%i]"%i)
yield UInt16(self, "entry_count")
class PMGI_Entry(FieldSet):
def createFields(self):
yield CWord(self, "name_len")
yield String(self, "name", self["name_len"].value, charset="UTF-8")
yield CWord(self, "page")
def createDescription(self):
return "%s (page #%u)" % (self["name"].value, self["page"].value)
class PMGI(FieldSet):
def createFields(self):
yield String(self, "magic", 4, "PMGI", charset="ASCII")
yield filesizeHandler(UInt32(self, "free_space",
"Length of free space and/or quickref area at end of directory chunk"))
stop = self.size - self["free_space"].value * 8
while self.current_size < stop:
yield PMGI_Entry(self, "entry[]")
padding = (self.size - self.current_size) // 8
if padding:
yield PaddingBytes(self, "padding", padding)
class Directory(FieldSet):
def createFields(self):
yield ITSP(self, "itsp")
block_size = self["itsp/block_size"].value * 8
nb_dir = self["itsp/nb_dir"].value
if nb_dir < 0:
nb_dir = 1
for index in xrange(nb_dir):
yield PMGL(self, "pmgl[]", size=block_size)
if self.current_size < self.size:
yield PMGI(self, "pmgi", size=block_size)
class NameList(FieldSet):
def createFields(self):
yield UInt16(self, "length", "Length of name list in 2-byte blocks")
yield UInt16(self, "count", "Number of entries in name list")
for index in range(self["count"].value):
length=UInt16(self, "name_len[]", "Length of name in 2-byte blocks, excluding terminating null")
yield length
yield String(self, "name[]", length.value*2+2, charset="UTF-16-LE")
class ControlData(FieldSet):
def createFields(self):
yield UInt32(self, "count", "Number of DWORDS in this struct")
yield String(self, "type", 4, "Type of compression")
if self["type"].value!='LZXC': return
yield UInt32(self, "version", "Compression version")
version=self["version"].value
if version==1: block='bytes'
else: block='32KB blocks'
yield UInt32(self, "reset_interval", "LZX: Reset interval in %s"%block)
yield UInt32(self, "window_size", "LZX: Window size in %s"%block)
yield UInt32(self, "cache_size", "LZX: Cache size in %s"%block)
yield UInt32(self, "unknown[]")
class ResetTable(FieldSet):
def createFields(self):
yield UInt32(self, "unknown[]", "Version number?")
yield UInt32(self, "count", "Number of entries")
yield UInt32(self, "entry_size", "Size of each entry")
yield UInt32(self, "header_size", "Size of this header")
yield UInt64(self, "uncompressed_size")
yield UInt64(self, "compressed_size")
yield UInt64(self, "block_size", "Block size in bytes")
for i in xrange(self["count"].value):
yield UInt64(self, "block_location[]", "location in compressed data of 1st block boundary in uncompressed data")
class SystemEntry(FieldSet):
ENTRY_TYPE={0:"HHP: [OPTIONS]: Contents File",
1:"HHP: [OPTIONS]: Index File",
2:"HHP: [OPTIONS]: Default Topic",
3:"HHP: [OPTIONS]: Title",
4:"File Metadata",
5:"HHP: [OPTIONS]: Default Window",
6:"HHP: [OPTIONS]: Compiled file",
# 7 present only in files with Binary Index; unknown function
# 8 unknown function
9: "Version",
10: "Timestamp",
# 11 only in Binary TOC files
12: "Number of Info Types",
13: "#IDXHDR file",
# 14 unknown function
# 15 checksum??
16:"HHP: [OPTIONS]: Default Font",
}
def createFields(self):
yield Enum(UInt16(self, "type", "Type of entry"),self.ENTRY_TYPE)
yield UInt16(self, "length", "Length of entry")
yield RawBytes(self, "data", self["length"].value)
def createDescription(self):
return '#SYSTEM Entry, Type %s'%self["type"].display
class SystemFile(FieldSet):
def createFields(self):
yield UInt32(self, "version", "Either 2 or 3")
while self.current_size < self.size:
yield SystemEntry(self, "entry[]")
class ChmFile(HachoirParser, RootSeekableFieldSet):
MAGIC = "ITSF\3\0\0\0"
PARSER_TAGS = {
"id": "chm",
"category": "misc",
"file_ext": ("chm",),
"min_size": 4*8,
"magic": ((MAGIC, 0),),
"description": "Microsoft's HTML Help (.chm)",
}
endian = LITTLE_ENDIAN
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return "Invalid magic"
return True
def createFields(self):
yield ITSF(self, "itsf")
yield Filesize_Header(self, "file_size", size=self["itsf/filesize_len"].value*8)
self.seekByte(self["itsf/dir_offset"].value)
directory=Directory(self, "dir", size=self["itsf/dir_len"].value*8)
yield directory
otherentries = {}
for pmgl in directory.array("pmgl"):
for entry in pmgl.array("entry"):
if entry["section"].value != 0:
otherentries.setdefault(entry["section"].value,[]).append(entry)
continue
if entry["length"].value == 0:
continue
self.seekByte(self["itsf/data_offset"].value+entry["start"].value)
name = entry["name"].value
if name == "::DataSpace/NameList":
yield NameList(self, "name_list")
elif name.startswith('::DataSpace/Storage/'):
sectname = str(name.split('/')[2])
if name.endswith('/SpanInfo'):
yield UInt64(self, "%s_spaninfo"%sectname, "Size of uncompressed data in the %s section"%sectname)
elif name.endswith('/ControlData'):
yield ControlData(self, "%s_controldata"%sectname, "Data about the compression scheme", size=entry["length"].value*8)
elif name.endswith('/Transform/List'):
yield String(self, "%s_transform_list"%sectname, 38, description="Transform/List element", charset="UTF-16-LE")
elif name.endswith('/Transform/{7FC28940-9D31-11D0-9B27-00A0C91E9C7C}/InstanceData/ResetTable'):
yield ResetTable(self, "%s_reset_table"%sectname, "LZX Reset Table", size=entry["length"].value*8)
elif name.endswith('/Content'):
# eventually, a LZX wrapper will appear here, we hope!
yield RawBytes(self, "%s_content"%sectname, entry["length"].value, "Content for the %s section"%sectname)
else:
yield RawBytes(self, "entry_data[]", entry["length"].value, name)
elif name=="/#SYSTEM":
yield SystemFile(self, "system_file", size=entry["length"].value*8)
else:
yield RawBytes(self, "entry_data[]", entry["length"].value, name)
def getFile(self, filename):
page=0
if 'pmgi' in self['/dir']:
for entry in self['/dir/pmgi'].array('entry'):
if entry['name'].value <= filename:
page=entry['page'].value
pmgl=self['/dir/pmgl[%i]'%page]
for entry in pmgl.array('entry'):
if entry['name'].value == filename:
return entry
raise ParserError("File '%s' not found!"%filename)
def createContentSize(self):
return self["file_size/file_size"].value * 8
|
slohse/ansible | refs/heads/devel | lib/ansible/modules/network/f5/bigip_gtm_server.py | 9 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_server
short_description: Manages F5 BIG-IP GTM servers
description:
- Manage BIG-IP server configuration. This module is able to manipulate the server
definitions in a BIG-IP.
version_added: 2.5
options:
name:
description:
- The name of the server.
required: True
state:
description:
- The server state. If C(absent), an attempt to delete the server will be made.
This will only succeed if this server is not in use by a virtual server.
C(present) creates the server and enables it. If C(enabled), enable the server
if it exists. If C(disabled), create the server if needed, and set state to
C(disabled).
default: present
choices:
- present
- absent
- enabled
- disabled
datacenter:
description:
- Data center the server belongs to. When creating a new GTM server, this value
is required.
devices:
description:
- Lists the self IP addresses and translations for each device. When creating a
new GTM server, this value is required. This list is a complex list that
specifies a number of keys.
- The C(name) key specifies a name for the device. The device name must
be unique per server. This key is required.
- The C(address) key contains an IP address, or list of IP addresses, for the
destination server. This key is required.
- The C(translation) key contains an IP address to translate the C(address)
value above to. This key is optional.
- Specifying duplicate C(name) fields is a supported means of providing device
addresses. In this scenario, the addresses will be assigned to the C(name)'s list
of addresses.
server_type:
description:
- Specifies the server type. The server type determines the metrics that the
system can collect from the server. When creating a new GTM server, the default
value C(bigip) is used.
choices:
- alteon-ace-director
- cisco-css
- cisco-server-load-balancer
- generic-host
- radware-wsd
- windows-nt-4.0
- bigip
- cisco-local-director-v2
- extreme
- generic-load-balancer
- sun-solaris
- cacheflow
- cisco-local-director-v3
- foundry-server-iron
- netapp
- windows-2000-server
aliases:
- product
link_discovery:
description:
- Specifies whether the system auto-discovers the links for this server. When
creating a new GTM server, if this parameter is not specified, the default
value C(disabled) is used.
- If you set this parameter to C(enabled) or C(enabled-no-delete), you must
also ensure that the C(virtual_server_discovery) parameter is also set to
C(enabled) or C(enabled-no-delete).
choices:
- enabled
- disabled
- enabled-no-delete
virtual_server_discovery:
description:
- Specifies whether the system auto-discovers the virtual servers for this server.
When creating a new GTM server, if this parameter is not specified, the default
value C(disabled) is used.
choices:
- enabled
- disabled
- enabled-no-delete
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
iquery_options:
description:
- Specifies whether the Global Traffic Manager uses this BIG-IP
system to conduct a variety of probes before delegating traffic to it.
suboptions:
allow_path:
description:
- Specifies that the system verifies the logical network route between a data
center server and a local DNS server.
type: bool
allow_service_check:
description:
- Specifies that the system verifies that an application on a server is running,
by remotely running the application using an external service checker program.
type: bool
allow_snmp:
description:
- Specifies that the system checks the performance of a server running an SNMP
agent.
type: bool
version_added: 2.7
extends_documentation_fragment: f5
author:
- Robert Teller
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create server "GTM_Server"
bigip_gtm_server:
server: lb.mydomain.com
user: admin
password: secret
name: GTM_Server
datacenter: /Common/New York
server_type: bigip
link_discovery: disabled
virtual_server_discovery: disabled
devices:
- {'name': 'server_1', 'address': '1.1.1.1'}
- {'name': 'server_2', 'address': '2.2.2.1', 'translation':'192.168.2.1'}
- {'name': 'server_2', 'address': '2.2.2.2'}
- {'name': 'server_3', 'addresses': [{'address':'3.3.3.1'},{'address':'3.3.3.2'}]}
- {'name': 'server_4', 'addresses': [{'address':'4.4.4.1','translation':'192.168.14.1'}, {'address':'4.4.4.2'}]}
delegate_to: localhost
- name: Create server "GTM_Server" with expanded keys
bigip_gtm_server:
server: lb.mydomain.com
user: admin
password: secret
name: GTM_Server
datacenter: /Common/New York
server_type: bigip
link_discovery: disabled
virtual_server_discovery: disabled
devices:
- name: server_1
address: 1.1.1.1
- name: server_2
address: 2.2.2.1
translation: 192.168.2.1
- name: server_2
address: 2.2.2.2
- name: server_3
addresses:
- address: 3.3.3.1
- address: 3.3.3.2
- name: server_4
addresses:
- address: 4.4.4.1
translation: 192.168.14.1
- address: 4.4.4.2
delegate_to: localhost
'''
RETURN = r'''
link_discovery:
description: The new C(link_discovery) configured on the remote device.
returned: changed
type: string
sample: enabled
virtual_server_discovery:
description: The new C(virtual_server_discovery) name for the trap destination.
returned: changed
type: string
sample: disabled
server_type:
description: The new type of the server.
returned: changed
type: string
sample: bigip
datacenter:
description: The new C(datacenter) which the server is part of.
returned: changed
type: string
sample: datacenter01
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import fq_name
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import fq_name
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
pass
class Parameters(AnsibleF5Parameters):
api_map = {
'product': 'server_type',
'virtualServerDiscovery': 'virtual_server_discovery',
'linkDiscovery': 'link_discovery',
'addresses': 'devices',
'iqAllowPath': 'iquery_allow_path',
'iqAllowServiceCheck': 'iquery_allow_service_check',
'iqAllowSnmp': 'iquery_allow_snmp',
}
api_attributes = [
'linkDiscovery',
'virtualServerDiscovery',
'product',
'addresses',
'datacenter',
'enabled',
'disabled',
'iqAllowPath',
'iqAllowServiceCheck',
'iqAllowSnmp',
]
updatables = [
'link_discovery',
'virtual_server_discovery',
'server_type_and_devices',
'datacenter',
'state',
'iquery_allow_path',
'iquery_allow_service_check',
'iquery_allow_snmp',
]
returnables = [
'link_discovery',
'virtual_server_discovery',
'server_type',
'datacenter',
'enabled',
'iquery_allow_path',
'iquery_allow_service_check',
'iquery_allow_snmp',
]
class ApiParameters(Parameters):
@property
def devices(self):
if self._values['devices'] is None:
return None
return self._values['devices']
@property
def server_type(self):
if self._values['server_type'] is None:
return None
elif self._values['server_type'] in ['single-bigip', 'redundant-bigip']:
return 'bigip'
else:
return self._values['server_type']
@property
def raw_server_type(self):
if self._values['server_type'] is None:
return None
return self._values['server_type']
@property
def enabled(self):
if self._values['enabled'] is None:
return None
return True
@property
def disabled(self):
if self._values['disabled'] is None:
return None
return True
@property
def iquery_allow_path(self):
if self._values['iquery_allow_path'] is None:
return None
elif self._values['iquery_allow_path'] == 'yes':
return True
return False
@property
def iquery_allow_service_check(self):
if self._values['iquery_allow_service_check'] is None:
return None
elif self._values['iquery_allow_service_check'] == 'yes':
return True
return False
@property
def iquery_allow_snmp(self):
if self._values['iquery_allow_snmp'] is None:
return None
elif self._values['iquery_allow_snmp'] == 'yes':
return True
return False
class ModuleParameters(Parameters):
@property
def devices(self):
if self._values['devices'] is None:
return None
result = []
for device in self._values['devices']:
if not any(x for x in ['address', 'addresses'] if x in device):
raise F5ModuleError(
"The specified device list must contain an 'address' or 'addresses' key"
)
if 'address' in device:
translation = self._determine_translation(device)
name = device['address']
device_name = device['name']
result.append({
'name': name,
'deviceName': device_name,
'translation': translation
})
elif 'addresses' in device:
for address in device['addresses']:
translation = self._determine_translation(address)
name = address['address']
device_name = device['name']
result.append({
'name': name,
'deviceName': device_name,
'translation': translation
})
return result
def devices_list(self):
if self._values['devices'] is None:
return None
return self._values['devices']
@property
def enabled(self):
if self._values['state'] in ['present', 'enabled']:
return True
return False
@property
def datacenter(self):
if self._values['datacenter'] is None:
return None
return fq_name(self.partition, self._values['datacenter'])
def _determine_translation(self, device):
if 'translation' not in device:
return 'none'
return device['translation']
@property
def state(self):
if self._values['state'] == 'enabled':
return 'present'
return self._values['state']
@property
def iquery_allow_path(self):
if self._values['iquery_options'] is None:
return None
elif self._values['iquery_options']['allow_path'] is None:
return None
return self._values['iquery_options']['allow_path']
@property
def iquery_allow_service_check(self):
if self._values['iquery_options'] is None:
return None
elif self._values['iquery_options']['allow_service_check'] is None:
return None
return self._values['iquery_options']['allow_service_check']
@property
def iquery_allow_snmp(self):
if self._values['iquery_options'] is None:
return None
elif self._values['iquery_options']['allow_snmp'] is None:
return None
return self._values['iquery_options']['allow_snmp']
class Changes(Parameters):
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
class UsableChanges(Changes):
@property
def iquery_allow_path(self):
if self._values['iquery_allow_path'] is None:
return None
elif self._values['iquery_allow_path']:
return 'yes'
return 'no'
@property
def iquery_allow_service_check(self):
if self._values['iquery_allow_service_check'] is None:
return None
elif self._values['iquery_allow_service_check']:
return 'yes'
return 'no'
@property
def iquery_allow_snmp(self):
if self._values['iquery_allow_snmp'] is None:
return None
elif self._values['iquery_allow_snmp']:
return 'yes'
return 'no'
class ReportableChanges(Changes):
@property
def server_type(self):
if self._values['server_type'] in ['single-bigip', 'redundant-bigip']:
return 'bigip'
return self._values['server_type']
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
want = getattr(self.want, param)
try:
have = getattr(self.have, param)
if want != have:
return want
except AttributeError:
return want
def _discovery_constraints(self):
if self.want.virtual_server_discovery is None:
virtual_server_discovery = self.have.virtual_server_discovery
else:
virtual_server_discovery = self.want.virtual_server_discovery
if self.want.link_discovery is None:
link_discovery = self.have.link_discovery
else:
link_discovery = self.want.link_discovery
if link_discovery in ['enabled', 'enabled-no-delete'] and virtual_server_discovery == 'disabled':
raise F5ModuleError(
"Virtual server discovery must be enabled if link discovery is enabled"
)
def _devices_changed(self):
if self.want.devices is None and self.want.server_type is None:
return None
if self.want.devices is None:
devices = self.have.devices
else:
devices = self.want.devices
if self.have.devices is None:
have_devices = []
else:
have_devices = self.have.devices
if len(devices) == 0:
raise F5ModuleError(
"A GTM server must have at least one device associated with it."
)
want = [OrderedDict(sorted(d.items())) for d in devices]
have = [OrderedDict(sorted(d.items())) for d in have_devices]
if want != have:
return True
return False
def _server_type_changed(self):
if self.want.server_type is None:
self.want.update({'server_type': self.have.server_type})
if self.want.server_type != self.have.server_type:
return True
return False
@property
def link_discovery(self):
self._discovery_constraints()
if self.want.link_discovery != self.have.link_discovery:
return self.want.link_discovery
@property
def virtual_server_discovery(self):
self._discovery_constraints()
if self.want.virtual_server_discovery != self.have.virtual_server_discovery:
return self.want.virtual_server_discovery
def _handle_current_server_type_and_devices(self, devices_change, server_change):
result = {}
if devices_change:
result['devices'] = self.want.devices
if server_change:
result['server_type'] = self.want.server_type
return result
def _handle_legacy_server_type_and_devices(self, devices_change, server_change):
result = {}
if server_change and devices_change:
result['devices'] = self.want.devices
if len(self.want.devices) > 1 and self.want.server_type == 'bigip':
if self.have.raw_server_type != 'redundant-bigip':
result['server_type'] = 'redundant-bigip'
elif self.want.server_type == 'bigip':
if self.have.raw_server_type != 'single-bigip':
result['server_type'] = 'single-bigip'
else:
result['server_type'] = self.want.server_type
elif devices_change:
result['devices'] = self.want.devices
if len(self.want.devices) > 1 and self.have.server_type == 'bigip':
if self.have.raw_server_type != 'redundant-bigip':
result['server_type'] = 'redundant-bigip'
elif self.have.server_type == 'bigip':
if self.have.raw_server_type != 'single-bigip':
result['server_type'] = 'single-bigip'
else:
result['server_type'] = self.want.server_type
elif server_change:
if len(self.have.devices) > 1 and self.want.server_type == 'bigip':
if self.have.raw_server_type != 'redundant-bigip':
result['server_type'] = 'redundant-bigip'
elif self.want.server_type == 'bigip':
if self.have.raw_server_type != 'single-bigip':
result['server_type'] = 'single-bigip'
else:
result['server_type'] = self.want.server_type
return result
@property
def server_type_and_devices(self):
"""Compares difference between server type and devices list
These two parameters are linked with each other and, therefore, must be
compared together to ensure that the correct setting is sent to BIG-IP
:return:
"""
devices_change = self._devices_changed()
server_change = self._server_type_changed()
if not devices_change and not server_change:
return None
tmos_version = self.client.api.tmos_version
if LooseVersion(tmos_version) >= LooseVersion('13.0.0'):
result = self._handle_current_server_type_and_devices(
devices_change, server_change
)
return result
else:
result = self._handle_legacy_server_type_and_devices(
devices_change, server_change
)
return result
@property
def state(self):
if self.want.state == 'disabled' and self.have.enabled:
return dict(disabled=True)
elif self.want.state in ['present', 'enabled'] and self.have.disabled:
return dict(enabled=True)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.kwargs = kwargs
def exec_module(self):
if not self.gtm_provisioned():
raise F5ModuleError(
"GTM must be provisioned to use this module."
)
if self.version_is_less_than('13.0.0'):
manager = self.get_manager('v1')
else:
manager = self.get_manager('v2')
return manager.exec_module()
def get_manager(self, type):
if type == 'v1':
return V1Manager(**self.kwargs)
elif type == 'v2':
return V2Manager(**self.kwargs)
def version_is_less_than(self, version):
tmos_version = self.client.api.tmos_version
if LooseVersion(tmos_version) < LooseVersion(version):
return True
else:
return False
def gtm_provisioned(self):
resource = self.client.api.tm.sys.dbs.db.load(
name='provisioned.cpu.gtm'
)
if int(resource.value) == 0:
return False
return True
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.want.update(dict(client=self.client))
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
diff.client = self.client
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state in ['present', 'enabled', 'disabled']:
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _check_link_discovery_requirements(self):
if self.want.link_discovery in ['enabled', 'enabled-no-delete'] and self.want.virtual_server_discovery == 'disabled':
raise F5ModuleError(
"Virtual server discovery must be enabled if link discovery is enabled"
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
if self.want.state == 'disabled':
self.want.update({'disabled': True})
elif self.want.state in ['present', 'enabled']:
self.want.update({'enabled': True})
self.adjust_server_type_by_version()
self.should_update()
if self.want.devices is None:
raise F5ModuleError(
"You must provide an initial device."
)
self._assign_creation_defaults()
if self.module.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the server")
def create_on_device(self):
params = self.changes.api_params()
self.client.api.tm.gtm.servers.server.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def read_current_from_device(self):
resource = self.client.api.tm.gtm.servers.server.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return ApiParameters(params=result)
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.changes.api_params()
resource = self.client.api.tm.gtm.servers.server.load(
name=self.want.name,
partition=self.want.partition
)
resource.modify(**params)
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the server")
return True
def remove_from_device(self):
resource = self.client.api.tm.gtm.servers.server.load(
name=self.want.name,
partition=self.want.partition
)
resource.delete()
def exists(self):
result = self.client.api.tm.gtm.servers.server.exists(
name=self.want.name,
partition=self.want.partition
)
return result
class V1Manager(BaseManager):
def _assign_creation_defaults(self):
if self.want.server_type is None:
if len(self.want.devices) == 0:
raise F5ModuleError(
"You must provide at least one device."
)
elif len(self.want.devices) == 1:
self.want.update({'server_type': 'single-bigip'})
else:
self.want.update({'server_type': 'redundant-bigip'})
if self.want.link_discovery is None:
self.want.update({'link_discovery': 'disabled'})
if self.want.virtual_server_discovery is None:
self.want.update({'virtual_server_discovery': 'disabled'})
self._check_link_discovery_requirements()
def adjust_server_type_by_version(self):
if len(self.want.devices) == 1 and self.want.server_type == 'bigip':
self.want.update({'server_type': 'single-bigip'})
if len(self.want.devices) > 1 and self.want.server_type == 'bigip':
self.want.update({'server_type': 'redundant-bigip'})
class V2Manager(BaseManager):
def _assign_creation_defaults(self):
if self.want.server_type is None:
self.want.update({'server_type': 'bigip'})
if self.want.link_discovery is None:
self.want.update({'link_discovery': 'disabled'})
if self.want.virtual_server_discovery is None:
self.want.update({'virtual_server_discovery': 'disabled'})
self._check_link_discovery_requirements()
def adjust_server_type_by_version(self):
pass
class ArgumentSpec(object):
def __init__(self):
self.states = ['absent', 'present', 'enabled', 'disabled']
self.server_types = [
'alteon-ace-director',
'cisco-css',
'cisco-server-load-balancer',
'generic-host',
'radware-wsd',
'windows-nt-4.0',
'bigip',
'cisco-local-director-v2',
'extreme',
'generic-load-balancer',
'sun-solaris',
'cacheflow',
'cisco-local-director-v3',
'foundry-server-iron',
'netapp',
'windows-2000-server'
]
self.supports_check_mode = True
argument_spec = dict(
state=dict(
default='present',
choices=self.states,
),
name=dict(required=True),
server_type=dict(
choices=self.server_types,
aliases=['product']
),
datacenter=dict(),
link_discovery=dict(
choices=['enabled', 'disabled', 'enabled-no-delete']
),
virtual_server_discovery=dict(
choices=['enabled', 'disabled', 'enabled-no-delete']
),
devices=dict(
type='list'
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
),
iquery_options=dict(
type='dict',
options=dict(
allow_path=dict(type='bool'),
allow_service_check=dict(type='bool'),
allow_snmp=dict(type='bool')
)
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
clinicalml/anchorExplorer | refs/heads/master | test.py | 2 | #from Tkinter import *
import random
from copy import deepcopy
#import tkFileDialog
import itertools
from multiprocessing import Pool
import string
import ttk
import shelve
import time
import sys
import cPickle as pickle
from collections import defaultdict
import numpy as np
import re
from sklearn import metrics
from Displays import *
from Backend import *
import xml.etree.ElementTree as ET
def show_hand_cursor(event):
event.widget.configure(cursor="hand1")
def show_arrow_cursor(event):
event.widget.configure(cursor="")
def readSettings(filename):
Tree = ET.parse(filename)
return Tree
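# Illustrative settings.xml sketch (inferred from the attribute lookups in
# Display.__init__ below; element and attribute values are assumptions, not a
# documented schema):
#   <settings>
#     <logfile path="explorer.log"/>
#     <dataTypes>
#       <datum type="notes" dictionary="notes_dict.pkl"/>
#       <datum type="labs"/>
#     </dataTypes>
#   </settings>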
class Display:
def __init__(self,parent, settings, loadfile=None):
self.parent = parent
self.currentConcept='dummy-concept'
self.recentPatients = {}
self.displayMode=StringVar()
self.displayMode.set('sort')
self.nProcs = 2
self.settings = readSettings(settings)
self.logfile = file(self.settings.find('logfile').attrib['path'], 'a')
self.dictionaries = []
for dat in self.settings.findall('dataTypes/datum'):
if not 'dictionary' in dat.attrib:
continue
else:
dct = dat.attrib['dictionary']
self.dictionaries.append((dat.attrib['type'], pickle.load(file(dct))))
m1 = PanedWindow()
m1.pack(fill=BOTH, expand=1)
self.leftDisplay = PanedWindow(m1, orient=VERTICAL)
m1.add(self.leftDisplay)
m2 = PanedWindow(m1, orient=VERTICAL)
m1.add(m2)
#left pane -- anchor showing
self.conceptListbox = ConceptListbox(self.leftDisplay, self)
self.buttons = []
b = Button(self.leftDisplay, text='new variable', command=self.addConceptWindow)
self.leftDisplay.add(b)
b.pack(side=TOP)
self.displayString = Label(self.leftDisplay, text='')
self.displayString.pack(side=TOP)
b = Radiobutton(self.leftDisplay, text="view recently anchored", variable=self.displayMode, value="recent", command=self.refresh)
self.leftDisplay.add(b)
b.pack(side=BOTTOM)
b = Radiobutton(self.leftDisplay, text="view selected anchored", variable=self.displayMode, value="select", command=self.refresh)
self.leftDisplay.add(b)
b.pack(side=BOTTOM)
b = Radiobutton(self.leftDisplay, text="view all anchored", variable=self.displayMode, value="filter", command=self.refresh)
self.leftDisplay.add(b)
b.pack(side=BOTTOM)
b = Radiobutton(self.leftDisplay, text="view not anchored", variable=self.displayMode, value="sort", command=self.refresh)
self.leftDisplay.add(b)
b.pack(side=BOTTOM)
#b = Radiobutton(self.leftDisplay, text="do labeling", variable=self.displayMode, value="label", command=self.refresh)
#self.leftDisplay.add(b)
#b.pack(side=BOTTOM)
self.anchorDisplay = AnchorDisplay(m2, self)
self.patientDetailDisplay = PatientDetailDisplay(m2, self)
self.patientListDisplay = PatientListDisplay(m2, self)
self.backend = Backend(self, loadfile)
self.refresh()
def displayConcept(self, conceptID=None):
if conceptID == None:
conceptID = self.currentConcept
else:
self.currentConcept = conceptID
self.backend.initConcept(conceptID)
self.anchorDisplay.showAnchors(conceptID)
self.patientListDisplay.displayPatients()
self.patientDetailDisplay.clear()
self.showStats()
def showStats(self):
displayString = ""
displayString += "current var is "+ self.currentConcept+'\n'
displayString += 'anchored patients: ' +str(len(union(self.backend.concepts[self.currentConcept].anchoredPatients.values()))) +'\n'
displayString += 'hand labeled patients: ' + str(len(self.backend.concepts[self.currentConcept].human_labels.keys())) +'\n'
#displayString += 'evaluator patients: ' + str(len(union(self.backend.concepts[self.currentConcept].evaluatorPatients.values()))) +'\n'
#displayString += 'precision@'+str(self.backend.concepts[self.currentConcept].recall)+': ' + str(self.backend.concepts[self.currentConcept].get_precision()) + '\n'
self.displayString.config(text=displayString)
def debug(self):
#IPython.getipython.get_ipython().launch_new_instance({'self':self})
print "done with debugging session"
def calculateStats(self):
nAnchored = 0
for pat in self.patients.values():
if self.currentConcept in pat['anchors']:
self.anchored_patients[self.currentConcept].add(pat['index'])
nAnchored += 1
else:
self.anchored_patients[self.currentConcept].discard(pat['index'])
display_str = ""
if self.currentConcept in self.weights and self.weights[self.currentConcept]:
status = 'up to date.'
else:
status = 'out of date!'
display_str += "model is "+status+'\n'
#display_str += "validate set size "+str(self.validate_size)+'\n'
display_str += "anchored patients="+str(nAnchored)+'\n'
display_str += "human labels (pos/neg)= ("+str(len([i for i in self.human_labels[self.currentConcept].values() if i == 1])) + '/'
display_str += str(len([i for i in self.human_labels[self.currentConcept].values() if i == 0])) + ')\n'
display_str += "display size is="+str(self.nDisplay)+'\n'
display_str += "train size is="+str(self.nTrain)+'\n'
self.stat_str.set(display_str)
def addConceptWindow(self):
display = Tk()
display.title('Add a new variable')
new_window = PanedWindow(display, orient=VERTICAL)
new_window.pack(fill=BOTH, expand=1)
label = Label(new_window, text = "Enter a new variable")
new_window.add(label)
l = Entry(new_window)
l.bind("<Return>", self.addConcept)
new_window.add(l)
def addConcept(self, event):
new_concept = event.widget.get().lower()
self.backend.newConcept(new_concept)
self.displayConcept(new_concept)
event.widget.master.master.destroy()
def suggestConcept(self):
pass
#select a patient and display
def patientSelect(self, event):
for p in event.widget.curselection():
self.displayPatient(self.toplistIDs[int(p)])
def onStructuredAnchorSuggest(self, event):
for p in event.widget.selection():
item = event.widget.item(p)
self.enterAnchor.delete(0,END)
self.enterAnchor.insert(0, item['values'][0])
event.widget.master.master.destroy()
def refresh(self):
self.displayConcept()
def resetModel(self, conceptID):
self.weights[conceptID] = None
self.orderedPatients[conceptID] = None
#self.weight_vectors[conceptID] = None
if __name__ == "__main__":
root = Tk()
try:
settings = sys.argv[1]
except:
settings = 'examples/settings.xml'
myapp = Display(root, settings)
# root.mainloop()
|
eduNEXT/edx-platform | refs/heads/master | cms/djangoapps/xblock_config/models.py | 3 | """
Models used by Studio XBlock infrastructure.
Includes:
StudioConfig: A ConfigurationModel for managing Studio.
"""
from config_models.models import ConfigurationModel
from django.db.models import TextField
class StudioConfig(ConfigurationModel):
"""
Configuration for XBlockAsides.
.. no_pii:
"""
disabled_blocks = TextField(
default="about course_info static_tab",
help_text="Space-separated list of XBlocks on which XBlockAsides should never render in studio",
)
@classmethod
def asides_enabled(cls, block_type):
"""
Return True if asides are enabled for this type of block in studio
"""
studio_config = cls.current()
return studio_config.enabled and block_type not in studio_config.disabled_blocks.split()
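# Illustrative usage (an assumption, not taken from this file): rendering code
# would typically gate aside rendering on this check, e.g.
#   if StudioConfig.asides_enabled(block_type):
#       ...  # render XBlockAsides for this block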
|
mattseymour/django | refs/heads/master | django/contrib/admin/tests.py | 113 | from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import modify_settings
from django.test.selenium import SeleniumTestCase
from django.utils.deprecation import MiddlewareMixin
from django.utils.translation import ugettext as _
class CSPMiddleware(MiddlewareMixin):
"""The admin's JavaScript should be compatible with CSP."""
def process_response(self, request, response):
response['Content-Security-Policy'] = "default-src 'self'"
return response
@modify_settings(MIDDLEWARE={'append': 'django.contrib.admin.tests.CSPMiddleware'})
class AdminSeleniumTestCase(SeleniumTestCase, StaticLiveServerTestCase):
available_apps = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
]
def wait_until(self, callback, timeout=10):
"""
Helper function that blocks the execution of the tests until the
specified callback returns a value that is not falsy. This function can
be called, for example, after clicking a link or submitting a form.
See the other public methods that call this function for more details.
"""
from selenium.webdriver.support.wait import WebDriverWait
WebDriverWait(self.selenium, timeout).until(callback)
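# A usage sketch (illustrative only; the URL fragment is an assumption): after
# submitting a form, a test might wait for the browser URL to change before
# asserting anything, e.g.
#   self.wait_until(lambda driver: "changelist" in driver.current_url)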
def wait_for_popup(self, num_windows=2, timeout=10):
"""
Block until `num_windows` are present (usually 2, but can be
overridden in the case of pop-ups opening other pop-ups).
"""
self.wait_until(lambda d: len(d.window_handles) == num_windows, timeout)
def wait_for(self, css_selector, timeout=10):
"""
Helper function that blocks until a CSS selector is found on the page.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.presence_of_element_located((By.CSS_SELECTOR, css_selector)),
timeout
)
def wait_for_text(self, css_selector, text, timeout=10):
"""
Helper function that blocks until the text is found in the CSS selector.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.text_to_be_present_in_element(
(By.CSS_SELECTOR, css_selector), text),
timeout
)
def wait_for_value(self, css_selector, text, timeout=10):
"""
Helper function that blocks until the value is found in the CSS selector.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.text_to_be_present_in_element_value(
(By.CSS_SELECTOR, css_selector), text),
timeout
)
def wait_until_visible(self, css_selector, timeout=10):
"""
Block until the element described by the CSS selector is visible.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.visibility_of_element_located((By.CSS_SELECTOR, css_selector)),
timeout
)
def wait_until_invisible(self, css_selector, timeout=10):
"""
Block until the element described by the CSS selector is invisible.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.invisibility_of_element_located((By.CSS_SELECTOR, css_selector)),
timeout
)
def wait_page_loaded(self):
"""
Block until page has started to load.
"""
from selenium.common.exceptions import TimeoutException
try:
# Wait for the next page to be loaded
self.wait_for('body')
except TimeoutException:
# IE7 occasionally returns an error "Internet Explorer cannot
# display the webpage" and doesn't load the next page. We just
# ignore it.
pass
def admin_login(self, username, password, login_url='/admin/'):
"""
Helper function to log into the admin.
"""
self.selenium.get('%s%s' % (self.live_server_url, login_url))
username_input = self.selenium.find_element_by_name('username')
username_input.send_keys(username)
password_input = self.selenium.find_element_by_name('password')
password_input.send_keys(password)
login_text = _('Log in')
self.selenium.find_element_by_xpath(
'//input[@value="%s"]' % login_text).click()
self.wait_page_loaded()
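# Typical call from a test (illustrative credential values):
#   self.admin_login(username="super", password="secret", login_url="/admin/")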
def get_css_value(self, selector, attribute):
"""
Helper function that returns the value for the CSS attribute of an
DOM element specified by the given selector. Uses the jQuery that ships
with Django.
"""
return self.selenium.execute_script(
'return django.jQuery("%s").css("%s")' % (selector, attribute))
def get_select_option(self, selector, value):
"""
Returns the <OPTION> with the value `value` inside the <SELECT> widget
identified by the CSS selector `selector`.
"""
from selenium.common.exceptions import NoSuchElementException
options = self.selenium.find_elements_by_css_selector('%s > option' % selector)
for option in options:
if option.get_attribute('value') == value:
return option
raise NoSuchElementException('Option "%s" not found in "%s"' % (value, selector))
def _assertOptionsValues(self, options_selector, values):
if values:
options = self.selenium.find_elements_by_css_selector(options_selector)
actual_values = []
for option in options:
actual_values.append(option.get_attribute('value'))
self.assertEqual(values, actual_values)
else:
# Prevent the `find_elements_by_css_selector` call from blocking
# if the selector doesn't match any options as we expect it
# to be the case.
with self.disable_implicit_wait():
self.wait_until(
lambda driver: len(driver.find_elements_by_css_selector(options_selector)) == 0
)
def assertSelectOptions(self, selector, values):
"""
Asserts that the <SELECT> widget identified by `selector` has the
options with the given `values`.
"""
self._assertOptionsValues("%s > option" % selector, values)
def assertSelectedOptions(self, selector, values):
"""
Asserts that the <SELECT> widget identified by `selector` has the
selected options with the given `values`.
"""
self._assertOptionsValues("%s > option:checked" % selector, values)
def has_css_class(self, selector, klass):
"""
Returns True if the element identified by `selector` has the CSS class
`klass`.
"""
return (self.selenium.find_element_by_css_selector(selector)
.get_attribute('class').find(klass) != -1)
|
kreeger/etcetera | refs/heads/master | checkout/managers.py | 1 | import datetime as dt
from django.db import models
class CheckoutManager(models.Manager):
def active(self):
return self.get_query_set().filter(completion_date=None)
def closed(self):
return self.get_query_set().exclude(completion_date=None) |
Ictp/indico | refs/heads/master | bin/legacy/rebuildAuthorIndex.py | 1 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from indico.core.db import DBMgr
from MaKaC import conference
DBMgr.getInstance().startRequest()
error = False
ch = conference.ConferenceHolder()
for conf in ch.getList():
conf._authorIdx=conference.AuthorIndex()
for contrib in conf.getContributionList():
if not isinstance(contrib.getCurrentStatus(),conference.ContribStatusWithdrawn):
for auth in contrib.getPrimaryAuthorList():
conf._authorIdx.index(auth)
if not error:
DBMgr.getInstance().endRequest()
print "No error. The change are saved"
else:
print "There were errors. The changes was not saved"
|
adamjmcgrath/glancydesign | refs/heads/master | src/django-nonrel/django/contrib/localflavor/ie/ie_counties.py | 503 | """
Sources:
Irish Counties: http://en.wikipedia.org/wiki/Counties_of_Ireland
"""
from django.utils.translation import ugettext_lazy as _
IE_COUNTY_CHOICES = (
('antrim', _('Antrim')),
('armagh', _('Armagh')),
('carlow', _('Carlow')),
('cavan', _('Cavan')),
('clare', _('Clare')),
('cork', _('Cork')),
('derry', _('Derry')),
('donegal', _('Donegal')),
('down', _('Down')),
('dublin', _('Dublin')),
('fermanagh', _('Fermanagh')),
('galway', _('Galway')),
('kerry', _('Kerry')),
('kildare', _('Kildare')),
('kilkenny', _('Kilkenny')),
('laois', _('Laois')),
('leitrim', _('Leitrim')),
('limerick', _('Limerick')),
('longford', _('Longford')),
('louth', _('Louth')),
('mayo', _('Mayo')),
('meath', _('Meath')),
('monaghan', _('Monaghan')),
('offaly', _('Offaly')),
('roscommon', _('Roscommon')),
('sligo', _('Sligo')),
('tipperary', _('Tipperary')),
('tyrone', _('Tyrone')),
('waterford', _('Waterford')),
('westmeath', _('Westmeath')),
('wexford', _('Wexford')),
('wicklow', _('Wicklow')),
)
|
ueshin/apache-spark | refs/heads/master | python/pyspark/ml/tuning.py | 13 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import itertools
import random
import math
from multiprocessing.pool import ThreadPool
import numpy as np
from pyspark import keyword_only, since, SparkContext, inheritable_thread_target
from pyspark.ml import Estimator, Transformer, Model
from pyspark.ml.common import inherit_doc, _py2java, _java2py
from pyspark.ml.evaluation import Evaluator
from pyspark.ml.param import Params, Param, TypeConverters
from pyspark.ml.param.shared import HasCollectSubModels, HasParallelism, HasSeed
from pyspark.ml.util import DefaultParamsReader, DefaultParamsWriter, MetaAlgorithmReadWrite, \
MLReadable, MLReader, MLWritable, MLWriter, JavaMLReader, JavaMLWriter
from pyspark.ml.wrapper import JavaParams, JavaEstimator, JavaWrapper
from pyspark.sql.functions import col, lit, rand, UserDefinedFunction
from pyspark.sql.types import BooleanType
__all__ = ['ParamGridBuilder', 'CrossValidator', 'CrossValidatorModel', 'TrainValidationSplit',
'TrainValidationSplitModel', 'ParamRandomBuilder']
def _parallelFitTasks(est, train, eva, validation, epm, collectSubModel):
"""
Creates a list of callables which can be called from different threads to fit and evaluate
an estimator in parallel. Each callable returns an `(index, metric)` pair.
Parameters
----------
est : :py:class:`pyspark.ml.Estimator`
The estimator to be fit.
train : :py:class:`pyspark.sql.DataFrame`
DataFrame, training data set, used for fitting.
eva : :py:class:`pyspark.ml.evaluation.Evaluator`
used to compute `metric`
validation : :py:class:`pyspark.sql.DataFrame`
DataFrame, validation data set, used for evaluation.
epm : :py:class:`collections.abc.Sequence`
Sequence of ParamMap, params maps to be used during fitting & evaluation.
collectSubModel : bool
Whether to collect sub model.
Returns
-------
tuple
(int, float, subModel), an index into `epm`, the associated metric value, and the fitted sub-model (or None if sub-models are not collected).
"""
modelIter = est.fitMultiple(train, epm)
def singleTask():
index, model = next(modelIter)
# TODO: duplicate evaluator to take extra params from input
# Note: Supporting tuning params in evaluator need update method
# `MetaAlgorithmReadWrite.getAllNestedStages`, make it return
# all nested stages and evaluators
metric = eva.evaluate(model.transform(validation, epm[index]))
return index, metric, model if collectSubModel else None
return [singleTask] * len(epm)
class ParamGridBuilder(object):
r"""
Builder for a param grid used in grid search-based model selection.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.ml.classification import LogisticRegression
>>> lr = LogisticRegression()
>>> output = ParamGridBuilder() \
... .baseOn({lr.labelCol: 'l'}) \
... .baseOn([lr.predictionCol, 'p']) \
... .addGrid(lr.regParam, [1.0, 2.0]) \
... .addGrid(lr.maxIter, [1, 5]) \
... .build()
>>> expected = [
... {lr.regParam: 1.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},
... {lr.regParam: 2.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},
... {lr.regParam: 1.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'},
... {lr.regParam: 2.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'}]
>>> len(output) == len(expected)
True
>>> all([m in expected for m in output])
True
"""
def __init__(self):
self._param_grid = {}
@since("1.4.0")
def addGrid(self, param, values):
"""
Sets the given parameters in this grid to fixed values.
param must be an instance of Param associated with an instance of Params
(such as Estimator or Transformer).
"""
if isinstance(param, Param):
self._param_grid[param] = values
else:
raise TypeError("param must be an instance of Param")
return self
@since("1.4.0")
def baseOn(self, *args):
"""
Sets the given parameters in this grid to fixed values.
Accepts either a parameter dictionary or a list of (parameter, value) pairs.
"""
if isinstance(args[0], dict):
self.baseOn(*args[0].items())
else:
for (param, value) in args:
self.addGrid(param, [value])
return self
@since("1.4.0")
def build(self):
"""
Builds and returns all combinations of parameters specified
by the param grid.
"""
keys = self._param_grid.keys()
grid_values = self._param_grid.values()
def to_key_value_pairs(keys, values):
return [(key, key.typeConverter(value)) for key, value in zip(keys, values)]
return [dict(to_key_value_pairs(keys, prod)) for prod in itertools.product(*grid_values)]
class ParamRandomBuilder(ParamGridBuilder):
r"""
Builder for random value parameters used in search-based model selection.
.. versionadded:: 3.2.0
"""
@since("3.2.0")
def addRandom(self, param, x, y, n):
"""
Adds n random values between x and y.
The arguments x and y can be integers, floats or a combination of the two. If either
x or y is a float, the domain of the random value will be float.
"""
if type(x) == int and type(y) == int:
values = map(lambda _: random.randrange(x, y), range(n))
elif type(x) == float or type(y) == float:
values = map(lambda _: random.uniform(x, y), range(n))
else:
raise TypeError("unable to make range for types %s and %s" % type(x) % type(y))
self.addGrid(param, values)
return self
@since("3.2.0")
def addLog10Random(self, param, x, y, n):
"""
Adds n random values scaled logarithmically (base 10) between x and y.
For instance, a distribution for x=1.0, y=10000.0 and n=5 might reasonably look like
[1.6, 65.3, 221.9, 1024.3, 8997.5]
"""
def logarithmic_random():
rand = random.uniform(math.log10(x), math.log10(y))
value = 10 ** rand
if type(x) == int and type(y) == int:
value = int(value)
return value
values = map(lambda _: logarithmic_random(), range(n))
self.addGrid(param, values)
return self
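# A minimal usage sketch (illustrative only; LogisticRegression and its params
# are assumptions, not part of this module): ParamRandomBuilder can mix fixed
# grids with random draws, e.g.
#
#   from pyspark.ml.classification import LogisticRegression
#   lr = LogisticRegression()
#   params = ParamRandomBuilder() \
#       .addGrid(lr.fitIntercept, [True, False]) \
#       .addRandom(lr.maxIter, 10, 100, 3) \
#       .addLog10Random(lr.regParam, 0.001, 1.0, 3) \
#       .build()
#   # 2 x 3 x 3 = 18 param maps combining the grid with the random values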
class _ValidatorParams(HasSeed):
"""
Common params for TrainValidationSplit and CrossValidator.
"""
estimator = Param(Params._dummy(), "estimator", "estimator to be cross-validated")
estimatorParamMaps = Param(Params._dummy(), "estimatorParamMaps", "estimator param maps")
evaluator = Param(
Params._dummy(), "evaluator",
"evaluator used to select hyper-parameters that maximize the validator metric")
@since("2.0.0")
def getEstimator(self):
"""
Gets the value of estimator or its default value.
"""
return self.getOrDefault(self.estimator)
@since("2.0.0")
def getEstimatorParamMaps(self):
"""
Gets the value of estimatorParamMaps or its default value.
"""
return self.getOrDefault(self.estimatorParamMaps)
@since("2.0.0")
def getEvaluator(self):
"""
Gets the value of evaluator or its default value.
"""
return self.getOrDefault(self.evaluator)
@classmethod
def _from_java_impl(cls, java_stage):
"""
Return Python estimator, estimatorParamMaps, and evaluator from a Java ValidatorParams.
"""
# Load information from java_stage to the instance.
estimator = JavaParams._from_java(java_stage.getEstimator())
evaluator = JavaParams._from_java(java_stage.getEvaluator())
if isinstance(estimator, JavaEstimator):
epms = [estimator._transfer_param_map_from_java(epm)
for epm in java_stage.getEstimatorParamMaps()]
elif MetaAlgorithmReadWrite.isMetaEstimator(estimator):
# Meta estimator such as Pipeline, OneVsRest
epms = _ValidatorSharedReadWrite.meta_estimator_transfer_param_maps_from_java(
estimator, java_stage.getEstimatorParamMaps())
else:
raise ValueError('Unsupported estimator used in tuning: ' + str(estimator))
return estimator, epms, evaluator
def _to_java_impl(self):
"""
Return Java estimator, estimatorParamMaps, and evaluator from this Python instance.
"""
gateway = SparkContext._gateway
cls = SparkContext._jvm.org.apache.spark.ml.param.ParamMap
estimator = self.getEstimator()
if isinstance(estimator, JavaEstimator):
java_epms = gateway.new_array(cls, len(self.getEstimatorParamMaps()))
for idx, epm in enumerate(self.getEstimatorParamMaps()):
java_epms[idx] = self.getEstimator()._transfer_param_map_to_java(epm)
elif MetaAlgorithmReadWrite.isMetaEstimator(estimator):
# Meta estimator such as Pipeline, OneVsRest
java_epms = _ValidatorSharedReadWrite.meta_estimator_transfer_param_maps_to_java(
estimator, self.getEstimatorParamMaps())
else:
raise ValueError('Unsupported estimator used in tuning: ' + str(estimator))
java_estimator = self.getEstimator()._to_java()
java_evaluator = self.getEvaluator()._to_java()
return java_estimator, java_epms, java_evaluator
class _ValidatorSharedReadWrite:
@staticmethod
def meta_estimator_transfer_param_maps_to_java(pyEstimator, pyParamMaps):
pyStages = MetaAlgorithmReadWrite.getAllNestedStages(pyEstimator)
stagePairs = list(map(lambda stage: (stage, stage._to_java()), pyStages))
sc = SparkContext._active_spark_context
paramMapCls = SparkContext._jvm.org.apache.spark.ml.param.ParamMap
javaParamMaps = SparkContext._gateway.new_array(paramMapCls, len(pyParamMaps))
for idx, pyParamMap in enumerate(pyParamMaps):
javaParamMap = JavaWrapper._new_java_obj("org.apache.spark.ml.param.ParamMap")
for pyParam, pyValue in pyParamMap.items():
javaParam = None
for pyStage, javaStage in stagePairs:
if pyStage._testOwnParam(pyParam.parent, pyParam.name):
javaParam = javaStage.getParam(pyParam.name)
break
if javaParam is None:
raise ValueError('Resolve param in estimatorParamMaps failed: ' + str(pyParam))
if isinstance(pyValue, Params) and hasattr(pyValue, "_to_java"):
javaValue = pyValue._to_java()
else:
javaValue = _py2java(sc, pyValue)
pair = javaParam.w(javaValue)
javaParamMap.put([pair])
javaParamMaps[idx] = javaParamMap
return javaParamMaps
@staticmethod
def meta_estimator_transfer_param_maps_from_java(pyEstimator, javaParamMaps):
pyStages = MetaAlgorithmReadWrite.getAllNestedStages(pyEstimator)
stagePairs = list(map(lambda stage: (stage, stage._to_java()), pyStages))
sc = SparkContext._active_spark_context
pyParamMaps = []
for javaParamMap in javaParamMaps:
pyParamMap = dict()
for javaPair in javaParamMap.toList():
javaParam = javaPair.param()
pyParam = None
for pyStage, javaStage in stagePairs:
if pyStage._testOwnParam(javaParam.parent(), javaParam.name()):
pyParam = pyStage.getParam(javaParam.name())
if pyParam is None:
raise ValueError('Resolve param in estimatorParamMaps failed: ' +
javaParam.parent() + '.' + javaParam.name())
javaValue = javaPair.value()
if sc._jvm.Class.forName("org.apache.spark.ml.util.DefaultParamsWritable") \
.isInstance(javaValue):
pyValue = JavaParams._from_java(javaValue)
else:
pyValue = _java2py(sc, javaValue)
pyParamMap[pyParam] = pyValue
pyParamMaps.append(pyParamMap)
return pyParamMaps
@staticmethod
def is_java_convertible(instance):
allNestedStages = MetaAlgorithmReadWrite.getAllNestedStages(instance.getEstimator())
evaluator_convertible = isinstance(instance.getEvaluator(), JavaParams)
estimator_convertible = all(map(lambda stage: hasattr(stage, '_to_java'), allNestedStages))
return estimator_convertible and evaluator_convertible
@staticmethod
def saveImpl(path, instance, sc, extraMetadata=None):
numParamsNotJson = 0
jsonEstimatorParamMaps = []
for paramMap in instance.getEstimatorParamMaps():
jsonParamMap = []
for p, v in paramMap.items():
jsonParam = {'parent': p.parent, 'name': p.name}
if (isinstance(v, Estimator) and not MetaAlgorithmReadWrite.isMetaEstimator(v)) \
or isinstance(v, Transformer) or isinstance(v, Evaluator):
relative_path = f'epm_{p.name}{numParamsNotJson}'
param_path = os.path.join(path, relative_path)
numParamsNotJson += 1
v.save(param_path)
jsonParam['value'] = relative_path
jsonParam['isJson'] = False
elif isinstance(v, MLWritable):
raise RuntimeError(
"ValidatorSharedReadWrite.saveImpl does not handle parameters of type: "
"MLWritable that are not Estimaor/Evaluator/Transformer, and if parameter "
"is estimator, it cannot be meta estimator such as Validator or OneVsRest")
else:
jsonParam['value'] = v
jsonParam['isJson'] = True
jsonParamMap.append(jsonParam)
jsonEstimatorParamMaps.append(jsonParamMap)
skipParams = ['estimator', 'evaluator', 'estimatorParamMaps']
jsonParams = DefaultParamsWriter.extractJsonParams(instance, skipParams)
jsonParams['estimatorParamMaps'] = jsonEstimatorParamMaps
DefaultParamsWriter.saveMetadata(instance, path, sc, extraMetadata, jsonParams)
evaluatorPath = os.path.join(path, 'evaluator')
instance.getEvaluator().save(evaluatorPath)
estimatorPath = os.path.join(path, 'estimator')
instance.getEstimator().save(estimatorPath)
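# Resulting on-disk layout (an illustrative summary of the code above, not a
# documented contract):
#   <path>/               - metadata written by DefaultParamsWriter, including
#                           the JSON-encoded estimatorParamMaps
#   <path>/evaluator      - the saved Evaluator
#   <path>/estimator      - the saved Estimator
#   <path>/epm_<name><N>  - param values that are not JSON-serializable,
#                           saved as separate ML instances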
@staticmethod
def load(path, sc, metadata):
evaluatorPath = os.path.join(path, 'evaluator')
evaluator = DefaultParamsReader.loadParamsInstance(evaluatorPath, sc)
estimatorPath = os.path.join(path, 'estimator')
estimator = DefaultParamsReader.loadParamsInstance(estimatorPath, sc)
uidToParams = MetaAlgorithmReadWrite.getUidMap(estimator)
uidToParams[evaluator.uid] = evaluator
jsonEstimatorParamMaps = metadata['paramMap']['estimatorParamMaps']
estimatorParamMaps = []
for jsonParamMap in jsonEstimatorParamMaps:
paramMap = {}
for jsonParam in jsonParamMap:
est = uidToParams[jsonParam['parent']]
param = getattr(est, jsonParam['name'])
if 'isJson' not in jsonParam or ('isJson' in jsonParam and jsonParam['isJson']):
value = jsonParam['value']
else:
relativePath = jsonParam['value']
valueSavedPath = os.path.join(path, relativePath)
value = DefaultParamsReader.loadParamsInstance(valueSavedPath, sc)
paramMap[param] = value
estimatorParamMaps.append(paramMap)
return metadata, estimator, evaluator, estimatorParamMaps
@staticmethod
def validateParams(instance):
estimator = instance.getEstimator()
evaluator = instance.getEvaluator()
uidMap = MetaAlgorithmReadWrite.getUidMap(estimator)
for elem in [evaluator] + list(uidMap.values()):
if not isinstance(elem, MLWritable):
raise ValueError(f'Validator write will fail because it contains {elem.uid} '
f'which is not writable.')
estimatorParamMaps = instance.getEstimatorParamMaps()
paramErr = 'Validator save requires all Params in estimatorParamMaps to apply to ' \
'its Estimator. An extraneous Param was found: '
for paramMap in estimatorParamMaps:
for param in paramMap:
if param.parent not in uidMap:
raise ValueError(paramErr + repr(param))
@staticmethod
def getValidatorModelWriterPersistSubModelsParam(writer):
if 'persistsubmodels' in writer.optionMap:
persistSubModelsParam = writer.optionMap['persistsubmodels'].lower()
if persistSubModelsParam == 'true':
return True
elif persistSubModelsParam == 'false':
return False
else:
raise ValueError(
f'persistSubModels option value {persistSubModelsParam} is invalid, '
f"the possible values are True, 'True' or False, 'False'")
else:
return writer.instance.subModels is not None
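# Illustrative caller-side usage (assuming the standard MLWriter.option API):
#   cvModel.write().option("persistSubModels", "false").save(path)
# overrides the default, which is to persist sub-models only when they were
# collected during fitting.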
_save_with_persist_submodels_no_submodels_found_err = \
'When persisting tuning models, you can only set persistSubModels to true if the tuning ' \
'was done with collectSubModels set to true. To save the sub-models, try rerunning fitting ' \
'with collectSubModels set to true.'
@inherit_doc
class CrossValidatorReader(MLReader):
def __init__(self, cls):
super(CrossValidatorReader, self).__init__()
self.cls = cls
def load(self, path):
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
if not DefaultParamsReader.isPythonParamsInstance(metadata):
return JavaMLReader(self.cls).load(path)
else:
metadata, estimator, evaluator, estimatorParamMaps = \
_ValidatorSharedReadWrite.load(path, self.sc, metadata)
cv = CrossValidator(estimator=estimator,
estimatorParamMaps=estimatorParamMaps,
evaluator=evaluator)
cv = cv._resetUid(metadata['uid'])
DefaultParamsReader.getAndSetParams(cv, metadata, skipParams=['estimatorParamMaps'])
return cv
@inherit_doc
class CrossValidatorWriter(MLWriter):
def __init__(self, instance):
super(CrossValidatorWriter, self).__init__()
self.instance = instance
def saveImpl(self, path):
_ValidatorSharedReadWrite.validateParams(self.instance)
_ValidatorSharedReadWrite.saveImpl(path, self.instance, self.sc)
@inherit_doc
class CrossValidatorModelReader(MLReader):
def __init__(self, cls):
super(CrossValidatorModelReader, self).__init__()
self.cls = cls
def load(self, path):
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
if not DefaultParamsReader.isPythonParamsInstance(metadata):
return JavaMLReader(self.cls).load(path)
else:
metadata, estimator, evaluator, estimatorParamMaps = \
_ValidatorSharedReadWrite.load(path, self.sc, metadata)
numFolds = metadata['paramMap']['numFolds']
bestModelPath = os.path.join(path, 'bestModel')
bestModel = DefaultParamsReader.loadParamsInstance(bestModelPath, self.sc)
avgMetrics = metadata['avgMetrics']
persistSubModels = ('persistSubModels' in metadata) and metadata['persistSubModels']
if persistSubModels:
# Build one independent list per fold; repeating a single list with `*`
# would alias the same inner list across all folds.
subModels = [[None] * len(estimatorParamMaps) for _ in range(numFolds)]
for splitIndex in range(numFolds):
for paramIndex in range(len(estimatorParamMaps)):
modelPath = os.path.join(
path, 'subModels', f'fold{splitIndex}', f'{paramIndex}')
subModels[splitIndex][paramIndex] = \
DefaultParamsReader.loadParamsInstance(modelPath, self.sc)
else:
subModels = None
cvModel = CrossValidatorModel(bestModel, avgMetrics=avgMetrics, subModels=subModels)
cvModel = cvModel._resetUid(metadata['uid'])
cvModel.set(cvModel.estimator, estimator)
cvModel.set(cvModel.estimatorParamMaps, estimatorParamMaps)
cvModel.set(cvModel.evaluator, evaluator)
DefaultParamsReader.getAndSetParams(
cvModel, metadata, skipParams=['estimatorParamMaps'])
return cvModel
@inherit_doc
class CrossValidatorModelWriter(MLWriter):
def __init__(self, instance):
super(CrossValidatorModelWriter, self).__init__()
self.instance = instance
def saveImpl(self, path):
_ValidatorSharedReadWrite.validateParams(self.instance)
instance = self.instance
persistSubModels = _ValidatorSharedReadWrite \
.getValidatorModelWriterPersistSubModelsParam(self)
extraMetadata = {'avgMetrics': instance.avgMetrics,
'persistSubModels': persistSubModels}
_ValidatorSharedReadWrite.saveImpl(path, instance, self.sc, extraMetadata=extraMetadata)
bestModelPath = os.path.join(path, 'bestModel')
instance.bestModel.save(bestModelPath)
if persistSubModels:
if instance.subModels is None:
raise ValueError(_save_with_persist_submodels_no_submodels_found_err)
subModelsPath = os.path.join(path, 'subModels')
for splitIndex in range(instance.getNumFolds()):
splitPath = os.path.join(subModelsPath, f'fold{splitIndex}')
for paramIndex in range(len(instance.getEstimatorParamMaps())):
modelPath = os.path.join(splitPath, f'{paramIndex}')
instance.subModels[splitIndex][paramIndex].save(modelPath)
class _CrossValidatorParams(_ValidatorParams):
"""
Params for :py:class:`CrossValidator` and :py:class:`CrossValidatorModel`.
.. versionadded:: 3.0.0
"""
numFolds = Param(Params._dummy(), "numFolds", "number of folds for cross validation",
typeConverter=TypeConverters.toInt)
foldCol = Param(Params._dummy(), "foldCol", "Param for the column name of user " +
"specified fold number. Once this is specified, :py:class:`CrossValidator` " +
"won't do random k-fold split. Note that this column should be integer type " +
"with range [0, numFolds) and Spark will throw exception on out-of-range " +
"fold numbers.", typeConverter=TypeConverters.toString)
def __init__(self, *args):
super(_CrossValidatorParams, self).__init__(*args)
self._setDefault(numFolds=3, foldCol="")
@since("1.4.0")
def getNumFolds(self):
"""
Gets the value of numFolds or its default value.
"""
return self.getOrDefault(self.numFolds)
@since("3.1.0")
def getFoldCol(self):
"""
Gets the value of foldCol or its default value.
"""
return self.getOrDefault(self.foldCol)
class CrossValidator(Estimator, _CrossValidatorParams, HasParallelism, HasCollectSubModels,
MLReadable, MLWritable):
"""
K-fold cross validation performs model selection by splitting the dataset into a set of
non-overlapping, randomly partitioned folds which are used as separate training and test datasets.
For example, with k=3 folds, K-fold cross validation will generate 3 (training, test) dataset pairs,
each of which uses 2/3 of the data for training and 1/3 for testing. Each fold is used as the
test set exactly once.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.ml.classification import LogisticRegression
>>> from pyspark.ml.evaluation import BinaryClassificationEvaluator
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.tuning import CrossValidator, ParamGridBuilder, CrossValidatorModel
>>> import tempfile
>>> dataset = spark.createDataFrame(
... [(Vectors.dense([0.0]), 0.0),
... (Vectors.dense([0.4]), 1.0),
... (Vectors.dense([0.5]), 0.0),
... (Vectors.dense([0.6]), 1.0),
... (Vectors.dense([1.0]), 1.0)] * 10,
... ["features", "label"])
>>> lr = LogisticRegression()
>>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
>>> evaluator = BinaryClassificationEvaluator()
>>> cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
... parallelism=2)
>>> cvModel = cv.fit(dataset)
>>> cvModel.getNumFolds()
3
>>> cvModel.avgMetrics[0]
0.5
>>> path = tempfile.mkdtemp()
>>> model_path = path + "/model"
>>> cvModel.write().save(model_path)
>>> cvModelRead = CrossValidatorModel.read().load(model_path)
>>> cvModelRead.avgMetrics
[0.5, ...
>>> evaluator.evaluate(cvModel.transform(dataset))
0.8333...
>>> evaluator.evaluate(cvModelRead.transform(dataset))
0.8333...
"""
@keyword_only
def __init__(self, *, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,
seed=None, parallelism=1, collectSubModels=False, foldCol=""):
"""
__init__(self, \\*, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\
seed=None, parallelism=1, collectSubModels=False, foldCol="")
"""
super(CrossValidator, self).__init__()
self._setDefault(parallelism=1)
kwargs = self._input_kwargs
self._set(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,
seed=None, parallelism=1, collectSubModels=False, foldCol=""):
"""
setParams(self, \\*, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\
seed=None, parallelism=1, collectSubModels=False, foldCol=""):
Sets params for cross validator.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setEstimator(self, value):
"""
Sets the value of :py:attr:`estimator`.
"""
return self._set(estimator=value)
@since("2.0.0")
def setEstimatorParamMaps(self, value):
"""
Sets the value of :py:attr:`estimatorParamMaps`.
"""
return self._set(estimatorParamMaps=value)
@since("2.0.0")
def setEvaluator(self, value):
"""
Sets the value of :py:attr:`evaluator`.
"""
return self._set(evaluator=value)
@since("1.4.0")
def setNumFolds(self, value):
"""
Sets the value of :py:attr:`numFolds`.
"""
return self._set(numFolds=value)
@since("3.1.0")
def setFoldCol(self, value):
"""
Sets the value of :py:attr:`foldCol`.
"""
return self._set(foldCol=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
def setParallelism(self, value):
"""
Sets the value of :py:attr:`parallelism`.
"""
return self._set(parallelism=value)
def setCollectSubModels(self, value):
"""
Sets the value of :py:attr:`collectSubModels`.
"""
return self._set(collectSubModels=value)
def _fit(self, dataset):
est = self.getOrDefault(self.estimator)
epm = self.getOrDefault(self.estimatorParamMaps)
numModels = len(epm)
eva = self.getOrDefault(self.evaluator)
nFolds = self.getOrDefault(self.numFolds)
metrics = [0.0] * numModels
pool = ThreadPool(processes=min(self.getParallelism(), numModels))
subModels = None
collectSubModelsParam = self.getCollectSubModels()
if collectSubModelsParam:
subModels = [[None for j in range(numModels)] for i in range(nFolds)]
datasets = self._kFold(dataset)
for i in range(nFolds):
validation = datasets[i][1].cache()
train = datasets[i][0].cache()
tasks = map(
inheritable_thread_target,
_parallelFitTasks(est, train, eva, validation, epm, collectSubModelsParam))
for j, metric, subModel in pool.imap_unordered(lambda f: f(), tasks):
metrics[j] += (metric / nFolds)
if collectSubModelsParam:
subModels[i][j] = subModel
validation.unpersist()
train.unpersist()
if eva.isLargerBetter():
bestIndex = np.argmax(metrics)
else:
bestIndex = np.argmin(metrics)
bestModel = est.fit(dataset, epm[bestIndex])
return self._copyValues(CrossValidatorModel(bestModel, metrics, subModels))
def _kFold(self, dataset):
nFolds = self.getOrDefault(self.numFolds)
foldCol = self.getOrDefault(self.foldCol)
datasets = []
if not foldCol:
# Do random k-fold split.
seed = self.getOrDefault(self.seed)
h = 1.0 / nFolds
randCol = self.uid + "_rand"
df = dataset.select("*", rand(seed).alias(randCol))
for i in range(nFolds):
validateLB = i * h
validateUB = (i + 1) * h
condition = (df[randCol] >= validateLB) & (df[randCol] < validateUB)
validation = df.filter(condition)
train = df.filter(~condition)
datasets.append((train, validation))
else:
# Use user-specified fold numbers.
def checker(foldNum):
if foldNum < 0 or foldNum >= nFolds:
raise ValueError(
"Fold number must be in range [0, %s), but got %s." % (nFolds, foldNum))
return True
checker_udf = UserDefinedFunction(checker, BooleanType())
for i in range(nFolds):
training = dataset.filter(checker_udf(dataset[foldCol]) & (col(foldCol) != lit(i)))
validation = dataset.filter(
checker_udf(dataset[foldCol]) & (col(foldCol) == lit(i)))
if training.rdd.getNumPartitions() == 0 or len(training.take(1)) == 0:
raise ValueError("The training data at fold %s is empty." % i)
if validation.rdd.getNumPartitions() == 0 or len(validation.take(1)) == 0:
raise ValueError("The validation data at fold %s is empty." % i)
datasets.append((training, validation))
return datasets
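# An illustrative sketch of the user-specified fold path above (the column and
# variable names are assumptions): rows where the fold column equals i become
# the validation set of fold i and all other rows its training set, e.g.
#
#   from pyspark.sql.functions import col
#   df = dataset.withColumn("fold", (col("id") % 3).cast("int"))
#   cv = CrossValidator(estimator=lr, estimatorParamMaps=grid,
#                       evaluator=evaluator, numFolds=3, foldCol="fold")
#   cvModel = cv.fit(df)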
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This copy creates a deep copy of
the embedded paramMap, and copies the embedded and extra parameters over.
.. versionadded:: 1.4.0
Parameters
----------
extra : dict, optional
Extra parameters to copy to the new instance
Returns
-------
:py:class:`CrossValidator`
Copy of this instance
"""
if extra is None:
extra = dict()
newCV = Params.copy(self, extra)
if self.isSet(self.estimator):
newCV.setEstimator(self.getEstimator().copy(extra))
# estimatorParamMaps remain the same
if self.isSet(self.evaluator):
newCV.setEvaluator(self.getEvaluator().copy(extra))
return newCV
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
if _ValidatorSharedReadWrite.is_java_convertible(self):
return JavaMLWriter(self)
return CrossValidatorWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return CrossValidatorReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java CrossValidator, create and return a Python wrapper of it.
Used for ML persistence.
"""
estimator, epms, evaluator = super(CrossValidator, cls)._from_java_impl(java_stage)
numFolds = java_stage.getNumFolds()
seed = java_stage.getSeed()
parallelism = java_stage.getParallelism()
collectSubModels = java_stage.getCollectSubModels()
foldCol = java_stage.getFoldCol()
# Create a new instance of this stage.
py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
numFolds=numFolds, seed=seed, parallelism=parallelism,
collectSubModels=collectSubModels, foldCol=foldCol)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java CrossValidator. Used for ML persistence.
Returns
-------
py4j.java_gateway.JavaObject
Java object equivalent to this instance.
"""
estimator, epms, evaluator = super(CrossValidator, self)._to_java_impl()
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidator", self.uid)
_java_obj.setEstimatorParamMaps(epms)
_java_obj.setEvaluator(evaluator)
_java_obj.setEstimator(estimator)
_java_obj.setSeed(self.getSeed())
_java_obj.setNumFolds(self.getNumFolds())
_java_obj.setParallelism(self.getParallelism())
_java_obj.setCollectSubModels(self.getCollectSubModels())
_java_obj.setFoldCol(self.getFoldCol())
return _java_obj
class CrossValidatorModel(Model, _CrossValidatorParams, MLReadable, MLWritable):
"""
CrossValidatorModel contains the model with the highest average cross-validation
metric across folds and uses this model to transform input data. CrossValidatorModel
also tracks the metrics for each param map evaluated.
.. versionadded:: 1.4.0
"""
def __init__(self, bestModel, avgMetrics=None, subModels=None):
super(CrossValidatorModel, self).__init__()
#: best model from cross validation
self.bestModel = bestModel
#: Average cross-validation metrics for each paramMap in
#: CrossValidator.estimatorParamMaps, in the corresponding order.
self.avgMetrics = avgMetrics or []
#: sub model list from cross validation
self.subModels = subModels
def _transform(self, dataset):
return self.bestModel.transform(dataset)
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This copies the underlying bestModel,
creates a deep copy of the embedded paramMap, and
copies the embedded and extra parameters over.
It does not copy the extra Params into the subModels.
.. versionadded:: 1.4.0
Parameters
----------
extra : dict, optional
Extra parameters to copy to the new instance
Returns
-------
:py:class:`CrossValidatorModel`
Copy of this instance
"""
if extra is None:
extra = dict()
bestModel = self.bestModel.copy(extra)
avgMetrics = list(self.avgMetrics)
# Guard against subModels being None (sub-models are not collected by default).
subModels = None if self.subModels is None else [
[sub_model.copy() for sub_model in fold_sub_models]
for fold_sub_models in self.subModels
]
return self._copyValues(CrossValidatorModel(bestModel, avgMetrics, subModels), extra=extra)
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
if _ValidatorSharedReadWrite.is_java_convertible(self):
return JavaMLWriter(self)
return CrossValidatorModelWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return CrossValidatorModelReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java CrossValidatorModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
sc = SparkContext._active_spark_context
bestModel = JavaParams._from_java(java_stage.bestModel())
avgMetrics = _java2py(sc, java_stage.avgMetrics())
estimator, epms, evaluator = super(CrossValidatorModel, cls)._from_java_impl(java_stage)
py_stage = cls(bestModel=bestModel, avgMetrics=avgMetrics)
params = {
"evaluator": evaluator,
"estimator": estimator,
"estimatorParamMaps": epms,
"numFolds": java_stage.getNumFolds(),
"foldCol": java_stage.getFoldCol(),
"seed": java_stage.getSeed(),
}
for param_name, param_val in params.items():
py_stage = py_stage._set(**{param_name: param_val})
if java_stage.hasSubModels():
py_stage.subModels = [[JavaParams._from_java(sub_model)
for sub_model in fold_sub_models]
for fold_sub_models in java_stage.subModels()]
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java CrossValidatorModel. Used for ML persistence.
Returns
-------
py4j.java_gateway.JavaObject
Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidatorModel",
self.uid,
self.bestModel._to_java(),
_py2java(sc, self.avgMetrics))
estimator, epms, evaluator = super(CrossValidatorModel, self)._to_java_impl()
params = {
"evaluator": evaluator,
"estimator": estimator,
"estimatorParamMaps": epms,
"numFolds": self.getNumFolds(),
"foldCol": self.getFoldCol(),
"seed": self.getSeed(),
}
for param_name, param_val in params.items():
java_param = _java_obj.getParam(param_name)
pair = java_param.w(param_val)
_java_obj.set(pair)
if self.subModels is not None:
java_sub_models = [[sub_model._to_java() for sub_model in fold_sub_models]
for fold_sub_models in self.subModels]
_java_obj.setSubModels(java_sub_models)
return _java_obj
@inherit_doc
class TrainValidationSplitReader(MLReader):
def __init__(self, cls):
super(TrainValidationSplitReader, self).__init__()
self.cls = cls
def load(self, path):
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
if not DefaultParamsReader.isPythonParamsInstance(metadata):
return JavaMLReader(self.cls).load(path)
else:
metadata, estimator, evaluator, estimatorParamMaps = \
_ValidatorSharedReadWrite.load(path, self.sc, metadata)
tvs = TrainValidationSplit(estimator=estimator,
estimatorParamMaps=estimatorParamMaps,
evaluator=evaluator)
tvs = tvs._resetUid(metadata['uid'])
DefaultParamsReader.getAndSetParams(tvs, metadata, skipParams=['estimatorParamMaps'])
return tvs
@inherit_doc
class TrainValidationSplitWriter(MLWriter):
def __init__(self, instance):
super(TrainValidationSplitWriter, self).__init__()
self.instance = instance
def saveImpl(self, path):
_ValidatorSharedReadWrite.validateParams(self.instance)
_ValidatorSharedReadWrite.saveImpl(path, self.instance, self.sc)
@inherit_doc
class TrainValidationSplitModelReader(MLReader):
def __init__(self, cls):
super(TrainValidationSplitModelReader, self).__init__()
self.cls = cls
def load(self, path):
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
if not DefaultParamsReader.isPythonParamsInstance(metadata):
return JavaMLReader(self.cls).load(path)
else:
metadata, estimator, evaluator, estimatorParamMaps = \
_ValidatorSharedReadWrite.load(path, self.sc, metadata)
bestModelPath = os.path.join(path, 'bestModel')
bestModel = DefaultParamsReader.loadParamsInstance(bestModelPath, self.sc)
validationMetrics = metadata['validationMetrics']
persistSubModels = ('persistSubModels' in metadata) and metadata['persistSubModels']
if persistSubModels:
subModels = [None] * len(estimatorParamMaps)
for paramIndex in range(len(estimatorParamMaps)):
modelPath = os.path.join(path, 'subModels', f'{paramIndex}')
subModels[paramIndex] = \
DefaultParamsReader.loadParamsInstance(modelPath, self.sc)
else:
subModels = None
tvsModel = TrainValidationSplitModel(
bestModel, validationMetrics=validationMetrics, subModels=subModels)
tvsModel = tvsModel._resetUid(metadata['uid'])
tvsModel.set(tvsModel.estimator, estimator)
tvsModel.set(tvsModel.estimatorParamMaps, estimatorParamMaps)
tvsModel.set(tvsModel.evaluator, evaluator)
DefaultParamsReader.getAndSetParams(
tvsModel, metadata, skipParams=['estimatorParamMaps'])
return tvsModel
@inherit_doc
class TrainValidationSplitModelWriter(MLWriter):
def __init__(self, instance):
super(TrainValidationSplitModelWriter, self).__init__()
self.instance = instance
def saveImpl(self, path):
_ValidatorSharedReadWrite.validateParams(self.instance)
instance = self.instance
persistSubModels = _ValidatorSharedReadWrite \
.getValidatorModelWriterPersistSubModelsParam(self)
extraMetadata = {'validationMetrics': instance.validationMetrics,
'persistSubModels': persistSubModels}
_ValidatorSharedReadWrite.saveImpl(path, instance, self.sc, extraMetadata=extraMetadata)
bestModelPath = os.path.join(path, 'bestModel')
instance.bestModel.save(bestModelPath)
if persistSubModels:
if instance.subModels is None:
raise ValueError(_save_with_persist_submodels_no_submodels_found_err)
subModelsPath = os.path.join(path, 'subModels')
for paramIndex in range(len(instance.getEstimatorParamMaps())):
modelPath = os.path.join(subModelsPath, f'{paramIndex}')
instance.subModels[paramIndex].save(modelPath)
class _TrainValidationSplitParams(_ValidatorParams):
"""
Params for :py:class:`TrainValidationSplit` and :py:class:`TrainValidationSplitModel`.
.. versionadded:: 3.0.0
"""
trainRatio = Param(Params._dummy(), "trainRatio", "Param for ratio between train and\
validation data. Must be between 0 and 1.", typeConverter=TypeConverters.toFloat)
def __init__(self, *args):
super(_TrainValidationSplitParams, self).__init__(*args)
self._setDefault(trainRatio=0.75)
@since("2.0.0")
def getTrainRatio(self):
"""
Gets the value of trainRatio or its default value.
"""
return self.getOrDefault(self.trainRatio)
class TrainValidationSplit(Estimator, _TrainValidationSplitParams, HasParallelism,
HasCollectSubModels, MLReadable, MLWritable):
"""
Validation for hyper-parameter tuning. Randomly splits the input dataset into train and
    validation sets, and uses the evaluation metric on the validation set to select the best model.
Similar to :class:`CrossValidator`, but only splits the set once.
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.ml.classification import LogisticRegression
>>> from pyspark.ml.evaluation import BinaryClassificationEvaluator
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.tuning import TrainValidationSplit, ParamGridBuilder
>>> from pyspark.ml.tuning import TrainValidationSplitModel
>>> import tempfile
>>> dataset = spark.createDataFrame(
... [(Vectors.dense([0.0]), 0.0),
... (Vectors.dense([0.4]), 1.0),
... (Vectors.dense([0.5]), 0.0),
... (Vectors.dense([0.6]), 1.0),
... (Vectors.dense([1.0]), 1.0)] * 10,
... ["features", "label"]).repartition(1)
>>> lr = LogisticRegression()
>>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
>>> evaluator = BinaryClassificationEvaluator()
>>> tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
... parallelism=1, seed=42)
>>> tvsModel = tvs.fit(dataset)
>>> tvsModel.getTrainRatio()
0.75
>>> tvsModel.validationMetrics
[0.5, ...
>>> path = tempfile.mkdtemp()
>>> model_path = path + "/model"
>>> tvsModel.write().save(model_path)
>>> tvsModelRead = TrainValidationSplitModel.read().load(model_path)
>>> tvsModelRead.validationMetrics
[0.5, ...
>>> evaluator.evaluate(tvsModel.transform(dataset))
0.833...
>>> evaluator.evaluate(tvsModelRead.transform(dataset))
0.833...
"""
@keyword_only
def __init__(self, *, estimator=None, estimatorParamMaps=None, evaluator=None,
trainRatio=0.75, parallelism=1, collectSubModels=False, seed=None):
"""
__init__(self, \\*, estimator=None, estimatorParamMaps=None, evaluator=None, \
trainRatio=0.75, parallelism=1, collectSubModels=False, seed=None)
"""
super(TrainValidationSplit, self).__init__()
self._setDefault(parallelism=1)
kwargs = self._input_kwargs
self._set(**kwargs)
@since("2.0.0")
@keyword_only
def setParams(self, *, estimator=None, estimatorParamMaps=None, evaluator=None,
trainRatio=0.75, parallelism=1, collectSubModels=False, seed=None):
"""
setParams(self, \\*, estimator=None, estimatorParamMaps=None, evaluator=None, \
trainRatio=0.75, parallelism=1, collectSubModels=False, seed=None):
Sets params for the train validation split.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setEstimator(self, value):
"""
Sets the value of :py:attr:`estimator`.
"""
return self._set(estimator=value)
@since("2.0.0")
def setEstimatorParamMaps(self, value):
"""
Sets the value of :py:attr:`estimatorParamMaps`.
"""
return self._set(estimatorParamMaps=value)
@since("2.0.0")
def setEvaluator(self, value):
"""
Sets the value of :py:attr:`evaluator`.
"""
return self._set(evaluator=value)
@since("2.0.0")
def setTrainRatio(self, value):
"""
Sets the value of :py:attr:`trainRatio`.
"""
return self._set(trainRatio=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
def setParallelism(self, value):
"""
Sets the value of :py:attr:`parallelism`.
"""
return self._set(parallelism=value)
def setCollectSubModels(self, value):
"""
Sets the value of :py:attr:`collectSubModels`.
"""
return self._set(collectSubModels=value)
def _fit(self, dataset):
est = self.getOrDefault(self.estimator)
epm = self.getOrDefault(self.estimatorParamMaps)
numModels = len(epm)
eva = self.getOrDefault(self.evaluator)
tRatio = self.getOrDefault(self.trainRatio)
seed = self.getOrDefault(self.seed)
randCol = self.uid + "_rand"
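        # Split rows into train and validation sets with a uniform random column:
        # rows whose random value is >= trainRatio form the validation set.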
df = dataset.select("*", rand(seed).alias(randCol))
condition = (df[randCol] >= tRatio)
validation = df.filter(condition).cache()
train = df.filter(~condition).cache()
subModels = None
collectSubModelsParam = self.getCollectSubModels()
if collectSubModelsParam:
subModels = [None for i in range(numModels)]
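        # Build one fit-and-evaluate task per param map; each task trains on the train
        # split, scores on the validation split and returns (index, metric, sub-model).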
tasks = map(
inheritable_thread_target,
_parallelFitTasks(est, train, eva, validation, epm, collectSubModelsParam))
pool = ThreadPool(processes=min(self.getParallelism(), numModels))
metrics = [None] * numModels
for j, metric, subModel in pool.imap_unordered(lambda f: f(), tasks):
metrics[j] = metric
if collectSubModelsParam:
subModels[j] = subModel
train.unpersist()
validation.unpersist()
if eva.isLargerBetter():
bestIndex = np.argmax(metrics)
else:
bestIndex = np.argmin(metrics)
bestModel = est.fit(dataset, epm[bestIndex])
return self._copyValues(TrainValidationSplitModel(bestModel, metrics, subModels))
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
        and some extra params. This creates a deep copy of
the embedded paramMap, and copies the embedded and extra parameters over.
.. versionadded:: 2.0.0
Parameters
----------
extra : dict, optional
Extra parameters to copy to the new instance
Returns
-------
:py:class:`TrainValidationSplit`
Copy of this instance
"""
if extra is None:
extra = dict()
newTVS = Params.copy(self, extra)
if self.isSet(self.estimator):
newTVS.setEstimator(self.getEstimator().copy(extra))
# estimatorParamMaps remain the same
if self.isSet(self.evaluator):
newTVS.setEvaluator(self.getEvaluator().copy(extra))
return newTVS
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
if _ValidatorSharedReadWrite.is_java_convertible(self):
return JavaMLWriter(self)
return TrainValidationSplitWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return TrainValidationSplitReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java TrainValidationSplit, create and return a Python wrapper of it.
Used for ML persistence.
"""
estimator, epms, evaluator = super(TrainValidationSplit, cls)._from_java_impl(java_stage)
trainRatio = java_stage.getTrainRatio()
seed = java_stage.getSeed()
parallelism = java_stage.getParallelism()
collectSubModels = java_stage.getCollectSubModels()
# Create a new instance of this stage.
py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
trainRatio=trainRatio, seed=seed, parallelism=parallelism,
collectSubModels=collectSubModels)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java TrainValidationSplit. Used for ML persistence.
Returns
-------
py4j.java_gateway.JavaObject
Java object equivalent to this instance.
"""
estimator, epms, evaluator = super(TrainValidationSplit, self)._to_java_impl()
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.TrainValidationSplit",
self.uid)
_java_obj.setEstimatorParamMaps(epms)
_java_obj.setEvaluator(evaluator)
_java_obj.setEstimator(estimator)
_java_obj.setTrainRatio(self.getTrainRatio())
_java_obj.setSeed(self.getSeed())
_java_obj.setParallelism(self.getParallelism())
_java_obj.setCollectSubModels(self.getCollectSubModels())
return _java_obj
class TrainValidationSplitModel(Model, _TrainValidationSplitParams, MLReadable, MLWritable):
"""
Model from train validation split.
.. versionadded:: 2.0.0
"""
def __init__(self, bestModel, validationMetrics=None, subModels=None):
super(TrainValidationSplitModel, self).__init__()
#: best model from train validation split
self.bestModel = bestModel
#: evaluated validation metrics
self.validationMetrics = validationMetrics or []
#: sub models from train validation split
self.subModels = subModels
def _transform(self, dataset):
return self.bestModel.transform(dataset)
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This copies the underlying bestModel,
creates a deep copy of the embedded paramMap, and
copies the embedded and extra parameters over.
And, this creates a shallow copy of the validationMetrics.
It does not copy the extra Params into the subModels.
.. versionadded:: 2.0.0
Parameters
----------
extra : dict, optional
Extra parameters to copy to the new instance
Returns
-------
:py:class:`TrainValidationSplitModel`
Copy of this instance
"""
if extra is None:
extra = dict()
bestModel = self.bestModel.copy(extra)
validationMetrics = list(self.validationMetrics)
subModels = [model.copy() for model in self.subModels]
return self._copyValues(
TrainValidationSplitModel(bestModel, validationMetrics, subModels),
extra=extra
)
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
if _ValidatorSharedReadWrite.is_java_convertible(self):
return JavaMLWriter(self)
return TrainValidationSplitModelWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return TrainValidationSplitModelReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java TrainValidationSplitModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
# Load information from java_stage to the instance.
sc = SparkContext._active_spark_context
bestModel = JavaParams._from_java(java_stage.bestModel())
validationMetrics = _java2py(sc, java_stage.validationMetrics())
estimator, epms, evaluator = super(TrainValidationSplitModel,
cls)._from_java_impl(java_stage)
# Create a new instance of this stage.
py_stage = cls(bestModel=bestModel,
validationMetrics=validationMetrics)
params = {
"evaluator": evaluator,
"estimator": estimator,
"estimatorParamMaps": epms,
"trainRatio": java_stage.getTrainRatio(),
"seed": java_stage.getSeed(),
}
for param_name, param_val in params.items():
py_stage = py_stage._set(**{param_name: param_val})
if java_stage.hasSubModels():
py_stage.subModels = [JavaParams._from_java(sub_model)
for sub_model in java_stage.subModels()]
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java TrainValidationSplitModel. Used for ML persistence.
Returns
-------
py4j.java_gateway.JavaObject
Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
_java_obj = JavaParams._new_java_obj(
"org.apache.spark.ml.tuning.TrainValidationSplitModel",
self.uid,
self.bestModel._to_java(),
_py2java(sc, self.validationMetrics))
estimator, epms, evaluator = super(TrainValidationSplitModel, self)._to_java_impl()
params = {
"evaluator": evaluator,
"estimator": estimator,
"estimatorParamMaps": epms,
"trainRatio": self.getTrainRatio(),
"seed": self.getSeed(),
}
for param_name, param_val in params.items():
java_param = _java_obj.getParam(param_name)
pair = java_param.w(param_val)
_java_obj.set(pair)
if self.subModels is not None:
java_sub_models = [sub_model._to_java() for sub_model in self.subModels]
_java_obj.setSubModels(java_sub_models)
return _java_obj
if __name__ == "__main__":
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.tuning tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
|
devs1991/test_edx_docmode | refs/heads/master | venv/lib/python2.7/site-packages/celery/loaders/__init__.py | 8 | # -*- coding: utf-8 -*-
"""
celery.loaders
~~~~~~~~~~~~~~
Loaders define how configuration is read, what happens
when workers start, when tasks are executed and so on.
"""
from __future__ import absolute_import
from celery._state import current_app
from celery.utils import deprecated
from celery.utils.imports import symbol_by_name, import_from_cwd
__all__ = ['get_loader_cls']
LOADER_ALIASES = {'app': 'celery.loaders.app:AppLoader',
'default': 'celery.loaders.default:Loader',
'django': 'djcelery.loaders:DjangoLoader'}
def get_loader_cls(loader):
"""Get loader class by name/alias"""
return symbol_by_name(loader, LOADER_ALIASES, imp=import_from_cwd)
@deprecated(deprecation=2.5, removal=4.0,
alternative='celery.current_app.loader')
def current_loader():
return current_app.loader
@deprecated(deprecation=2.5, removal=4.0,
alternative='celery.current_app.conf')
def load_settings():
return current_app.conf
|
krzysztofwos/BitcoinUnlimited | refs/heads/dev | qa/rpc-tests/test_framework/util.py | 3 | #!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2015-2017 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
import pdb
# Add python-bitcoinrpc to module search path:
import os
import sys
import math
import binascii
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import decimal
import json
import random
import shutil
import subprocess
import time
import re
import urllib.parse as urlparse
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
DEFAULT_TX_FEE_PER_BYTE = 50
PerfectFractions = True
BTC = 100000000
mBTC = 100000
uBTC = 100
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
    #For backward compatibility of the python scripts
#with previous versions of the cache, set MOCKTIME
#to Jan 1, 2014 + (201 * 10 * 60)
global MOCKTIME
MOCKTIME = 1388534400 + (201 * 10 * 60)
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
#If port is already defined then return port
if os.getenv("node" + str(n)):
return int(os.getenv("node" + str(n)))
#If no port defined then find an available port
if n == 0:
port = 11000 + n + os.getpid()%990
else:
port = int(os.getenv("node" + str(n-1))) + 1
from subprocess import check_output
netStatOut = check_output(["netstat", "-n"])
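    # Best-effort collision avoidance: bump the candidate port when netstat shows a
    # port in the 11xxx range already in use.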
for portInUse in re.findall(b"tcp.*?:(11\d\d\d)",netStatOut.lower()):
#print portInUse
if port == int(portInUse):
port += 1
os.environ["node" + str(n)] = str(port)
#print "port node " + str(n) + " is " + str(port)
return int(port)
def rpc_port(n):
#If port is already defined then return port
if os.getenv("rpcnode" + str(n)):
return int(os.getenv("rpcnode" + str(n)))
#If no port defined then find an available port
if n == 0:
port = 12000 + n + os.getpid()%990
else:
port = int(os.getenv("rpcnode" + str(n-1))) + 1
from subprocess import check_output
netStatOut = check_output(["netstat", "-n"])
for portInUse in re.findall(b"tcp.*?:(12\d\d\d)",netStatOut.lower()):
#print portInUse
if port == int(portInUse):
port += 1
os.environ["rpcnode" + str(n)] = str(port)
#print "port rpcnode " + str(n) + " is " + str(port)
return int(port)
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
print(counts)
if counts == [ counts[0] ]*len(counts):
break
time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
bitcoind_processes = {}
def initialize_datadir(dirname, n,bitcoinConfDict=None,wallet=None):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
defaults = {"server":1, "discover":0, "regtest":1,"rpcuser":"rt","rpcpassword":"rt",
"port":p2p_port(n),"rpcport":str(rpc_port(n)),"listenonion":0,"maxlimitertxfee":0}
if bitcoinConfDict: defaults.update(bitcoinConfDict)
with open(os.path.join(datadir, "bitcoin.conf"), 'w') as f:
for (key,val) in defaults.items():
if type(val) is type([]):
for v in val:
f.write("%s=%s\n" % (str(key), str(v)))
else:
f.write("%s=%s\n"% (str(key), str(val)))
if wallet:
regtestdir = os.path.join(datadir,"regtest")
if not os.path.isdir(regtestdir):
os.makedirs(regtestdir)
print(regtestdir, os.path.join(regtestdir, "wallet.dat"))
shutil.copyfile(wallet,os.path.join(regtestdir, "wallet.dat"))
return datadir
def rpc_url(i, rpchost=None):
return "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
def wait_for_bitcoind_start(process, url, i):
'''
Wait for bitcoind to start. This means that RPC is accessible and fully initialized.
Raise an exception if bitcoind exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('bitcoind exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
blocks = rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
                raise # unknown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir,bitcoinConfDict=None,wallets=None):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
"""
if (not os.path.isdir(os.path.join("cache","node0"))
or not os.path.isdir(os.path.join("cache","node1"))
or not os.path.isdir(os.path.join("cache","node2"))
or not os.path.isdir(os.path.join("cache","node3"))):
#find and delete old cache directories if any exist
for i in range(4):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
# Create cache directories, run bitcoinds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "bitcoind"), "-keypool=1", "-datadir="+datadir ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: bitcoind started, waiting for RPC to come up")
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: RPC succesfully started")
rpcs = []
for i in range(4):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
disable_mocktime()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i,bitcoinConfDict,wallets[i] if wallets else None) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes, bitcoinConfDict=None, wallets=None):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i, bitcoinConfDict, wallets)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a bitcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("BITCOIND", "bitcoind")
# RPC tests still depend on free transactions
args = [ binary, "-datadir="+datadir, "-rest", "-mocktime="+str(get_mocktime()) ] # // BU removed, "-keypool=1","-blockprioritysize=50000" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: bitcoind started, waiting for RPC to come up")
url = rpc_url(i, rpchost)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: RPC succesfully started")
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None,timewait=None):
"""
Start multiple bitcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
if binary is None: binary = [ None for i in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i],timewait=timewait))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def interconnect_nodes(nodes):
"""Connect every node in this list to every other node in the list"""
for frm in nodes:
for to in nodes:
if frm == to: continue
up = urlparse.urlparse(to.url)
ip_port = up.hostname + ":" + str(up.port-1000) # this is the RPC port but we want the p2p port so -1000
frm.addnode(ip_port, "onetry")
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def split_transaction(node, prevouts, toAddrs, txfeePer=DEFAULT_TX_FEE_PER_BYTE,**kwargs):
"""
Create a transaction that divides the sum of all the passed utxos into all the destination addresses
pass:
node: (node object) where to send the RPC calls
prevouts: a single UTXO description dictionary, or a list of them
toAddrs: a list of strings specifying the output addresses
"sendtx=False" if you don't want to transaction to be submitted.
Returns (transaction in hex, Vin list, Vout list)
"""
if type(prevouts) == type({}): prevouts = [prevouts] # If the user passes just one transaction then put a list around it
txid = None
inp = []
decContext = decimal.getcontext().prec
try: # try finally block to put the decimal precision back to what it was prior to this routine
decimal.getcontext().prec = 8 + 8 # 8 digits to get to 21million, and each bitcoin is 100 million satoshis
amount = Decimal(0)
iamount = 0
count = 0
for tx in prevouts:
inp.append({"txid":str(tx["txid"]),"vout":tx["vout"]})
amount += tx["amount"]*Decimal(BTC)
iamount += int(tx["amount"]*Decimal(BTC))
count += 1
assert(amount == iamount) # make sure Decimal and integer math is consistent
txLen = (len(prevouts)*100) + (len(toAddrs)*100) # Guess the tx Size
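        # Retry loop: build outputs for the current fee-per-byte guess, sign to learn the
        # real transaction size, and raise the fee and retry if the node rejects the
        # transaction for insufficient priority.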
while 1:
outp = {}
if amount - Decimal(txfeePer*txLen) < 0: # fee too big, find something smaller
txfeePer = (float(amount)/txLen)/1.5
txfee = int(math.ceil(txfeePer*txLen))
amtPer = (Decimal(amount-txfee)/len(toAddrs)).to_integral_value()
# print "amount: ", amount, " amount per: ", amtPer, "from :", len(prevouts), "to: ", len(toAddrs), "tx fee: ", txfeePer, txfee
for a in toAddrs[0:-1]:
if PerfectFractions:
outp[str(a)] = str(amtPer/Decimal(BTC))
else:
outp[str(a)] = float(amtPer/BTC)
a = toAddrs[-1]
amtPer = (amount - ((len(toAddrs)-1)*amtPer)) - txfee
# print "final amt: ", amtPer
if PerfectFractions:
outp[str(a)] = str(amtPer/BTC)
else:
outp[str(a)] = float(amtPer/BTC)
totalOutputs = sum([Decimal(x) for x in outp.values()])
assert(totalOutputs < amount)
txn = node.createrawtransaction(inp, outp)
if kwargs.get("sendtx",True):
#print time.strftime('%X %x %Z')
try:
s = str(txn)
# print "tx len: ", len(binascii.unhexlify(s))
signedtxn = node.signrawtransaction(s)
txLen = len(binascii.unhexlify(signedtxn["hex"])) # Get the actual transaction size for better tx fee estimation the next time around
finally:
#print time.strftime('%X %x %Z')
pass
if signedtxn["complete"]:
try:
txid = node.sendrawtransaction(signedtxn["hex"],True) # In the unit tests, we'll just allow high fees
return (txn,inp,outp,txid)
except JSONRPCException as e:
tmp = e.error["message"]
(code, msg) = tmp.split(":")
if code == 64: raise # bad transaction
# print tmp
if e.error["code"] == -26: # insufficient priority
txfeePer = txfeePer * 2
print(str(e))
pdb.set_trace()
print("Insufficient priority, raising tx fee per byte to: ", txfeePer)
continue
else:
raise
else:
for err in signedtxn["errors"]:
print(err["error"])
else:
return (txn,inp,outp,txid)
finally:
decimal.getcontext().prec = decContext
def assert_not_equal(thing1, thing2):
if thing1 == thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find == True:
assert_equal(expected, { })
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find == True:
num_matched = num_matched+1
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
node.generate(int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
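    # Each iteration spends one existing utxo into two equal outputs, netting one
    # extra utxo, until at least "count" confirmed utxos exist.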
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
for i in range (512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
for i in range(len(utxos)):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr] = satoshi_round(send_value)
rawtx = node.createrawtransaction(inputs, outputs)
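        # Splice the canned OP_RETURN outputs into the raw hex: the two chars at offset 92
        # encode the output count, and the leading "81" in txouts overrides it (129 outputs).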
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
|
manishpatell/erpcustomizationssaiimpex123qwe | refs/heads/master | openerp/addons/test_new_api/tests/test_new_fields.py | 15 | #
# test cases for new-style fields
#
from datetime import date, datetime
from collections import defaultdict
from openerp.tests import common
from openerp.exceptions import except_orm
class TestNewFields(common.TransactionCase):
def test_00_basics(self):
""" test accessing new fields """
# find a discussion
discussion = self.env.ref('test_new_api.discussion_0')
# read field as a record attribute or as a record item
self.assertIsInstance(discussion.name, basestring)
self.assertIsInstance(discussion['name'], basestring)
self.assertEqual(discussion['name'], discussion.name)
# read it with method read()
values = discussion.read(['name'])[0]
self.assertEqual(values['name'], discussion.name)
def test_01_basic_get_assertion(self):
""" test item getter """
# field access works on single record
record = self.env.ref('test_new_api.message_0_0')
self.assertEqual(len(record), 1)
ok = record.body
# field access fails on multiple records
records = self.env['test_new_api.message'].search([])
assert len(records) > 1
with self.assertRaises(except_orm):
faulty = records.body
def test_01_basic_set_assertion(self):
""" test item setter """
# field assignment works on single record
record = self.env.ref('test_new_api.message_0_0')
self.assertEqual(len(record), 1)
record.body = 'OK'
# field assignment fails on multiple records
records = self.env['test_new_api.message'].search([])
assert len(records) > 1
with self.assertRaises(except_orm):
records.body = 'Faulty'
def test_10_computed(self):
""" check definition of computed fields """
# by default function fields are not stored and readonly
field = self.env['test_new_api.message']._fields['size']
self.assertFalse(field.store)
self.assertTrue(field.readonly)
field = self.env['test_new_api.message']._fields['name']
self.assertTrue(field.store)
self.assertTrue(field.readonly)
def test_10_non_stored(self):
""" test non-stored fields """
# find messages
for message in self.env['test_new_api.message'].search([]):
# check definition of field
self.assertEqual(message.size, len(message.body or ''))
# check recomputation after record is modified
size = message.size
message.write({'body': (message.body or '') + "!!!"})
self.assertEqual(message.size, size + 3)
# special case: computed field without dependency must be computed
record = self.env['test_new_api.mixed'].create({})
self.assertTrue(record.now)
def test_11_stored(self):
""" test stored fields """
# find the demo discussion
discussion = self.env.ref('test_new_api.discussion_0')
self.assertTrue(len(discussion.messages) > 0)
# check messages
name0 = discussion.name or ""
for message in discussion.messages:
self.assertEqual(message.name, "[%s] %s" % (name0, message.author.name))
# modify discussion name, and check again messages
discussion.name = name1 = 'Talking about stuff...'
for message in discussion.messages:
self.assertEqual(message.name, "[%s] %s" % (name1, message.author.name))
# switch message from discussion, and check again
name2 = 'Another discussion'
discussion2 = discussion.copy({'name': name2})
message2 = discussion.messages[0]
message2.discussion = discussion2
for message in discussion2.messages:
self.assertEqual(message.name, "[%s] %s" % (name2, message.author.name))
def test_12_recursive(self):
""" test recursively dependent fields """
Category = self.env['test_new_api.category']
abel = Category.create({'name': 'Abel'})
beth = Category.create({'name': 'Bethany'})
cath = Category.create({'name': 'Catherine'})
dean = Category.create({'name': 'Dean'})
ewan = Category.create({'name': 'Ewan'})
finn = Category.create({'name': 'Finnley'})
gabe = Category.create({'name': 'Gabriel'})
cath.parent = finn.parent = gabe
abel.parent = beth.parent = cath
dean.parent = ewan.parent = finn
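        # Resulting tree: Gabriel -> (Catherine -> (Abel, Bethany), Finnley -> (Dean, Ewan))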
self.assertEqual(abel.display_name, "Gabriel / Catherine / Abel")
self.assertEqual(beth.display_name, "Gabriel / Catherine / Bethany")
self.assertEqual(cath.display_name, "Gabriel / Catherine")
self.assertEqual(dean.display_name, "Gabriel / Finnley / Dean")
self.assertEqual(ewan.display_name, "Gabriel / Finnley / Ewan")
self.assertEqual(finn.display_name, "Gabriel / Finnley")
self.assertEqual(gabe.display_name, "Gabriel")
ewan.parent = cath
self.assertEqual(ewan.display_name, "Gabriel / Catherine / Ewan")
cath.parent = finn
self.assertEqual(ewan.display_name, "Gabriel / Finnley / Catherine / Ewan")
def test_12_cascade(self):
""" test computed field depending on computed field """
message = self.env.ref('test_new_api.message_0_0')
message.invalidate_cache()
double_size = message.double_size
self.assertEqual(double_size, message.size)
def test_13_inverse(self):
""" test inverse computation of fields """
Category = self.env['test_new_api.category']
abel = Category.create({'name': 'Abel'})
beth = Category.create({'name': 'Bethany'})
cath = Category.create({'name': 'Catherine'})
dean = Category.create({'name': 'Dean'})
ewan = Category.create({'name': 'Ewan'})
finn = Category.create({'name': 'Finnley'})
gabe = Category.create({'name': 'Gabriel'})
self.assertEqual(ewan.display_name, "Ewan")
ewan.display_name = "Abel / Bethany / Catherine / Erwan"
self.assertEqual(beth.parent, abel)
self.assertEqual(cath.parent, beth)
self.assertEqual(ewan.parent, cath)
self.assertEqual(ewan.name, "Erwan")
def test_14_search(self):
""" test search on computed fields """
discussion = self.env.ref('test_new_api.discussion_0')
# determine message sizes
sizes = set(message.size for message in discussion.messages)
# search for messages based on their size
for size in sizes:
messages0 = self.env['test_new_api.message'].search(
[('discussion', '=', discussion.id), ('size', '<=', size)])
messages1 = self.env['test_new_api.message'].browse()
for message in discussion.messages:
if message.size <= size:
messages1 += message
self.assertEqual(messages0, messages1)
def test_15_constraint(self):
""" test new-style Python constraints """
discussion = self.env.ref('test_new_api.discussion_0')
# remove oneself from discussion participants: we can no longer create
# messages in discussion
discussion.participants -= self.env.user
with self.assertRaises(Exception):
self.env['test_new_api.message'].create({'discussion': discussion.id, 'body': 'Whatever'})
# make sure that assertRaises() does not leave fields to recompute
self.assertFalse(self.env.has_todo())
# put back oneself into discussion participants: now we can create
# messages in discussion
discussion.participants += self.env.user
self.env['test_new_api.message'].create({'discussion': discussion.id, 'body': 'Whatever'})
def test_20_float(self):
""" test float fields """
record = self.env['test_new_api.mixed'].create({})
# assign value, and expect rounding
record.write({'number': 2.4999999999999996})
self.assertEqual(record.number, 2.50)
# same with field setter
record.number = 2.4999999999999996
self.assertEqual(record.number, 2.50)
def test_21_date(self):
""" test date fields """
record = self.env['test_new_api.mixed'].create({})
# one may assign False or None
record.date = None
self.assertFalse(record.date)
# one may assign date and datetime objects
record.date = date(2012, 05, 01)
self.assertEqual(record.date, '2012-05-01')
record.date = datetime(2012, 05, 01, 10, 45, 00)
self.assertEqual(record.date, '2012-05-01')
# one may assign dates in the default format, and it must be checked
record.date = '2012-05-01'
self.assertEqual(record.date, '2012-05-01')
with self.assertRaises(ValueError):
record.date = '12-5-1'
def test_22_selection(self):
""" test selection fields """
record = self.env['test_new_api.mixed'].create({})
# one may assign False or None
record.lang = None
self.assertFalse(record.lang)
# one may assign a value, and it must be checked
for language in self.env['res.lang'].search([]):
record.lang = language.code
with self.assertRaises(ValueError):
record.lang = 'zz_ZZ'
def test_23_relation(self):
""" test relation fields """
demo = self.env.ref('base.user_demo')
message = self.env.ref('test_new_api.message_0_0')
# check environment of record and related records
self.assertEqual(message.env, self.env)
self.assertEqual(message.discussion.env, self.env)
demo_env = self.env(user=demo)
self.assertNotEqual(demo_env, self.env)
# check environment of record and related records
self.assertEqual(message.env, self.env)
self.assertEqual(message.discussion.env, self.env)
# "migrate" message into demo_env, and check again
demo_message = message.sudo(demo)
self.assertEqual(demo_message.env, demo_env)
self.assertEqual(demo_message.discussion.env, demo_env)
# assign record's parent to a record in demo_env
message.discussion = message.discussion.copy({'name': 'Copy'})
# both message and its parent field must be in self.env
self.assertEqual(message.env, self.env)
self.assertEqual(message.discussion.env, self.env)
def test_24_reference(self):
""" test reference fields. """
record = self.env['test_new_api.mixed'].create({})
# one may assign False or None
record.reference = None
self.assertFalse(record.reference)
# one may assign a user or a partner...
record.reference = self.env.user
self.assertEqual(record.reference, self.env.user)
record.reference = self.env.user.partner_id
self.assertEqual(record.reference, self.env.user.partner_id)
# ... but no record from a model that starts with 'ir.'
with self.assertRaises(ValueError):
record.reference = self.env['ir.model'].search([], limit=1)
def test_25_related(self):
""" test related fields. """
message = self.env.ref('test_new_api.message_0_0')
discussion = message.discussion
# by default related fields are not stored
field = message._fields['discussion_name']
self.assertFalse(field.store)
self.assertTrue(field.readonly)
# check value of related field
self.assertEqual(message.discussion_name, discussion.name)
# change discussion name, and check result
discussion.name = 'Foo'
self.assertEqual(message.discussion_name, 'Foo')
# change discussion name via related field, and check result
message.discussion_name = 'Bar'
self.assertEqual(discussion.name, 'Bar')
self.assertEqual(message.discussion_name, 'Bar')
# search on related field, and check result
search_on_related = self.env['test_new_api.message'].search([('discussion_name', '=', 'Bar')])
search_on_regular = self.env['test_new_api.message'].search([('discussion.name', '=', 'Bar')])
self.assertEqual(search_on_related, search_on_regular)
# check that field attributes are copied
message_field = message.fields_get(['discussion_name'])['discussion_name']
discussion_field = discussion.fields_get(['name'])['name']
self.assertEqual(message_field['help'], discussion_field['help'])
def test_26_inherited(self):
""" test inherited fields. """
# a bunch of fields are inherited from res_partner
for user in self.env['res.users'].search([]):
partner = user.partner_id
for field in ('is_company', 'name', 'email', 'country_id'):
self.assertEqual(getattr(user, field), getattr(partner, field))
self.assertEqual(user[field], partner[field])
def test_30_read(self):
""" test computed fields as returned by read(). """
discussion = self.env.ref('test_new_api.discussion_0')
for message in discussion.messages:
display_name = message.display_name
size = message.size
data = message.read(['display_name', 'size'])[0]
self.assertEqual(data['display_name'], display_name)
self.assertEqual(data['size'], size)
def test_40_new(self):
""" test new records. """
discussion = self.env.ref('test_new_api.discussion_0')
# create a new message
message = self.env['test_new_api.message'].new()
self.assertFalse(message.id)
# assign some fields; should have no side effect
message.discussion = discussion
message.body = BODY = "May the Force be with you."
self.assertEqual(message.discussion, discussion)
self.assertEqual(message.body, BODY)
self.assertFalse(message.author)
self.assertNotIn(message, discussion.messages)
# check computed values of fields
self.assertEqual(message.name, "[%s] %s" % (discussion.name, ''))
self.assertEqual(message.size, len(BODY))
def test_41_defaults(self):
""" test default values. """
fields = ['discussion', 'body', 'author', 'size']
defaults = self.env['test_new_api.message'].default_get(fields)
self.assertEqual(defaults, {'author': self.env.uid})
defaults = self.env['test_new_api.mixed'].default_get(['number'])
self.assertEqual(defaults, {'number': 3.14})
class TestMagicFields(common.TransactionCase):
def test_write_date(self):
record = self.env['test_new_api.discussion'].create({'name': 'Booba'})
self.assertEqual(record.create_uid, self.env.user)
self.assertEqual(record.write_uid, self.env.user)
class TestInherits(common.TransactionCase):
def test_inherits(self):
""" Check that a many2one field with delegate=True adds an entry in _inherits """
Talk = self.env['test_new_api.talk']
self.assertEqual(Talk._inherits, {'test_new_api.discussion': 'parent'})
self.assertIn('name', Talk._fields)
self.assertEqual(Talk._fields['name'].related, ('parent', 'name'))
talk = Talk.create({'name': 'Foo'})
discussion = talk.parent
self.assertTrue(discussion)
self.assertEqual(talk._name, 'test_new_api.talk')
self.assertEqual(discussion._name, 'test_new_api.discussion')
self.assertEqual(talk.name, discussion.name)
|
andybab/Impala | refs/heads/master | tests/query_test/test_sort.py | 7 | #!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import sys
import re
import random
from copy import copy
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
def transpose_results(result):
"""Given a query result (list of strings, each string represents a row), return a list
of columns, where each column is a list of strings."""
split_result = [row.split('\t') for row in result]
return [list(l) for l in zip(*split_result)]
class TestQueryFullSort(ImpalaTestSuite):
"""Test class to do functional validation of sorting when data is spilled to disk."""
@classmethod
def get_workload(self):
return 'tpch'
@classmethod
def add_test_dimensions(cls):
super(TestQueryFullSort, cls).add_test_dimensions()
if cls.exploration_strategy() == 'core':
cls.TestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'parquet')
def test_multiple_mem_limits(self, vector):
"""Exercise the dynamic memory scaling functionality."""
"""Using lineitem table forces the multi-phase sort with low mem_limit. This test
takes about a minute"""
query = """select l_comment, l_partkey, l_orderkey, l_suppkey, l_commitdate
from lineitem order by l_comment limit 100000"""
exec_option = vector.get_value('exec_option')
exec_option['disable_outermost_topn'] = 1
table_format = vector.get_value('table_format')
"""The first run should fit in memory, the 300m run is a 2-phase disk sort,
the 150m run is a multi-phase sort (i.e. with an intermediate merge)."""
for mem_limit in ['-1', '300m', '150m']:
exec_option['mem_limit'] = mem_limit
result = transpose_results(self.execute_query(
query, exec_option, table_format=table_format).data)
assert(result[0] == sorted(result[0]))
def test_sort_join(self, vector):
"""With 200m memory limit this should be a 2-phase sort"""
query = """select o1.o_orderdate, o2.o_custkey, o1.o_comment from orders o1 join
orders o2 on (o1.o_orderkey = o2.o_orderkey) order by o1.o_orderdate limit 100000"""
exec_option = vector.get_value('exec_option')
exec_option['disable_outermost_topn'] = 1
exec_option['mem_limit'] = "1200m"
table_format = vector.get_value('table_format')
result = transpose_results(self.execute_query(
query, exec_option, table_format=table_format).data)
assert(result[0] == sorted(result[0]))
def test_sort_union(self, vector):
pytest.xfail(reason="IMPALA-1346")
query = """select o_orderdate, o_custkey, o_comment from (select * from orders union
select * from orders union all select * from orders) as i
order by o_orderdate limit 100000"""
exec_option = vector.get_value('exec_option')
exec_option['disable_outermost_topn'] = 1
exec_option['mem_limit'] = "3000m"
table_format = vector.get_value('table_format')
result = transpose_results(self.execute_query(
query, exec_option, table_format=table_format).data)
assert(result[0] == sorted(result[0]))
|
mattiamaestrini/spotipy | refs/heads/master | examples/simple0.py | 10 | import spotipy
sp = spotipy.Spotify()
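# Query the public catalog for up to 20 tracks matching "weezer" with an unauthenticated client.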
results = sp.search(q='weezer', limit=20)
for i, t in enumerate(results['tracks']['items']):
print(' ', i, t['name'])
|
defance/edx-platform | refs/heads/master | openedx/core/djangoapps/credit/tests/test_signature.py | 43 | # coding=utf-8
"""
Tests for digital signatures used to validate messages to/from credit providers.
"""
from django.test import TestCase
from django.test.utils import override_settings
from openedx.core.djangoapps.credit import signature
@override_settings(CREDIT_PROVIDER_SECRET_KEYS={
"asu": u'abcd1234'
})
class SignatureTest(TestCase):
"""
Tests for digital signatures.
"""
def test_unicode_secret_key(self):
# Test a key that has type `unicode` but consists of ASCII characters
# (This can happen, for example, when loading the key from a JSON configuration file)
# When retrieving the shared secret, the type should be converted to `str`
key = signature.get_shared_secret_key("asu")
sig = signature.signature({}, key)
self.assertEqual(sig, "7d70a26b834d9881cc14466eceac8d39188fc5ef5ffad9ab281a8327c2c0d093")
@override_settings(CREDIT_PROVIDER_SECRET_KEYS={
"asu": u'\u4567'
})
def test_non_ascii_unicode_secret_key(self):
# Test a key that contains non-ASCII unicode characters
# This should return `None` and log an error; the caller
# is then responsible for logging the appropriate errors
# so we can fix the misconfiguration.
key = signature.get_shared_secret_key("asu")
self.assertIs(key, None)
def test_unicode_data(self):
""" Verify the signature generation method supports Unicode data. """
key = signature.get_shared_secret_key("asu")
sig = signature.signature({'name': u'Ed Xavíer'}, key)
self.assertEqual(sig, "76b6c9a657000829253d7c23977b35b34ad750c5681b524d7fdfb25cd5273cec")
|
DigitalPandacoin/pandacoin | refs/heads/master | test/functional/test_framework/test_framework.py | 1 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework():
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave pandacoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop pandacoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
help="Source directory containing pandacoind/pandacoin-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use pandacoin-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: pandacoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' | less to consolidate and view all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a pandacoind"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple pandacoinds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a pandacoind test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple pandacoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'pandacoind exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "pandacoind should have exited with an error"
else:
assert_msg = "pandacoind should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1388534400 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stdout. By default this logs at the INFO level and higher, but the level can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as pandacoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run pandacoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [os.getenv("BITCOIND", "pandacoind"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 10 * 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallets', 'chainstate', 'blocks']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in pandacoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some pandacoind binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "pandacoind"),
help="pandacoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "pandacoind"),
help="pandacoind binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
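# Illustrative sketch (not part of the original framework): a typical test
# script subclasses BitcoinTestFramework and overrides set_test_params() and
# run_test(), as described in the class docstring above.
class ExampleTest(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
    def run_test(self):
        self.log.info("Mine a block on node 0 and sync both nodes")
        self.nodes[0].generate(1)
        self.sync_all()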
|
thaim/ansible | refs/heads/fix-broken-link | test/integration/targets/ansible-doc/library/test_docs.py | 64 | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: test_docs
short_description: Test module
description:
- Test module
author:
- Ansible Core Team
'''
EXAMPLES = '''
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(),
)
module.exit_json()
if __name__ == '__main__':
main()
|
381426068/MissionPlanner | refs/heads/master | Lib/ihooks.py | 59 | """Import hook support.
Consistent use of this module will make it possible to change the
different mechanisms involved in loading modules independently.
While the built-in module imp exports interfaces to the built-in
module searching and loading algorithm, and it is possible to replace
the built-in function __import__ in order to change the semantics of
the import statement, until now it has been difficult to combine the
effect of different __import__ hacks, like loading modules from URLs
by rimport.py, or restricted execution by rexec.py.
This module defines three new concepts:
1) A "file system hooks" class provides an interface to a filesystem.
One hooks class is defined (Hooks), which uses the interface provided
by standard modules os and os.path. It should be used as the base
class for other hooks classes.
2) A "module loader" class provides an interface to search for a
module in a search path and to load it. It defines a method which
searches for a module in a single directory; by overriding this method
one can redefine the details of the search. If the directory is None,
built-in and frozen modules are searched instead.
Two module loader class are defined, both implementing the search
strategy used by the built-in __import__ function: ModuleLoader uses
the imp module's find_module interface, while HookableModuleLoader
uses a file system hooks class to interact with the file system. Both
use the imp module's load_* interfaces to actually load the module.
3) A "module importer" class provides an interface to import a
module, as well as interfaces to reload and unload a module. It also
provides interfaces to install and uninstall itself instead of the
default __import__ and reload (and unload) functions.
One module importer class is defined (ModuleImporter), which uses a
module loader instance passed in (by default HookableModuleLoader is
instantiated).
The classes defined here should be used as base classes for extended
functionality along those lines.
If a module importer class supports dotted names, its import_module()
must return a different value depending on whether it is called on
behalf of a "from ... import ..." statement or not. (This is caused
by the way the __import__ hook is used by the Python interpreter.) It
would also be wise to install a different version of reload().
"""
from warnings import warnpy3k, warn
warnpy3k("the ihooks module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import __builtin__
import imp
import os
import sys
__all__ = ["BasicModuleLoader","Hooks","ModuleLoader","FancyModuleLoader",
"BasicModuleImporter","ModuleImporter","install","uninstall"]
VERBOSE = 0
from imp import C_EXTENSION, PY_SOURCE, PY_COMPILED
from imp import C_BUILTIN, PY_FROZEN, PKG_DIRECTORY
BUILTIN_MODULE = C_BUILTIN
FROZEN_MODULE = PY_FROZEN
class _Verbose:
def __init__(self, verbose = VERBOSE):
self.verbose = verbose
def get_verbose(self):
return self.verbose
def set_verbose(self, verbose):
self.verbose = verbose
# XXX The following is an experimental interface
def note(self, *args):
if self.verbose:
self.message(*args)
def message(self, format, *args):
if args:
print format%args
else:
print format
class BasicModuleLoader(_Verbose):
"""Basic module loader.
This provides the same functionality as built-in import. It
doesn't deal with checking sys.modules -- all it provides is
find_module() and a load_module(), as well as find_module_in_dir()
which searches just one directory, and can be overridden by a
derived class to change the module search algorithm when the basic
dependency on sys.path is unchanged.
The interface is a little more convenient than imp's:
find_module(name, [path]) returns None or 'stuff', and
load_module(name, stuff) loads the module.
"""
def find_module(self, name, path = None):
if path is None:
path = [None] + self.default_path()
for dir in path:
stuff = self.find_module_in_dir(name, dir)
if stuff: return stuff
return None
def default_path(self):
return sys.path
def find_module_in_dir(self, name, dir):
if dir is None:
return self.find_builtin_module(name)
else:
try:
return imp.find_module(name, [dir])
except ImportError:
return None
def find_builtin_module(self, name):
# XXX frozen packages?
if imp.is_builtin(name):
return None, '', ('', '', BUILTIN_MODULE)
if imp.is_frozen(name):
return None, '', ('', '', FROZEN_MODULE)
return None
def load_module(self, name, stuff):
file, filename, info = stuff
try:
return imp.load_module(name, file, filename, info)
finally:
if file: file.close()
class Hooks(_Verbose):
"""Hooks into the filesystem and interpreter.
By deriving a subclass you can redefine your filesystem interface,
e.g. to merge it with the URL space.
This base class behaves just like the native filesystem.
"""
# imp interface
def get_suffixes(self): return imp.get_suffixes()
def new_module(self, name): return imp.new_module(name)
def is_builtin(self, name): return imp.is_builtin(name)
def init_builtin(self, name): return imp.init_builtin(name)
def is_frozen(self, name): return imp.is_frozen(name)
def init_frozen(self, name): return imp.init_frozen(name)
def get_frozen_object(self, name): return imp.get_frozen_object(name)
def load_source(self, name, filename, file=None):
return imp.load_source(name, filename, file)
def load_compiled(self, name, filename, file=None):
return imp.load_compiled(name, filename, file)
def load_dynamic(self, name, filename, file=None):
return imp.load_dynamic(name, filename, file)
def load_package(self, name, filename, file=None):
return imp.load_module(name, file, filename, ("", "", PKG_DIRECTORY))
def add_module(self, name):
d = self.modules_dict()
if name in d: return d[name]
d[name] = m = self.new_module(name)
return m
# sys interface
def modules_dict(self): return sys.modules
def default_path(self): return sys.path
def path_split(self, x): return os.path.split(x)
def path_join(self, x, y): return os.path.join(x, y)
def path_isabs(self, x): return os.path.isabs(x)
# etc.
def path_exists(self, x): return os.path.exists(x)
def path_isdir(self, x): return os.path.isdir(x)
def path_isfile(self, x): return os.path.isfile(x)
def path_islink(self, x): return os.path.islink(x)
# etc.
def openfile(self, *x): return open(*x)
openfile_error = IOError
def listdir(self, x): return os.listdir(x)
listdir_error = os.error
# etc.
class ModuleLoader(BasicModuleLoader):
"""Default module loader; uses file system hooks.
By defining suitable hooks, you might be able to load modules from
other sources than the file system, e.g. from compressed or
encrypted files, tar files or (if you're brave!) URLs.
"""
def __init__(self, hooks = None, verbose = VERBOSE):
BasicModuleLoader.__init__(self, verbose)
self.hooks = hooks or Hooks(verbose)
def default_path(self):
return self.hooks.default_path()
def modules_dict(self):
return self.hooks.modules_dict()
def get_hooks(self):
return self.hooks
def set_hooks(self, hooks):
self.hooks = hooks
def find_builtin_module(self, name):
# XXX frozen packages?
if self.hooks.is_builtin(name):
return None, '', ('', '', BUILTIN_MODULE)
if self.hooks.is_frozen(name):
return None, '', ('', '', FROZEN_MODULE)
return None
def find_module_in_dir(self, name, dir, allow_packages=1):
if dir is None:
return self.find_builtin_module(name)
if allow_packages:
fullname = self.hooks.path_join(dir, name)
if self.hooks.path_isdir(fullname):
stuff = self.find_module_in_dir("__init__", fullname, 0)
if stuff:
file = stuff[0]
if file: file.close()
return None, fullname, ('', '', PKG_DIRECTORY)
for info in self.hooks.get_suffixes():
suff, mode, type = info
fullname = self.hooks.path_join(dir, name+suff)
try:
fp = self.hooks.openfile(fullname, mode)
return fp, fullname, info
except self.hooks.openfile_error:
pass
return None
def load_module(self, name, stuff):
file, filename, info = stuff
(suff, mode, type) = info
try:
if type == BUILTIN_MODULE:
return self.hooks.init_builtin(name)
if type == FROZEN_MODULE:
return self.hooks.init_frozen(name)
if type == C_EXTENSION:
m = self.hooks.load_dynamic(name, filename, file)
elif type == PY_SOURCE:
m = self.hooks.load_source(name, filename, file)
elif type == PY_COMPILED:
m = self.hooks.load_compiled(name, filename, file)
elif type == PKG_DIRECTORY:
m = self.hooks.load_package(name, filename, file)
else:
raise ImportError, "Unrecognized module type (%r) for %s" % \
(type, name)
finally:
if file: file.close()
m.__file__ = filename
return m
class FancyModuleLoader(ModuleLoader):
"""Fancy module loader -- parses and execs the code itself."""
def load_module(self, name, stuff):
file, filename, (suff, mode, type) = stuff
realfilename = filename
path = None
if type == PKG_DIRECTORY:
initstuff = self.find_module_in_dir("__init__", filename, 0)
if not initstuff:
raise ImportError, "No __init__ module in package %s" % name
initfile, initfilename, initinfo = initstuff
initsuff, initmode, inittype = initinfo
if inittype not in (PY_COMPILED, PY_SOURCE):
if initfile: initfile.close()
raise ImportError, \
"Bad type (%r) for __init__ module in package %s" % (
inittype, name)
path = [filename]
file = initfile
realfilename = initfilename
type = inittype
if type == FROZEN_MODULE:
code = self.hooks.get_frozen_object(name)
elif type == PY_COMPILED:
import marshal
file.seek(8)
code = marshal.load(file)
elif type == PY_SOURCE:
data = file.read()
code = compile(data, realfilename, 'exec')
else:
return ModuleLoader.load_module(self, name, stuff)
m = self.hooks.add_module(name)
if path:
m.__path__ = path
m.__file__ = filename
try:
exec code in m.__dict__
except:
d = self.hooks.modules_dict()
if name in d:
del d[name]
raise
return m
class BasicModuleImporter(_Verbose):
"""Basic module importer; uses module loader.
This provides basic import facilities but no package imports.
"""
def __init__(self, loader = None, verbose = VERBOSE):
_Verbose.__init__(self, verbose)
self.loader = loader or ModuleLoader(None, verbose)
self.modules = self.loader.modules_dict()
def get_loader(self):
return self.loader
def set_loader(self, loader):
self.loader = loader
def get_hooks(self):
return self.loader.get_hooks()
def set_hooks(self, hooks):
return self.loader.set_hooks(hooks)
def import_module(self, name, globals={}, locals={}, fromlist=[]):
name = str(name)
if name in self.modules:
return self.modules[name] # Fast path
stuff = self.loader.find_module(name)
if not stuff:
raise ImportError, "No module named %s" % name
return self.loader.load_module(name, stuff)
def reload(self, module, path = None):
name = str(module.__name__)
stuff = self.loader.find_module(name, path)
if not stuff:
raise ImportError, "Module %s not found for reload" % name
return self.loader.load_module(name, stuff)
def unload(self, module):
del self.modules[str(module.__name__)]
# XXX Should this try to clear the module's namespace?
def install(self):
self.save_import_module = __builtin__.__import__
self.save_reload = __builtin__.reload
if not hasattr(__builtin__, 'unload'):
__builtin__.unload = None
self.save_unload = __builtin__.unload
__builtin__.__import__ = self.import_module
__builtin__.reload = self.reload
__builtin__.unload = self.unload
def uninstall(self):
__builtin__.__import__ = self.save_import_module
__builtin__.reload = self.save_reload
__builtin__.unload = self.save_unload
if not __builtin__.unload:
del __builtin__.unload
class ModuleImporter(BasicModuleImporter):
"""A module importer that supports packages."""
def import_module(self, name, globals=None, locals=None, fromlist=None,
level=-1):
parent = self.determine_parent(globals, level)
q, tail = self.find_head_package(parent, str(name))
m = self.load_tail(q, tail)
if not fromlist:
return q
if hasattr(m, "__path__"):
self.ensure_fromlist(m, fromlist)
return m
def determine_parent(self, globals, level=-1):
if not globals or not level:
return None
pkgname = globals.get('__package__')
if pkgname is not None:
if not pkgname and level > 0:
raise ValueError, 'Attempted relative import in non-package'
else:
# __package__ not set, figure it out and set it
modname = globals.get('__name__')
if modname is None:
return None
if "__path__" in globals:
# __path__ is set so modname is already the package name
pkgname = modname
else:
# normal module, work out package name if any
if '.' not in modname:
if level > 0:
raise ValueError, ('Attempted relative import in '
'non-package')
globals['__package__'] = None
return None
pkgname = modname.rpartition('.')[0]
globals['__package__'] = pkgname
if level > 0:
dot = len(pkgname)
for x in range(level, 1, -1):
try:
dot = pkgname.rindex('.', 0, dot)
except ValueError:
raise ValueError('attempted relative import beyond '
'top-level package')
pkgname = pkgname[:dot]
try:
return sys.modules[pkgname]
except KeyError:
if level < 1:
warn("Parent module '%s' not found while handling "
"absolute import" % pkgname, RuntimeWarning, 1)
return None
else:
raise SystemError, ("Parent module '%s' not loaded, cannot "
"perform relative import" % pkgname)
def find_head_package(self, parent, name):
if '.' in name:
i = name.find('.')
head = name[:i]
tail = name[i+1:]
else:
head = name
tail = ""
if parent:
qname = "%s.%s" % (parent.__name__, head)
else:
qname = head
q = self.import_it(head, qname, parent)
if q: return q, tail
if parent:
qname = head
parent = None
q = self.import_it(head, qname, parent)
if q: return q, tail
raise ImportError, "No module named '%s'" % qname
def load_tail(self, q, tail):
m = q
while tail:
i = tail.find('.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = self.import_it(head, mname, m)
if not m:
raise ImportError, "No module named '%s'" % mname
return m
def ensure_fromlist(self, m, fromlist, recursive=0):
for sub in fromlist:
if sub == "*":
if not recursive:
try:
all = m.__all__
except AttributeError:
pass
else:
self.ensure_fromlist(m, all, 1)
continue
if sub != "*" and not hasattr(m, sub):
subname = "%s.%s" % (m.__name__, sub)
submod = self.import_it(sub, subname, m)
if not submod:
raise ImportError, "No module named '%s'" % subname
def import_it(self, partname, fqname, parent, force_load=0):
if not partname:
# completely empty module name should only happen in
# 'from . import' or __import__("")
return parent
if not force_load:
try:
return self.modules[fqname]
except KeyError:
pass
try:
path = parent and parent.__path__
except AttributeError:
return None
partname = str(partname)
stuff = self.loader.find_module(partname, path)
if not stuff:
return None
fqname = str(fqname)
m = self.loader.load_module(fqname, stuff)
if parent:
setattr(parent, partname, m)
return m
def reload(self, module):
name = str(module.__name__)
if '.' not in name:
return self.import_it(name, name, None, force_load=1)
i = name.rfind('.')
pname = name[:i]
parent = self.modules[pname]
return self.import_it(name[i+1:], name, parent, force_load=1)
default_importer = None
current_importer = None
def install(importer = None):
global current_importer
current_importer = importer or default_importer or ModuleImporter()
current_importer.install()
def uninstall():
global current_importer
current_importer.uninstall()
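# Illustrative usage sketch (not part of the original module): install() routes
# __import__ and reload through a ModuleImporter, and uninstall() restores the
# built-in machinery.
#     import ihooks
#     ihooks.install()        # subsequent imports go through ModuleImporter
#     import string           # resolved via the installed importer
#     ihooks.uninstall()      # restore the original __import__ and reload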
|
AbhaySingh/pjproject | refs/heads/master | tests/pjsua/mod_sendto.py | 40 | # $Id$
import imp
import sys
import inc_sip as sip
import inc_const as const
import re
from inc_cfg import *
# Read configuration
cfg_file = imp.load_source("cfg_file", ARGS[1])
# Test body function
def test_func(t):
pjsua = t.process[0]
# Create dialog
dlg = sip.Dialog("127.0.0.1", pjsua.inst_param.sip_port,
tcp=cfg_file.sendto_cfg.use_tcp)
#dlg = sip.Dialog("127.0.0.1", 5060, tcp=cfg_file.sendto_cfg.use_tcp)
cfg = cfg_file.sendto_cfg
if len(cfg.complete_msg) != 0:
req = dlg.update_fields(cfg.complete_msg)
else:
req = dlg.create_invite(cfg.sdp, cfg.extra_headers, cfg.body)
resp = dlg.send_request_wait(req, 10)
if resp=="":
raise TestError("Timed-out waiting for response")
# Check response code
code = int(sip.get_code(resp))
if code != cfg.resp_code:
dlg.hangup(code)
raise TestError("Expecting code " + str(cfg.resp_code) +
" got " + str(code))
# Check for patterns that must exist
for p in cfg.resp_include:
if re.search(p, resp, re.M | re.I)==None:
dlg.hangup(code)
raise TestError("Pattern " + p + " not found")
# Check for patterns that must not exist
for p in cfg.resp_exclude:
if re.search(p, resp, re.M | re.I)!=None:
dlg.hangup(code)
raise TestError("Excluded pattern " + p + " found")
pjsua.sync_stdout()
dlg.hangup(code)
pjsua.sync_stdout()
# Here is where it all comes together
test = TestParam(cfg_file.sendto_cfg.name,
[cfg_file.sendto_cfg.inst_param],
test_func)
|
csmm/multiase | refs/heads/master | multiasecalc/lammps/dynamics.py | 1 | from multiasecalc.lammps import unitconversion
from ase.optimize.optimize import Dynamics
from ase.io.trajectory import PickleTrajectory
from ase.md.logger import MDLogger
from ase import units
from random import random
import numpy as np
class LAMMPSOptimizer(Dynamics):
""" Geometry optimizer for LAMMPS. works only with LAMMPS calculators """
def __init__(self, atoms, restart=None, logfile=None, trajectory=None, algorithm='cg', relax_cell=False):
Dynamics.__init__(self, atoms, logfile, trajectory)
self.algorithm = algorithm
self.relax_cell = relax_cell
def run(self, fmax=0.001, steps=1e8):
self.atoms.calc.minimize(self.atoms, ftol=fmax, maxeval=steps, min_style=self.algorithm, relax_cell=self.relax_cell)
class LAMMPSMolecularDynamics(Dynamics):
""" Base class for molecular dynamics with LAMMPS. Requires a LAMMPS calculator. """
def __init__(self, atoms, timestep, integrator='verlet', trajectory=None,
traj_interval=1000, logfile=None, loginterval=100):
Dynamics.__init__(self, atoms, None, None)
self.dt = timestep
if integrator == 'verlet':
self.run_style = 'verlet'
else:
raise RuntimeError('Unknown integrator: %s' % integrator)
if trajectory:
if isinstance(trajectory, str):
trajectory = PickleTrajectory(trajectory, 'w', atoms)
self.attach(trajectory, interval=traj_interval)
if logfile:
self.attach(MDLogger(dyn=self, atoms=atoms, logfile=logfile),
interval=loginterval)
self.fix = None
self.cell_relaxed = False
def run(self, steps=50, constraints=[]):
self.nsteps = 0
fix = 'all '+self.fix
calc = self.atoms.calc
it = self.run_iterator(steps)
calc.molecular_dynamics(self.atoms, self.dt, fix, it, self.cell_relaxed, steps, constraints)
def run_iterator(self, steps):
cur_step = 0
for target_step in range(steps+1):
for function, interval, args, kwargs in self.observers:
if target_step % interval == 0:
if target_step > cur_step:
yield target_step - cur_step
cur_step = target_step
function(*args, **kwargs)
if cur_step < steps:
yield steps - cur_step
def get_time(self):
return self.nsteps * self.dt
class LAMMPS_NVE(LAMMPSMolecularDynamics):
""" Microcanonical ensemble """
def __init__(self, atoms, timestep, **kwargs):
LAMMPSMolecularDynamics.__init__(self, atoms, timestep, **kwargs)
self.fix = 'nve'
class LAMMPS_NVT(LAMMPSMolecularDynamics):
""" Constant temperature calculations with Nose-Hoover or Berendsen """
def __init__(self, atoms, timestep, temperature, t_damp=100*units.fs,
thermostat='Nose-Hoover', ramp_to_temp=None, **kwargs):
LAMMPSMolecularDynamics.__init__(self, atoms, timestep, **kwargs)
if thermostat == 'Nose-Hoover':
cmd = 'nvt temp'
elif thermostat == 'Berendsen':
cmd = 'temp/berendsen'
else:
raise RuntimeError('Unknown thermostat: %s' % thermostat)
t_damp = atoms.calc.from_ase_units(t_damp, 'time')
if not ramp_to_temp: ramp_to_temp = temperature
self.fix = '%s %f %f %f' %(cmd, temperature, ramp_to_temp, t_damp)
class LAMMPS_NPT(LAMMPSMolecularDynamics):
""" Constant temperature and pressure calculations with Nose-Hoover """
def __init__(self, atoms, timestep, temperature, externalstress, isotropic=True, t_damp=100*units.fs, p_damp=1000*units.fs, ramp_to_temp=None, **kwargs):
LAMMPSMolecularDynamics.__init__(self, atoms, timestep, **kwargs)
pressure = atoms.calc.from_ase_units(externalstress, 'pressure')
t_damp = atoms.calc.from_ase_units(t_damp, 'time')
p_damp = atoms.calc.from_ase_units(p_damp, 'time')
if not ramp_to_temp: ramp_to_temp = temperature
if hasattr(pressure, 'shape'):
px, pxy, pxz = pressure[0,:]
py, pyz = pressure[1,1:]
pz = pressure[2,2]
p_diags = [px, py, pz]
args = ' '.join(['%s %f %f %f' % ('xyz'[i], p_diags[i], p_diags[i], p_damp) for i in range(3) if atoms.pbc[i]])
if atoms.pbc[0] and atoms.pbc[1]:
args += ' xy %f %f %f' % (pxy, pxy, p_damp)
if atoms.pbc[1] and atoms.pbc[2]:
args += ' yz %f %f %f' % (pyz, pyz, p_damp)
if atoms.pbc[0] and atoms.pbc[2]:  # xz tilt couples the x and z directions
args += ' xz %f %f %f' % (pxz, pxz, p_damp)
else:
pvalues = '%f %f %f' % (pressure, pressure, p_damp)
if atoms.pbc.all():
if isotropic:
coupling = 'iso'
elif (np.dot(atoms.cell, atoms.cell) == atoms.cell**2).all():
# orthogonal cell
coupling = 'aniso'
else:
coupling = 'tri'
args = '%s %s' % (coupling, pvalues)
else:
args = ' '.join(['%s %s' % ('xyz'[i], pvalues) for i in range(3) if atoms.pbc[i]])
self.fix = 'npt temp %f %f %f %s' %(temperature, ramp_to_temp, t_damp, args)
self.cell_relaxed = True
class SimpleConstraint:
def __init__(self, indices):
self.indices = indices
def get_commands(self, atoms):
fix = self.get_fix(atoms)
id = '%s%s' % (self.__class__.__name__, abs(hash(tuple(self.indices))))
groupname = 'group%s' % id
fixname = 'fix%s' % id
cmds = []
indices_str = ' '.join([str(i+1) for i in self.indices])
cmds.append('group %s id %s' % (groupname, indices_str))
cmds.append('fix %s %s %s' % (fixname, groupname, fix))
return cmds
def get_fix(self, atoms):
raise NotImplementedError()
class Spring(SimpleConstraint):
def __init__(self, indices, point, spring_constant, R0=0.0):
SimpleConstraint.__init__(self, indices)
self.point = point
self.K = spring_constant
self.R0 = R0
def get_fix(self, atoms):
K = atoms.calc.from_ase_units(self.K, 'force')
x, y, z = atoms.calc.prism.vector_to_lammps(self.point)
return 'spring tether %f %f %f %f %f' % (K, x, y, z, self.R0)
class AddForce(SimpleConstraint):
def __init__(self, indices, total_force):
SimpleConstraint.__init__(self, indices)
self.total_force = total_force
def get_fix(self, atoms):
force = self.total_force / len(self.indices)
force = atoms.calc.prism.vector_to_lammps(force)
fx, fy, fz = atoms.calc.from_ase_units(force, 'force')
return 'addforce %f %f %f' % (fx, fy, fz)
class LJWall:
def __init__(self, face, epsilon, sigma, wall_offset=None, final_wall_offset=None, mixing='arithmetic'):
self.face = face
self.epsilon = epsilon
self.sigma = sigma
self.offset = wall_offset
self.final_offset = final_wall_offset
self.mixing = mixing
self.commands = []
self.ngroups = 0
self.nfixes = 0
self.id = '%s%s' % (self.__class__.__name__, abs(hash(epsilon + sigma) + hash(face)))
#if 'hi' in face:
# self.offset = -abs(self.offset)
def get_commands(self, atoms):
ffdata = atoms.calc.ff_data
if self.final_offset != None:
rampname = 'ramp%s' % self.id
self.commands.append('variable %s equal ramp(%f,%f)' % (rampname, self.offset, self.final_offset))
coord = 'v_%s' % rampname
elif self.offset != None:
coord = '%f' % self.offset
else:
coord = 'EDGE'
for tp in atoms.calc.data.atom_typeorder:
actual_type = ffdata.get_actual_type('atom', tp)
eps, sig = ffdata.get_params('atom', actual_type)['Pair Coeffs']
mixeps = np.sqrt(self.epsilon*eps)
if self.mixing == 'arithmetic':
mixsig = (self.sigma+sig)/2
elif self.mixing == 'geometric':
mixsig = np.sqrt(self.sigma*sig)
else:
raise RuntimeError('Invalid mixing type: %s' % self.mixing)
typeid = atoms.calc.data.atom_typeorder.index(tp) + 1
groupname = self.create_group_by_type(typeid)
cutoff = 10.0
fixstr = 'wall/lj126 %s %s %f %f %f units box pbc yes' % (self.face, coord, mixeps, mixsig, cutoff)
self.create_fix(groupname, fixstr)
return self.commands
def create_group_by_type(self, typeid):
groupname = 'group%s%s' % (self.id, typeid)
self.commands.append('group %s type %i' % (groupname, typeid))
self.ngroups += 1
return groupname
def create_fix(self, groupname, fixstr):
fixname = 'fix%s%i' % (self.id, self.nfixes)
self.commands.append('fix %s %s %s' % (fixname, groupname, fixstr))
self.nfixes += 1
return fixname
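# Illustrative usage sketch (assumptions noted): given an ASE Atoms object
# whose calculator is a multiasecalc LAMMPS calculator, a constant-temperature
# run could be driven as below; the calculator construction is omitted because
# it is specific to the user's setup.
#     from ase import units
#     dyn = LAMMPS_NVT(atoms, timestep=1.0 * units.fs, temperature=300,
#                      trajectory='nvt.traj', logfile='md.log')
#     dyn.run(steps=1000)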
|
pavlovml/tensorflow | refs/heads/master | tensorflow/python/ops/common_shapes.py | 4 | """A library of common shape functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
def scalar_shape(unused_op):
"""Shape function for ops that output a scalar value."""
return [tensor_shape.scalar()]
def unchanged_shape(op):
"""Shape function for ops that output an tensor like their first input."""
return [op.inputs[0].get_shape()]
def unchanged_shape_with_rank(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: The exact rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank(rank)]
return _ShapeFunction
def unchanged_shape_with_rank_at_least(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: A lower bound on the rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank_at_least(rank)]
return _ShapeFunction
def unchanged_shape_with_rank_at_most(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: An upper bound on the rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank_at_most(rank)]
return _ShapeFunction
def matmul_shape(op):
"""Shape function for a MatMul op."""
a_shape = op.inputs[0].get_shape().with_rank(2)
transpose_a = op.get_attr("transpose_a")
b_shape = op.inputs[1].get_shape().with_rank(2)
transpose_b = op.get_attr("transpose_b")
output_rows = a_shape[1] if transpose_a else a_shape[0]
output_cols = b_shape[0] if transpose_b else b_shape[1]
inner_a = a_shape[0] if transpose_a else a_shape[1]
inner_b = b_shape[1] if transpose_b else b_shape[0]
inner_a.assert_is_compatible_with(inner_b)
return [tensor_shape.TensorShape([output_rows, output_cols])]
def bias_add_shape(op):
"""Shape function for a BiasAdd op."""
input_shape = op.inputs[0].get_shape().with_rank_at_least(2)
bias_shape = op.inputs[1].get_shape().with_rank(1)
if input_shape.ndims is not None:
# Output has the same shape as input, and matches the length of
# bias in its last dimension.
output_shape = input_shape[0:-1].concatenate(
input_shape[-1].merge_with(bias_shape[0]))
else:
output_shape = tensor_shape.unknown_shape()
return [output_shape]
def _Get2DOutputSize(input_height, input_width, filter_height, filter_width,
row_stride, col_stride, padding_type):
"""Returns the number of rows and columns in a convolution/pooling output."""
input_height = tensor_shape.as_dimension(input_height)
input_width = tensor_shape.as_dimension(input_width)
filter_height = tensor_shape.as_dimension(filter_height)
filter_width = tensor_shape.as_dimension(filter_width)
row_stride = int(row_stride)
col_stride = int(col_stride)
if filter_height.value == 1 and filter_width.value == 1 and (
row_stride == 1 and col_stride == 1):
return input_height, input_width
else:
if filter_height > input_height or filter_width > input_width:
raise ValueError("filter must not be larger than the input: ",
"Filter: [", filter_height, "x", filter_width, "] ",
"Input: [", input_height, "x", input_width, "] ")
if row_stride > filter_height or col_stride > filter_width:
raise ValueError("stride must be less than or equal to filter size",
"stride: [", row_stride, "x", col_stride, "] ",
"filter: [", filter_height, "x", filter_width, "] ")
# Compute number of rows in the output, based on the padding.
if input_height.value is None or filter_height.value is None:
out_rows = None
elif padding_type == "VALID":
out_rows = ((input_height.value - filter_height.value + row_stride) //
row_stride)
elif padding_type == "SAME":
out_rows = (input_height.value + row_stride - 1) // row_stride
else:
raise ValueError("Invalid value for padding: %r" % padding_type)
# Compute number of columns in the output, based on the padding.
if input_width.value is None or filter_width.value is None:
out_cols = None
elif padding_type == "VALID":
out_cols = ((input_width.value - filter_width.value + col_stride) //
col_stride)
elif padding_type == "SAME":
out_cols = (input_width.value + col_stride - 1) // col_stride
return out_rows, out_cols
def conv2d_shape(op):
"""Shape function for a Conv2D op.
This op has two inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* filter, a 4D tensor with shape = [filter_rows, filter_cols,
depth_in, depth_out]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "padding" and "strides" attrs.
Args:
op: A Conv2D Operation.
Returns:
A list containing the Shape of the Conv2D output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
filter_shape = op.inputs[1].get_shape().with_rank(4)
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = filter_shape[0]
filter_cols = filter_shape[1]
depth_out = filter_shape[3]
# Check that the input depths are compatible.
input_shape[3].assert_is_compatible_with(filter_shape[2])
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
if stride_r != stride_c:
# TODO(shlens): Add support for this.
raise ValueError("Current implementation only supports equal length "
"strides in the row and column dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
stride = stride_r
padding = op.get_attr("padding")
out_rows, out_cols = _Get2DOutputSize(
in_rows, in_cols, filter_rows, filter_cols, stride, stride, padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
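# Worked example (illustrative, not part of the original module): for an input
# of shape [32, 28, 28, 3], a 5x5 filter with 16 output channels and stride 1,
# _Get2DOutputSize gives a 28x28 output with SAME padding and
# (28 - 5 + 1) // 1 = 24, i.e. 24x24, with VALID padding, so conv2d_shape
# returns [32, 28, 28, 16] or [32, 24, 24, 16] respectively.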
def separable_conv2d_shape(op):
"""Shape function for a SeparableConv2D op.
This op has three inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* depthwise_filter, a 4D tensor with shape = [filter_rows,
filter_cols, depth_in, depth_multiplier]
* pointwise_filter, a 4D tensor with shape = [1, 1, depth_in *
depth_multiplier, depth_out]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "padding" and "strides" attrs.
Args:
op: A SeparableConv2D Operation.
Returns:
A list containing the Shape of the SeparableConv2D output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
depthwise_filter_shape = op.inputs[1].get_shape().merge_with(
tensor_shape.TensorShape([None, None, input_shape[3], None]))
pointwise_depth_in = depthwise_filter_shape[2] * depthwise_filter_shape[3]
pointwise_filter_shape = op.inputs[2].get_shape().merge_with(
tensor_shape.TensorShape([1, 1, pointwise_depth_in, None]))
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = depthwise_filter_shape[0]
filter_cols = depthwise_filter_shape[1]
depth_out = pointwise_filter_shape[3]
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
if stride_r != stride_c:
# TODO(shlens): Add support for this.
raise ValueError("Current implementation only supports equal length "
"strides in the row and column dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
stride = stride_r
padding = op.get_attr("padding")
out_rows, out_cols = _Get2DOutputSize(
in_rows, in_cols, filter_rows, filter_cols, stride, stride, padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def avg_pool_shape(op):
"""Shape function for an AvgPool op.
This op has one input:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "ksize", "strides", and "padding" attrs.
Args:
op: An AvgPool Operation.
Returns:
A single-element list containing the Shape of the AvgPool output.
Raises:
ValueError: If the shape of the input is invalid or incompatible with
the values of the attrs.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
if ksize_b != 1 or ksize_d != 1:
raise ValueError("Current implementation does not support pooling "
"in the batch and depth dimensions.")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not support strides "
"in the batch and depth dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
padding = op.get_attr("padding")
out_rows, out_cols = _Get2DOutputSize(
in_rows, in_cols, ksize_r, ksize_c, stride_r, stride_c, padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth])]
def max_pool_shape(op):
"""Shape function for a MaxPool op.
This op has one input:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows, out_cols, and depth_out depend
on the value of the op's "ksize", "strides", and "padding" attrs.
Args:
op: A MaxPool Operation.
Returns:
A single-element list containing the Shape of the MaxPool output.
Raises:
ValueError: If the shape of the input is invalid or incompatible with
the values of the attrs.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
if ksize_b != 1:
raise ValueError("Current implementation does not support pooling "
"in the batch dimension.")
if stride_b != 1:
raise ValueError("Current implementation does not support strides "
"in the batch dimension.")
if not ((ksize_r == 1 and ksize_c == 1) or ksize_d == 1):
raise ValueError("MaxPooling supports exactly one of pooling across depth "
"or pooling across width/height.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
if ksize_d == 1:
padding = op.get_attr("padding")
out_rows, out_cols = _Get2DOutputSize(
in_rows, in_cols, ksize_r, ksize_c, stride_r, stride_c, padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth])]
else:
if depth % ksize_d > 0:
raise ValueError("Depthwise max pooling requires the depth window "
"to evenly divide the input depth.")
if stride_d != ksize_d:
raise ValueError("Depthwise max pooling requires the depth window "
"to equal the depth stride.")
return [tensor_shape.TensorShape([batch_size, in_rows, in_cols, depth //
ksize_d])]
def no_outputs(unused_op):
"""Shape function for use with ops that have no outputs."""
return []
def unknown_shape(op):
"""Shape function for use with ops whose output shapes are unknown."""
return [tensor_shape.unknown_shape() for _ in op.outputs]
|
AnturK/-tg-station | refs/heads/master | tools/minibot/config.py | 207 | # Configuration for the minibot.py bot starts here
server = "irc.rizon.net"
port = 6667
channels = ["#asdfgbus", "#botbus"]
defaultchannel = "#asdfgbus"
nick = "minibot-testing-ss13"
altnick = "minibot-testing-ss13_"
name = "minibot"
ident = "minibot"
realname = "minibot"
password = "CHANGETHIS"
# Configuration ends here
|
HyperBaton/ansible | refs/heads/devel | lib/ansible/modules/network/aci/mso_schema_site_anp_epg_staticport.py | 13 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_site_anp_epg_staticport
short_description: Manage site-local EPG static ports in schema template
description:
- Manage site-local EPG static ports in schema template on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
site:
description:
- The name of the site.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
anp:
description:
- The name of the ANP.
type: str
epg:
description:
- The name of the EPG.
type: str
type:
description:
- The path type of the static port
type: str
choices: [ port, vpc ]
default: port
pod:
description:
- The pod of the static port.
type: str
leaf:
description:
- The leaf of the static port.
type: str
path:
description:
- The path of the static port.
type: str
vlan:
description:
- The port encap VLAN id of the static port.
type: int
deployment_immediacy:
description:
- The deployment immediacy of the static port.
- C(immediate) means B(Deploy immediate).
- C(lazy) means B(deploy on demand).
type: str
choices: [ immediate, lazy ]
mode:
description:
- The mode of the static port.
- C(native) means B(Access (802.1p)).
- C(regular) means B(Trunk).
- C(untagged) means B(Access (untagged)).
type: str
choices: [ native, regular, untagged ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
notes:
- The ACI MultiSite PATCH API has a deficiency requiring some objects to be referenced by index.
This can cause silent corruption on concurrent access when changing/removing an object as
the wrong object may be referenced. This module is affected by this deficiency.
seealso:
- module: mso_schema_site_anp_epg
- module: mso_schema_template_anp_epg
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new static port to a site EPG
mso_schema_site_anp_epg_staticport:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
type: port
pod: pod-1
leaf: 101
path: eth1/1
vlan: 126
deployment_immediacy: immediate
state: present
delegate_to: localhost
- name: Remove a static port from a site EPG
mso_schema_site_anp_epg_staticport:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
type: port
pod: pod-1
leaf: 101
path: eth1/1
state: absent
delegate_to: localhost
- name: Query a specific site EPG static port
mso_schema_site_anp_epg_staticport:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
type: port
pod: pod-1
leaf: 101
path: eth1/1
state: query
delegate_to: localhost
register: query_result
- name: Query all site EPG static ports
mso_schema_site_anp_epg_staticport:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
site=dict(type='str', required=True),
template=dict(type='str', required=True),
anp=dict(type='str', required=True),
epg=dict(type='str', required=True),
type=dict(type='str', default='port', choices=['port', 'vpc']),
pod=dict(type='str'), # This parameter is not required for querying all objects
leaf=dict(type='str'), # This parameter is not required for querying all objects
path=dict(type='str'), # This parameter is not required for querying all objects
vlan=dict(type='int'), # This parameter is not required for querying all objects
deployment_immediacy=dict(type='str', choices=['immediate', 'lazy']),
mode=dict(type='str', choices=['native', 'regular', 'untagged']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['type', 'pod', 'leaf', 'path', 'vlan']],
['state', 'present', ['type', 'pod', 'leaf', 'path', 'vlan']],
],
)
schema = module.params.get('schema')
site = module.params.get('site')
template = module.params.get('template')
anp = module.params.get('anp')
epg = module.params.get('epg')
path_type = module.params.get('type')
pod = module.params.get('pod')
leaf = module.params.get('leaf')
path = module.params.get('path')
vlan = module.params.get('vlan')
deployment_immediacy = module.params.get('deployment_immediacy')
mode = module.params.get('mode')
state = module.params.get('state')
if path_type == 'port':
portpath = 'topology/{0}/paths-{1}/pathep-[{2}]'.format(pod, leaf, path)
elif path_type == 'vpc':
portpath = 'topology/{0}/protpaths-{1}/pathep-[{2}]'.format(pod, leaf, path)
mso = MSOModule(module)
# Get schema_id
schema_obj = mso.get_obj('schemas', displayName=schema)
if not schema_obj:
mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
schema_path = 'schemas/{id}'.format(**schema_obj)
schema_id = schema_obj.get('id')
# Get site
site_id = mso.lookup_site(site)
# Get site_idx
sites = [(s.get('siteId'), s.get('templateName')) for s in schema_obj.get('sites')]
if (site_id, template) not in sites:
mso.fail_json(msg="Provided site/template '{0}-{1}' does not exist. Existing sites/templates: {2}".format(site, template, ', '.join(sites)))
# Schema-access uses indexes
site_idx = sites.index((site_id, template))
# Path-based access uses site_id-template
site_template = '{0}-{1}'.format(site_id, template)
# Get ANP
anp_ref = mso.anp_ref(schema_id=schema_id, template=template, anp=anp)
anps = [a.get('anpRef') for a in schema_obj.get('sites')[site_idx]['anps']]
if anp_ref not in anps:
mso.fail_json(msg="Provided anp '{0}' does not exist. Existing anps: {1}".format(anp, ', '.join(anps)))
anp_idx = anps.index(anp_ref)
# Get EPG
epg_ref = mso.epg_ref(schema_id=schema_id, template=template, anp=anp, epg=epg)
epgs = [e.get('epgRef') for e in schema_obj.get('sites')[site_idx]['anps'][anp_idx]['epgs']]
if epg_ref not in epgs:
mso.fail_json(msg="Provided epg '{0}' does not exist. Existing epgs: {1}".format(epg, ', '.join(epgs)))
epg_idx = epgs.index(epg_ref)
# Get Leaf
portpaths = [p.get('path') for p in schema_obj.get('sites')[site_idx]['anps'][anp_idx]['epgs'][epg_idx]['staticPorts']]
if portpath in portpaths:
portpath_idx = portpaths.index(portpath)
# FIXME: Changes based on index are DANGEROUS
port_path = '/sites/{0}/anps/{1}/epgs/{2}/staticPorts/{3}'.format(site_template, anp, epg, portpath_idx)
mso.existing = schema_obj.get('sites')[site_idx]['anps'][anp_idx]['epgs'][epg_idx]['staticPorts'][portpath_idx]
if state == 'query':
if leaf is None or vlan is None:
mso.existing = schema_obj.get('sites')[site_idx]['anps'][anp_idx]['epgs'][epg_idx]['staticPorts']
elif not mso.existing:
mso.fail_json(msg="Static port '{portpath}' not found".format(portpath=portpath))
mso.exit_json()
ports_path = '/sites/{0}/anps/{1}/epgs/{2}/staticPorts'.format(site_template, anp, epg)
ops = []
mso.previous = mso.existing
if state == 'absent':
if mso.existing:
mso.sent = mso.existing = {}
ops.append(dict(op='remove', path=port_path))
elif state == 'present':
if not mso.existing:
if deployment_immediacy is None:
deployment_immediacy = 'lazy'
if mode is None:
mode = 'untagged'
payload = dict(
deploymentImmediacy=deployment_immediacy,
mode=mode,
path=portpath,
portEncapVlan=vlan,
type=path_type,
)
mso.sanitize(payload, collate=True)
if mso.existing:
ops.append(dict(op='replace', path=port_path, value=mso.sent))
else:
ops.append(dict(op='add', path=ports_path + '/-', value=mso.sent))
mso.existing = mso.proposed
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
if __name__ == "__main__":
main()
|
markfinger/python-js-host | refs/heads/master | js_host/js_host.py | 1 | import atexit
import sys
from requests.exceptions import ConnectionError as RequestsConnectionError
from .conf import settings
from .exceptions import ProcessError
from .utils import six, verbosity
from .base_server import BaseServer
class JSHost(BaseServer):
expected_type_name = 'Host'
manager = None
logfile = None
connection = None
def __init__(self, manager=None, logfile=None, *args, **kwargs):
self.manager = manager
self.logfile = logfile
super(JSHost, self).__init__(*args, **kwargs)
def stop(self):
if not self.manager:
raise NotImplementedError('{} must be stopped manually'.format(self.get_name()))
self.manager.stop_host(self.config_file)
if settings.VERBOSITY >= verbosity.PROCESS_STOP:
print('Stopped {}'.format(self.get_name()))
def restart(self):
if not self.manager:
raise NotImplementedError('{} must be restarted manually'.format(self.get_name()))
self.manager.restart_host(self.config_file)
self.status = self.request_status()
def connect(self):
if self.manager:
if not self.connection:
data = self.manager.open_connection_to_host(self.config_file)
self.connection = data['connection']
# Ensure that the connection is closed once the python
# process has exited
atexit.register(self.disconnect)
super(JSHost, self).connect()
def disconnect(self):
if not self.manager:
            raise NotImplementedError('Only managed hosts can disconnect; {0} is not managed'.format(self.get_name()))
if not self.connection or not self.manager.is_running():
return
data = self.manager.close_connection_to_host(self.config_file, self.connection)
if data['started'] and settings.VERBOSITY >= verbosity.DISCONNECT:
message = 'Closed connection to {} - {}'.format(self.get_name(), self.connection)
if data['stopTimeout']:
message += '. Host will stop in {} seconds unless another connection is opened'.format(
data['stopTimeout'] / 1000.0
)
print(message)
self.connection = None
def send_request(self, *args, **kwargs):
"""
Intercept connection errors which suggest that a managed host has
crashed and raise an exception indicating the location of the log
"""
try:
return super(JSHost, self).send_request(*args, **kwargs)
except RequestsConnectionError as e:
if (
self.manager and
self.has_connected and
self.logfile and
'unsafe' not in kwargs
):
raise ProcessError(
'{} appears to have crashed, you can inspect the log file at {}'.format(
self.get_name(),
self.logfile,
)
)
raise six.reraise(RequestsConnectionError, RequestsConnectionError(*e.args), sys.exc_info()[2]) |
ax003d/openerp | refs/heads/master | openerp/addons/account/__openerp__.py | 14 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'eInvoicing',
'version' : '1.1',
'author' : 'OpenERP SA',
'category' : 'Accounting & Finance',
'description' : """
Accounting and Financial Management.
====================================
Financial and accounting module that covers:
--------------------------------------------
* General Accounting
* Cost/Analytic accounting
* Third party accounting
* Taxes management
* Budgets
* Customer and Supplier Invoices
* Bank statements
* Reconciliation process by partner
Creates a dashboard for accountants that includes:
--------------------------------------------------
* List of Customer Invoice to Approve
* Company Analysis
* Graph of Treasury
Processes such as maintaining the general ledger are handled through the defined financial journals (entry move line or grouping is maintained through the journal)
for a particular financial year; for the preparation of vouchers there is a module named account_voucher.
""",
'website': 'http://www.openerp.com',
'images' : ['images/accounts.jpeg','images/bank_statement.jpeg','images/cash_register.jpeg','images/chart_of_accounts.jpeg','images/customer_invoice.jpeg','images/journal_entries.jpeg'],
'depends' : ['base_setup', 'product', 'analytic', 'process', 'board', 'edi'],
'data': [
'security/account_security.xml',
'security/ir.model.access.csv',
'account_menuitem.xml',
'report/account_invoice_report_view.xml',
'report/account_entries_report_view.xml',
'report/account_treasury_report_view.xml',
'report/account_report_view.xml',
'report/account_analytic_entries_report_view.xml',
'wizard/account_move_bank_reconcile_view.xml',
'wizard/account_use_model_view.xml',
'account_installer.xml',
'wizard/account_period_close_view.xml',
'wizard/account_reconcile_view.xml',
'wizard/account_unreconcile_view.xml',
'account_view.xml',
'account_report.xml',
'account_financial_report_data.xml',
'wizard/account_report_common_view.xml',
'wizard/account_invoice_refund_view.xml',
'wizard/account_fiscalyear_close_state.xml',
'wizard/account_chart_view.xml',
'wizard/account_tax_chart_view.xml',
'wizard/account_move_line_reconcile_select_view.xml',
'wizard/account_open_closed_fiscalyear_view.xml',
'wizard/account_move_line_unreconcile_select_view.xml',
'wizard/account_vat_view.xml',
'wizard/account_report_print_journal_view.xml',
'wizard/account_report_general_journal_view.xml',
'wizard/account_report_central_journal_view.xml',
'wizard/account_subscription_generate_view.xml',
'wizard/account_fiscalyear_close_view.xml',
'wizard/account_state_open_view.xml',
'wizard/account_journal_select_view.xml',
'wizard/account_change_currency_view.xml',
'wizard/account_validate_move_view.xml',
'wizard/account_report_general_ledger_view.xml',
'wizard/account_invoice_state_view.xml',
'wizard/account_report_partner_balance_view.xml',
'wizard/account_report_account_balance_view.xml',
'wizard/account_report_aged_partner_balance_view.xml',
'wizard/account_report_partner_ledger_view.xml',
'wizard/account_reconcile_partner_process_view.xml',
'wizard/account_automatic_reconcile_view.xml',
'wizard/account_financial_report_view.xml',
'wizard/pos_box.xml',
'project/wizard/project_account_analytic_line_view.xml',
'account_end_fy.xml',
'account_invoice_view.xml',
'partner_view.xml',
'data/account_data.xml',
'data/data_account_type.xml',
'data/configurable_account_chart.xml',
'account_invoice_workflow.xml',
'project/project_view.xml',
'project/project_report.xml',
'project/wizard/account_analytic_balance_report_view.xml',
'project/wizard/account_analytic_cost_ledger_view.xml',
'project/wizard/account_analytic_inverted_balance_report.xml',
'project/wizard/account_analytic_journal_report_view.xml',
'project/wizard/account_analytic_cost_ledger_for_journal_report_view.xml',
'project/wizard/account_analytic_chart_view.xml',
'product_view.xml',
'account_assert_test.xml',
'process/statement_process.xml',
'process/customer_invoice_process.xml',
'process/supplier_invoice_process.xml',
'ir_sequence_view.xml',
'company_view.xml',
'board_account_view.xml',
'edi/invoice_action_data.xml',
'account_bank_view.xml',
'res_config_view.xml',
'account_pre_install.yml'
],
'js': [
'static/src/js/account_move_reconciliation.js',
'static/src/js/account_move_line_quickadd.js',
],
'qweb' : [
"static/src/xml/account_move_reconciliation.xml",
"static/src/xml/account_move_line_quickadd.xml",
],
'css':[
'static/src/css/account_move_reconciliation.css',
'static/src/css/account_move_line_quickadd.css'
],
'demo': [
'demo/account_demo.xml',
'project/project_demo.xml',
'project/analytic_account_demo.xml',
'demo/account_minimal.xml',
'demo/account_invoice_demo.xml',
'account_unit_test.xml',
],
'test': [
'test/account_customer_invoice.yml',
'test/account_supplier_invoice.yml',
'test/account_change_currency.yml',
'test/chart_of_account.yml',
'test/account_period_close.yml',
'test/account_use_model.yml',
'test/account_validate_account_move.yml',
'test/account_fiscalyear_close.yml',
#'test/account_bank_statement.yml',
#'test/account_cash_statement.yml',
'test/test_edi_invoice.yml',
'test/account_report.yml',
'test/account_fiscalyear_close_state.yml', #last test, as it will definitively close the demo fiscalyear
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sreejithr/emacs.d | refs/heads/master | pyenv/emacs/lib/python2.7/site-packages/setuptools/command/test.py | 285 | from setuptools import Command
from distutils.errors import DistutilsOptionError
import sys
from pkg_resources import *
from pkg_resources import _namespace_packages
from unittest import TestLoader, main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
if module.__name__!='setuptools.tests.doctest': # ugh
tests.append(TestLoader.loadTestsFromModule(self,module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file!='__init__.py':
submodule = module.__name__+'.'+file[:-3]
else:
if resource_exists(
module.__name__, file+'/__init__.py'
):
submodule = module.__name__+'.'+file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests)!=1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=','m', "Run 'test_suite' in specified module"),
('test-suite=','s',
"Test suite to run (e.g. 'some_module.test_suite')"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
def finalize_options(self):
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module+".test_suite"
elif self.test_module:
raise DistutilsOptionError(
"You may specify a module or a suite, but not both"
)
self.test_args = [self.test_suite]
if self.verbose:
self.test_args.insert(0,'--verbose')
if self.test_loader is None:
self.test_loader = getattr(self.distribution,'test_loader',None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
def with_project_on_sys_path(self, func):
if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False):
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
sys.path.insert(0, normalize_path(ei_cmd.egg_base))
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
func()
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.test_suite:
cmd = ' '.join(self.test_args)
if self.dry_run:
self.announce('skipping "unittest %s" (dry run)' % cmd)
else:
self.announce('running "unittest %s"' % cmd)
self.with_project_on_sys_path(self.run_tests)
def run_tests(self):
import unittest
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False):
module = self.test_args[-1].split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
loader_ep = EntryPoint.parse("x="+self.test_loader)
loader_class = loader_ep.load(require=False)
cks = loader_class()
unittest.main(
None, None, [unittest.__file__]+self.test_args,
testLoader = cks
)
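# --- Illustrative wiring note (editorial addition, not part of setuptools) ---
# A project enables this command from its setup.py; the package and
# requirement names below are assumed examples, not taken from this repo:
#   setup(
#       ...,
#       test_suite='mypackage.tests',
#       tests_require=['mock'],
#   )
# The tests are then run with `python setup.py test`.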
|
fedya/ajenti | refs/heads/master | plugins/sysload/api.py | 20 | from ajenti.apis import API
from ajenti.com import Interface
class SysStat(API):
class ISysStat(Interface):
def get_load(self):
pass
def get_ram(self):
pass
def get_swap(self):
pass
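# Illustrative note (editorial addition, assumed): a concrete backend plugin
# would implement ISysStat, e.g. get_load() by reading /proc/loadavg and
# get_ram() / get_swap() by parsing /proc/meminfo on Linux.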
|
dialounke/pylayers | refs/heads/master | pylayers/location/geometric/constraints/tdoa.py | 2 | """
.. autoclass:: TDOA
:members:
"""
# -*- coding:Utf-8 -*-
#####################################################################
#This file is part of RGPA.
#Foobar is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#Foobar is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------
#authors :
#Nicolas AMIOT : [email protected]
#Bernard UGUEN : [email protected]
#Mohamed LAARAIEDH : [email protected]
#####################################################################
import pdb
import numpy as np
import scipy as sp
from copy import copy
from pylayers.location.geometric.util.boxn import *
from pylayers.location.geometric.constraints.constraint import *
class TDOA(Constraint):
""" TDOA Constraint
Description and evaluation of TDOA constraints
Parameters
----------
value : float
Constraint value in ns.
std : np.array
Value standard deviation in ns.
vcw : float
scale factor.
p : np.array 2 x ndim
constraint centers
Attributes
----------
    drange : difference of distances, converted from the time value self.value.
    sstd : standard deviation of that difference, converted from the time std self.std
runable : True NOT USED
evaluated :False NOT USED
self.Id : Constraint ID
from annulus bound:
min : minimum value of observable
max : maximum value of observable
mean : mean value of observable
Methods
-------
annulus_bound(self) : Compute the minimum and maximum distance of the enclosing annulus of the constraint
tdoa_box(vcw) : find the enclosing box of TDOA constraint for a given vcw
    rescale(self,vcw) : rescale constraint boundary with a given scale factor 'vcw'
    inclusive(self,b) : Is the constraint center inside a box ?
    valid(self,b) : Test if Lbox is compatible with the constraint
    valid_v(self,lv) : Test if a list of vertexes from a box is compatible with the constraint. vertexes are obtained thanks to LBoxN.
bd2coordinates()
estvol(self) : Constraint Volume estimation
See Also
--------
pylayers.location.geometric.constraints.Constraint
"""
def __init__(self, id='0', value=np.array(([45])), std=np.array((4.0)), vcw=3, p=np.array([[0, 0, 0], [10, 10, 10]]), origin={}):
Constraint.__init__(self, type='TDOA', id=id, p=p, origin=origin)
self.tdoa_axes(p)
self.f = self.nv / 2
self.Dmax = self.nv
self.value = min(value, 2 * self.f / 0.3)
self.value = max(self.value, -2 * self.f / 0.3)
self.std = std
self.vcw = vcw
self.drange = self.value * 0.3
self.sstd = self.std * 0.3
self.tdoa_box(vcw)
self.annulus_bound()
def update(self):
""" update constraint information
"""
# if self.p.any():
# self.runable = True
# else:
# self.runable = False
self.updc('p',value=self.p)
self.updc('value',value=self.value)
self.updc('std',value=self.std)
self.sstd=self.std * 0.3
self.range=self.value *0.3
self.rescale(self.vcw)
self.evaluated = False
self.annulus_bound()
def tdoa_axes(self, p):
"""triedre [vn,wn,tn], support of the contraint
"""
#
# Dmax
#
#
        # The TDOA box is truncated
#
self.F1 = p[0, :]
self.F2 = p[1, :]
#
#
#
v = self.F2 - self.F1
self.nv = np.sqrt(np.dot(v, v))
vn = v / (self.nv * 1.0)
if self.ndim > 2:
if np.abs(v[2]) < 0.9:
w = np.array([v[1], -v[0], 0])
else:
w = np.array([v[2], 0, -v[0]])
nw = np.sqrt(np.dot(w, w))
wn = w / (nw * 1.0)
tn = np.cross(vn, wn)
self.triedre = [wn, tn, vn] # [z,x,y]
else:
w = np.array([v[1], -v[0]])
nw = np.sqrt(np.dot(w, w))
wn = w / (nw * 1.0)
self.triedre = [wn, vn]
def tdoa_box(self, vcw):
"""create the inclusive box for a given vcw
"""
if self.ndim == 3:
wn = self.triedre[0]
tn = self.triedre[1]
vn = self.triedre[2]
if self.ndim == 2:
wn = self.triedre[0]
vn = self.triedre[1]
eps = vcw * self.sstd
delta = self.drange
deltap = min(delta + eps, self.nv)
deltam = max(delta - eps, -self.nv)
c = delta / 2.
cp = deltap / 2.
cm = deltam / 2.
arge = self.f ** 2 - c ** 2
argep = self.f ** 2 - cp ** 2
argem = self.f ** 2 - cm ** 2
try:
e = np.sqrt(arge)
except:
pdb.set_trace()
ep = np.sqrt(argep)
em = np.sqrt(argem)
if cp < 0:
pp = self.F1 + (self.f + cp) * vn
else:
if ep > 0:
offset = (cp * np.sqrt((self.Dmax / ep) ** 2 + 1) - self.f)
#print "ep >0 : offset ",offset
else:
offset = -self.Dmax
pp = self.F2 + offset * vn
if cm < 0:
if em > 0:
offset = (cm * np.sqrt((self.Dmax / em) ** 2 + 1) + self.f)
#print "em >0 : offset ",offset
else:
offset = self.Dmax
pm = self.F1 + offset * vn
else:
pm = self.F2 - (self.f - cm) * vn
if self.ndim == 3:
p1 = pp + self.Dmax * wn - self.Dmax * tn
p2 = pp + self.Dmax * wn + self.Dmax * tn
p3 = pp - self.Dmax * wn + self.Dmax * tn
p4 = pp - self.Dmax * wn - self.Dmax * tn
p5 = pm + self.Dmax * wn - self.Dmax * tn
p6 = pm + self.Dmax * wn + self.Dmax * tn
p7 = pm - self.Dmax * wn + self.Dmax * tn
p8 = pm - self.Dmax * wn - self.Dmax * tn
pquad = np.vstack((p1, p2, p3, p4, p5, p6, p7, p8))
if self.ndim == 2:
p1 = pp + self.Dmax * wn
p2 = pp - self.Dmax * wn
p3 = pm + self.Dmax * wn
p4 = pm - self.Dmax * wn
pquad = np.vstack((p1, p2, p3, p4))
imin = np.min(pquad, axis=0)
imax = np.max(pquad, axis=0)
self.ep = ep
self.em = em
self.cp = cp
self.cm = cm
self.lbox = LBoxN(
[BoxN(np.vstack((imin, imax)), ndim=np.shape(self.p)[1])])
def annulus_bound(self):
""" Compute the minimum and maximum distance of the enclosing annulus of the constraint for a given self.vcw
"""
if self.value > 0:
self.cmin = self.drange - self.vcw * self.sstd
self.cmax = self.drange + self.vcw * self.sstd
else:
self.cmin = self.drange + self.vcw * self.sstd
self.cmax = self.drange - self.vcw * self.sstd
self.mean = (self.cmin + self.cmax) / 2
def repart(self, DD):
"""
"""
return(1. / (self.sstd * np.sqrt(2 * np.pi)) * np.exp(-(DD - self.mean) ** 2 / (2 * self.sstd ** 2)))
def rescale(self, vcw):
"""
rescale(vcw) : rescale constraint with vcw factor
"""
self.vcw = vcw
#print self.vcw
# pdb.set_trace()
self.tdoa_box(self.vcw)
# print 'TDOA', self.vcw
#self.estvol() <= TO BE DONE IN TDOA
# def inclusive(self, b):
# """A box b is inclusive for the constraint if self.p is included in the box
# Parameters
# ----------
# b : BoxN
# test if self.p is included in box b
# """
# if b.inbox(self.p):
# return True
# else:
# return False
def valid(self, b):
"""
valid(b) : check if box b is valid for the given constraint
        A box is valid unless it can be excluded:
        a box is not valid if all distances are greater than rangemax
        or all distances are less than rangemin
"""
v = b.bd2coord()
P0 = np.outer(np.ones(len(v)), self.p[0, :])
P1 = np.outer(np.ones(len(v)), self.p[1, :])
F1v = np.sqrt(np.sum((P0 - v) * (P0 - v), axis=1))
F2v = np.sqrt(np.sum((P1 - v) * (P1 - v), axis=1))
D = (F1v - F2v)
if self.value > 0:
DDcmin = sum(D >= self.cmin)
DDcmax = sum(D <= self.cmax)
else:
DDcmin = sum(D >= self.cmax)
DDcmax = sum(D <= self.cmin)
if DDcmin + DDcmax > 15:
return(True)
elif (DDcmin < 1) | (DDcmax < 1): # si toute points sont inf a cmin ou sup a cmax
return('out')
else:
return(False)
def valid_v(self, v):
"""check if vertex are valid for the given constraint
A box is valid if it not not valid
A box is not valid if all distances are greater than rangemax
or all distances are less than rangemin
"""
ppb = pow(2, len(self.p[0, :]))
nbbox = len(v) / ppb
DDbound = np.zeros((4, len(v)), dtype='bool')
TB = np.zeros((4, nbbox), dtype='bool')
P0 = np.outer(np.ones(len(v)), self.p[0, :])
P1 = np.outer(np.ones(len(v)), self.p[1, :])
F1v = np.sqrt(np.sum((P0 - v) * (P0 - v), axis=1))
F2v = np.sqrt(np.sum((P1 - v) * (P1 - v), axis=1))
DD = (F1v - F2v)
# if self.value > 0:
# DD2 = (D>=self.cmin) & (D<=self.cmax)
# else :
# DD2 = (D>=self.cmax) & (D<=self.cmin)
if self.value > 0:
# calculate all distance from constraint origin to all vertexes
#DD = np.sqrt(np.sum(D*D,axis=1))
        # for each box, find the vertex closest to the constraint origin and the farthest.
T = np.array((np.min(DD.reshape(nbbox, ppb),
axis=1), np.max(DD.reshape(nbbox, ppb), axis=1)))
TB[0, :] = (T[0, :] <= self.cmin)
TB[1, :] = (T[0, :] <= self.cmax)
TB[2, :] = (T[1, :] <= self.cmin)
TB[3, :] = (T[1, :] <= self.cmax)
DDbound[0, :] = (DD >= self.cmin)
DDbound[1, :] = (DD <= self.cmax)
else:
# calculate all distance from constraint origin to all vertexes
#DD = np.sqrt(np.sum(D*D,axis=1))
        # for each box, find the vertex closest to the constraint origin and the farthest.
T = np.array((np.min(DD.reshape(nbbox, ppb),
axis=1), np.max(DD.reshape(nbbox, ppb), axis=1)))
TB[0, :] = (T[0, :] <= self.cmax)
TB[1, :] = (T[0, :] <= self.cmin)
TB[2, :] = (T[1, :] <= self.cmax)
TB[3, :] = (T[1, :] <= self.cmin)
DDbound[0, :] = (DD >= self.cmax)
DDbound[1, :] = (DD <= self.cmin)
return DDbound, TB
#
# return DD2
def inclusive(self, b):
""" A box b is inclusive for the constraint if self.p is included in the box
"""
if b.inbox(self.p[0]) | b.inbox(self.p[1]):
return True
else:
return False
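# --- Illustrative usage note (editorial addition, not part of pylayers) ---
# A minimal, assumed way to build this constraint, mirroring the constructor
# defaults (times in ns, positions in metres):
#   tdoa = TDOA(id='1', value=np.array([45]), std=np.array(4.0), vcw=3,
#               p=np.array([[0., 0., 0.], [10., 10., 10.]]))
#   tdoa.rescale(3)              # rebuild the enclosing box for a new scale factor
#   print tdoa.cmin, tdoa.cmax   # annulus bounds in metres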
|
davido/buck | refs/heads/master | third-party/py/pywatchman/pywatchman/encoding.py | 29 | # Copyright 2016-present Facebook, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Facebook nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# no unicode literals
'''Module to deal with filename encoding on the local system, as returned by
Watchman.'''
import sys
from . import (
compat,
)
if compat.PYTHON3:
default_local_errors = 'surrogateescape'
def get_local_encoding():
if sys.platform == 'win32':
# Watchman always returns UTF-8 encoded strings on Windows.
return 'utf-8'
# On the Python 3 versions we support, sys.getfilesystemencoding never
# returns None.
return sys.getfilesystemencoding()
else:
# Python 2 doesn't support surrogateescape, so use 'strict' by
# default. Users can register a custom surrogateescape error handler and use
# that if they so desire.
default_local_errors = 'strict'
def get_local_encoding():
if sys.platform == 'win32':
# Watchman always returns UTF-8 encoded strings on Windows.
return 'utf-8'
fsencoding = sys.getfilesystemencoding()
if fsencoding is None:
# This is very unlikely to happen, but if it does, just use UTF-8
fsencoding = 'utf-8'
return fsencoding
def encode_local(s):
return s.encode(get_local_encoding(), default_local_errors)
def decode_local(bs):
return bs.decode(get_local_encoding(), default_local_errors)
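# --- Illustrative usage note (editorial addition, not part of pywatchman) ---
# Callers round-trip paths through the local filesystem encoding like so
# (the path is an arbitrary demo value):
#   raw = encoding.encode_local(u'some/watched/file.txt')
#   text = encoding.decode_local(raw)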
|
klmitch/pbr | refs/heads/master | pbr/hooks/base.py | 101 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class BaseConfig(object):
section = None
def __init__(self, config):
self._global_config = config
self.config = self._global_config.get(self.section, dict())
self.pbr_config = config.get('pbr', dict())
def run(self):
self.hook()
self.save()
def hook(self):
pass
def save(self):
self._global_config[self.section] = self.config
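# --- Illustrative subclass sketch (editorial addition, not part of pbr) ---
# Hooks subclass BaseConfig, point `section` at a setup.cfg section and
# override hook() to mutate self.config in place; the section name and key
# below are hypothetical examples rather than real pbr hooks.
class ExampleDefaultsConfig(BaseConfig):
    section = 'metadata'
    def hook(self):
        # Only fill in the value when the project left it unset.
        self.config.setdefault('description', 'No description provided')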
|
jordanemedlock/psychtruths | refs/heads/master | temboo/core/Library/Twilio/ConnectApps/ListConnectApps.py | 5 | # -*- coding: utf-8 -*-
###############################################################################
#
# ListConnectApps
# Returns a list of Connect Apps within your Twilio account.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListConnectApps(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListConnectApps Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListConnectApps, self).__init__(temboo_session, '/Library/Twilio/ConnectApps/ListConnectApps')
def new_input_set(self):
return ListConnectAppsInputSet()
def _make_result_set(self, result, path):
return ListConnectAppsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListConnectAppsChoreographyExecution(session, exec_id, path)
class ListConnectAppsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListConnectApps
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccountSID(self, value):
"""
Set the value of the AccountSID input for this Choreo. ((required, string) The AccountSID provided when you signed up for a Twilio account.)
"""
super(ListConnectAppsInputSet, self)._set_input('AccountSID', value)
def set_AuthToken(self, value):
"""
Set the value of the AuthToken input for this Choreo. ((required, string) The authorization token provided when you signed up for a Twilio account.)
"""
super(ListConnectAppsInputSet, self)._set_input('AuthToken', value)
def set_PageSize(self, value):
"""
Set the value of the PageSize input for this Choreo. ((optional, integer) The number of results per page.)
"""
super(ListConnectAppsInputSet, self)._set_input('PageSize', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page of results to retrieve. Defaults to 0.)
"""
super(ListConnectAppsInputSet, self)._set_input('Page', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
super(ListConnectAppsInputSet, self)._set_input('ResponseFormat', value)
class ListConnectAppsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListConnectApps Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Twilio.)
"""
return self._output.get('Response', None)
class ListConnectAppsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListConnectAppsResultSet(response, path)
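# --- Illustrative usage note (editorial addition, not part of the Temboo SDK) ---
# Typical driving code, assuming a TembooSession created elsewhere and the
# SDK's execute_with_results() helper; the credentials are placeholders:
#   choreo = ListConnectApps(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccountSID('ACxxxxxxxxxxxxxxxx')
#   inputs.set_AuthToken('your-auth-token')
#   results = choreo.execute_with_results(inputs)
#   print results.get_Response()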
|
mvernacc/proptools | refs/heads/master | proptools/nozzle.py | 1 | """Nozzle flow calculations.
.. autosummary::
thrust_coef
c_star
er_from_p
throat_area
mass_flow
thrust
mach_from_er
mach_from_pr
is_choked
mach_from_area_subsonic
area_from_mach
pressure_from_er
"""
import numpy as np
from scipy.optimize import fsolve
import warnings
import proptools.constants
R_univ = proptools.constants.R_univ
g = proptools.constants.g # pylint: disable=invalid-name
def thrust_coef(p_c, p_e, gamma, p_a=None, er=None):
"""Nozzle thrust coefficient, :math:`C_F`.
The thrust coefficient is a figure of merit for the nozzle expansion process.
See :ref:`thrust-coefficient-label` for a description of the physical meaning of the
thrust coefficient.
Reference: Equation 1-33a in Huzel and Huang.
Arguments:
p_c (scalar): Nozzle stagnation chamber pressure [units: pascal].
p_e (scalar): Nozzle exit static pressure [units: pascal].
gamma (scalar): Exhaust gas ratio of specific heats [units: dimensionless].
p_a (scalar, optional): Ambient pressure [units: pascal]. If None,
then p_a = p_e.
er (scalar, optional): Nozzle area expansion ratio [units: dimensionless]. If None,
then p_a = p_e.
Returns:
scalar: The thrust coefficient, :math:`C_F` [units: dimensionless].
"""
if (p_a is None and er is not None) or (er is None and p_a is not None):
raise ValueError('Both p_a and er must be provided.')
C_F = (2 * gamma**2 / (gamma - 1) \
* (2 / (gamma + 1))**((gamma + 1) / (gamma - 1)) \
* (1 - (p_e / p_c)**((gamma - 1) / gamma))
)**0.5
if p_a is not None and er is not None:
C_F += er * (p_e - p_a) / p_c
return C_F
def c_star(gamma, m_molar, T_c):
"""Characteristic velocity, :math:`c^*`.
The characteristic velocity is a figure of merit for the propellants and combustion
process.
See :ref:`characteristic_velocity-tutorial-label` for a description of the physical
meaning of the characteristic velocity.
Reference: Equation 1-32a in Huzel and Huang.
Arguments:
gamma (scalar): Exhaust gas ratio of specific heats [units: dimensionless].
m_molar (scalar): Exhaust gas mean molar mass [units: kilogram mole**-1].
T_c (scalar): Nozzle stagnation temperature [units: kelvin].
Returns:
scalar: The characteristic velocity [units: meter second**-1].
"""
# Note that the g in Huzel is removed here, because Huzel uses US units while this
# function uses SI.
return (gamma * (R_univ / m_molar) * T_c)**0.5 \
/ gamma \
/ (2 / (gamma + 1))**((gamma + 1) / (2 * (gamma - 1)))
def er_from_p(p_c, p_e, gamma):
"""Find the nozzle expansion ratio from the chamber and exit pressures.
See :ref:`expansion-ratio-tutorial-label` for a physical description of the
expansion ratio.
Reference: Rocket Propulsion Elements, 8th Edition, Equation 3-25
Arguments:
p_c (scalar): Nozzle stagnation chamber pressure [units: pascal].
p_e (scalar): Nozzle exit static pressure [units: pascal].
gamma (scalar): Exhaust gas ratio of specific heats [units: dimensionless].
Returns:
scalar: Expansion ratio :math:`\\epsilon = A_e / A_t` [units: dimensionless]
"""
AtAe = ((gamma + 1) / 2)**(1 / (gamma - 1)) \
* (p_e / p_c)**(1 / gamma) \
        * ((gamma + 1) / (gamma - 1) * (1 - (p_e / p_c)**((gamma - 1) / gamma)))**0.5
er = 1/AtAe
return er
def pressure_from_er(er, gamma):
"""Find the exit/chamber pressure ratio from the nozzle expansion ratio.
See :ref:`expansion-ratio-tutorial-label` for a physical description of the
expansion ratio.
Reference: Rocket Propulsion Elements, 8th Edition, Equation 3-25
Arguments:
er (scalar): Expansion ratio :math:`\\epsilon = A_e / A_t` [units: dimensionless].
gamma (scalar): Exhaust gas ratio of specific heats [units: dimensionless].
Returns:
scalar: Pressure ratio :math:`p_e/p_c` [units: dimensionless].
"""
pressure_ratio = fsolve(lambda x: er - er_from_p(1., x, gamma), x0=1e-3 / er)[0]
assert pressure_ratio < 1
return pressure_ratio
def throat_area(m_dot, p_c, T_c, gamma, m_molar):
"""Find the nozzle throat area.
Given gas stagnation conditions and a mass flow rate, find the required throat area
of a choked nozzle. See :ref:`choked-flow-tutorial-label` for details.
Reference: Rocket Propulsion Elements, 8th Edition, Equation 3-24
Arguments:
m_dot (scalar): Propellant mass flow rate [units: kilogram second**-1].
p_c (scalar): Nozzle stagnation chamber pressure [units: pascal].
T_c (scalar): Nozzle stagnation temperature [units: kelvin].
gamma (scalar): Exhaust gas ratio of specific heats [units: dimensionless].
m_molar (scalar): Exhaust gas mean molar mass [units: kilogram mole**-1].
Returns:
scalar: Throat area [units: meter**2].
"""
R = R_univ / m_molar
    # Find the throat area required for the specified mass flow, using
    # Rocket Propulsion Elements, Equation 3-24
A_t = m_dot / ( p_c * gamma \
* (2 / (gamma + 1))**((gamma + 1) / (2*gamma - 2)) \
/ (gamma * R * T_c)**0.5)
return A_t
def mass_flow(A_t, p_c, T_c, gamma, m_molar):
"""Find the mass flow through a choked nozzle.
Given gas stagnation conditions and a throat area, find the mass flow through a
choked nozzle. See :ref:`choked-flow-tutorial-label` for details.
Reference: Rocket Propulsion Elements, 8th Edition, Equation 3-24.
Arguments:
A_t (scalar): Nozzle throat area [units: meter**2].
p_c (scalar): Nozzle stagnation chamber pressure [units: pascal].
T_c (scalar): Nozzle stagnation temperature [units: kelvin].
gamma (scalar): Exhaust gas ratio of specific heats [units: dimensionless].
m_molar (scalar): Exhaust gas mean molar mass [units: kilogram mole**-1].
Returns:
        scalar: Mass flow rate :math:`\\dot{m}` [units: kilogram second**-1].
"""
return (A_t * p_c * gamma / (gamma * R_univ / m_molar * T_c)**0.5
* (2 / (gamma + 1))**((gamma + 1) / (2 * (gamma - 1))))
def thrust(A_t, p_c, p_e, gamma, p_a=None, er=None):
"""Nozzle thrust force.
Arguments:
A_t (scalar): Nozzle throat area [units: meter**2].
p_c (scalar): Nozzle stagnation chamber pressure [units: pascal].
p_e (scalar): Nozzle exit static pressure [units: pascal].
gamma (scalar): Exhaust gas ratio of specific heats [units: dimensionless].
p_a (scalar, optional): Ambient pressure [units: pascal]. If None,
then p_a = p_e.
er (scalar, optional): Nozzle area expansion ratio [units: dimensionless]. If None,
then p_a = p_e.
Returns:
scalar: Thrust force [units: newton].
"""
return A_t * p_c * thrust_coef(p_c, p_e, gamma, p_a, er)
def mach_from_er(er, gamma):
"""Find the exit Mach number from the area expansion ratio.
Reference: J. Majdalani and B. A. Maickie, http://maji.utsi.edu/publications/pdf/HT02_11.pdf
Arguments:
er (scalar): Nozzle area expansion ratio, A_e / A_t [units: dimensionless].
gamma (scalar): Exhaust gas ratio of specific heats [units: dimensionless].
Returns:
scalar: The exit Mach number [units: dimensionless].
"""
n = 5 # order of the approximation
X = np.zeros((n,))
M = np.zeros((n,))
    e = 1/float(er)  # inverse of the expansion ratio
y = gamma # ratio of specific heats
B = (y+1)/(y-1)
k = np.sqrt( 0.5*(y-1) )
u = e**(1/B) / np.sqrt( 1+k**2 )
X[0] = (u*k)**(B/(1-B))
M[0] = X[0]
for i in range(1, n):
lamb = 1/( 2*M[i-1]**(2/B)*(B-2) + M[i-1]**2 *B**2*k**2*u**2 )
X[i] = lamb*M[i-1]*B*( M[i-1]**(2/B) - M[i-1]**2*B*k**2*u**2 \
+ ( M[i-1]**(2+2/B)*k**2*u**2*(B**2-4*B+4) \
- M[i-1]**2*B**2*k**2*u**4 + M[i-1]**(4/B)*(2*B-3) \
+ 2*M[i-1]**(2/B)*u**2*(2-B) )**0.5 )
M[i] = M[i-1] + X[i]
if abs( np.imag( M[n-1] ) ) > 1e-5:
warnings.warn('Exit Mach Number has nonzero imaginary part!')
Me = float(np.real(M[n-1]))
return Me
def mach_from_pr(p_c, p_e, gamma):
"""Find the exit Mach number from the pressure ratio.
Arguments:
p_c (scalar): Nozzle stagnation chamber pressure [units: pascal].
p_e (scalar): Nozzle exit static pressure [units: pascal].
gamma (scalar): Exhaust gas ratio of specific heats [units: dimensionless].
Returns:
scalar: Exit Mach number [units: dimensionless].
"""
return (2 / (gamma - 1) * ((p_e / p_c)**((1 - gamma) / gamma) -1))**0.5
def is_choked(p_c, p_e, gamma):
"""Determine whether the nozzle flow is choked.
See :ref:`choked-flow-tutorial-label` for details.
Reference: Rocket Propulsion Elements, 8th Edition, Equation 3-20.
Arguments:
p_c (scalar): Nozzle stagnation chamber pressure [units: pascal].
p_e (scalar): Nozzle exit static pressure [units: pascal].
gamma (scalar): Exhaust gas ratio of specific heats [units: dimensionless].
Returns:
bool: True if flow is choked, false otherwise.
"""
return p_e/p_c < (2 / (gamma + 1))**(gamma / (gamma - 1))
def mach_from_area_subsonic(area_ratio, gamma):
"""Find the Mach number as a function of area ratio for subsonic flow.
Arguments:
area_ratio (scalar): Area / throat area [units: dimensionless].
gamma (scalar): Ratio of specific heats [units: dimensionless].
Returns:
scalar: Mach number of the flow in a passage with ``area = area_ratio * (throat area)``.
"""
# See https://www.grc.nasa.gov/WWW/winddocs/utilities/b4wind_guide/mach.html
P = 2 / (gamma + 1)
Q = 1 - P
E = 1 / Q
R = area_ratio**2
a = P**(1 / Q)
r = (R - 1) / (2 * a)
X_init = 1 / ((1 + r) + (r * (r + 2))**0.5)
X = fsolve(
lambda X: (P + Q * X)**E - R * X,
X_init
)
return X[0]**0.5
def area_from_mach(M, gamma):
"""Find the area ratio for a given Mach number.
For isentropic nozzle flow, a station where the Mach number is :math:`M` will have an area
:math:`A`. This function returns that area, normalized by the area of the nozzle throat
:math:`A_t`.
See :ref:`mach-area-tutorial-label` for a physical description of the Mach-Area relation.
Reference: Rocket Propulsion Elements, 8th Edition, Equation 3-14.
Arguments:
M (scalar): Mach number [units: dimensionless].
gamma (scalar): Ratio of specific heats [units: dimensionless].
Returns:
scalar: Area ratio :math:`A / A_t`.
"""
return 1 / M * (2 / (gamma + 1) * (1 + (gamma - 1) / 2 * M**2)) \
**((gamma + 1) / (2 * (gamma - 1)))
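# --- Illustrative usage sketch (editorial addition, not part of proptools) ---
# A minimal example of how the functions above compose; the chamber
# conditions are assumed demo values, not data from any reference.
if __name__ == '__main__':
    gamma_demo = 1.2       # assumed exhaust ratio of specific heats [-]
    m_molar_demo = 0.022   # assumed exhaust mean molar mass [kg mol**-1]
    T_c_demo = 3200.       # assumed chamber temperature [K]
    p_c_demo = 7.0e6       # assumed chamber pressure [Pa]
    p_e_demo = 101.3e3     # assumed exit pressure [Pa]
    m_dot_demo = 1.0       # assumed mass flow [kg s**-1]
    A_t_demo = throat_area(m_dot_demo, p_c_demo, T_c_demo, gamma_demo, m_molar_demo)
    print('throat area = %.3e m**2' % A_t_demo)
    print('thrust = %.1f N' % thrust(A_t_demo, p_c_demo, p_e_demo, gamma_demo))
    print('c* = %.1f m/s' % c_star(gamma_demo, m_molar_demo, T_c_demo))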
|
azavea/nyc-trees | refs/heads/develop | src/nyc_trees/apps/home/templatetags/training.py | 4 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django import template
from apps.home.training import training_summary
register = template.Library()
@register.filter
def flatpage_next_url(flatpage):
# the url for this flatpage must have a TrainingStep
# that matches the url without slashes
# ex: `/the_flatpage/` must correspond to a TrainingStep
# called `the_flatpage`
flatpage_name = flatpage.url[1:-1]
return training_summary.get_step(flatpage_name).mark_progress_url()
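# Illustrative template usage (editorial addition, assumed): after
# `{% load training %}` a template can render the next step link with
# `{{ flatpage|flatpage_next_url }}`.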
|
garg10may/youtube-dl | refs/heads/master | youtube_dl/extractor/fivemin.py | 102 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
)
class FiveMinIE(InfoExtractor):
IE_NAME = '5min'
_VALID_URL = r'''(?x)
(?:https?://[^/]*?5min\.com/Scripts/PlayerSeed\.js\?(?:.*?&)?playList=|
https?://(?:(?:massively|www)\.)?joystiq\.com/video/|
5min:)
(?P<id>\d+)
'''
_TESTS = [
{
# From http://www.engadget.com/2013/11/15/ipad-mini-retina-display-review/
'url': 'http://pshared.5min.com/Scripts/PlayerSeed.js?sid=281&width=560&height=345&playList=518013791',
'md5': '4f7b0b79bf1a470e5004f7112385941d',
'info_dict': {
'id': '518013791',
'ext': 'mp4',
'title': 'iPad Mini with Retina Display Review',
},
},
{
# From http://on.aol.com/video/how-to-make-a-next-level-fruit-salad-518086247
'url': '5min:518086247',
'md5': 'e539a9dd682c288ef5a498898009f69e',
'info_dict': {
'id': '518086247',
'ext': 'mp4',
'title': 'How to Make a Next-Level Fruit Salad',
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
embed_url = 'https://embed.5min.com/playerseed/?playList=%s' % video_id
embed_page = self._download_webpage(embed_url, video_id,
'Downloading embed page')
sid = self._search_regex(r'sid=(\d+)', embed_page, 'sid')
query = compat_urllib_parse.urlencode({
'func': 'GetResults',
'playlist': video_id,
'sid': sid,
'isPlayerSeed': 'true',
'url': embed_url,
})
response = self._download_json(
'https://syn.5min.com/handlers/SenseHandler.ashx?' + query,
video_id)
if not response['success']:
err_msg = response['errorMessage']
if err_msg == 'ErrorVideoUserNotGeo':
msg = 'Video not available from your location'
else:
msg = 'Aol said: %s' % err_msg
raise ExtractorError(msg, expected=True, video_id=video_id)
info = response['binding'][0]
second_id = compat_str(int(video_id[:-2]) + 1)
formats = []
for quality, height in [(1, 320), (2, 480), (4, 720), (8, 1080)]:
if any(r['ID'] == quality for r in info['Renditions']):
formats.append({
'format_id': compat_str(quality),
'url': 'http://avideos.5min.com/%s/%s/%s_%s.mp4' % (second_id[-3:], second_id, video_id, quality),
'height': height,
})
return {
'id': video_id,
'title': info['Title'],
'formats': formats,
}
|
richardnpaul/FWL-Website | refs/heads/master | lib/python2.7/sre_compile.py | 4 | /usr/lib/python2.7/sre_compile.py |
rdezavalia/ansible | refs/heads/devel | lib/ansible/cli/doc.py | 4 | # (c) 2014, James Tanner <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-doc is a script that displays documentation on Ansible modules.
# See http://docs.ansible.com/ for more details.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import traceback
import textwrap
from ansible.compat.six import iteritems
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.plugins import module_loader
from ansible.cli import CLI
from ansible.utils import module_docs
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class DocCLI(CLI):
""" Vault command line class """
def __init__(self, args):
super(DocCLI, self).__init__(args)
self.module_list = []
def parse(self):
self.parser = CLI.base_parser(
usage='usage: %prog [options] [module...]',
epilog='Show Ansible module documentation',
module_opts=True,
)
self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
help='List available modules')
self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
help='Show playbook snippet for specified module(s)')
self.options, self.args = self.parser.parse_args(self.args[1:])
display.verbosity = self.options.verbosity
def run(self):
super(DocCLI, self).run()
if self.options.module_path is not None:
for i in self.options.module_path.split(os.pathsep):
module_loader.add_directory(i)
# list modules
if self.options.list_dir:
paths = module_loader._get_paths()
for path in paths:
self.find_modules(path)
self.pager(self.get_module_list_text())
return 0
if len(self.args) == 0:
raise AnsibleOptionsError("Incorrect options passed")
# process command line module list
text = ''
for module in self.args:
try:
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = module_loader.find_plugin(module, mod_type='.py')
if filename is None:
display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
continue
if any(filename.endswith(x) for x in C.BLACKLIST_EXTS):
continue
try:
doc, plainexamples, returndocs = module_docs.get_docstring(filename, verbose=(self.options.verbosity > 0))
except:
display.vvv(traceback.print_exc())
display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module)
continue
if doc is not None:
all_keys = []
for (k,v) in iteritems(doc['options']):
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
doc['filename'] = filename
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['plainexamples'] = plainexamples
doc['returndocs'] = returndocs
if self.options.show_snippet:
text += self.get_snippet_text(doc)
else:
text += self.get_man_text(doc)
else:
# this typically means we couldn't even parse the docstring, not just that the YAML is busted,
# probably a quoting issue.
raise AnsibleError("Parsing produced an empty object.")
except Exception as e:
display.vvv(traceback.print_exc())
raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e)))
self.pager(text)
return 0
def find_modules(self, path):
if os.path.isdir(path):
for module in os.listdir(path):
if module.startswith('.'):
continue
elif os.path.isdir(module):
self.find_modules(module)
elif any(module.endswith(x) for x in C.BLACKLIST_EXTS):
continue
elif module.startswith('__'):
continue
elif module in C.IGNORE_FILES:
continue
elif module.startswith('_'):
fullpath = '/'.join([path,module])
if os.path.islink(fullpath): # avoids aliases
continue
module = os.path.splitext(module)[0] # removes the extension
self.module_list.append(module)
def get_module_list_text(self):
columns = display.columns
displace = max(len(x) for x in self.module_list)
linelimit = columns - displace - 5
text = []
deprecated = []
for module in sorted(set(self.module_list)):
if module in module_docs.BLACKLIST_MODULES:
continue
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = module_loader.find_plugin(module, mod_type='.py')
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
try:
doc, plainexamples, returndocs = module_docs.get_docstring(filename)
desc = self.tty_ify(doc.get('short_description', '?')).strip()
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
if module.startswith('_'): # Handle deprecated
deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
else:
text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
except:
raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module)
if len(deprecated) > 0:
text.append("\nDEPRECATED:")
text.extend(deprecated)
return "\n".join(text)
@staticmethod
def print_paths(finder):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in finder._get_paths():
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def get_snippet_text(self, doc):
text = []
desc = CLI.tty_ify(doc['short_description'])
text.append("- name: %s" % (desc))
text.append(" action: %s" % (doc['module']))
pad = 31
subdent = ''.join([" " for a in xrange(pad)])
limit = display.columns - pad
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
desc = CLI.tty_ify(" ".join(opt['description']))
required = opt.get('required', False)
if not isinstance(required, bool):
raise("Incorrect value for 'Required', a boolean is needed.: %s" % required)
if required:
s = o + "="
else:
s = o
text.append(" %-20s # %s" % (s, textwrap.fill(desc, limit, subsequent_indent=subdent)))
text.append('')
return "\n".join(text)
def get_man_text(self, doc):
opt_indent=" "
text = []
text.append("> %s\n" % doc['module'].upper())
pad = display.columns * 0.20
limit = max(display.columns - int(pad), 70)
if isinstance(doc['description'], list):
desc = " ".join(doc['description'])
else:
desc = doc['description']
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=" ", subsequent_indent=" "))
if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0:
text.append("DEPRECATED: \n%s\n" % doc['deprecated'])
if 'option_keys' in doc and len(doc['option_keys']) > 0:
text.append("Options (= is mandatory):\n")
for o in sorted(doc['option_keys']):
opt = doc['options'][o]
required = opt.get('required', False)
if not isinstance(required, bool):
raise AnsibleError("Incorrect value for 'Required', a boolean is needed: %s" % required)
if required:
opt_leadin = "="
else:
opt_leadin = "-"
text.append("%s %s" % (opt_leadin, o))
if isinstance(opt['description'], list):
desc = " ".join(opt['description'])
else:
desc = opt['description']
if 'choices' in opt:
choices = ", ".join(str(i) for i in opt['choices'])
desc = desc + " (Choices: " + choices + ")"
if 'default' in opt or not required:
default = str(opt.get('default', '(null)'))
desc = desc + " [Default: " + default + "]"
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0:
notes = " ".join(doc['notes'])
text.append("Notes:%s\n" % textwrap.fill(CLI.tty_ify(notes), limit-6, initial_indent=" ", subsequent_indent=opt_indent))
if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
req = ", ".join(doc['requirements'])
text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), limit-16, initial_indent=" ", subsequent_indent=opt_indent))
if 'examples' in doc and len(doc['examples']) > 0:
text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
for ex in doc['examples']:
text.append("%s\n" % (ex['code']))
if 'plainexamples' in doc and doc['plainexamples'] is not None:
text.append("EXAMPLES:")
text.append(doc['plainexamples'])
if 'returndocs' in doc and doc['returndocs'] is not None:
text.append("RETURN VALUES:")
text.append(doc['returndocs'])
text.append('')
maintainers = set()
if 'author' in doc:
if isinstance(doc['author'], basestring):
maintainers.add(doc['author'])
else:
maintainers.update(doc['author'])
if 'maintainers' in doc:
if isinstance(doc['maintainers'], basestring):
maintainers.add(doc['maintainers'])
else:
maintainers.update(doc['maintainers'])
text.append('MAINTAINERS: ' + ', '.join(maintainers))
text.append('')
return "\n".join(text)
|
dentaku65/pelisalacarta | refs/heads/master | python/main-classic/servers/sharpfile.py | 44 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for sharpfile
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
logger.info("[sharpfile.py] test_video_exists(page_url='%s')" % page_url)
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[sharpfile.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
return video_urls
# Finds this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
#http://www.sharpfile.com/8fgbj6dtq4xc/house.05x19.pionnerdj.avi.html
patronvideos = "http://(www.sharpfile.com/.*?\.html)"
logger.info("[sharpfile.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[sharpfile]"
url = "http://"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'sharpfile' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
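# Illustrative usage sketch (not part of the original connector); the page
# snippet below is made up.
#   data = '<a href="http://www.sharpfile.com/abc123/video.avi.html">ver</a>'
#   find_videos(data)
#   # -> [['[sharpfile]', 'http://www.sharpfile.com/abc123/video.avi.html', 'sharpfile']]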
|
guitarmanj/king-phisher | refs/heads/master | tests/client/dialogs.py | 4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tests/client/dialogs.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from king_phisher import testing
from king_phisher.client import dialogs
from king_phisher.client import gui_utilities
class ClientDialogTests(testing.KingPhisherTestCase):
def test_client_dialog_classes(self):
dialog_names = list(dialog for dialog in dir(dialogs) if dialog.endswith('Dialog'))
self.assertGreater(len(dialog_names), 0, msg='failed to identify any dialog objects')
for dialog_name in dialog_names:
dialog_obj = getattr(dialogs, dialog_name)
msg = "{0} is not a subclass of GladeGObject".format(dialog_name)
self.assertIsSubclass(dialog_obj, gui_utilities.GladeGObject, msg=msg)
msg = "{0}.top_gobject is not 'dialog'".format(dialog_name)
self.assertEqual(getattr(dialog_obj, 'top_gobject', None), 'dialog', msg=msg)
msg = "{0} has no 'interact' method".format(dialog_name)
self.assertHasAttribute(dialog_obj, 'interact', msg=msg)
if __name__ == '__main__':
unittest.main()
|
ProfessionalIT/professionalit-webiste | refs/heads/master | sdk/google_appengine/lib/django-1.5/django/conf/locale/sk/formats.py | 108 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j. F Y G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
|
skullydazed/kle2xy | refs/heads/master | kle2xy.py | 1 | import hjson
from decimal import Decimal
class KLE2xy(list):
"""Abstract interface for interacting with a KLE layout.
"""
def __init__(self, layout=None, name='', invert_y=True):
super(KLE2xy, self).__init__()
self.name = name
self.invert_y = invert_y
self.key_width = Decimal('19.05')
self.key_skel = {
'decal': False,
'border_color': 'none',
'keycap_profile': '',
'keycap_color': 'grey',
'label_color': 'black',
'label_size': 3,
'label_style': 4,
'width': Decimal('1'), 'height': Decimal('1'),
'x': Decimal('0'), 'y': Decimal('0')
}
self.rows = Decimal(0)
self.columns = Decimal(0)
if layout:
self.parse_layout(layout)
@property
def width(self):
"""Returns the width of the keyboard plate.
"""
return (Decimal(self.columns) * self.key_width) + self.key_width/2
@property
def height(self):
"""Returns the height of the keyboard plate.
"""
return (self.rows * self.key_width) + self.key_width/2
@property
def size(self):
"""Returns the size of the keyboard plate.
"""
return (self.width, self.height)
def attrs(self, properties):
"""Parse the keyboard properties dictionary.
"""
# FIXME: Store more than just the keyboard name.
if 'name' in properties:
self.name = properties['name']
def parse_layout(self, layout):
# Wrap this in a dictionary so hjson will parse KLE raw data
layout = '{"layout": [' + layout + ']}'
layout = hjson.loads(layout)['layout']
# Initialize our state machine
current_key = self.key_skel.copy()
current_row = Decimal(0)
current_col = Decimal(0)
current_x = 0
current_y = self.key_width / 2
if isinstance(layout[0], dict):
self.attrs(layout[0])
layout = layout[1:]
for row_num, row in enumerate(layout):
self.append([])
# Process the current row
for key in row:
if isinstance(key, dict):
if 'w' in key and key['w'] != Decimal(1):
current_key['width'] = Decimal(key['w'])
if 'w2' in key and 'h2' in key and key['w2'] == 1.5 and key['h2'] == 1:
# FIXME: ISO Key uses these params: {x:0.25,w:1.25,h:2,w2:1.5,h2:1,x2:-0.25}
current_key['isoenter'] = True
if 'h' in key and key['h'] != Decimal(1):
current_key['height'] = Decimal(key['h'])
if 'a' in key:
current_key['label_style'] = self.key_skel['label_style'] = int(key['a'])
if current_key['label_style'] < 0:
current_key['label_style'] = 0
elif current_key['label_style'] > 9:
current_key['label_style'] = 9
if 'f' in key:
font_size = int(key['f'])
if font_size > 9:
font_size = 9
elif font_size < 1:
font_size = 1
current_key['label_size'] = self.key_skel['label_size'] = font_size
if 'p' in key:
current_key['keycap_profile'] = self.key_skel['keycap_profile'] = key['p']
if 'c' in key:
current_key['keycap_color'] = self.key_skel['keycap_color'] = key['c']
if 't' in key:
# FIXME: Need to do better validation, plus figure out how to support multiple colors
if '\n' in key['t']:
key['t'] = key['t'].split('\n')[0]
if key['t'] == "0":
key['t'] = "#000000"
current_key['label_color'] = self.key_skel['label_color'] = key['t']
if 'x' in key:
current_col += Decimal(key['x'])
current_x += Decimal(key['x']) * self.key_width
if 'y' in key:
current_row += Decimal(key['y'])
current_y += Decimal(key['y']) * self.key_width
if 'd' in key:
current_key['decal'] = True
else:
current_key['name'] = key
current_key['row'] = current_row
current_key['column'] = current_col
# Determine the X center
x_center = (current_key['width'] * self.key_width) / 2
current_x += x_center
current_key['x'] = current_x
current_x += x_center
# Determine the Y center
y_center = (current_key['height'] * self.key_width) / 2
y_offset = y_center - (self.key_width / 2)
current_key['y'] = (current_y + y_offset)
# Tend to our row/col count
current_col += current_key['width']
if current_col > self.columns:
self.columns = current_col
# Invert the y-axis if necessary
if self.invert_y:
current_key['y'] = -current_key['y']
# Store this key
self[-1].append(current_key)
current_key = self.key_skel.copy()
# Move to the next row
current_x = 0
current_y += self.key_width
current_col = Decimal(0)
current_row += Decimal(1)
if current_row > self.rows:
self.rows = Decimal(current_row)
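# Illustrative usage sketch (not part of the original module); the raw KLE
# data below is a made-up two-key layout.
if __name__ == '__main__':
    example_raw = '["Esc", "F1"]'
    layout = KLE2xy(example_raw, name='example')
    print('%s plate: %s x %s mm' % (layout.name, layout.width, layout.height))
    for row in layout:
        for key in row:
            print('%s -> (%s, %s)' % (key['name'], key['x'], key['y']))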
|
tedelhourani/ansible | refs/heads/devel | lib/ansible/modules/network/avi/avi_cloudproperties.py | 27 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloudproperties
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of CloudProperties Avi RESTful Object
description:
- This module is used to configure CloudProperties object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
cc_props:
description:
- Cloudconnector properties.
cc_vtypes:
description:
- Cloud types supported by cloudconnector.
- Enum options - CLOUD_NONE, CLOUD_VCENTER, CLOUD_OPENSTACK, CLOUD_AWS, CLOUD_VCA, CLOUD_APIC, CLOUD_MESOS, CLOUD_LINUXSERVER, CLOUD_DOCKER_UCP,
- CLOUD_RANCHER, CLOUD_OSHIFT_K8S.
hyp_props:
description:
- Hypervisor properties.
info:
description:
- Properties specific to a cloud type.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create CloudProperties object
avi_cloudproperties:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_cloudproperties
"""
RETURN = '''
obj:
description: CloudProperties (api/cloudproperties) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
cc_props=dict(type='dict',),
cc_vtypes=dict(type='list',),
hyp_props=dict(type='list',),
info=dict(type='list',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cloudproperties',
set([]))
if __name__ == '__main__':
main()
|
KokareIITP/django | refs/heads/master | tests/transactions/models.py | 411 | """
Transactions
Django handles transactions in three different ways. The default is to commit
each transaction upon a write, but you can decorate a function to get
commit-on-success behavior. Alternatively, you can manage the transaction
manually.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Reporter(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField()
class Meta:
ordering = ('first_name', 'last_name')
def __str__(self):
return ("%s %s" % (self.first_name, self.last_name)).strip()
|
zhouzhenghui/python-for-android | refs/heads/master | python-modules/zope/zope/interface/verify.py | 50 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Verify interface implementations
$Id: verify.py 110699 2010-04-09 08:16:17Z regebro $
"""
from zope.interface.exceptions import BrokenImplementation, DoesNotImplement
from zope.interface.exceptions import BrokenMethodImplementation
from types import FunctionType, MethodType
from zope.interface.interface import fromMethod, fromFunction, Method
import sys
# This will be monkey-patched when running under Zope 2, so leave this
# here:
MethodTypes = (MethodType, )
def _verify(iface, candidate, tentative=0, vtype=None):
"""Verify that 'candidate' might correctly implement 'iface'.
This involves:
o Making sure the candidate defines all the necessary methods
o Making sure the methods have the correct signature
o Making sure the candidate asserts that it implements the interface
Note that this isn't the same as verifying that the class does
implement the interface.
If optional tentative is true, suppress the "is implemented by" test.
"""
if vtype == 'c':
tester = iface.implementedBy
else:
tester = iface.providedBy
if not tentative and not tester(candidate):
raise DoesNotImplement(iface)
# Here the `desc` is either an `Attribute` or `Method` instance
for name, desc in iface.namesAndDescriptions(1):
try:
attr = getattr(candidate, name)
except AttributeError:
if (not isinstance(desc, Method)) and vtype == 'c':
# We can't verify non-methods on classes, since the
# class may provide attrs in its __init__.
continue
raise BrokenImplementation(iface, name)
if not isinstance(desc, Method):
# If it's not a method, there's nothing else we can test
continue
if isinstance(attr, FunctionType):
if sys.version[0] == '3' and isinstance(candidate, type):
# This is an "unbound method" in Python 3.
meth = fromFunction(attr, iface, name=name, imlevel=1)
else:
# Nope, just a normal function
meth = fromFunction(attr, iface, name=name)
elif (isinstance(attr, MethodTypes)
and type(attr.im_func) is FunctionType):
meth = fromMethod(attr, iface, name)
else:
if not callable(attr):
raise BrokenMethodImplementation(name, "Not a method")
# sigh, it's callable, but we don't know how to introspect it, so
# we have to give it a pass.
continue
# Make sure that the required and implemented method signatures are
# the same.
desc = desc.getSignatureInfo()
meth = meth.getSignatureInfo()
mess = _incompat(desc, meth)
if mess:
raise BrokenMethodImplementation(name, mess)
return True
def verifyClass(iface, candidate, tentative=0):
return _verify(iface, candidate, tentative, vtype='c')
def verifyObject(iface, candidate, tentative=0):
return _verify(iface, candidate, tentative, vtype='o')
def _incompat(required, implemented):
#if (required['positional'] !=
# implemented['positional'][:len(required['positional'])]
# and implemented['kwargs'] is None):
# return 'implementation has different argument names'
if len(implemented['required']) > len(required['required']):
return 'implementation requires too many arguments'
if ((len(implemented['positional']) < len(required['positional']))
and not implemented['varargs']):
return "implementation doesn't allow enough arguments"
if required['kwargs'] and not implemented['kwargs']:
return "implementation doesn't support keyword arguments"
if required['varargs'] and not implemented['varargs']:
return "implementation doesn't support variable arguments"
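# Illustrative usage sketch (not part of the original module); IFoo and Foo
# below are hypothetical names.
#
#   from zope.interface import Interface, implementer
#   from zope.interface.verify import verifyClass, verifyObject
#
#   class IFoo(Interface):
#       def bar(x):
#           "Return x doubled."
#
#   @implementer(IFoo)
#   class Foo(object):
#       def bar(self, x):
#           return x * 2
#
#   verifyClass(IFoo, Foo)     # checks method presence and signatures
#   verifyObject(IFoo, Foo())  # additionally checks that the instance provides IFoo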
|
wonikjang/wonikjang.github.io | refs/heads/master | _posts/cnn111.py | 2 | import numpy as np
import matplotlib.pyplot as plt
# y = w1 * x1 + w2 * x2 + b
n = 30
epoch = 5
np.random.seed(0)
# x = np.random.uniform(0, 5, n)
x = np.random.rand(n,2)
x.shape
#x = np.reshape( np.arange(60), (30,2))
#x.shape
d = np.random.uniform(0, 5, n)
d.shape
#d = 2 * x[:,0]
w = np.random.rand(1,2)
b = np.random.uniform(0.001, 0.002, 1)
alpha = 0.9
#fig1 = plt.figure()
#ax1 = fig1.add_subplot(111)
#ax1.plot(x, d, 'ro')
epdist = np.zeros((epoch,2))
wdata = np.zeros((epoch,2))
bdata = np.zeros((epoch,1))
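# Note added for clarity (not part of the original script): the loop below
# trains a single linear unit y = w1*x1 + w2*x2 + b with the LMS / delta rule:
#   e  = d - y
#   w <- w + alpha * e * x
#   b <- b + alpha * e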
for j in range(0,epoch):
print(j)
for i in range(0,n):
y = x[i,:].dot(w.T) + b
e = d[i] - y
dw = alpha * e * x[i,:] # (1,2)
db = alpha * e
w += dw
b += db
# print(w, b)
wdata[j,:] = w
bdata[j,:] = b
d1 = x.dot(w.T) + b
dist = d1.flatten() - d # compare predictions against the targets
dist1 = np.mean(abs(dist)) # MAE
epdist[j,0] = j ; epdist[j,1] = dist1 # Matrix for epoch # and MAE
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(epdist[:,0], epdist[:,1])
ax.set_xlabel('Number of Epoch')
ax.set_ylabel('Mean Absolute Error')
ax.set_title('Trend of MAE')
# Regression visualization
import seaborn as sns; sns.set(color_codes=True)
ax = sns.regplot(x=x[:,0], y=d)
# Result visualizaion
y_pred = np.zeros((epoch, n))
for j in range(0, epoch):
y_pred[j] = x.dot(wdata[j, :].T) + bdata[j]
import pandas as pd
num = np.repeat(np.arange(epoch), n)
x1 = np.tile(x[:,0],epoch)
y1 = np.concatenate(y_pred)
df = pd.DataFrame({'epoch':num ,'x':x1, 'y':y1})
sns.lmplot("x", "y", data=df, hue='epoch', fit_reg=True)
'''
#fig1 = plt.figure()
#ax1 = fig1.add_subplot(111)
for j in range(0, epoch):
y_pred[j] = x.dot(wdata[j,:].T) + bdata[j]
#plt.plot(x, d, 'ro')
#plt.plot(x[:,0], y_pred[j])
fit = np.polyfit(x[:, 0], y_pred[j], 1)
fit_fn = np.poly1d(fit)
ax1.plot(x[:,0], y_pred[j], 'yo', x, fit_fn(x), '--k')
#colormap = plt.cm.gist_ncar
#colors = [colormap(i) for i in np.linspace(0, 1,len(ax1.lines))]
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.set_color_cycle(['red', 'green', 'blue', 'yellow'])
ax1.plot(x, d, 'ro')
for j in range(0, epoch):
y_pred[j] = x.dot(wdata[j, :].T) + bdata[j]
# plt.plot(x, d, 'ro')
# plt.plot(x[:,0], y_pred[j])
fit = np.polyfit(x[:, 0], y_pred[j], 1)
fit_fn = np.poly1d(fit)
ax1.plot(x[:,0], y_pred[j])
'''
|
Alberto-Beralix/Beralix | refs/heads/master | i386-squashfs-root/usr/lib/python2.7/encodings/mac_iceland.py | 593 | """ Python Character Mapping Codec mac_iceland generated from 'MAPPINGS/VENDORS/APPLE/ICELAND.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-iceland',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xdd' # 0xA0 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\xd0' # 0xDC -> LATIN CAPITAL LETTER ETH
u'\xf0' # 0xDD -> LATIN SMALL LETTER ETH
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
u'\xfe' # 0xDF -> LATIN SMALL LETTER THORN
u'\xfd' # 0xE0 -> LATIN SMALL LETTER Y WITH ACUTE
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
reinvantveer/semcontext | refs/heads/master | lib/rdflib/serializer.py | 25 | """
Serializer plugin interface.
This module is useful for those who want to write a serializer that can
plug in to rdflib. If you want to invoke a serializer, you will usually
do so through the Graph class's serialize method.
TODO: info for how to write a serializer that can plug in to rdflib.
See also rdflib.plugin
"""
from rdflib.term import URIRef
__all__ = ['Serializer']
class Serializer(object):
def __init__(self, store):
self.store = store
self.encoding = "UTF-8"
self.base = None
def serialize(self, stream, base=None, encoding=None, **args):
"""Abstract method"""
def relativize(self, uri):
base = self.base
if base is not None and uri.startswith(base):
uri = URIRef(uri.replace(base, "", 1))
return uri
|