repo_name stringlengths 6–100 | path stringlengths 4–294 | copies stringlengths 1–5 | size stringlengths 4–6 | content stringlengths 606–896k | license stringclasses 15 values
---|---|---|---|---|---
FoxerLee/iOS_sitp
|
Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/test/mac/gyptest-installname.py
|
244
|
2512
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that LD_DYLIB_INSTALL_NAME and DYLIB_INSTALL_NAME_BASE are handled
correctly.
"""
import TestGyp
import re
import subprocess
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'installname'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
def GetInstallname(p):
p = test.built_file_path(p, chdir=CHDIR)
r = re.compile(r'cmd LC_ID_DYLIB.*?name (.*?) \(offset \d+\)', re.DOTALL)
proc = subprocess.Popen(['otool', '-l', p], stdout=subprocess.PIPE)
o = proc.communicate()[0]
assert not proc.returncode
m = r.search(o)
assert m
return m.group(1)
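# Illustrative `otool -l` output matched by the regex above (format assumed,
# not captured from a real run):
#           cmd LC_ID_DYLIB
#       cmdsize 56
#          name /usr/local/lib/libdefault_installname.dylib (offset 24)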
if (GetInstallname('libdefault_installname.dylib') !=
'/usr/local/lib/libdefault_installname.dylib'):
test.fail_test()
if (GetInstallname('My Framework.framework/My Framework') !=
'/Library/Frameworks/My Framework.framework/'
'Versions/A/My Framework'):
test.fail_test()
if (GetInstallname('libexplicit_installname.dylib') !=
'Trapped in a dynamiclib factory'):
test.fail_test()
if (GetInstallname('libexplicit_installname_base.dylib') !=
'@executable_path/../../../libexplicit_installname_base.dylib'):
test.fail_test()
if (GetInstallname('My Other Framework.framework/My Other Framework') !=
'@executable_path/../../../My Other Framework.framework/'
'Versions/A/My Other Framework'):
test.fail_test()
if (GetInstallname('libexplicit_installname_with_base.dylib') !=
'/usr/local/lib/libexplicit_installname_with_base.dylib'):
test.fail_test()
if (GetInstallname('libexplicit_installname_with_explicit_base.dylib') !=
'@executable_path/../libexplicit_installname_with_explicit_base.dylib'):
test.fail_test()
if (GetInstallname('libboth_base_and_installname.dylib') !=
'Still trapped in a dynamiclib factory'):
test.fail_test()
if (GetInstallname('install_name_with_info_plist.framework/'
'install_name_with_info_plist') !=
'/Library/Frameworks/install_name_with_info_plist.framework/'
'Versions/A/install_name_with_info_plist'):
test.fail_test()
if ('DYLIB_INSTALL_NAME_BASE:standardizepath: command not found' in
test.stdout()):
test.fail_test()
test.pass_test()
|
mit
|
libvirt/autotest
|
frontend/migrations/046_merge_databases.py
|
1
|
1604
|
try:
import autotest.common as common
except ImportError:
import common
from autotest_lib.database import db_utils, migrate
TKO_MIGRATION_NAME = '031_rename_tko_tables'
migrations_module = __import__('autotest_lib.tko.migrations', globals(),
locals(), [TKO_MIGRATION_NAME])
tko_migration = getattr(migrations_module, TKO_MIGRATION_NAME)
TABLE_NAMES = tko_migration.RENAMES_UP.values()
def migrate_up(manager):
tko_manager = migrate.get_migration_manager(db_name='TKO', debug=False,
force=False)
if tko_manager.get_db_version() < 31:
raise Exception('You must update the TKO database to at least version '
'31 before applying AUTOTEST_WEB migration 46')
if manager.simulate:
tko_manager.initialize_and_fill_test_db()
if not manager.force:
response = raw_input(
'This migration will merge the autotest_web and tko databases. '
'Following the migration, the tko database will be dropped. '
'Any user-added tables in tko will NOT be migrated. This '
'migration is NOT reversible. Are you sure you want to '
'continue? (yes/no) ')
if response != 'yes':
raise Exception('User has chosen to abort migration')
db_utils.move_tables(manager, tko_manager, TABLE_NAMES)
db_utils.drop_database(tko_manager)
manager.execute_script(tko_migration.RECREATE_VIEWS_UP)
def migrate_down(manager):
raise Exception('Migration 46 is not reversible!')
|
gpl-2.0
|
tvalacarta/tvalacarta
|
python/main-classic/lib/youtube_dl/extractor/ro220.py
|
64
|
1452
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class Ro220IE(InfoExtractor):
IE_NAME = '220.ro'
_VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<id>[^/]+)'
_TEST = {
'url': 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/',
'md5': '03af18b73a07b4088753930db7a34add',
'info_dict': {
'id': 'LYV6doKo7f',
'ext': 'mp4',
'title': 'Luati-le Banii sez 4 ep 1',
'description': r're:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
url = compat_urllib_parse_unquote(self._search_regex(
r'(?s)clip\s*:\s*{.*?url\s*:\s*\'([^\']+)\'', webpage, 'url'))
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
formats = [{
'format_id': 'sd',
'url': url,
'ext': 'mp4',
}]
return {
'id': video_id,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
|
gpl-3.0
|
ARL-UTEP-OC/emubox
|
workshop-creator/python27-64bit-gtk3/Lib/site-packages/setuptools/command/alias.py
|
455
|
2426
|
from distutils.errors import DistutilsOptionError
from setuptools.extern.six.moves import map
from setuptools.command.setopt import edit_config, option_base, config_file
def shquote(arg):
"""Quote an argument for later parsing by shlex.split()"""
for c in '"', "'", "\\", "#":
if c in arg:
return repr(arg)
if arg.split() != [arg]:
return repr(arg)
return arg
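# Illustrative behavior (assumed, not part of the original module):
#   shquote('plain')    -> 'plain'          (returned unchanged)
#   shquote('a b')      -> "'a b'"          (whitespace, so repr()-quoted)
#   shquote('say "hi"') -> '\'say "hi"\''   (embedded quote, so repr()-quoted)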
class alias(option_base):
"""Define a shortcut that invokes one or more commands"""
description = "define a shortcut to invoke one or more commands"
command_consumes_arguments = True
user_options = [
('remove', 'r', 'remove (unset) the alias'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.args = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.remove and len(self.args) != 1:
raise DistutilsOptionError(
"Must specify exactly one argument (the alias name) when "
"using --remove"
)
def run(self):
aliases = self.distribution.get_option_dict('aliases')
if not self.args:
print("Command Aliases")
print("---------------")
for alias in aliases:
print("setup.py alias", format_alias(alias, aliases))
return
elif len(self.args) == 1:
alias, = self.args
if self.remove:
command = None
elif alias in aliases:
print("setup.py alias", format_alias(alias, aliases))
return
else:
print("No alias definition found for %r" % alias)
return
else:
alias = self.args[0]
command = ' '.join(map(shquote, self.args[1:]))
edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
def format_alias(name, aliases):
source, command = aliases[name]
if source == config_file('global'):
source = '--global-config '
elif source == config_file('user'):
source = '--user-config '
elif source == config_file('local'):
source = ''
else:
source = '--filename=%r' % source
return source + name + ' ' + command
|
gpl-2.0
|
alexteodor/odoo
|
addons/sale_layout/__openerp__.py
|
322
|
1793
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sale Layout',
'version': '1.0',
'sequence': 14,
'summary': 'Sale Layout, page-break, subtotals, separators, report',
'description': """
Manage your sales reports
=========================
With this module you can personalize the sale order and invoice report with
separators, page-breaks or subtotals.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/crm',
'depends': ['sale', 'report'],
'category': 'Sale',
'data': ['views/sale_layout_category_view.xml',
'views/report_invoice_layouted.xml',
'views/report_quotation_layouted.xml',
'views/sale_layout_template.xml',
'security/ir.model.access.csv'],
'demo': ['data/sale_layout_category_data.xml'],
'installable': True,
}
|
agpl-3.0
|
bud4/samba
|
selftest/perf_tests.py
|
5
|
4081
|
#!/usr/bin/python
# This script generates a list of testsuites that should be run to
# test Samba performance.
#
# These tests are not intended to exercise every aspect of Samba, but to
# perform common simple functions and to ascertain performance.
#
# The syntax for a testsuite is "-- TEST --" on a single line, followed
# by the name of the test, the environment it needs and the command to run, all
# three separated by newlines. All other lines in the output are considered
# comments.
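# An illustrative block of the output described above (format assumed from the
# comment, not captured from a real run):
#
# -- TEST --
# samba4.ldap.ad_dc_performance.python(ad_dc_ntvfs)
# ad_dc_ntvfs
# python .../dsdb/tests/python/ad_dc_performance.py $SERVER ...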
from selftesthelpers import *
samba4srcdir = source4dir()
samba4bindir = bindir()
plantestsuite_loadlist("samba4.ldap.ad_dc_performance.python(ad_dc_ntvfs)",
"ad_dc_ntvfs",
[python, os.path.join(samba4srcdir,
"dsdb/tests/python/ad_dc_performance.py"),
'$SERVER', '-U"$USERNAME%$PASSWORD"',
'--workgroup=$DOMAIN',
'$LOADLIST', '$LISTOPT'])
plantestsuite_loadlist("samba4.ndr_pack_performance.python(ad_dc_ntvfs)",
"ad_dc_ntvfs",
[python, os.path.join(samba4srcdir,
"dsdb/tests/python/ndr_pack_performance.py"),
'$SERVER', '-U"$USERNAME%$PASSWORD"',
'--workgroup=$DOMAIN',
'$LOADLIST', '$LISTOPT'])
plantestsuite_loadlist("samba4.provision_performance.python(ad_dc_ntvfs)",
"ad_dc_ntvfs",
[python, os.path.join(samba4srcdir,
"dsdb/tests/python/ad_dc_provision_performance.py"),
'$SERVER', '-U"$USERNAME%$PASSWORD"',
'--workgroup=$DOMAIN',
'$LOADLIST', '$LISTOPT'])
plantestsuite_loadlist("samba4.ldap.ad_dc_search_performance.python(ad_dc_ntvfs)",
"ad_dc_ntvfs",
[python,
os.path.join(samba4srcdir,
"dsdb/tests/python/ad_dc_search_performance.py"),
'$SERVER', '-U"$USERNAME%$PASSWORD"',
'--workgroup=$DOMAIN',
'$LOADLIST', '$LISTOPT'])
plantestsuite_loadlist("samba4.ldap.ad_dc_multi_bind.ntlm.python(ad_dc_ntvfs)",
"ad_dc_ntvfs",
[python, os.path.join(samba4srcdir,
"dsdb/tests/python/ad_dc_multi_bind.py"),
'$SERVER', '-U"$USERNAME%$PASSWORD"', '-k no',
'--workgroup=$DOMAIN',
'$LOADLIST', '$LISTOPT'])
plantestsuite_loadlist("samba4.ldap.ad_dc_multi_bind.krb5.python(ad_dc_ntvfs)",
"ad_dc_ntvfs",
[python, os.path.join(samba4srcdir,
"dsdb/tests/python/ad_dc_multi_bind.py"),
'$SERVER', '-U"$USERNAME%$PASSWORD"', '-k yes',
'--realm=$REALM',
'$LOADLIST', '$LISTOPT'])
plantestsuite_loadlist("samba4.ldb.multi_connect.python(ad_dc_ntvfs)",
"ad_dc_ntvfs",
[python, os.path.join(samba4srcdir,
"dsdb/tests/python/ad_dc_multi_bind.py"),
'tdb://$PREFIX_ABS/ad_dc_ntvfs/private/sam.ldb',
'$LOADLIST', '$LISTOPT'])
# this one doesn't tidy itself up fully, so leave it as last unless
# you want a messy database.
plantestsuite_loadlist("samba4.ldap.ad_dc_medley_performance.python(ad_dc_ntvfs)",
"ad_dc_ntvfs",
[python,
os.path.join(samba4srcdir,
"dsdb/tests/python/ad_dc_medley_performance.py"),
'$SERVER', '-U"$USERNAME%$PASSWORD"',
'--workgroup=$DOMAIN',
'$LOADLIST', '$LISTOPT'])
|
gpl-3.0
|
AZtheAsian/zulip
|
zerver/lib/str_utils.py
|
13
|
3652
|
"""
String Utilities:
This module helps in converting strings from one type to another.
Currently we have strings of 3 semantic types:
1. text strings: These strings are used to represent all textual data,
like people's names, stream names, content of messages, etc.
These strings can contain non-ASCII characters, so their type should be
typing.Text (which is `str` in python 3 and `unicode` in python 2).
2. binary strings: These strings are used to represent binary data.
This should be of type six.binary_type (which is `bytes` in python 3
and `str` in python 2).
3. native strings: These strings are for internal use only. Strings of
this type are not meant to be stored in database, displayed to end
users, etc. Things like exception names, parameter names, attribute
names, etc should be native strings. These strings should only
contain ASCII characters and they should have type `str`.
There are 3 utility functions provided for converting strings from one type
to another - force_text, force_bytes, force_str
Interconversion between text strings and binary strings can be done by
using encode and decode appropriately or by using the utility functions
force_text and force_bytes.
It is recommended to use the utility functions for other string conversions.
"""
import six
from six import binary_type
from typing import Any, Dict, Mapping, Union, TypeVar, Text
NonBinaryStr = TypeVar('NonBinaryStr', str, Text)
# This is used to represent text or native strings
def force_text(s, encoding='utf-8'):
# type: (Union[Text, binary_type], str) -> Text
"""converts a string to a text string"""
if isinstance(s, Text):
return s
elif isinstance(s, binary_type):
return s.decode(encoding)
else:
raise TypeError("force_text expects a string type")
def force_bytes(s, encoding='utf-8'):
# type: (Union[Text, binary_type], str) -> binary_type
"""converts a string to binary string"""
if isinstance(s, binary_type):
return s
elif isinstance(s, Text):
return s.encode(encoding)
else:
raise TypeError("force_bytes expects a string type")
def force_str(s, encoding='utf-8'):
# type: (Union[Text, binary_type], str) -> str
"""converts a string to a native string"""
if isinstance(s, str):
return s
elif isinstance(s, Text):
return s.encode(encoding)
elif isinstance(s, binary_type):
return s.decode(encoding)
else:
raise TypeError("force_str expects a string type")
def dict_with_str_keys(dct, encoding='utf-8'):
# type: (Mapping[NonBinaryStr, Any], str) -> Dict[str, Any]
"""applies force_str on the keys of a dict (non-recursively)"""
return {force_str(key, encoding): value for key, value in six.iteritems(dct)}
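# A minimal usage sketch (assumed, not part of the original module):
#   force_text(b'caf\xc3\xa9')      -> u'café'
#   force_bytes(u'café')            -> b'caf\xc3\xa9'
#   dict_with_str_keys({u'a': 1})   -> {'a': 1}  # keys coerced to native str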
class ModelReprMixin(object):
"""
This mixin provides a python 2 and 3 compatible way of handling string representation of a model.
When declaring a model, inherit this mixin before django.db.models.Model.
Define __unicode__ on your model which returns a typing.Text object.
This mixin will automatically define __str__ and __repr__.
"""
def __unicode__(self):
# type: () -> Text
# Originally raised an exception, but Django (e.g. the ./manage.py shell)
# was catching the exception and not displaying any sort of error
return u"Implement __unicode__ in your subclass of ModelReprMixin!"
def __str__(self):
# type: () -> str
return force_str(self.__unicode__())
def __repr__(self):
# type: () -> str
return force_str(self.__unicode__())
|
apache-2.0
|
mnahm5/django-estore
|
Lib/site-packages/django/db/backends/mysql/schema.py
|
20
|
4566
|
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.models import NOT_PROVIDED
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s"
sql_alter_column_null = "MODIFY %(column)s %(type)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
sql_alter_column_type = "MODIFY %(column)s %(type)s"
sql_rename_column = "ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s"
sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s"
sql_create_fk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY "
"(%(column)s) REFERENCES %(to_table)s (%(to_column)s)"
)
sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s"
sql_delete_index = "DROP INDEX %(name)s ON %(table)s"
alter_string_set_null = 'MODIFY %(column)s %(type)s NULL;'
alter_string_drop_null = 'MODIFY %(column)s %(type)s NOT NULL;'
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"
def quote_value(self, value):
# Inner import to allow module to fail to load gracefully
import MySQLdb.converters
return MySQLdb.escape(value, MySQLdb.converters.conversions)
def skip_default(self, field):
"""
MySQL doesn't accept default values for longtext and longblob
and implicitly treats these columns as nullable.
"""
return field.db_type(self.connection) in {'longtext', 'longblob'}
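# Illustrative (assumed): a Django TextField maps to 'longtext' on MySQL, so
# its declared default is skipped here and simulated once in add_field() below.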
def add_field(self, model, field):
super(DatabaseSchemaEditor, self).add_field(model, field)
# Simulate the effect of a one-off default.
if self.skip_default(field) and field.default not in {None, NOT_PROVIDED}:
effective_default = self.effective_default(field)
self.execute('UPDATE %(table)s SET %(column)s = %%s' % {
'table': self.quote_name(model._meta.db_table),
'column': self.quote_name(field.column),
}, [effective_default])
def _model_indexes_sql(self, model):
storage = self.connection.introspection.get_storage_engine(
self.connection.cursor(), model._meta.db_table
)
if storage == "InnoDB":
for field in model._meta.local_fields:
if field.db_index and not field.unique and field.get_internal_type() == "ForeignKey":
# Temporary setting db_index to False (in memory) to disable
# index creation for FKs (index automatically created by MySQL)
field.db_index = False
return super(DatabaseSchemaEditor, self)._model_indexes_sql(model)
def _delete_composed_index(self, model, fields, *args):
"""
MySQL can remove an implicit FK index on a field when that field is
covered by another index like a unique_together. "covered" here means
that the more complex index starts like the simpler one.
http://bugs.mysql.com/bug.php?id=37910 / Django ticket #24757
We check here before removing the [unique|index]_together if we have to
recreate a FK index.
"""
first_field = model._meta.get_field(fields[0])
if first_field.get_internal_type() == 'ForeignKey':
constraint_names = self._constraint_names(model, [first_field.column], index=True)
if not constraint_names:
self.execute(self._create_index_sql(model, [first_field], suffix=""))
return super(DatabaseSchemaEditor, self)._delete_composed_index(model, fields, *args)
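# Illustrative scenario (assumed): with unique_together = ('author', 'title'),
# MySQL lets the composite index cover the author_id FK, so no separate FK
# index exists; the check above recreates one before the composite index is
# removed.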
def _set_field_new_type_null_status(self, field, new_type):
"""
Keep the null property of the old field. If it has changed, it will be
handled separately.
"""
if field.null:
new_type += " NULL"
else:
new_type += " NOT NULL"
return new_type
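# e.g. (assumed): new_type 'varchar(100)' becomes 'varchar(100) NULL' for a
# nullable field and 'varchar(100) NOT NULL' otherwise.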
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
new_type = self._set_field_new_type_null_status(old_field, new_type)
return super(DatabaseSchemaEditor, self)._alter_column_type_sql(table, old_field, new_field, new_type)
def _rename_field_sql(self, table, old_field, new_field, new_type):
new_type = self._set_field_new_type_null_status(old_field, new_type)
return super(DatabaseSchemaEditor, self)._rename_field_sql(table, old_field, new_field, new_type)
|
mit
|
rsvip/Django
|
tests/gis_tests/gis_migrations/test_operations.py
|
284
|
7957
|
from __future__ import unicode_literals
from django.contrib.gis.db.models import fields
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, migrations, models
from django.db.migrations.migration import Migration
from django.db.migrations.state import ProjectState
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from ..utils import mysql
if connection.features.gis_enabled:
try:
GeometryColumns = connection.ops.geometry_columns()
HAS_GEOMETRY_COLUMNS = True
except NotImplementedError:
HAS_GEOMETRY_COLUMNS = False
@skipUnlessDBFeature('gis_enabled')
class OperationTests(TransactionTestCase):
available_apps = ['gis_tests.gis_migrations']
def tearDown(self):
# Delete table after testing
if hasattr(self, 'current_state'):
self.apply_operations('gis', self.current_state, [migrations.DeleteModel('Neighborhood')])
super(OperationTests, self).tearDown()
def get_table_description(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_table_description(cursor, table)
def assertColumnExists(self, table, column):
self.assertIn(column, [c.name for c in self.get_table_description(table)])
def assertColumnNotExists(self, table, column):
self.assertNotIn(column, [c.name for c in self.get_table_description(table)])
def apply_operations(self, app_label, project_state, operations):
migration = Migration('name', app_label)
migration.operations = operations
with connection.schema_editor() as editor:
return migration.apply(project_state, editor)
def set_up_test_model(self, force_raster_creation=False):
test_fields = [
('id', models.AutoField(primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
('geom', fields.MultiPolygonField(srid=4326))
]
if connection.features.supports_raster or force_raster_creation:
test_fields += [('rast', fields.RasterField(srid=4326))]
operations = [migrations.CreateModel('Neighborhood', test_fields)]
return self.apply_operations('gis', ProjectState(), operations)
def assertGeometryColumnsCount(self, expected_count):
table_name = 'gis_neighborhood'
if connection.features.uppercases_column_names:
table_name = table_name.upper()
self.assertEqual(
GeometryColumns.objects.filter(**{
GeometryColumns.table_name_col(): table_name,
}).count(),
expected_count
)
def assertSpatialIndexExists(self, table, column):
with connection.cursor() as cursor:
indexes = connection.introspection.get_indexes(cursor, table)
self.assertIn(column, indexes)
def alter_gis_model(self, migration_class, model_name, field_name,
blank=False, field_class=None):
project_state = self.set_up_test_model()
self.current_state = project_state
args = [model_name, field_name]
if field_class:
args.append(field_class(srid=4326, blank=blank))
operation = migration_class(*args)
new_state = project_state.clone()
operation.state_forwards('gis', new_state)
with connection.schema_editor() as editor:
operation.database_forwards('gis', editor, project_state, new_state)
self.current_state = new_state
def test_add_geom_field(self):
"""
Test the AddField operation with a geometry-enabled column.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood',
'path', False, fields.LineStringField)
self.assertColumnExists('gis_neighborhood', 'path')
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(2)
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'path')
@skipUnlessDBFeature('supports_raster')
def test_add_raster_field(self):
"""
Test the AddField operation with a raster-enabled column.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood',
'heatmap', False, fields.RasterField)
self.assertColumnExists('gis_neighborhood', 'heatmap')
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'heatmap')
@skipIfDBFeature('supports_raster')
def test_create_raster_model_on_db_without_raster_support(self):
"""
Test creating a model with a raster field on a db without raster support.
"""
msg = 'Raster fields require backends with raster support.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.set_up_test_model(True)
@skipIfDBFeature('supports_raster')
def test_add_raster_field_on_db_without_raster_support(self):
"""
Test adding a raster field on a db without raster support.
"""
msg = 'Raster fields require backends with raster support.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.alter_gis_model(
migrations.AddField, 'Neighborhood', 'heatmap',
False, fields.RasterField
)
def test_add_blank_geom_field(self):
"""
Should be able to add a GeometryField with blank=True.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood',
'path', True, fields.LineStringField)
self.assertColumnExists('gis_neighborhood', 'path')
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(2)
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'path')
@skipUnlessDBFeature('supports_raster')
def test_add_blank_raster_field(self):
"""
Should be able to add a RasterField with blank=True.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood',
'heatmap', True, fields.RasterField)
self.assertColumnExists('gis_neighborhood', 'heatmap')
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'heatmap')
def test_remove_geom_field(self):
"""
Test the RemoveField operation with a geometry-enabled column.
"""
self.alter_gis_model(migrations.RemoveField, 'Neighborhood', 'geom')
self.assertColumnNotExists('gis_neighborhood', 'geom')
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(0)
@skipUnlessDBFeature('supports_raster')
def test_remove_raster_field(self):
"""
Test the RemoveField operation with a raster-enabled column.
"""
self.alter_gis_model(migrations.RemoveField, 'Neighborhood', 'rast')
self.assertColumnNotExists('gis_neighborhood', 'rast')
def test_create_model_spatial_index(self):
self.current_state = self.set_up_test_model()
if not self.has_spatial_indexes:
self.skipTest('No support for Spatial indexes')
self.assertSpatialIndexExists('gis_neighborhood', 'geom')
if connection.features.supports_raster:
self.assertSpatialIndexExists('gis_neighborhood', 'rast')
@property
def has_spatial_indexes(self):
if mysql:
with connection.cursor() as cursor:
return connection.introspection.supports_spatial_index(cursor, 'gis_neighborhood')
return True
|
bsd-3-clause
|
CodigoSur/cyclope
|
cyclope/apps/dynamicforms/tests.py
|
2
|
1484
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010-2013 Código Sur Sociedad Civil
# All rights reserved.
#
# This file is part of Cyclope.
#
# Cyclope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cyclope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.sites.models import Site
from cyclope.tests import ViewableTestCase
from models import DynamicForm
class DynamicFormTestCase(ViewableTestCase):
test_model = DynamicForm
def setUp(self):
super(DynamicFormTestCase, self).setUp()
form = DynamicForm.objects.create(title="An instance")
form.sites.add(Site.objects.get_current())
form.save()
form.fields.create(label="field", field_type=1, required=True, visible=True)
self.test_object = form
def test_empty_form(self):
url = u'/form/%s/' % self.test_object.slug
response = self.client.post(url, data={})
self.assertEqual(response.status_code, 200)
|
gpl-3.0
|
zencodex/csip
|
CSipSimple/jni/pjsip/sources/tests/pjsua/runall.py
|
26
|
6052
|
# $Id: runall.py 4183 2012-06-28 09:16:03Z nanang $
import os
import sys
import time
import re
import shutil
PYTHON = os.path.basename(sys.executable)
# Usage:
# runall.py [test-to-resume]
# Initialize test list
tests = []
# Excluded tests (because they fail?)
excluded_tests = [ "svn",
"pyc",
"scripts-call/150_srtp_2_1", # SRTP optional 'cannot' call SRTP mandatory
"scripts-call/150_srtp_2_3.py", # temporarily disabled until #1267 done
"scripts-call/301_ice_public_a.py", # Unreliable, proxy returns 408 sometimes
"scripts-call/301_ice_public_b.py", # Doesn't work because OpenSER modifies SDP
"scripts-pres/200_publish.py", # Ok from cmdline, error from runall.py
"scripts-media-playrec/100_resample_lf_8_11.py", # related to clock-rate 11 kHz problem
"scripts-media-playrec/100_resample_lf_8_22.py", # related to clock-rate 22 kHz problem
"scripts-media-playrec/100_resample_lf_11" # related to clock-rate 11 kHz problem
]
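# Illustrative invocations (assumed):
#   python runall.py --list
#   python runall.py --resume scripts-call/150
#   python runall.py --shell '/bin/sh -c'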
# Add basic tests
for f in os.listdir("scripts-run"):
tests.append("mod_run.py scripts-run/" + f)
# Add basic call tests
for f in os.listdir("scripts-call"):
tests.append("mod_call.py scripts-call/" + f)
# Add presence tests
for f in os.listdir("scripts-pres"):
tests.append("mod_pres.py scripts-pres/" + f)
# Add mod_sendto tests
for f in os.listdir("scripts-sendto"):
tests.append("mod_sendto.py scripts-sendto/" + f)
# Add mod_media_playrec tests
for f in os.listdir("scripts-media-playrec"):
tests.append("mod_media_playrec.py scripts-media-playrec/" + f)
# Add mod_pesq tests
for f in os.listdir("scripts-pesq"):
tests.append("mod_pesq.py scripts-pesq/" + f)
# Add recvfrom tests
for f in os.listdir("scripts-recvfrom"):
tests.append("mod_recvfrom.py scripts-recvfrom/" + f)
# Add sipp tests
for f in os.listdir("scripts-sipp"):
if f.endswith(".xml"):
tests.append("mod_sipp.py scripts-sipp/" + f)
# Filter-out excluded tests
for pat in excluded_tests:
tests = [t for t in tests if t.find(pat)==-1]
resume_script=""
shell_cmd=""
# Parse arguments
sys.argv.pop(0)
while len(sys.argv):
if sys.argv[0]=='/h' or sys.argv[0]=='-h' or sys.argv[0]=='--help' or sys.argv[0]=='/help':
sys.argv.pop(0)
print "Usage:"
print " runall.py [OPTIONS] [run.py-OPTIONS]"
print "OPTIONS:"
print " --list"
print " List the tests"
print " --list-xml"
print " List the tests as XML format suitable for ccdash"
print " --resume,-r RESUME"
print " RESUME is string/substring to specify where to resume tests."
print " If this argument is omited, tests will start from the beginning."
print " --shell,-s SHELL"
print " Run the tests with the specified SHELL cmd. This can also be"
print " used to run the test with ccdash. Example:"
print " --shell '/bin/sh -c'"
print " run.py-OPTIONS are applicable here"
sys.exit(0)
elif sys.argv[0] == '-r' or sys.argv[0] == '--resume':
if len(sys.argv) > 1:
resume_script=sys.argv[1]
sys.argv.pop(0)
sys.argv.pop(0)
else:
sys.argv.pop(0)
sys.stderr.write("Error: argument value required")
sys.exit(1)
elif sys.argv[0] == '--list':
sys.argv.pop(0)
for t in tests:
print t
sys.exit(0)
elif sys.argv[0] == '--list-xml':
sys.argv.pop(0)
for t in tests:
(mod,param) = t.split(None,2)
tname = mod[4:mod.find(".py")] + "_" + \
param[param.find("/")+1:param.rfind(".")]
c = ""
if len(sys.argv):
c = " ".join(sys.argv) + " "
tcmd = PYTHON + ' run.py ' + c + t
print '\t\t<Test name="%s" cmd="%s" wdir="tests/pjsua" />' % (tname, tcmd)
sys.exit(0)
elif sys.argv[0] == '-s' or sys.argv[0] == '--shell':
if len(sys.argv) > 1:
shell_cmd = sys.argv[1]
sys.argv.pop(0)
sys.argv.pop(0)
else:
sys.argv.pop(0)
sys.stderr.write("Error: argument value required")
sys.exit(1)
else:
# should be run.py options
break
# Generate arguments for run.py
argv_st = " ".join(sys.argv) + " "
# Init vars
fails_cnt = 0
tests_cnt = 0
# Re-create "logs" directory
try:
shutil.rmtree("logs")
except:
print "Warning: failed in removing directory 'logs'"
try:
os.mkdir("logs")
except:
print "Warning: failed in creating directory 'logs'"
# Now run the tests
total_cnt = len(tests)
for t in tests:
if resume_script!="" and t.find(resume_script)==-1:
print "Skipping " + t +".."
total_cnt = total_cnt - 1
continue
resume_script=""
cmdline = "python run.py " + argv_st + t
if shell_cmd:
cmdline = "%s '%s'" % (shell_cmd, cmdline)
t0 = time.time()
msg = "Running %d/%d: %s..." % (tests_cnt+1, total_cnt, cmdline)
sys.stdout.write(msg)
sys.stdout.flush()
ret = os.system(cmdline + " > output.log")
t1 = time.time()
if ret != 0:
dur = int(t1 - t0)
print " failed!! [" + str(dur) + "s]"
logname = re.search(".*\s+(.*)", t).group(1)
logname = re.sub("[\\\/]", "_", logname)
logname = re.sub("\.py$", ".log", logname)
logname = re.sub("\.xml$", ".log", logname)
logname = "logs/" + logname
shutil.move("output.log", logname)
print "Please see '" + logname + "' for the test log."
fails_cnt += 1
else:
dur = int(t1 - t0)
print " ok [" + str(dur) + "s]"
tests_cnt += 1
if fails_cnt == 0:
print "All " + str(tests_cnt) + " tests completed successfully"
else:
print str(tests_cnt) + " tests completed, " + str(fails_cnt) + " test(s) failed"
|
gpl-3.0
|
camradal/ansible
|
lib/ansible/modules/network/junos/junos_package.py
|
50
|
5163
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: junos_package
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Installs packages on remote devices running Junos
description:
- This module can install new and updated packages on remote
devices running Junos. The module will compare the specified
package with the one running on the remote device and install
the specified version if there is a mismatch
extends_documentation_fragment: junos
options:
src:
description:
- The I(src) argument specifies the path to the source package to be
installed on the remote device in the event of a version mismatch.
The I(src) argument can be either a localized path or a full
path to the package file to install.
required: true
default: null
aliases: ['package']
version:
description:
- The I(version) argument can be used to explicitly specify the
version of the package that should be installed on the remote
device. If the I(version) argument is not specified, then
the version is extracted from the I(src) filename.
required: false
default: null
reboot:
description:
- In order for a package to take effect, the remote device must be
restarted. When enabled, this argument will instruct the module
to reboot the device once the updated package has been installed.
If disabled or the remote package does not need to be changed,
the device will not be restarted.
required: true
default: true
choices: ['true', 'false']
no_copy:
description:
- The I(no_copy) argument is responsible for instructing the remote
device on where to install the package from. When disabled (the default),
the package is transferred to the remote device prior to installing.
required: false
default: false
choices: ['true', 'false']
force:
description:
- The I(force) argument instructs the module to bypass the package
version check and install the package identified in I(src) on
the remote device.
required: true
default: false
choices: ['true', 'false']
requirements:
- junos-eznc
notes:
- This module requires the netconf system service be enabled on
the remote device being managed
"""
EXAMPLES = """
# the required set of connection arguments have been purposely left off
# the examples for brevity
- name: install local package on remote device
junos_package:
src: junos-vsrx-12.1X46-D10.2-domestic.tgz
- name: install local package on remote device without rebooting
junos_package:
src: junos-vsrx-12.1X46-D10.2-domestic.tgz
reboot: no
"""
import ansible.module_utils.junos
from ansible.module_utils.network import NetworkModule
try:
from jnpr.junos.utils.sw import SW
HAS_SW = True
except ImportError:
HAS_SW = False
def install_package(module):
junos = SW(module.connection.device)
package = module.params['src']
no_copy = module.params['no_copy']
progress_log = lambda x, y: module.log(y)
module.log('installing package')
result = junos.install(package, progress=progress_log, no_copy=no_copy)
if not result:
module.fail_json(msg='Unable to install package on device')
if module.params['reboot']:
module.log('rebooting system')
junos.reboot()
def main():
spec = dict(
src=dict(type='path', required=True, aliases=['package']),
version=dict(),
reboot=dict(type='bool', default=True),
no_copy=dict(default=False, type='bool'),
force=dict(type='bool', default=False),
transport=dict(default='netconf', choices=['netconf'])
)
module = NetworkModule(argument_spec=spec,
supports_check_mode=True)
if not HAS_SW:
module.fail_json(msg='Missing jnpr.junos.utils.sw module')
result = dict(changed=False)
do_upgrade = module.params['force'] or False
if not module.params['force']:
has_ver = module.connection.get_facts().get('version')
wants_ver = module.params['version']
do_upgrade = has_ver != wants_ver
if do_upgrade:
if not module.check_mode:
install_package(module)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
AustP/Gitso
|
ArgsParser.py
|
1
|
6563
|
#! /usr/bin/env python
"""
Gitso - Gitso is to support others
Gitso is a utility to facilitate the connection of VNC
@author: Aaron Gerber ('gerberad') <[email protected]>
@author: Derek Buranen ('burner') <[email protected]>
@author: AustP
@copyright: 2008 - 2014
Gitso is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Gitso is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Gitso. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import signal
import os.path
import urllib
import re
class ArgsParser:
def __init__(self):
# Initialize Self.paths here.
self.paths = dict()
self.paths['resources'] = os.path.join(sys.path[0], "./")
self.paths['preferences'] = ''
self.paths['copyright'] = ''
self.paths['main'] = ''
self.paths['listen'] = False
self.paths['connect'] = ''
self.paths['list'] = []
self.paths['mode'] = ''
self.paths['low-colors'] = False
self.port = "5500"
if re.match('(?:open|free|net)bsd|linux',sys.platform):
self.paths['main'] = os.path.join(sys.path[0], '..', 'share', 'gitso')
self.paths['copyright'] = os.path.join(sys.path[0], '..', 'share', 'doc', 'gitso', 'COPYING')
elif sys.platform == "darwin":
self.paths['main'] = sys.path[0]
self.paths['copyright'] = os.path.join(sys.path[0], 'COPYING')
else:
self.paths['main'] = os.path.join(sys.path[0], '..')
self.paths['copyright'] = os.path.join(sys.path[0], '..', 'COPYING')
#for i in range(1, len(sys.argv)):
i = 1
while i < len(sys.argv):
if sys.argv[i] == '--help': # --help
self.HelpMenu()
elif sys.argv[i] == '--version': # --version
print "Gitso 0.6.3 -- Copyright 2007 - 2014 Aaron Gerber and Derek Buranen and AustP."
exit(0)
elif sys.argv[i] == '--dev': # --dev
print "Running in 'Development Mode'"
self.paths['mode'] = 'dev'
if sys.platform == "darwin":
if not os.path.exists('build/OSXvnc'):
os.popen("mkdir build; cp arch/osx/OSXvnc.tar.gz build ; cd build ; tar xvfz OSXvnc.tar.gz > /dev/null")
if not os.path.exists('build/cotvnc.app'):
os.popen("cp arch/osx/cotvnc.app.tar.gz build ; cd build ; tar xvfz cotvnc.app.tar.gz > /dev/null")
self.paths['resources'] = 'build/'
self.paths['main'] = sys.path[0]
self.paths['copyright'] = os.path.join(sys.path[0], 'COPYING')
elif sys.platform == "win32":
self.paths['copyright'] = os.path.join(sys.path[0], 'COPYING')
self.paths['main'] = os.path.join(sys.path[0])
self.paths['resources'] = 'arch/win32/'
else:
self.paths['resources'] = 'arch/linux/'
self.paths['main'] = os.path.join(sys.path[0])
self.paths['copyright'] = os.path.join(sys.path[0], 'COPYING')
elif sys.argv[i] == '--listen': # --listen
if self.paths['connect'] != "":
print "Error: --connect and --listen can not be used at the same time."
self.HelpMenu()
i = i + 1
if i >= len(sys.argv):
self.port = "5500"
else:
if sys.argv[i][:2] != "--":
self.port = sys.argv[i]
else:
self.port = "5500"
i = i - 1
self.paths['listen'] = True
elif sys.argv[i] == '--connect': # --connect
i = i + 1
if i >= len(sys.argv):
print "Error: No IP or domain name given."
self.HelpMenu()
if self.paths['listen']:
print "Error: --connect and --listen can not be used at the same time."
self.HelpMenu()
if sys.argv[i][:2] != "--":
self.paths['connect'] = sys.argv[i]
else:
print "Error: '" + sys.argv[i] + "' is not a valid host with '--connect'."
self.HelpMenu()
elif sys.argv[i] == '--low-colors': # --low-colors
self.paths['low-colors'] = True;
elif sys.argv[i] == '--list': # --list
i = i + 1
if i >= len(sys.argv):
print "Error: No List file given."
self.HelpMenu()
if sys.argv[i][:2] != "--":
self.paths['list'] = self.getHosts(sys.argv[i])
else:
print "Error: '" + sys.argv[i] + "' is not a valid list with '--list'."
self.HelpMenu()
else:
print "Error: '" + sys.argv[i] + "' is not a valid argument."
self.HelpMenu()
i = i + 1
if sys.platform == "darwin":
self.paths['preferences'] = os.path.join(os.path.expanduser("~"), "Library", "Application Support", "Gitso")
if not os.path.exists(self.paths['preferences']):
os.makedirs(self.paths['preferences'], 0700)
self.paths['preferences'] = os.path.join(self.paths['preferences'], "hosts")
elif sys.platform == "win32":
self.paths['preferences'] = os.path.join(os.getenv('USERPROFILE'), "gitso-hosts")
else:
self.paths['preferences'] = os.path.join(os.path.expanduser("~"), ".gitso-hosts")
#Help Menu
def HelpMenu(self):
print "Usage: " + os.path.basename(sys.argv[0]) + " [OPTION]"
print " OPTIONS"
print " --dev\t\tSet self.paths for development."
print " --listen {PORT}\tListen for incoming connections."
print " --connect {IP|DN}\tConnects to host (support giver)."
print " --list {URL|FILE}\tAlternative Support list."
print " --low-colors\t\tUse 8bit colors (for slow connections). Linux only."
print " --version\t\tThe current Gitso version."
print " --help\t\tThis Menu."
sys.exit(1)
def GetPaths(self):
return self.paths
def GetPort(self):
return self.port
def getHosts(self, file):
list = []
fileList = ""
if len(file) > 3:
prefix = file[:4]
else:
prefix = ""
if prefix == "www." or prefix == "http":
handle = urllib.urlopen(file)
fileList = handle.read()
handle.close()
else:
if os.path.exists(file):
handle = open(file, 'r')
fileList = handle.read()
handle.close()
parsedlist = fileList.split(",")
for i in range(0, len(parsedlist)):
if self.validHost(parsedlist[i].strip()):
list.append(parsedlist[i].strip())
return list
def validHost(self, host):
if host != "" and host.find(";") == -1 and host.find("/") == -1 and host.find("'") == -1 and host.find("`") == -1 and len(host) > 6:
return True
else:
return False
|
gpl-3.0
|
googleapis/python-bigquery
|
tests/unit/helpers.py
|
1
|
2391
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.auth.credentials
import google.cloud.bigquery.client
import google.cloud.bigquery.dataset
import mock
import pytest
def make_connection(*responses):
import google.cloud.bigquery._http
import mock
from google.cloud.exceptions import NotFound
mock_conn = mock.create_autospec(google.cloud.bigquery._http.Connection)
mock_conn.user_agent = "testing 1.2.3"
mock_conn.api_request.side_effect = list(responses) + [NotFound("miss")]
mock_conn.API_BASE_URL = "https://bigquery.googleapis.com"
mock_conn.get_api_base_url_for_mtls = mock.Mock(return_value=mock_conn.API_BASE_URL)
return mock_conn
def _to_pyarrow(value):
"""Convert Python value to pyarrow value."""
import pyarrow
return pyarrow.array([value])[0]
def make_client(project="PROJECT", **kw):
credentials = mock.Mock(spec=google.auth.credentials.Credentials)
return google.cloud.bigquery.client.Client(project, credentials, **kw)
def make_dataset_reference_string(project, ds_id):
return f"{project}.{ds_id}"
def make_dataset(project, ds_id):
return google.cloud.bigquery.dataset.Dataset(
google.cloud.bigquery.dataset.DatasetReference(project, ds_id)
)
def make_dataset_list_item(project, ds_id):
return google.cloud.bigquery.dataset.DatasetListItem(
dict(datasetReference=dict(projectId=project, datasetId=ds_id))
)
def identity(x):
return x
def get_reference(x):
return x.reference
dataset_like = [
(google.cloud.bigquery.dataset.DatasetReference, identity),
(make_dataset, identity),
(make_dataset_list_item, get_reference),
(
make_dataset_reference_string,
google.cloud.bigquery.dataset.DatasetReference.from_string,
),
]
dataset_polymorphic = pytest.mark.parametrize(
"make_dataset,get_reference", dataset_like
)
|
apache-2.0
|
0x7678/SJKernel-gn2
|
scripts/tracing/draw_functrace.py
|
14676
|
3560
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
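# An illustrative trace_pipe line accepted by the regex above (format assumed):
#   bash-4251  [001]  6306.211718: kmem_cache_alloc <-getname_flags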
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
gpl-2.0
|
houshengbo/nova_vmware_compute_driver
|
nova/scheduler/weights/ram.py
|
4
|
1604
|
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
RAM Weigher. Weigh hosts by their RAM usage.
The default is to spread instances across all hosts evenly. If you prefer
stacking, you can set the 'ram_weight_multiplier' option to a negative
number and the weighing has the opposite effect of the default.
"""
from nova.openstack.common import cfg
from nova.scheduler import weights
ram_weight_opts = [
cfg.FloatOpt('ram_weight_multiplier',
default=1.0,
help='Multiplier used for weighing ram. Negative '
'numbers mean to stack vs spread.'),
]
CONF = cfg.CONF
CONF.register_opts(ram_weight_opts)
class RAMWeigher(weights.BaseHostWeigher):
def _weight_multiplier(self):
"""Override the weight multiplier."""
return CONF.ram_weight_multiplier
def _weigh_object(self, host_state, weight_properties):
"""Higher weights win. We want spreading to be the default."""
return host_state.free_ram_mb
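# Illustrative nova.conf entry (assumed) to stack instances instead of
# spreading them:
#   [DEFAULT]
#   ram_weight_multiplier = -1.0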
|
apache-2.0
|
thanhacun/odoo
|
addons/l10n_vn/__init__.py
|
425
|
1067
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module is Copyright (c) 2009-2013 General Solutions (http://gscom.vn) All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
nvazquez/Turtlebots
|
plugins/xevents/Xlib/keysymdef/cyrillic.py
|
14
|
2540
|
XK_Serbian_dje = 0x6a1
XK_Macedonia_gje = 0x6a2
XK_Cyrillic_io = 0x6a3
XK_Ukrainian_ie = 0x6a4
XK_Ukranian_je = 0x6a4
XK_Macedonia_dse = 0x6a5
XK_Ukrainian_i = 0x6a6
XK_Ukranian_i = 0x6a6
XK_Ukrainian_yi = 0x6a7
XK_Ukranian_yi = 0x6a7
XK_Cyrillic_je = 0x6a8
XK_Serbian_je = 0x6a8
XK_Cyrillic_lje = 0x6a9
XK_Serbian_lje = 0x6a9
XK_Cyrillic_nje = 0x6aa
XK_Serbian_nje = 0x6aa
XK_Serbian_tshe = 0x6ab
XK_Macedonia_kje = 0x6ac
XK_Byelorussian_shortu = 0x6ae
XK_Cyrillic_dzhe = 0x6af
XK_Serbian_dze = 0x6af
XK_numerosign = 0x6b0
XK_Serbian_DJE = 0x6b1
XK_Macedonia_GJE = 0x6b2
XK_Cyrillic_IO = 0x6b3
XK_Ukrainian_IE = 0x6b4
XK_Ukranian_JE = 0x6b4
XK_Macedonia_DSE = 0x6b5
XK_Ukrainian_I = 0x6b6
XK_Ukranian_I = 0x6b6
XK_Ukrainian_YI = 0x6b7
XK_Ukranian_YI = 0x6b7
XK_Cyrillic_JE = 0x6b8
XK_Serbian_JE = 0x6b8
XK_Cyrillic_LJE = 0x6b9
XK_Serbian_LJE = 0x6b9
XK_Cyrillic_NJE = 0x6ba
XK_Serbian_NJE = 0x6ba
XK_Serbian_TSHE = 0x6bb
XK_Macedonia_KJE = 0x6bc
XK_Byelorussian_SHORTU = 0x6be
XK_Cyrillic_DZHE = 0x6bf
XK_Serbian_DZE = 0x6bf
XK_Cyrillic_yu = 0x6c0
XK_Cyrillic_a = 0x6c1
XK_Cyrillic_be = 0x6c2
XK_Cyrillic_tse = 0x6c3
XK_Cyrillic_de = 0x6c4
XK_Cyrillic_ie = 0x6c5
XK_Cyrillic_ef = 0x6c6
XK_Cyrillic_ghe = 0x6c7
XK_Cyrillic_ha = 0x6c8
XK_Cyrillic_i = 0x6c9
XK_Cyrillic_shorti = 0x6ca
XK_Cyrillic_ka = 0x6cb
XK_Cyrillic_el = 0x6cc
XK_Cyrillic_em = 0x6cd
XK_Cyrillic_en = 0x6ce
XK_Cyrillic_o = 0x6cf
XK_Cyrillic_pe = 0x6d0
XK_Cyrillic_ya = 0x6d1
XK_Cyrillic_er = 0x6d2
XK_Cyrillic_es = 0x6d3
XK_Cyrillic_te = 0x6d4
XK_Cyrillic_u = 0x6d5
XK_Cyrillic_zhe = 0x6d6
XK_Cyrillic_ve = 0x6d7
XK_Cyrillic_softsign = 0x6d8
XK_Cyrillic_yeru = 0x6d9
XK_Cyrillic_ze = 0x6da
XK_Cyrillic_sha = 0x6db
XK_Cyrillic_e = 0x6dc
XK_Cyrillic_shcha = 0x6dd
XK_Cyrillic_che = 0x6de
XK_Cyrillic_hardsign = 0x6df
XK_Cyrillic_YU = 0x6e0
XK_Cyrillic_A = 0x6e1
XK_Cyrillic_BE = 0x6e2
XK_Cyrillic_TSE = 0x6e3
XK_Cyrillic_DE = 0x6e4
XK_Cyrillic_IE = 0x6e5
XK_Cyrillic_EF = 0x6e6
XK_Cyrillic_GHE = 0x6e7
XK_Cyrillic_HA = 0x6e8
XK_Cyrillic_I = 0x6e9
XK_Cyrillic_SHORTI = 0x6ea
XK_Cyrillic_KA = 0x6eb
XK_Cyrillic_EL = 0x6ec
XK_Cyrillic_EM = 0x6ed
XK_Cyrillic_EN = 0x6ee
XK_Cyrillic_O = 0x6ef
XK_Cyrillic_PE = 0x6f0
XK_Cyrillic_YA = 0x6f1
XK_Cyrillic_ER = 0x6f2
XK_Cyrillic_ES = 0x6f3
XK_Cyrillic_TE = 0x6f4
XK_Cyrillic_U = 0x6f5
XK_Cyrillic_ZHE = 0x6f6
XK_Cyrillic_VE = 0x6f7
XK_Cyrillic_SOFTSIGN = 0x6f8
XK_Cyrillic_YERU = 0x6f9
XK_Cyrillic_ZE = 0x6fa
XK_Cyrillic_SHA = 0x6fb
XK_Cyrillic_E = 0x6fc
XK_Cyrillic_SHCHA = 0x6fd
XK_Cyrillic_CHE = 0x6fe
XK_Cyrillic_HARDSIGN = 0x6ff
|
mit
|
openthread/openthread
|
tests/scripts/thread-cert/test_route_table.py
|
3
|
2943
|
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import thread_cert
LEADER = 1
ROUTER1 = 2
ROUTER2 = 3
# Topology:
# LEADER -- ROUTER1 -- ROUTER2
#
class TestRouteTable(thread_cert.TestCase):
SUPPORT_NCP = False
TOPOLOGY = {
LEADER: {
'mode': 'rdn',
'allowlist': [ROUTER1]
},
ROUTER1: {
'mode': 'rdn',
'allowlist': [LEADER, ROUTER2]
},
ROUTER2: {
'mode': 'rdn',
'allowlist': [ROUTER1]
},
}
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[ROUTER2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
self.simulator.go(100)
router_ids = set(_node.get_router_id() for _node in self.nodes.values())
for _node in self.nodes.values():
self.assertEqual(set(_node.router_list()), router_ids)
for _node in self.nodes.values():
router_table = _node.router_table()
self.assertEqual(set(router_table), router_ids)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
marios-zindilis/musicbrainz-django-models
|
musicbrainz_django_models/models/l_release_group_url.py
|
1
|
2149
|
"""
.. module:: l_release_group_url
The **L Release Group Url** Model.
PostgreSQL Definition
---------------------
The :code:`l_release_group_url` table is defined in the MusicBrainz Server as:
.. code-block:: sql
CREATE TABLE l_release_group_url ( -- replicate
id SERIAL,
link INTEGER NOT NULL, -- references link.id
entity0 INTEGER NOT NULL, -- references release_group.id
entity1 INTEGER NOT NULL, -- references url.id
edits_pending INTEGER NOT NULL DEFAULT 0 CHECK (edits_pending >= 0),
last_updated TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
link_order INTEGER NOT NULL DEFAULT 0 CHECK (link_order >= 0),
entity0_credit TEXT NOT NULL DEFAULT '',
entity1_credit TEXT NOT NULL DEFAULT ''
);
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class l_release_group_url(models.Model):
"""
Not all parameters are listed here, only those that present some interest
in their Django implementation.
:param int edits_pending: the MusicBrainz Server uses a PostgreSQL `check`
to validate that the value is a positive integer. In Django, this is
done with `models.PositiveIntegerField()`.
:param int link_order: the MusicBrainz Server uses a PostgreSQL `check`
to validate that the value is a positive integer. In Django, this is
done with `models.PositiveIntegerField()`.
"""
id = models.AutoField(primary_key=True)
link = models.ForeignKey('link')
entity0 = models.ForeignKey('release_group', related_name='links_to_url')
entity1 = models.ForeignKey('url')
edits_pending = models.PositiveIntegerField(default=0)
last_updated = models.DateTimeField(auto_now=True)
link_order = models.PositiveIntegerField(default=0)
    entity0_credit = models.TextField(default='')
    entity1_credit = models.TextField(default='')
def __str__(self):
return 'L Release Group Url'
class Meta:
db_table = 'l_release_group_url'
|
gpl-2.0
|
copasi/COPASI
|
copasi/bindings/python/examples/add_optitems.py
|
2
|
3275
|
#!/bin/env python
# This script reads a COPASI file, as well as a tab delimited file, and creates optimization items (that is
# parameters that COPASI might vary during an optimization / parameter estimation task) for the entries contained
# therein. The resulting file will be written into an output file that is provided as final argument.
#
# The tab delimited file should have the following structure:
#
# name min_value max_value start_value
#
# where 'name' is a display name as used in COPASI, 'min_value' and 'max_value' are strings representing the lower
# and upper bound of the range and 'start_value' is the start value that the next optimization should take. All double
# values are to be written in either floating point or scientific notation, using a dot as decimal separator.
#
# Examples for display names are:
#
# (R1).k1 -> representing a local parameter 'k1' in a Reaction named 'R1'
# [X]_0 -> for the initial concentration of a species 'X'
# Values[scale].InitialValue -> the initial value of a global parameter 'scale'
#
#
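# A minimal settings file in this format might look like the following
# (names taken from the examples above, values purely hypothetical; the
# four columns are separated by tabs):
#
#   (R1).k1                        1e-6    1e6    0.1
#   [X]_0                          0.001   10     1.0
#   Values[scale].InitialValue     0.01    100    1.0
#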
import COPASI
import sys
import csv
dm = COPASI.CRootContainer.addDatamodel()
assert (isinstance(dm, COPASI.CDataModel))
print("using COPASI: %s" % COPASI.CVersion.VERSION.getVersion())
def parse_settings(file_name):
# simple helper function parsing the csv file and returning a list of items found
opt_items = []
with open(file_name) as tab_data:
data_reader = csv.reader(tab_data, delimiter='\t')
for entry in data_reader:
opt_items.append(entry)
return opt_items
def add_optitems_from_files(input_cps_file, settings_file, output_file):
global dm
# read COPASI file
if not dm.loadModel(input_cps_file):
print("Couldn't read COPASI file")
raise ValueError(COPASI.CCopasiMessage_getAllMessageText())
# read settings file
settings = parse_settings(settings_file)
if len(settings) == 0:
# nothing to do
return
# get fit task & problem
task = dm.getTask('Parameter Estimation') # could also be dm.getTask('Optimization') for an optimization problem
problem = task.getProblem()
# add items
for entry in settings:
p = dm.findObjectByDisplayName(entry[0]) # we locate the parameter in the currently loaded file
if p is None: # skip over non-existing items
continue
item = problem.addOptItem(p.getCN()) # if we found it, we can get its internal identifier and create the item
item.setLowerBound(COPASI.CCommonName(entry[1])) # set the lower
item.setUpperBound(COPASI.CCommonName(entry[2])) # and upper bound
item.setStartValue(float(entry[3])) # as well as the initial value
print('...added optitem for %s in range(%s, %s) and start value %s' % (entry[0], entry[1], entry[2], entry[3]))
# finally save result file
dm.saveModel(output_file, True)
if __name__ == "__main__":
num_args = len(sys.argv)
if num_args < 4:
print ("usage: add_optitems <in cps file> <tab settings file> <out cps_file>")
sys.exit(1)
input_file = sys.argv[1]
tab_settings = sys.argv[2]
out_file = sys.argv[3]
add_optitems_from_files(input_file, tab_settings, out_file)
|
artistic-2.0
|
HarryElSuzio/ShaniXBMCWork
|
other/jsunpackMM.py
|
12
|
5644
|
"""
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
Updated by Shani_08 for muchmovies, here they have done the double encrypt.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
def unpack(sJavascript, iteration=1, totaliterations=2):
print 'iteration',iteration
if sJavascript.startswith('var _0xcb8a='):
aSplit=sJavascript.split('var _0xcb8a=')
ss="myarray="+aSplit[1].split("eval(")[0]
exec(ss)
a1=62
c1=int(aSplit[1].split(",62,")[1].split(',')[0])
p1=myarray[0]
k1=myarray[3]
with open('temp file'+str(iteration)+'.js', "wb") as filewriter:
filewriter.write(str(k1))
#aa=1/0
else:
aSplit = sJavascript.split("rn p}('")
p1,a1,c1,k1=('','0','0','')
ss="p1,a1,c1,k1=('"+aSplit[1].split(".spli")[0]+')'
exec(ss)
k1=k1.split('|')
aSplit = aSplit[1].split("))'")
# print ' p array is ',len(aSplit)
# print len(aSplit )
#p=str(aSplit[0]+'))')#.replace("\\","")#.replace('\\\\','\\')
#print aSplit[1]
#aSplit = aSplit[1].split(",")
#print aSplit[0]
#a = int(aSplit[1])
#c = int(aSplit[2])
#k = aSplit[3].split(".")[0].replace("'", '').split('|')
#a=int(a)
#c=int(c)
#p=p.replace('\\', '')
# print 'p val is ',p[0:100],'............',p[-100:],len(p)
# print 'p1 val is ',p1[0:100],'............',p1[-100:],len(p1)
#print a,a1
#print c,a1
#print 'k val is ',k[-10:],len(k)
# print 'k1 val is ',k1[-10:],len(k1)
e = ''
d = ''#32823
#sUnpacked = str(__unpack(p, a, c, k, e, d))
sUnpacked1 = str(__unpack(p1, a1, c1, k1, e, d,iteration))
#print sUnpacked[:200]+'....'+sUnpacked[-100:], len(sUnpacked)
# print sUnpacked1[:200]+'....'+sUnpacked1[-100:], len(sUnpacked1)
#exec('sUnpacked1="'+sUnpacked1+'"')
if iteration>=totaliterations:
# print 'final res',sUnpacked1[:200]+'....'+sUnpacked1[-100:], len(sUnpacked1)
return sUnpacked1#.replace('\\\\', '\\')
else:
# print 'final res for this iteration is',iteration
return unpack(sUnpacked1,iteration+1)#.replace('\\', ''),iteration)#.replace('\\', '');#unpack(sUnpacked.replace('\\', ''))
def __unpack(p, a, c, k, e, d, iteration):
with open('before file'+str(iteration)+'.js', "wb") as filewriter:
filewriter.write(str(p))
while (c > 1):
c = c -1
if (k[c]):
aa=str(__itoaNew(c, a))
#re.sub('\\b' + aa +'\\b', k[c], p) THIS IS Bloody slow!
p=findAndReplaceWord(p,aa,k[c])
with open('after file'+str(iteration)+'.js', "wb") as filewriter:
filewriter.write(str(p))
return p
#
#function equivalent to re.sub('\\b' + aa + '\\b', k[c], p)
def findAndReplaceWord(source_str, word_to_find,replace_with):
splits=None
splits=source_str.split(word_to_find)
if len(splits)>1:
new_string=[]
current_index=0
for current_split in splits:
#print 'here',i
new_string.append(current_split)
            val = word_to_find  # by default assume it was wrong to split
#if its first one and item is blank then check next item is valid or not
if current_index==len(splits)-1:
val='' # last one nothing to append normally
else:
if len(current_split)==0: #if blank check next one with current split value
if ( len(splits[current_index+1])==0 and word_to_find[0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_') or (len(splits[current_index+1])>0 and splits[current_index+1][0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_'):# first just just check next
val=replace_with
#not blank, then check current endvalue and next first value
else:
if (splits[current_index][-1].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_') and (( len(splits[current_index+1])==0 and word_to_find[0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_') or (len(splits[current_index+1])>0 and splits[current_index+1][0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_')):# first just just check next
val=replace_with
new_string.append(val)
current_index+=1
#aaaa=1/0
source_str=''.join(new_string)
return source_str
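# A worked example of the above (traced against the regex version):
# findAndReplaceWord('a+ab', 'a', 'X') returns 'X+ab', the same result as
# re.sub(r'\ba\b', 'X', 'a+ab') -- only whole-word occurrences are replaced.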
def __itoa(num, radix):
# print 'num red',num, radix
result = ""
if num==0: return '0'
while num > 0:
result = "0123456789abcdefghijklmnopqrstuvwxyz"[num % radix] + result
num /= radix
return result
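# For example, __itoa(255, 16) returns 'ff'; digit characters are drawn
# from '0123456789abcdefghijklmnopqrstuvwxyz', so radix must be <= 36 here.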
def __itoaNew(cc, a):
aa="" if cc < a else __itoaNew(int(cc / a),a)
cc = (cc % a)
bb=chr(cc + 29) if cc> 35 else str(__itoa(cc,36))
return aa+bb
|
gpl-2.0
|
Cynerd/linux-conf-perf
|
scripts/database.py
|
1
|
4605
|
import os
import datetime
import postgresql
import collections
import utils
import exceptions
from conf import conf
from conf import sf
def __git_describe__():
return utils.callsubprocess('git_describe',
conf.git_describe_cmd, False, True)[0]
def __git_commit__():
return utils.callsubprocess('git_rev_parse',
conf.git_commit_cmd, False, True)[0]
def __timestamp__():
return datetime.datetime.now().strftime('%y-%m-%d-%H-%M-%S')
Config = collections.namedtuple('Config', 'id hash config') # Named tuple for configuration
Measure = collections.namedtuple('Measure', 'id conf_id output value') # Named tuple for measurement
class database:
"Class used for accessing PostgreSQL project database."
def __init__(self):
self.db = postgresql.open(database = conf.db_database,
user = conf.db_user,
password = conf.db_password,
host = conf.db_host,
port = conf.db_port
)
# check if tables are present
tables = ('toolsgit', 'configurations', 'measure')
for tab in tables:
val = self.db.prepare("""SELECT COUNT(*) FROM pg_class
WHERE relname = $1""")(tab)[0][0]
if val < 1:
raise exceptions.DatabaseUninitialized()
def check_toolsgit(self):
"Return id of toolsgit row. If missing, it is inserted"
ds = __git_describe__()
cm = __git_commit__()
ps = self.db.prepare("""SELECT id FROM toolsgit
WHERE git_describe = $1 AND git_commit = $2
""")
id = ps(ds, cm)
if id:
return id[0][0]
ps = self.db.prepare("""INSERT INTO toolsgit
(git_describe, git_commit)
VALUES
($1, $2);
""")
ps(ds, cm)
return self.check_toolsgit()
def check_linuxgit(self):
"Return id of linuxgit row. If missing, it is inserted."
wd = os.getcwd()
os.chdir(sf(conf.linux_sources))
ds = __git_describe__()
cm = __git_commit__()
os.chdir(wd)
ps = self.db.prepare("""SELECT id FROM linuxgit
WHERE git_describe = $1 AND git_commit = $2
""")
id = ps(ds, cm)
if id:
return id[0][0]
ps = self.db.prepare("""INSERT INTO linuxgit
(git_describe, git_commit)
VALUES
($1, $2);
""")
ps(ds, cm)
return self.check_linuxgit()
def add_configuration(self, hash, txtconfig, generator):
"Add configuration to database."
ps = self.db.prepare("""INSERT INTO configurations
(hash, config, gtime, toolgit, linuxgit, generator)
VALUES
($1, $2, $3, $4, $5, $6);
""")
gt = self.check_toolsgit()
lgt = self.check_linuxgit()
tm = datetime.datetime.now()
ps(hash, '\n'.join(txtconfig), tm, gt, lgt, generator)
def get_configration(self, hash):
"Return configration id for inserted hash."
ps = self.db.prepare("""SELECT id, config FROM configurations
WHERE hash = $1""")
rtn = []
for dt in ps(hash):
rtn.append(Config(dt[0], hash, dt[1].split('\n')))
return rtn
def add_measure(self, output, result, conf_id, value = None):
"Add measurement."
ps = self.db.prepare("""INSERT INTO measure
(conf, output, value, mtime, toolgit,
linuxgit, measurement, result)
VALUES
($1, $2, $3, $4, $5, $6, $7, $8);
""")
gt = self.check_toolsgit()
lgt = self.check_linuxgit()
tm = datetime.datetime.now()
ps(conf_id, output, value, tm, gt, lgt, conf.measure_identifier, result)
def update_measure(self, measure_id, value):
"Update measured value"
ps = self.db.prepare("""UPDATE measure SET
(value) = ($2)
WHERE
id = $1;
""")
ps(measure_id, value)
def get_measures(self, conf_id):
"Get measures for configuration with conf_id id"
ps = self.db.prepare("""SELECT id, output, value FROM measure
WHERE conf = $1;
""")
rtn = []
for dt in ps(conf_id):
rtn.append(Measure(dt[0], conf_id, dt[1], dt[2]))
return rtn
def get_unmeasured(self):
"Returns list of all unmeasured configurations."
ps = self.db.prepare("""SELECT id, hash, config FROM configurations
WHERE id NOT IN
(SELECT conf FROM measure)
""")
rtn = []
for dt in ps():
rtn.append(Config(dt[0], dt[1], dt[2].split('\n')))
return rtn
def add_configsort(self, configopt):
"Add configuration option to sorted list"
ps = self.db.prepare("""INSERT INTO configopt
(configopt) VALUES ($1)
""")
ps(configopt)
def get_configsort(self):
"Returns sorted list of all configuration options"
ps = self.db.prepare("""SELECT id, configopt FROM configopt
ORDER BY id ASC
""")
rtn = []
itms = ps()
for id, config in itms:
rtn.append(config)
return rtn
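# A rough usage sketch (assumes conf points at an initialized database):
#
#   db = database()
#   for cfg in db.get_unmeasured():
#       print(cfg.hash)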
|
gpl-2.0
|
AkA84/edx-platform
|
common/lib/xmodule/xmodule/tests/test_util_duedate.py
|
244
|
1645
|
"""
Tests for extended due date utilities.
"""
import mock
import unittest
from ..util import duedate
class TestGetExtendedDueDate(unittest.TestCase):
"""
Test `get_extended_due_date` function.
"""
def call_fut(self, node):
"""
Call function under test.
"""
fut = duedate.get_extended_due_date
return fut(node)
def test_no_due_date(self):
"""
Test no due date.
"""
node = object()
self.assertEqual(self.call_fut(node), None)
def test_due_date_no_extension(self):
"""
Test due date without extension.
"""
node = mock.Mock(due=1, extended_due=None)
self.assertEqual(self.call_fut(node), 1)
def test_due_date_with_extension(self):
"""
Test due date with extension.
"""
node = mock.Mock(due=1, extended_due=2)
self.assertEqual(self.call_fut(node), 2)
def test_due_date_extension_is_earlier(self):
"""
Test due date with extension, but due date is later than extension.
"""
node = mock.Mock(due=2, extended_due=1)
self.assertEqual(self.call_fut(node), 2)
def test_extension_without_due_date(self):
"""
Test non-sensical extension without due date.
"""
node = mock.Mock(due=None, extended_due=1)
self.assertEqual(self.call_fut(node), None)
def test_due_date_with_extension_dict(self):
"""
Test due date with extension when node is a dict.
"""
node = {'due': 1, 'extended_due': 2}
self.assertEqual(self.call_fut(node), 2)
|
agpl-3.0
|
gregoil/rotest
|
src/rotest/cli/discover.py
|
1
|
2610
|
# pylint: disable=protected-access
from __future__ import absolute_import
import os
import unittest
from fnmatch import fnmatch
from collections import OrderedDict
import py
from rotest.common import core_log
from rotest.core.case import TestCase
from rotest.core.flow import TestFlow
from rotest.common.config import DISCOVERER_BLACKLIST
WHITE_LIST = ["*.py"]
def is_test_class(test):
"""Return if the provided object is a runnable test.
Args:
test (object): the object to be inspected.
Returns:
        bool: whether it is a TestCase or TestFlow subclass that should be run.
"""
return (isinstance(test, type) and
issubclass(test, (TestCase, TestFlow)) and
test not in (TestFlow, TestCase) and
("__test__" not in test.__dict__ or getattr(test, "__test__")))
def get_test_files(paths):
"""Return test files that match whitelist and blacklist patterns.
Args:
        paths (iterable): list of filesystem paths to be searched recursively.
Yields:
str: path of test file.
"""
for path in paths:
path = os.path.abspath(path)
filename = os.path.basename(path)
if any(fnmatch(path, pattern) or fnmatch(filename, pattern)
for pattern in DISCOVERER_BLACKLIST):
continue
if os.path.isfile(path):
if not any(fnmatch(filename, pattern) for pattern in WHITE_LIST):
continue
yield path
else:
sub_files = (os.path.join(path, filename)
for filename in os.listdir(path))
for sub_file in get_test_files(sub_files):
yield sub_file
def discover_tests_under_paths(paths):
"""Search recursively for every test class under the given paths.
Args:
paths (iterable): list of filesystem paths to be searched.
Returns:
set: all discovered tests.
"""
loader = unittest.TestLoader()
loader.suiteClass = list
loader.loadTestsFromTestCase = lambda test: test
tests = OrderedDict()
for path in get_test_files(paths):
core_log.debug("Discovering tests in %s", path)
module = py.path.local(path).pyimport()
tests_discovered = loader.loadTestsFromModule(module)
tests_discovered = {test: test
for test in tests_discovered
if is_test_class(test)}
core_log.debug("Discovered %d tests in %s",
len(tests_discovered), path)
tests.update(tests_discovered)
return list(tests.values())
|
mit
|
testmana2/profitpy
|
profit/lib/breadfan.py
|
18
|
8109
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase
# Distributed under the terms of the GNU General Public License v2
# Author: Troy Melhase <[email protected]>
ffnet = NN = inf = None
try:
    from ffnet import ffnet, mlgraph, loadnet, savenet
from scipy import inf
except (ImportError, ):
from profit.lib.bpnn import NN
class NeuralNetwork(object):
def __init__(self):
self.network = None
class SimpleNeuralNetwork(NeuralNetwork):
train_meta = {
'backprop':{
'name':'Backprop',
'desc':'Simple backprop.',
'method':'train',
'params':{
'iterations':
{'type':int, 'default':1000, 'help':'Iterations'},
'N':
{'type':float, 'default':0.5, 'help':'Learning rate'},
'M':
{'type':float, 'default':0.1, 'help':'momentum factor'},
}
},
}
def __init__(self):
self.network = NN(ni=3, nh=10, no=1)
def save(self, *args):
pass
def load(self, *args):
pass
def inputs(self):
return self.network.ni - 1
def hidden(self):
return self.network.nh
def outno(self):
return self.network.no
@property
def trained(self):
return 'backprop' if bool(self.network.ao) else ''
class FfnetNeuralNetwork(NeuralNetwork):
train_meta = {
'momentum':{
'name':'Backprop with momentum',
'desc':'Simple backpropagation training with momentum.',
'method':'train_momentum',
'params':{
'eta':
{'type':float, 'default':0.2,
'help':'descent scaling parameter'},
'momentum':
{'type':float, 'default':0.8,
'help':'momentum coefficient'},
'maxiter':
{'type':int, 'default':10000,
'help':'the maximum number of iterations'},
'disp':
{'type':int, 'default':0,
'help':'print convergence message if non-zero'},
}
},
'rprop':{
'name':'Rprop',
'desc':'Rprop training algorithm.',
'method':'train_rprop',
'params':{
'a':
{'type':float, 'default':1.2,
'help':'training step increasing parameter'},
'b':
{'type':float, 'default':0.5,
'help':'training step decreasing parameter'},
'mimin':
{'type':float, 'default':0.000001,
'help':'minimum training step'},
'mimax':
{'type':float, 'default':50.0,
'help':'maximum training step'},
'xmi':
{'type':float, 'default':0.1,
'help':'initial weight scalar; vector not supported'},
'maxiter':
{'type':int, 'default':10000,
'help':'the maximum number of iterations'},
'disp':
{'type':int, 'default':0,
'help':'print convergence message if non-zero'},
}
},
'genetic':{
'name':'Genetic',
'desc':'Global weights optimization with genetic algorithm.',
'method':'train_genetic',
'params':{
'lower':
{'type':float, 'default':-25.0,
'help':'lower bound of weights values'},
'upper':
{'type':float, 'default':25.0,
'help':'upper bound of weights values'},
'individuals':
{'type':int, 'default':20,
'help':'number of individuals in a population'},
'generations':
{'type':int, 'default':500,
'help':'number of generations over which solution is to evolve'},
'crossover':
{'type':float, 'default':0.85,
'help':'crossover probability; must be <= 1.0'},
'mutation':
{'type':int, 'default':2,
'help':'', 'choices':[(1, 'one-point mutation, fixed rate'),
(2, 'one-point, adjustable rate based on fitness'),
(3, 'one-point, adjustable rate based on distance'),
(4, 'one-point+creep, fixed rate'),
(5, 'one-point+creep, adjustable rate based on fitness'),
(6, 'one-point+creep, adjustable rate based on distance'),
]},
'initrate':
{'type':float, 'default':0.005,
'help':'initial mutation rate; should be small; mutation rate is the probability that any one gene locus will mutate in any one generation.'},
'minrate':
{'type':float, 'default':0.0005,
'help':'minimum mutation rate; must be >= 0.0'},
'maxrate':
{'type':float, 'default':0.25, 'min':0, 'max':1.0,
'help':'maximum mutation rate; must be <= 1.0'},
'fitnessdiff':
{'type':float, 'default':1.0, 'min':0, 'max':1.0, 'min_special':'none',
'help':'relative fitness differential'},
'reproduction':
{'type':int, 'default':3,
'help':'reproduction plan', 'choices':[(1, 'Full generational replacement'),
(2, 'Steady-state-replace-random'),
(3, 'Steady-state-replace-worst')]},
'elitism':
{'type':int, 'default':0, 'checkbox':True,
'help':'elitism flag; (Applies only to reproduction plans 1 and 2)'},
'verbosity':
{'type':int, 'default':0,
'help':'printed output', 'choices':[(0, 'None'),
(1, 'Minimal'),
(2, 'Verbose')]},
}
},
'cg':{
'name':'Conjugate Gradient',
'desc':'nonlinear conjugate gradient algorithm of Polak and Ribiere.',
'method':'train_cg',
'params':{
'gtol':
{'type':float, 'default':0.00001,
'help':'stop when norm of gradient is less than gtol'},
'norm':
{'type':float, 'default':inf,
'help':'order of vector norm to use', 'min_special':'inf', },
'maxiter':
{'type':int, 'default':10000,
'help':'the maximum number of iterations'},
'disp':
{'type':int, 'default':1,
'help':'print convergence message if non-zero'},
},
},
## add support for train_bfgs and train_tnc here
}
def __init__(self, con=(2,2,1)):
self.network = ffnet(mlgraph(con))
def save(self, filename):
savenet(self.network, filename)
def load(self, filename):
self.network = loadnet(filename)
def inputs(self):
return len(self.network.inno)
def hidden(self):
return len(self.network.hidno)
def outno(self):
return len(self.network.outno)
@property
def trained(self):
return self.network.trained
def make_network():
if ffnet:
return FfnetNeuralNetwork()
#elif ...
else:
return SimpleNeuralNetwork()
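# Rough usage sketch: make_network() returns the ffnet-backed implementation
# when the ffnet/scipy imports above succeeded, otherwise the bpnn fallback.
# Both expose the same accessors:
#
#   net = make_network()
#   print net.inputs(), net.hidden(), net.outno()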
|
gpl-2.0
|
mydongistiny/external_chromium_org
|
tools/valgrind/memcheck/PRESUBMIT.py
|
33
|
3382
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
import re
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
  sup_regex = re.compile(r'suppressions.*\.txt$')
suppressions = {}
errors = []
check_for_memcheck = False
# skip_next_line has 3 possible values:
# - False: don't skip the next line.
# - 'skip_suppression_name': the next line is a suppression name, skip.
# - 'skip_param': the next line is a system call parameter error, skip.
skip_next_line = False
for f in filter(lambda x: sup_regex.search(x.LocalPath()),
input_api.AffectedFiles()):
for line, line_num in zip(f.NewContents(),
xrange(1, len(f.NewContents()) + 1)):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
if skip_next_line == 'skip_suppression_name':
if 'insert_a_suppression_name_here' in line:
errors.append('"insert_a_suppression_name_here" is not a valid '
'suppression name')
          if line in suppressions:
            if f.LocalPath() == suppressions[line][0]:
errors.append('suppression with name "%s" at %s line %s '
'has already been defined at line %s' %
(line, f.LocalPath(), line_num,
suppressions[line][1]))
else:
errors.append('suppression with name "%s" at %s line %s '
'has already been defined at %s line %s' %
(line, f.LocalPath(), line_num,
suppressions[line][0], suppressions[line][1]))
else:
            suppressions[line] = (f.LocalPath(), line_num)
            check_for_memcheck = True
skip_next_line = False
continue
if check_for_memcheck:
if not line.startswith('Memcheck:'):
errors.append('"%s" should be "Memcheck:..." in %s line %s' %
(line, f.LocalPath(), line_num))
        check_for_memcheck = False
if line == '{':
skip_next_line = 'skip_suppression_name'
continue
if line == "Memcheck:Param":
skip_next_line = 'skip_param'
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line.startswith('Memcheck:') or line == '}' or
line == '...'):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
def GetPreferredTryMasters(project, change):
return {
'tryserver.chromium.linux': {
'linux_valgrind': set(['defaulttests']),
},
'tryserver.chromium.mac': {
'mac_valgrind': set(['defaulttests']),
}
}
|
bsd-3-clause
|
rickhurst/Django-non-rel-blog
|
django/contrib/gis/db/backends/util.py
|
377
|
1749
|
"""
A collection of utility routines and classes used by the spatial
backends.
"""
def gqn(val):
"""
The geographic quote name function; used for quoting tables and
geometries (they use single rather than the double quotes of the
backend quotename function).
"""
if isinstance(val, basestring):
if isinstance(val, unicode): val = val.encode('ascii')
return "'%s'" % val
else:
return str(val)
class SpatialOperation(object):
"""
Base class for generating spatial SQL.
"""
sql_template = '%(geo_col)s %(operator)s %(geometry)s'
def __init__(self, function='', operator='', result='', **kwargs):
self.function = function
self.operator = operator
self.result = result
self.extra = kwargs
def as_sql(self, geo_col, geometry='%s'):
return self.sql_template % self.params(geo_col, geometry)
def params(self, geo_col, geometry):
params = {'function' : self.function,
'geo_col' : geo_col,
'geometry' : geometry,
'operator' : self.operator,
'result' : self.result,
}
params.update(self.extra)
return params
class SpatialFunction(SpatialOperation):
"""
Base class for generating spatial SQL related to a function.
"""
sql_template = '%(function)s(%(geo_col)s, %(geometry)s)'
def __init__(self, func, result='', operator='', **kwargs):
# Getting the function prefix.
default = {'function' : func,
'operator' : operator,
'result' : result
}
kwargs.update(default)
super(SpatialFunction, self).__init__(**kwargs)
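# For example (hypothetical function and column names),
# SpatialFunction('ST_Contains').as_sql('"geom"') renders the fragment
# 'ST_Contains("geom", %s)', leaving the geometry as a query placeholder.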
|
bsd-3-clause
|
SerpentCS/odoo
|
addons/lunch/wizard/lunch_validation.py
|
440
|
1296
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_validation(osv.Model):
""" lunch validation """
_name = 'lunch.validation'
_description = 'lunch validation for order'
    def confirm(self, cr, uid, ids, context=None):
return self.pool.get('lunch.order.line').confirm(cr, uid, ids, context=context)
|
agpl-3.0
|
7kbird/chrome
|
build/android/pylib/symbols/elf_symbolizer_unittest.py
|
108
|
5692
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import logging
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(__file__))
import elf_symbolizer
import mock_addr2line
_MOCK_A2L_PATH = os.path.join(os.path.dirname(mock_addr2line.__file__),
'mock_addr2line')
_INCOMPLETE_MOCK_ADDR = 1024 * 1024
_UNKNOWN_MOCK_ADDR = 2 * 1024 * 1024
_INLINE_MOCK_ADDR = 3 * 1024 * 1024
class ELFSymbolizerTest(unittest.TestCase):
def setUp(self):
self._callback = functools.partial(
ELFSymbolizerTest._SymbolizeCallback, self)
self._resolved_addresses = set()
# Mute warnings, we expect them due to the crash/hang tests.
logging.getLogger().setLevel(logging.ERROR)
def testParallelism1(self):
self._RunTest(max_concurrent_jobs=1, num_symbols=100)
def testParallelism4(self):
self._RunTest(max_concurrent_jobs=4, num_symbols=100)
def testParallelism8(self):
self._RunTest(max_concurrent_jobs=8, num_symbols=100)
def testCrash(self):
os.environ['MOCK_A2L_CRASH_EVERY'] = '99'
self._RunTest(max_concurrent_jobs=1, num_symbols=100)
os.environ['MOCK_A2L_CRASH_EVERY'] = '0'
def testHang(self):
os.environ['MOCK_A2L_HANG_EVERY'] = '99'
self._RunTest(max_concurrent_jobs=1, num_symbols=100)
os.environ['MOCK_A2L_HANG_EVERY'] = '0'
def testInlines(self):
"""Stimulate the inline processing logic."""
symbolizer = elf_symbolizer.ELFSymbolizer(
elf_file_path='/path/doesnt/matter/mock_lib1.so',
addr2line_path=_MOCK_A2L_PATH,
callback=self._callback,
inlines=True,
max_concurrent_jobs=4)
for addr in xrange(1000):
exp_inline = False
exp_unknown = False
# First 100 addresses with inlines.
if addr < 100:
addr += _INLINE_MOCK_ADDR
exp_inline = True
# Followed by 100 without inlines.
elif addr < 200:
pass
# Followed by 100 interleaved inlines and not inlines.
elif addr < 300:
if addr & 1:
addr += _INLINE_MOCK_ADDR
exp_inline = True
      # Followed by 100 interleaved inlines and unknown.
elif addr < 400:
if addr & 1:
addr += _INLINE_MOCK_ADDR
exp_inline = True
else:
addr += _UNKNOWN_MOCK_ADDR
exp_unknown = True
exp_name = 'mock_sym_for_addr_%d' % addr if not exp_unknown else None
exp_source_path = 'mock_src/mock_lib1.so.c' if not exp_unknown else None
exp_source_line = addr if not exp_unknown else None
cb_arg = (addr, exp_name, exp_source_path, exp_source_line, exp_inline)
symbolizer.SymbolizeAsync(addr, cb_arg)
symbolizer.Join()
def testIncompleteSyminfo(self):
"""Stimulate the symbol-not-resolved logic."""
symbolizer = elf_symbolizer.ELFSymbolizer(
elf_file_path='/path/doesnt/matter/mock_lib1.so',
addr2line_path=_MOCK_A2L_PATH,
callback=self._callback,
max_concurrent_jobs=1)
# Test symbols with valid name but incomplete path.
addr = _INCOMPLETE_MOCK_ADDR
exp_name = 'mock_sym_for_addr_%d' % addr
exp_source_path = None
exp_source_line = None
cb_arg = (addr, exp_name, exp_source_path, exp_source_line, False)
symbolizer.SymbolizeAsync(addr, cb_arg)
# Test symbols with no name or sym info.
addr = _UNKNOWN_MOCK_ADDR
exp_name = None
exp_source_path = None
exp_source_line = None
cb_arg = (addr, exp_name, exp_source_path, exp_source_line, False)
symbolizer.SymbolizeAsync(addr, cb_arg)
symbolizer.Join()
def _RunTest(self, max_concurrent_jobs, num_symbols):
symbolizer = elf_symbolizer.ELFSymbolizer(
elf_file_path='/path/doesnt/matter/mock_lib1.so',
addr2line_path=_MOCK_A2L_PATH,
callback=self._callback,
max_concurrent_jobs=max_concurrent_jobs,
addr2line_timeout=0.5)
for addr in xrange(num_symbols):
exp_name = 'mock_sym_for_addr_%d' % addr
exp_source_path = 'mock_src/mock_lib1.so.c'
exp_source_line = addr
cb_arg = (addr, exp_name, exp_source_path, exp_source_line, False)
symbolizer.SymbolizeAsync(addr, cb_arg)
symbolizer.Join()
# Check that all the expected callbacks have been received.
for addr in xrange(num_symbols):
self.assertIn(addr, self._resolved_addresses)
self._resolved_addresses.remove(addr)
# Check for unexpected callbacks.
self.assertEqual(len(self._resolved_addresses), 0)
def _SymbolizeCallback(self, sym_info, cb_arg):
self.assertTrue(isinstance(sym_info, elf_symbolizer.ELFSymbolInfo))
self.assertTrue(isinstance(cb_arg, tuple))
self.assertEqual(len(cb_arg), 5)
# Unpack expectations from the callback extra argument.
(addr, exp_name, exp_source_path, exp_source_line, exp_inlines) = cb_arg
if exp_name is None:
self.assertIsNone(sym_info.name)
else:
self.assertTrue(sym_info.name.startswith(exp_name))
self.assertEqual(sym_info.source_path, exp_source_path)
self.assertEqual(sym_info.source_line, exp_source_line)
if exp_inlines:
self.assertEqual(sym_info.name, exp_name + '_inner')
self.assertEqual(sym_info.inlined_by.name, exp_name + '_middle')
self.assertEqual(sym_info.inlined_by.inlined_by.name,
exp_name + '_outer')
# Check against duplicate callbacks.
self.assertNotIn(addr, self._resolved_addresses)
self._resolved_addresses.add(addr)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
ismir/conference-archive
|
tests/test_zen_api.py
|
1
|
1981
|
import pytest
import os
import zen.api
OFFLINE = not zen.api._is_online()
OFFLINE_REASON = 'Not online, skipping integration tests'
@pytest.mark.skipif(OFFLINE, reason=OFFLINE_REASON)
def test_zen_api_create_id():
assert zen.api.create_id(stage=zen.api.DEV) is not None
@pytest.fixture()
def pdf_file(resources_dir):
return os.path.join(resources_dir, 'sample.pdf')
@pytest.mark.skipif(OFFLINE, reason=OFFLINE_REASON)
def test_zen_upload_file(pdf_file):
zid = zen.api.create_id(stage=zen.api.DEV)
result = zen.api.upload_file(zid, filepath=pdf_file, stage=zen.api.DEV)
# TODO: Verify something interesting here.
assert result is not None
@pytest.fixture()
def dummy_metadata():
return dict(upload_type='blob')
@pytest.mark.skipif(OFFLINE, reason=OFFLINE_REASON)
def test_zen_api_update_metadata(dummy_metadata):
zid = zen.api.create_id(stage=zen.api.DEV)
resp = zen.api.update_metadata(zid, dummy_metadata, stage=zen.api.DEV)
# TODO: Verify something interesting here.
assert resp is not None
@pytest.mark.skipif(OFFLINE, reason=OFFLINE_REASON)
def test_zen_api_publish(dummy_metadata):
zid = zen.api.create_id(stage=zen.api.DEV)
zen.api.update_metadata(zid, dummy_metadata, stage=zen.api.DEV)
resp = zen.api.publish(zid, stage=zen.api.DEV)
# TODO: Verify something interesting here.
assert resp is not None
@pytest.mark.skipif(OFFLINE, reason=OFFLINE_REASON)
def test_zen_api_get(dummy_metadata):
zid = zen.api.create_id(stage=zen.api.DEV)
zen.api.update_metadata(zid, dummy_metadata, stage=zen.api.DEV)
resp1 = zen.api.publish(zid, stage=zen.api.DEV)
resp2 = zen.api.get(zid, stage=zen.api.DEV)
assert resp1 == resp2
with pytest.raises(BaseException):
zen.api.get(999999999999, stage=zen.api.DEV)
@pytest.mark.skipif(OFFLINE, reason=OFFLINE_REASON)
def test_zen_api_list_items():
results = zen.api.list_items(stage=zen.api.DEV)
assert len(results) > 0
|
mit
|
aifeiasdf/Template-tookit
|
template/constants.py
|
3
|
1976
|
#
# The Template-Python distribution is Copyright (C) Sean McAfee 2007-2008,
# derived from the Perl Template Toolkit Copyright (C) 1996-2007 Andy
# Wardley. All Rights Reserved.
#
# The file "LICENSE" at the top level of this source distribution describes
# the terms under which this file may be distributed.
#
# STATUS constants returned by directives
STATUS_OK = 0 # ok
STATUS_RETURN = 1 # ok, block ended by RETURN
STATUS_STOP = 2 # ok, stopped by STOP
STATUS_DONE = 3 # ok, iterator done
STATUS_DECLINED = 4 # ok, declined to service request
STATUS_ERROR = 255 # error condition
# ERROR constants for indicating exception types
ERROR_RETURN = 'return' # return a status code
ERROR_FILE = 'file' # file error: I/O, parse, recursion
ERROR_VIEW = 'view' # view error
ERROR_UNDEF = 'undef' # undefined variable value used
ERROR_PYTHON = 'python' # error in [% PYTHON %] block
ERROR_FILTER = 'filter' # filter error
ERROR_PLUGIN = 'plugin' # plugin error
# CHOMP constants for PRE_CHOMP and POST_CHOMP
CHOMP_NONE = 0 # do not remove whitespace
CHOMP_ALL = 1 # remove whitespace up to newline
CHOMP_ONE = 1 # new name for CHOMP_ALL
CHOMP_COLLAPSE = 2 # collapse whitespace to a single space
CHOMP_GREEDY = 3 # remove all whitespace including newlines
# DEBUG constants to enable various debugging options
DEBUG_OFF = 0 # do nothing
DEBUG_ON = 1 # basic debugging flag
DEBUG_UNDEF = 2 # throw undef on undefined variables
DEBUG_VARS = 4 # general variable debugging
DEBUG_DIRS = 8 # directive debugging
DEBUG_STASH = 16 # general stash debugging
DEBUG_CONTEXT = 32 # context debugging
DEBUG_PARSER = 64 # parser debugging
DEBUG_PROVIDER = 128 # provider debugging
DEBUG_PLUGINS = 256 # plugins debugging
DEBUG_FILTERS = 512 # filters debugging
DEBUG_SERVICE = 1024 # service debugging
DEBUG_ALL = 2047 # everything
# extra debugging flags
DEBUG_CALLER = 4096 # add caller file/line
DEBUG_FLAGS = 4096 # bitmask to extract flags
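# The DEBUG constants are bit flags and combine with bitwise OR; for example
# DEBUG_VARS | DEBUG_DIRS (== 12) enables variable and directive debugging.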
|
artistic-2.0
|
sankha93/servo
|
tests/wpt/css-tests/tools/pywebsocket/src/test/testdata/handlers/blank_wsh.py
|
499
|
1557
|
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# intentionally left blank
|
mpl-2.0
|
hynnet/hiwifi-openwrt-HC5661-HC5761
|
staging_dir/host/lib/python2.7/lib2to3/fixes/fix_idioms.py
|
327
|
4889
|
"""Adjust some old Python 2 idioms to their modern counterparts.
* Change some type comparisons to isinstance() calls:
type(x) == T -> isinstance(x, T)
type(x) is T -> isinstance(x, T)
type(x) != T -> not isinstance(x, T)
type(x) is not T -> not isinstance(x, T)
* Change "while 1:" into "while True:".
* Change both
v = list(EXPR)
v.sort()
foo(v)
and the more general
v = EXPR
v.sort()
foo(v)
into
v = sorted(EXPR)
foo(v)
"""
# Author: Jacques Frechet, Collin Winter
# Local imports
from .. import fixer_base
from ..fixer_util import Call, Comma, Name, Node, BlankLine, syms
CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
TYPE = "power< 'type' trailer< '(' x=any ')' > >"
class FixIdioms(fixer_base.BaseFix):
explicit = True # The user must ask for this fixer
PATTERN = r"""
isinstance=comparison< %s %s T=any >
|
isinstance=comparison< T=any %s %s >
|
while_stmt< 'while' while='1' ':' any+ >
|
sorted=any<
any*
simple_stmt<
expr_stmt< id1=any '='
power< list='list' trailer< '(' (not arglist<any+>) any ')' > >
>
'\n'
>
sort=
simple_stmt<
power< id2=any
trailer< '.' 'sort' > trailer< '(' ')' >
>
'\n'
>
next=any*
>
|
sorted=any<
any*
simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' >
sort=
simple_stmt<
power< id2=any
trailer< '.' 'sort' > trailer< '(' ')' >
>
'\n'
>
next=any*
>
""" % (TYPE, CMP, CMP, TYPE)
def match(self, node):
r = super(FixIdioms, self).match(node)
# If we've matched one of the sort/sorted subpatterns above, we
# want to reject matches where the initial assignment and the
# subsequent .sort() call involve different identifiers.
if r and "sorted" in r:
if r["id1"] == r["id2"]:
return r
return None
return r
def transform(self, node, results):
if "isinstance" in results:
return self.transform_isinstance(node, results)
elif "while" in results:
return self.transform_while(node, results)
elif "sorted" in results:
return self.transform_sort(node, results)
else:
raise RuntimeError("Invalid match")
def transform_isinstance(self, node, results):
x = results["x"].clone() # The thing inside of type()
T = results["T"].clone() # The type being compared against
x.prefix = u""
T.prefix = u" "
test = Call(Name(u"isinstance"), [x, Comma(), T])
if "n" in results:
test.prefix = u" "
test = Node(syms.not_test, [Name(u"not"), test])
test.prefix = node.prefix
return test
def transform_while(self, node, results):
one = results["while"]
one.replace(Name(u"True", prefix=one.prefix))
def transform_sort(self, node, results):
sort_stmt = results["sort"]
next_stmt = results["next"]
list_call = results.get("list")
simple_expr = results.get("expr")
if list_call:
list_call.replace(Name(u"sorted", prefix=list_call.prefix))
elif simple_expr:
new = simple_expr.clone()
new.prefix = u""
simple_expr.replace(Call(Name(u"sorted"), [new],
prefix=simple_expr.prefix))
else:
raise RuntimeError("should not have reached here")
sort_stmt.remove()
btwn = sort_stmt.prefix
# Keep any prefix lines between the sort_stmt and the list_call and
# shove them right after the sorted() call.
if u"\n" in btwn:
if next_stmt:
# The new prefix should be everything from the sort_stmt's
# prefix up to the last newline, then the old prefix after a new
# line.
prefix_lines = (btwn.rpartition(u"\n")[0], next_stmt[0].prefix)
next_stmt[0].prefix = u"\n".join(prefix_lines)
else:
assert list_call.parent
assert list_call.next_sibling is None
# Put a blank line after list_call and set its prefix.
end_line = BlankLine()
list_call.parent.append_child(end_line)
assert list_call.next_sibling is end_line
# The new prefix should be everything up to the first new line
# of sort_stmt's prefix.
end_line.prefix = btwn.rpartition(u"\n")[0]
|
gpl-2.0
|
VerstandInvictus/PatternsEmerge
|
backend/mdcore.py
|
1
|
2290
|
import codecs
import config
import unidecode
from pyvirtualdisplay import Display
from time import sleep
from selenium import webdriver, common
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36' \
' (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'
class mdLogger:
def __init__(self, logfile):
self.logfile = logfile
def logEntry(self, entry, level):
with codecs.open(self.logfile, mode='a+', encoding='utf-8') as log:
log.write(entry + '\n')
if 'progress' in level:
print unidecode.unidecode(entry)
class marketdelta:
def __init__(self, logobj, strategy):
self.logger = logobj
self.user = config.mdUsers[strategy]
self.password = config.mdPasses[strategy]
self.display = Display(visible=0, size=(800, 600))
self.display.start()
self.profile = webdriver.FirefoxProfile()
self.br = self.loginToMD()
def loginToMD(self):
self.profile.set_preference("general.useragent.override", user_agent)
browser = webdriver.Firefox(self.profile)
browser.implicitly_wait(15)
browser.get('https://app.marketdelta.com/signon')
emailfield = browser.find_element_by_id('email')
emailfield.send_keys(self.user)
pwfield = browser.find_element_by_name('password')
pwfield.send_keys(self.password)
submitbutton = browser.find_element_by_xpath(
'//*[@id="frame-content"]/form/div[3]/div/input')
submitbutton.click()
sleep(15) # give it time to load the order list
self.logger.logEntry("Logged in successfully", 'info')
return browser
def getOrderList(self):
try:
orderlist = self.br.find_element_by_class_name('watchlist')
otable = orderlist.get_attribute('innerHTML')
self.logger.logEntry("Got order list", 'info')
except common.exceptions.UnexpectedAlertPresentException:
self.logger.logEntry('No orders to get', 'info')
otable = "<th></th><tr><td></td></tr>"
return otable
def exit(self):
self.br.quit()
self.display.stop()
self.logger.logEntry("Quit FF and Xvfb", 'info')
if __name__ == '__main__':
exit()
|
mit
|
tonyli71/tempest
|
tempest/api_schema/response/compute/v2_1/keypairs.py
|
38
|
3928
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
get_keypair = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'keypair': {
'type': 'object',
'properties': {
'public_key': {'type': 'string'},
'name': {'type': 'string'},
'fingerprint': {'type': 'string'},
'user_id': {'type': 'string'},
'deleted': {'type': 'boolean'},
'created_at': {'type': 'string'},
'updated_at': {'type': ['string', 'null']},
'deleted_at': {'type': ['string', 'null']},
'id': {'type': 'integer'}
},
'additionalProperties': False,
# When we run the get keypair API, response body includes
# all the above mentioned attributes.
# But in Nova API sample file, response body includes only
# 'public_key', 'name' & 'fingerprint'. So only 'public_key',
# 'name' & 'fingerprint' are defined as 'required'.
'required': ['public_key', 'name', 'fingerprint']
}
},
'additionalProperties': False,
'required': ['keypair']
}
}
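# A minimal body matching the get_keypair schema (hypothetical values):
#
# {"keypair": {"public_key": "ssh-rsa AAAA...", "name": "kp1",
#              "fingerprint": "00:11:22:33:44:55"}}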
create_keypair = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'keypair': {
'type': 'object',
'properties': {
'fingerprint': {'type': 'string'},
'name': {'type': 'string'},
'public_key': {'type': 'string'},
'user_id': {'type': 'string'},
'private_key': {'type': 'string'}
},
'additionalProperties': False,
# When create keypair API is being called with 'Public key'
# (Importing keypair) then, response body does not contain
# 'private_key' So it is not defined as 'required'
'required': ['fingerprint', 'name', 'public_key', 'user_id']
}
},
'additionalProperties': False,
'required': ['keypair']
}
}
delete_keypair = {
'status_code': [202],
}
list_keypairs = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'keypairs': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'keypair': {
'type': 'object',
'properties': {
'public_key': {'type': 'string'},
'name': {'type': 'string'},
'fingerprint': {'type': 'string'}
},
'additionalProperties': False,
'required': ['public_key', 'name', 'fingerprint']
}
},
'additionalProperties': False,
'required': ['keypair']
}
}
},
'additionalProperties': False,
'required': ['keypairs']
}
}
|
apache-2.0
|
xmyth/rt-thread
|
bsp/efm32/rtconfig.py
|
35
|
2289
|
import os
# toolchains options
ARCH = 'arm'
CPU = 'cortex-m3'
CROSS_TOOL = 'gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
    EXEC_PATH = r'C:\Program Files (x86)\CodeSourcery\Sourcery G++ Lite\bin'
    #EXEC_PATH = r'C:\Program Files (x86)\yagarto\bin'
elif CROSS_TOOL == 'keil':
print '================ERROR============================'
print 'Not support keil yet!'
print '================================================='
exit(0)
elif CROSS_TOOL == 'iar':
print '================ERROR============================'
print 'Not support iar yet!'
print '================================================='
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
# EFM32_BOARD = 'EFM32_G8XX_STK'
# EFM32_BOARD = 'EFM32_GXXX_DK'
EFM32_BOARD = 'EFM32GG_DK3750'
if EFM32_BOARD == 'EFM32_G8XX_STK':
EFM32_FAMILY = 'Gecko'
EFM32_TYPE = 'EFM32G890F128'
EFM32_LCD = 'none'
elif EFM32_BOARD == 'EFM32_GXXX_DK':
EFM32_FAMILY = 'Gecko'
EFM32_TYPE = 'EFM32G290F128'
EFM32_LCD = 'none'
elif EFM32_BOARD == 'EFM32GG_DK3750':
EFM32_FAMILY = 'Giant Gecko'
EFM32_TYPE = 'EFM32GG990F1024'
# EFM32_LCD = 'LCD_MAPPED'
EFM32_LCD = 'LCD_DIRECT'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-efm32.map,-cref,-u,__cs3_reset -T'
if EFM32_BOARD == 'EFM32_G8XX_STK' or EFM32_BOARD == 'EFM32_GXXX_DK':
LFLAGS += ' efm32g_rom.ld'
elif EFM32_BOARD == 'EFM32GG_DK3750':
LFLAGS += ' efm32gg_rom.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
|
gpl-2.0
|
lanyuwen/openthread
|
tests/scripts/thread-cert/Cert_7_1_02_BorderRouterAsRouter.py
|
2
|
4084
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import config
import thread_cert
LEADER = 1
ROUTER = 2
ED2 = 3
SED2 = 4
MTDS = [ED2, SED2]
class Cert_7_1_2_BorderRouterAsRouter(thread_cert.TestCase):
TOPOLOGY = {
LEADER: {
'mode': 'rsdn',
'panid': 0xface,
'whitelist': [ROUTER]
},
ROUTER: {
'mode': 'rsdn',
'panid': 0xface,
'router_selection_jitter': 1,
'whitelist': [LEADER, ED2, SED2]
},
ED2: {
'is_mtd': True,
'mode': 'rsn',
'panid': 0xface,
'whitelist': [ROUTER]
},
SED2: {
'is_mtd': True,
'mode': 's',
'panid': 0xface,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'whitelist': [ROUTER]
},
}
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[ROUTER].add_prefix('2001:2:0:1::/64', 'paros')
self.nodes[ROUTER].add_prefix('2001:2:0:2::/64', 'paro')
self.nodes[ROUTER].register_netdata()
# Set lowpan context of sniffer
self.simulator.set_lowpan_context(1, '2001:2:0:1::/64')
self.simulator.set_lowpan_context(2, '2001:2:0:2::/64')
self.nodes[ED2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ED2].get_state(), 'child')
self.nodes[SED2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[SED2].get_state(), 'child')
addrs = self.nodes[ED2].get_addrs()
self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
self.assertTrue(any('2001:2:0:2' in addr[0:10] for addr in addrs))
for addr in addrs:
if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
self.assertTrue(self.nodes[LEADER].ping(addr))
addrs = self.nodes[SED2].get_addrs()
self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
self.assertFalse(any('2001:2:0:2' in addr[0:10] for addr in addrs))
for addr in addrs:
if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
self.assertTrue(self.nodes[LEADER].ping(addr))
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
clumsy/intellij-community
|
python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/pgen2/tokenize.py
|
115
|
19125
|
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <[email protected]>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *
from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
try:
bytes
except NameError:
# Support bytes type in Python <= 2.5, so 2to3 turns itself into
# valid Python 3 code.
bytes = str
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
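# For example, group('a', 'b') yields '(a|b)', any('x') yields '(x)*', and
# maybe('x') yields '(x)?'.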
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None,
'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"b'''", 'b"""', "B'''", 'B"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"b'", 'b"', "B'", 'B"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"', ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, start, end, line): # for testing
(srow, scol) = start
(erow, ecol) = end
print "%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
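# A minimal sketch of the tokeneater callback interface described above
# (not part of the original module; assumes Python 2):
#
#     from StringIO import StringIO
#     def eater(type, token, start, end, line):
#         print tok_name[type], repr(token)
#     tokenize(StringIO("x = 1\n").readline, eater)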
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read
in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present, but
disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return bytes()
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
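# A minimal sketch of detect_encoding() (not part of the original module;
# assumes Python 2, where BOM_UTF8 is a byte string):
#
#     readline = iter([BOM_UTF8 + 'x = 1\n']).next
#     detect_encoding(readline)    # -> ('utf-8-sig', ['x = 1\n'])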
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
Round-trip invariant for limited input:
# Output text will tokenize back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
t2 = [tok[:2] for tok in generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError, ("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError, ("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
|
apache-2.0
|
alrifqi/django
|
django/utils/jslex.py
|
251
|
7779
|
"""JsLex: a lexer for Javascript"""
# Originally from https://bitbucket.org/ned/jslex
from __future__ import unicode_literals
import re
class Tok(object):
"""
A specification for a token class.
"""
num = 0
def __init__(self, name, regex, next=None):
self.id = Tok.num
Tok.num += 1
self.name = name
self.regex = regex
self.next = next
def literals(choices, prefix="", suffix=""):
"""
Create a regex from a space-separated list of literal `choices`.
If provided, `prefix` and `suffix` will be attached to each choice
individually.
"""
return "|".join(prefix + re.escape(c) + suffix for c in choices.split())
class Lexer(object):
"""
A generic multi-state regex-based lexer.
"""
def __init__(self, states, first):
self.regexes = {}
self.toks = {}
for state, rules in states.items():
parts = []
for tok in rules:
groupid = "t%d" % tok.id
self.toks[groupid] = tok
parts.append("(?P<%s>%s)" % (groupid, tok.regex))
self.regexes[state] = re.compile("|".join(parts), re.MULTILINE | re.VERBOSE)
self.state = first
def lex(self, text):
"""
Lexically analyze `text`.
Yields pairs (`name`, `tokentext`).
"""
end = len(text)
state = self.state
regexes = self.regexes
toks = self.toks
start = 0
while start < end:
for match in regexes[state].finditer(text, start):
name = match.lastgroup
tok = toks[name]
toktext = match.group(name)
start += len(toktext)
yield (tok.name, toktext)
if tok.next:
state = tok.next
break
self.state = state
class JsLexer(Lexer):
"""
A Javascript lexer
>>> lexer = JsLexer()
>>> list(lexer.lex("a = 1"))
[('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]
This doesn't properly handle non-ASCII characters in the Javascript source.
"""
# Because these tokens are matched as alternatives in a regex, longer
# possibilities must appear in the list before shorter ones, for example,
# '>>' before '>'.
#
# Note that we don't have to detect malformed Javascript, only properly
# lex correct Javascript, so much of this is simplified.
# Details of Javascript lexical structure are taken from
# http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf
# A useful explanation of automatic semicolon insertion is at
# http://inimino.org/~inimino/blog/javascript_semicolons
both_before = [
Tok("comment", r"/\*(.|\n)*?\*/"),
Tok("linecomment", r"//.*?$"),
Tok("ws", r"\s+"),
Tok("keyword", literals("""
break case catch class const continue debugger
default delete do else enum export extends
finally for function if import in instanceof
new return super switch this throw try typeof
var void while with
""", suffix=r"\b"), next='reg'),
Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
Tok("id", r"""
([a-zA-Z_$ ]|\\u[0-9a-fA-F]{4}) # first char
([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})* # rest chars
""", next='div'),
Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
Tok("onum", r"0[0-7]+"),
Tok("dnum", r"""
( (0|[1-9][0-9]*) # DecimalIntegerLiteral
\. # dot
[0-9]* # DecimalDigits-opt
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
\. # dot
[0-9]+ # DecimalDigits
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
(0|[1-9][0-9]*) # DecimalIntegerLiteral
([eE][-+]?[0-9]+)? # ExponentPart-opt
)
""", next='div'),
Tok("punct", literals("""
>>>= === !== >>> <<= >>= <= >= == != << >> &&
|| += -= *= %= &= |= ^=
"""), next="reg"),
Tok("punct", literals("++ -- ) ]"), next='div'),
Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next='reg'),
Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
]
both_after = [
Tok("other", r"."),
]
states = {
# slash will mean division
'div': both_before + [
Tok("punct", literals("/= /"), next='reg'),
] + both_after,
# slash will mean regex
'reg': both_before + [
Tok("regex",
r"""
/ # opening slash
# First character is..
( [^*\\/[] # anything but * \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)
# Following characters are same, except for excluding a star
( [^\\/[] # anything but \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)* # many times
/ # closing slash
[a-zA-Z0-9]* # trailing flags
""", next='div'),
] + both_after,
}
def __init__(self):
super(JsLexer, self).__init__(self.states, 'reg')
def prepare_js_for_gettext(js):
"""
Convert the Javascript source `js` into something resembling C for
xgettext.
What actually happens is that all the regex literals are replaced with
"REGEX".
"""
def escape_quotes(m):
"""Used in a regex to properly escape double quotes."""
s = m.group(0)
if s == '"':
return r'\"'
else:
return s
lexer = JsLexer()
c = []
for name, tok in lexer.lex(js):
if name == 'regex':
# C doesn't grok regexes, and they aren't needed for gettext,
# so just output a string instead.
tok = '"REGEX"'
elif name == 'string':
# C doesn't have single-quoted strings, so make all strings
# double-quoted.
if tok.startswith("'"):
guts = re.sub(r"\\.|.", escape_quotes, tok[1:-1])
tok = '"' + guts + '"'
elif name == 'id':
# C can't deal with Unicode escapes in identifiers. We don't
# need them for gettext anyway, so replace them with something
# innocuous
tok = tok.replace("\\", "U")
c.append(tok)
return ''.join(c)
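# A minimal usage sketch (not part of the original module):
#
#     prepare_js_for_gettext("x = /ab+c/; s = 'hi';")
#     # -> 'x = "REGEX"; s = "hi";'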
|
bsd-3-clause
|
reviewboard/rbtools
|
rbtools/utils/console.py
|
1
|
5835
|
from __future__ import print_function, unicode_literals
import getpass
import logging
import os
import subprocess
import sys
from distutils.util import strtobool
from six.moves import input
from rbtools.utils.encoding import force_unicode
from rbtools.utils.errors import EditorError
from rbtools.utils.filesystem import make_tempfile
logger = logging.getLogger(__name__)
def get_input(prompt, require=False):
"""Ask the user for input.
Args:
prompt (unicode):
The text to prompt the user with.
require (bool, optional):
Whether to require a result. If ``True``, this will keep prompting
until a non-empty value is entered.
Returns:
unicode:
The entered user data.
"""
def _get_input():
# `input`'s usual prompt gets written to stdout, which results in
# really crummy behavior if stdout is redirected to a file. Because
# this is often paired with getpass (entering a username/password
# combination), we mimic the behavior there, writing the prompt to
# stderr.
sys.stderr.write(prompt)
return input()
prompt = str(prompt)
if require:
value = None
while not value:
value = _get_input()
else:
value = _get_input()
return value
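# A minimal usage sketch (not part of the original module); the prompt text
# is hypothetical:
#
#     username = get_input('Username: ', require=True)
#     # Re-prompts (writing to stderr) until a non-empty value is entered.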
def get_pass(prompt, require=False):
"""Ask the user for a password.
Args:
prompt (unicode):
The text to prompt the user with.
require (bool, optional):
Whether to require a result. If ``True``, this will keep prompting
until a non-empty value is entered.
Returns:
bytes:
The entered password.
"""
prompt = str(prompt)
if require:
password = None
while not password:
password = getpass.getpass(prompt)
else:
password = getpass.getpass(prompt)
return password
def confirm(question):
"""Interactively prompt for a Yes/No answer.
Accepted values (case-insensitive) depend on distutils.util.strtobool():
'Yes' values: y, yes, t, true, on, 1
'No' values: n, no , f, false, off, 0
"""
while True:
full_question = '%s [Yes/No]: ' % question
answer = get_input(full_question).lower()
try:
return strtobool(answer)
except ValueError:
print('%s is not a valid answer.' % answer)
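# A minimal usage sketch (not part of the original module); the question
# text is hypothetical:
#
#     if confirm('Discard local changes?'):
#         ...  # runs for y/yes/t/true/on/1 (case-insensitive)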
def confirm_select(question, options_length):
"""Interactively prompt for a specific answer from a list of options.
Accepted answers are integers starting from 1 until an integer n
representing the nth of element within options.
Args:
question (unicode):
The prompt to be displayed.
options_length (int):
The number of available options that the user can choose a
response from.
Returns:
unicode:
The user's chosen response. If the user decides to cancel the
prompt, None is returned.
"""
while True:
answer = get_input('%s [1-%i]: ' % (question, options_length))
try:
int_answer = int(answer)
if 1 <= int_answer <= options_length:
return int_answer
raise ValueError
except ValueError:
print('%s is not a valid answer.' % answer)
def edit_file(filename):
"""Run a user-configured editor to edit an existing file.
This will run a configured text editor (trying the :envvar:`VISUAL` or
:envvar:`EDITOR` environment variables, falling back on :program:`vi`)
to request text for use in a commit message or some other purpose.
Args:
filename (unicode):
The file to edit.
Returns:
unicode:
The resulting content.
Raises:
rbcommons.utils.errors.EditorError:
The configured editor could not be run, or it failed with an
error.
"""
if not os.path.exists(filename):
raise EditorError('The file "%s" does not exist or is not accessible.'
% filename)
editor = force_unicode(
os.environ.get(str('RBTOOLS_EDITOR')) or
os.environ.get(str('VISUAL')) or
os.environ.get(str('EDITOR')) or
'vi'
)
try:
subprocess.call(editor.split() + [filename])
except OSError:
raise EditorError('The editor "%s" was not found or could not be run. '
'Make sure the EDITOR environment variable is set '
'to your preferred editor.'
% editor)
try:
with open(filename, 'r') as fp:
return force_unicode(fp.read())
except IOError:
raise EditorError('The edited file "%s" was deleted during edit.'
% filename)
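# A minimal usage sketch (not part of the original module); the file path
# is hypothetical:
#
#     message = edit_file('/tmp/COMMIT_EDITMSG')
#     # Opens $RBTOOLS_EDITOR, $VISUAL, or $EDITOR (falling back to vi),
#     # then returns the edited file's contents.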
def edit_text(content='', filename=None):
"""Run a user-configured editor to prompt for text.
This will run a configured text editor (trying the :envvar:`VISUAL` or
:envvar:`EDITOR` environment variables, falling back on :program:`vi`)
to request text for use in a commit message or some other purpose.
Args:
content (unicode, optional):
Existing content to edit.
filename (unicode, optional):
The optional name of the temp file to edit. This can be used to
help the editor provide a proper editing environment for the
file.
Returns:
unicode:
The resulting content.
Raises:
rbcommons.utils.errors.EditorError:
The configured editor could not be run, or it failed with an
error.
"""
tempfile = make_tempfile(content.encode('utf8'), filename=filename)
result = edit_file(tempfile)
os.unlink(tempfile)
return result
|
mit
|
tejasnikumbh/ThesisCode
|
lib/python2.7/site-packages/numpy/distutils/fcompiler/absoft.py
|
229
|
5612
|
# http://www.absoft.com/literature/osxuserguide.pdf
# http://www.absoft.com/documentation.html
# Notes:
# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
# generated extension modules (works for f2py v2.45.241_1936 and up)
from __future__ import division, absolute_import, print_function
import os
from numpy.distutils.cpuinfo import cpu
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
from numpy.distutils.misc_util import cyg2win32
compilers = ['AbsoftFCompiler']
class AbsoftFCompiler(FCompiler):
compiler_type = 'absoft'
description = 'Absoft Corp Fortran Compiler'
#version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp'
version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\
r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)'
# on windows: f90 -V -c dummy.f
# f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16
# samt5735(8)$ f90 -V -c dummy.f
# f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0
# Note that fink installs g77 as f77, so need to use f90 for detection.
executables = {
'version_cmd' : None, # set by update_executables
'compiler_f77' : ["f77"],
'compiler_fix' : ["f90"],
'compiler_f90' : ["f90"],
'linker_so' : ["<F90>"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
if os.name=='nt':
library_switch = '/out:' #No space after /out:!
module_dir_switch = None
module_include_switch = '-p'
def update_executables(self):
f = cyg2win32(dummy_fortran_file())
self.executables['version_cmd'] = ['<F90>', '-V', '-c',
f+'.f', '-o', f+'.o']
def get_flags_linker_so(self):
if os.name=='nt':
opt = ['/dll']
# The "-K shared" switches are being left in for pre-9.0 versions
# of Absoft though I don't think versions earlier than 9 can
# actually be used to build shared libraries. In fact, version
# 8 of Absoft doesn't recognize "-K shared" and will fail.
elif self.get_version() >= '9.0':
opt = ['-shared']
else:
opt = ["-K", "shared"]
return opt
def library_dir_option(self, dir):
if os.name=='nt':
return ['-link', '/PATH:"%s"' % (dir)]
return "-L" + dir
def library_option(self, lib):
if os.name=='nt':
return '%s.lib' % (lib)
return "-l" + lib
def get_library_dirs(self):
opt = FCompiler.get_library_dirs(self)
d = os.environ.get('ABSOFT')
if d:
if self.get_version() >= '10.0':
# use shared libraries, the static libraries were not compiled -fPIC
prefix = 'sh'
else:
prefix = ''
if cpu.is_64bit():
suffix = '64'
else:
suffix = ''
opt.append(os.path.join(d, '%slib%s' % (prefix, suffix)))
return opt
def get_libraries(self):
opt = FCompiler.get_libraries(self)
if self.get_version() >= '11.0':
opt.extend(['af90math', 'afio', 'af77math', 'amisc'])
elif self.get_version() >= '10.0':
opt.extend(['af90math', 'afio', 'af77math', 'U77'])
elif self.get_version() >= '8.0':
opt.extend(['f90math', 'fio', 'f77math', 'U77'])
else:
opt.extend(['fio', 'f90math', 'fmath', 'U77'])
if os.name =='nt':
opt.append('COMDLG32')
return opt
def get_flags(self):
opt = FCompiler.get_flags(self)
if os.name != 'nt':
opt.extend(['-s'])
if self.get_version():
if self.get_version()>='8.2':
opt.append('-fpic')
return opt
def get_flags_f77(self):
opt = FCompiler.get_flags_f77(self)
opt.extend(['-N22', '-N90', '-N110'])
v = self.get_version()
if os.name == 'nt':
if v and v>='8.0':
opt.extend(['-f', '-N15'])
else:
opt.append('-f')
if v:
if v<='4.6':
opt.append('-B108')
else:
# Though -N15 is undocumented, it works with
# Absoft 8.0 on Linux
opt.append('-N15')
return opt
def get_flags_f90(self):
opt = FCompiler.get_flags_f90(self)
opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
"-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
if self.get_version():
if self.get_version()>'4.6':
opt.extend(["-YDEALLOC=ALL"])
return opt
def get_flags_fix(self):
opt = FCompiler.get_flags_fix(self)
opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
"-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
opt.extend(["-f", "fixed"])
return opt
def get_flags_opt(self):
opt = ['-O']
return opt
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils.fcompiler import new_fcompiler
compiler = new_fcompiler(compiler='absoft')
compiler.customize()
print(compiler.get_version())
|
mit
|
tracierenea/gnuradio
|
gr-channels/python/channels/qa_fading_model.py
|
47
|
1949
|
#!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, analog, blocks, channels
import math
class test_fading_model(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_000(self):
N = 1000 # number of samples to use
fs = 1000 # baseband sampling rate
freq = 100
fDTs = 0.01
K = 4
signal = analog.sig_source_c(fs, analog.GR_SIN_WAVE, freq, 1)
head = blocks.head(gr.sizeof_gr_complex, N)
op = channels.fading_model(8, fDTs=fDTs, LOS=True,
K=K, seed=0)
snk = blocks.vector_sink_c()
snk1 = blocks.vector_sink_c()
self.assertAlmostEqual(K, op.K(), 4)
self.assertAlmostEqual(fDTs, op.fDTs(), 4)
#self.tb.connect(signal, head, op, snk)
#self.tb.connect(op, snk1)
#self.tb.run()
#dst_data = snk.data()
#exp_data = snk1.data()
#self.assertComplexTuplesAlmostEqual(exp_data, dst_data, 5)
if __name__ == '__main__':
gr_unittest.run(test_fading_model, "test_fading_model.xml")
|
gpl-3.0
|
dpapathanasiou/cmdline-news
|
cmdlinenews.py
|
1
|
10334
|
#!/usr/bin/env python
"""
This module uses feedparser to get item titles and links from any rss
feed, and presents the results as simple command-line output.
It's great for browsing your favorite sites unobtrusively, without having
to open a browser window.
Feeds are memoized for feed_memoize_interval seconds after parsing
(the default is 15 minutes) to prevent unnecessary server requests.
Define your favorite site feeds in the associated sites.py interests
dict for convenience, although you can also just enter feed urls at the
command prompt when this module runs.
"""
import feedparser
import pycurl
import time
import sys
import curses
from string import Template
from cStringIO import StringIO
from readability.readability import Document
from BeautifulSoup import BeautifulSoup
from textwrap import wrap
from sites import interests
UA = "cmdline-news/1.0 +http://github.com/dpapathanasiou/cmdline-news"
feedparser.USER_AGENT = UA
FEED_MEMO_HASH = {} # k=feed url, v=(tuple: timestamp feed was fetched, content)
FEED_MEMOIZE_INTERVAL = 900 # 15 minutes
WINDOW_COLS = 80
WINDOW_ROWS = 40
def initscr(screen):
"""Use the curses library to get the terminal row + column size"""
global WINDOW_COLS, WINDOW_ROWS
screen.refresh()
win = curses.newwin(0, 0)
WINDOW_ROWS, WINDOW_COLS = win.getmaxyx()
try:
curses.wrapper(initscr)
except curses.error:
pass
def purge_expired (data_hash, interval):
"""Remove everything in the given hash if it has exceeded
the given time interval"""
expired = []
for key, val in data_hash.items():
set_time = val[0]
if (time.time() - set_time) > interval:
expired.append(key)
for ex_k in expired:
del data_hash[ex_k]
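# A minimal usage sketch (not part of the original module):
#
#     cache = {'feed': (time.time() - 1000, 'stale entry')}
#     purge_expired(cache, 900)    # drops anything older than 15 minutes
#     assert 'feed' not in cache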
def parse_feed (url, interval=FEED_MEMOIZE_INTERVAL):
"""Retrieve and parse the contents of the given feed url,
unless it has already been memoized within the accepted interval"""
purge_expired(FEED_MEMO_HASH, interval)
if FEED_MEMO_HASH.has_key(url):
return FEED_MEMO_HASH[url][1]
else:
feed = feedparser.parse(url)
if feed:
if feed.version and len(feed.version) > 0:
FEED_MEMO_HASH[url] = ( time.time(), feed )
return feed
def _read_item_data (feed_url, get_fn):
"""Return a list of item data (specified by the get_fn)
found in this feed url"""
data = []
feed = parse_feed(feed_url)
if feed is not None:
for entry in feed.entries:
try:
data.append(get_fn(entry))
except AttributeError, detail:
print >> sys.stderr, detail
data.append(None)
return data
def item_titles (feed_url):
"""Return a list of the item titles found in this feed url"""
return _read_item_data(feed_url, lambda x: x.title)
def strip_url_parameters (url):
"""Remove any client or user parameters from this url, and return
the string without them (it leaves urls w/o parameters as-is)"""
return url.split('?')[0]
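# A brief illustration (not part of the original module); the URL is
# hypothetical:
#
#     strip_url_parameters('http://example.com/story?utm_source=rss')
#     # -> 'http://example.com/story'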
def item_links (feed_url, strip_parameters):
"""Return a list of the item links found in this feed url"""
links = _read_item_data(feed_url, lambda x: x.link)
if strip_parameters:
return map(strip_url_parameters, links)
else:
return links
def load_url (url, referrer=None):
"""Attempt to load the url using pycurl and return the data
(which is None if unsuccessful)"""
data = None
databuffer = StringIO()
curl = pycurl.Curl()
curl.setopt(pycurl.URL, url)
curl.setopt(pycurl.FOLLOWLOCATION, 1)
curl.setopt(pycurl.CONNECTTIMEOUT, 5)
curl.setopt(pycurl.TIMEOUT, 8)
curl.setopt(pycurl.WRITEFUNCTION, databuffer.write)
curl.setopt(pycurl.USERAGENT, UA)
curl.setopt(pycurl.COOKIEFILE, '')
if referrer is not None:
curl.setopt(pycurl.REFERER, referrer)
try:
curl.perform()
data = databuffer.getvalue()
except Exception:
pass
curl.close()
return data
OUTPUT_FORMAT = Template("""
$num.\t$title
\t$link
""")
def get_items (feed_url, strip_parameters=True):
"""Get the item titles and links from this feed_url,
display them according to OUTPUT_FORMAT in stdout,
and return a dict of item number and item url,
to allow someone to drill down to a specific article."""
titles = filter(None, item_titles(feed_url))
links = filter(None, item_links(feed_url, strip_parameters))
if len(titles) == 0 or len(links) == 0:
print "Sorry, there's nothing available right now at", feed_url
output = []
posts = {} # k=item number (starting at 1), v=item url
for i, title in enumerate(titles):
try:
output.append(OUTPUT_FORMAT.substitute(num=(i+1),
title=title,
link=links[i]))
posts[(i+1)] = str(links[i].encode('utf-8'))
except KeyError:
pass
return u''.join(output), posts
def get_article (url, referrer=None):
"""Fetch the html found at url and use the readability algorithm
to return just the text content"""
html = load_url(url, referrer)
if html is not None:
doc_html = Document(html).summary(html_partial=True)
clean_html = doc_html.replace('&', u'&').replace(u' ', u'\n')
return BeautifulSoup(clean_html).getText(separator=u' ').replace(u' ', u' ')
def prompt_user (prompt):
"""Display a message to the user and wait for a reply,
returning the text string of the reply."""
print prompt,
reply = sys.stdin.readline().rstrip().lstrip()
return reply
def scroll_output (data,
rows=(WINDOW_ROWS-1),
cols=WINDOW_COLS,
prompt='\t--- more --- [enter to continue] ',
wrap_data=True,
choice_fn=None):
"""Print the data to the screen, pausing when the number of lines
meets the total size of the screen in rows. If choice_fn is defined,
it is invoked with the user reply to the prompt as input."""
if wrap_data:
margin = u' ' * (cols/4)
lines = []
for line in wrap(data, cols/2):
lines.append( margin + line )
else:
lines = data.splitlines()
for i, line in enumerate(lines):
if i > 0 and i % rows == 0:
user_choice = prompt_user(prompt)
if choice_fn is not None:
if choice_fn(user_choice):
break
print line
def show_feed_menu ():
"""Use the content of the interests dict (imported from the sites.py
file) to present a menu of codes and descriptions, if available"""
if len(interests) == 0:
print "Sorry, no feeds defined\nPlease edit the interests dict in the sites.py file\n"
else:
print '\n{0:10} ==> '.format('Code'), 'Description'
print '{0:10} '.format('----'), '-----------\n'
for code, feed_data in interests.items():
if feed_data.has_key('url'):
feed_desc = feed_data['url'] # default to display
if feed_data.has_key('desc'):
feed_desc=feed_data['desc']
print '{0:10} ==> '.format(code), feed_desc
def get_news ():
"""Create an interactive user prompt to get the feed name
or url to fetch and display"""
option_prompt = '\t--- more --- [enter to continue, or # to read] '
no_content = 'Sorry, there is no content at'
while True:
feed = prompt_user("Which feed do you want to read? Input code (! for menu, [enter] to quit) ")
if len(feed) == 0:
break
elif "!" == feed:
show_feed_menu()
else:
feed_referrer = None
try:
feed_data = interests[feed.lower()]
feed_url = feed_data['url']
strip_parameters = True
if feed_data.has_key('strip_url_parameters'):
strip_parameters = feed_data['strip_url_parameters']
if feed_data.has_key('referrer'):
feed_referrer = feed_data['referrer']
menu, links = get_items (feed_url, strip_parameters)
except KeyError:
# try interpreting the stdin typed by the user as a url
menu, links = get_items(feed)
if len(links) == 0:
print no_content, feed
break
else:
options = links.keys()
bad_option = "Please choose between (%d-%d)" % (min(options),
max(options))
def _display_article (user_choice):
"""An inner function which captures the current feed
links in a closure, and fetches the user-chosen link
for display using scroll_article()"""
break_menu = False
try:
choice = int(user_choice)
if choice in options:
article = get_article(links[choice], feed_referrer)
if article is not None:
scroll_output(article)
break_menu = True
else:
print no_content, links[choice]
else:
print bad_option
except ValueError:
pass
return break_menu
scroll_output(menu,
wrap_data=False,
prompt=option_prompt,
choice_fn=_display_article)
while True:
choice = prompt_user(
"Which article do you want to see? "\
"(%d-%d, or [enter] for none) "
% (min(options), max(options)))
if 0 == len(choice):
break
if _display_article(choice):
break
if __name__ == "__main__":
get_news()
|
mit
|
mwalter416/mse
|
mse.py
|
1
|
3866
|
import json
import urllib2
mseServers=[
'1.2.3.4',
'5.6.7.8',
'9.10.11.12'
]
def getClients(server, user, password):
passwordManager = urllib2.HTTPPasswordMgrWithDefaultRealm()
passwordManager.add_password(None,"https://"+server+"/",user,password)
authHandler = urllib2.HTTPBasicAuthHandler(passwordManager)
opener = urllib2.build_opener(authHandler)
urllib2.install_opener(opener)
request = urllib2.Request("https://"+server+"/api/contextaware/v1/location/clie nts",headers={'Accept' : 'application/json'})
pageHandle = urllib2.urlopen(request)
page= json.load(pageHandle)
entries=page['Locations']['entries']
while page['Locations']['totalPages'] != page['Locations']['currentPage']:
request = urllib2.Request(page['Locations']['nextResourceURI'],headers={'Accept' : 'application/json'})
pageHandle = urllib2.urlopen(request)
page= json.load(pageHandle)
entries=entries+page['Locations']['entries']
return entries
def getClient(server, user, password, client):
passwordManager = urllib2.HTTPPasswordMgrWithDefaultRealm()
passwordManager.add_password(None,"https://"+server+"/",user,password)
authHandler = urllib2.HTTPBasicAuthHandler(passwordManager)
opener = urllib2.build_opener(authHandler)
urllib2.install_opener(opener)
request = urllib2.Request("https://"+server+"/api/contextaware/v1/location/clie nts/"+client,headers={'Accept' : 'application/json'})
pageHandle = urllib2.urlopen(request)
return json.load(pageHandle)['WirelessClientLocation']
def getAllClients(user, password):
clients=[]
for server in mseServers:
for client in getClients(server, user, password):
clients.append(WirelessClientLocation(client,server))
return clients
class MapInformation:
def __init__(self,json):
self.imageName= json['Image']['imageName']
self.floorRefId= json['floorRefId']
self.offsetX= json['Dimension']['offsetX']
self.offsetY= json['Dimension']['offsetY']
self.height= json['Dimension']['height']
self.width= json['Dimension']['width']
self.length= json['Dimension']['length']
self.unit= json['Dimension']['unit']
self.mapHierarchyString=json['mapHierarchyString']
class MapCoordinatePair:
def __init__(self,json):
self.y=json['y']
self.x=json['x']
self.unit=json['unit']
class WirelessClientStatistics:
def __init__(self,json):
self.currentServerTime= json['currentServerTime']
self.lastLocatedTime= json['lastLocatedTime']
self.firstLocatedTime= json['firstLocatedTime']
class WirelessClientLocation:
def __init__(self,json,server):
self.mseServer= server
self.userName= json.get('userName','N/A')
self.macAddress= json['macAddress']
self.isGuestUser= json['isGuestUser']
self.Statistics= WirelessClientStatistics(json['Statistics'])
self.currentlyTracked= json['currentlyTracked']
self.ssId= json.get('ssId','N/A')
self.dot11Status= json['dot11Status']
self.band= json['band']
self.MapCoordinate= MapCoordinatePair(json['MapCoordinate'])
self.apMacAddress= json.get('apMacAddress','N/A')
self.confidenceFactor= json['confidenceFactor']
self.ipAddress= json.get('ipAddress','N/A')
self.MapInfo= MapInformation(json['MapInfo'])
|
gpl-3.0
|
vectorijk/servo
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/extensions.py
|
489
|
31780
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import common
from mod_pywebsocket import util
from mod_pywebsocket.http_header_util import quote_if_necessary
# The list of available server side extension processor classes.
_available_processors = {}
_compression_extension_names = []
class ExtensionProcessorInterface(object):
def __init__(self, request):
self._logger = util.get_class_logger(self)
self._request = request
self._active = True
def request(self):
return self._request
def name(self):
return None
def check_consistency_with_other_processors(self, processors):
pass
def set_active(self, active):
self._active = active
def is_active(self):
return self._active
def _get_extension_response_internal(self):
return None
def get_extension_response(self):
if not self._active:
self._logger.debug('Extension %s is deactivated', self.name())
return None
response = self._get_extension_response_internal()
if response is None:
self._active = False
return response
def _setup_stream_options_internal(self, stream_options):
pass
def setup_stream_options(self, stream_options):
if self._active:
self._setup_stream_options_internal(stream_options)
def _log_outgoing_compression_ratio(
logger, original_bytes, filtered_bytes, average_ratio):
# Print inf when ratio is not available.
ratio = float('inf')
if original_bytes != 0:
ratio = float(filtered_bytes) / original_bytes
logger.debug('Outgoing compression ratio: %f (average: %f)' %
(ratio, average_ratio))
def _log_incoming_compression_ratio(
logger, received_bytes, filtered_bytes, average_ratio):
# Print inf when ratio is not available.
ratio = float('inf')
if filtered_bytes != 0:
ratio = float(received_bytes) / filtered_bytes
logger.debug('Incoming compression ratio: %f (average: %f)' %
(ratio, average_ratio))
def _parse_window_bits(bits):
"""Return parsed integer value iff the given string conforms to the
grammar of the window bits extension parameters.
"""
if bits is None:
raise ValueError('Value is required')
# For non integer values such as "10.0", ValueError will be raised.
int_bits = int(bits)
# First condition is to drop leading zero case e.g. "08".
if bits != str(int_bits) or int_bits < 8 or int_bits > 15:
raise ValueError('Invalid value: %r' % bits)
return int_bits
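# A brief illustration (not part of the original module) of the accepted
# grammar:
#
#     _parse_window_bits('10')    # -> 10
#     _parse_window_bits('08')    # raises ValueError (leading zero)
#     _parse_window_bits('16')    # raises ValueError (outside 8..15)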
class _AverageRatioCalculator(object):
"""Stores total bytes of original and result data, and calculates average
result / original ratio.
"""
def __init__(self):
self._total_original_bytes = 0
self._total_result_bytes = 0
def add_original_bytes(self, value):
self._total_original_bytes += value
def add_result_bytes(self, value):
self._total_result_bytes += value
def get_average_ratio(self):
if self._total_original_bytes != 0:
return (float(self._total_result_bytes) /
self._total_original_bytes)
else:
return float('inf')
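# A brief illustration (not part of the original module):
#
#     calc = _AverageRatioCalculator()
#     calc.add_original_bytes(200)
#     calc.add_result_bytes(50)
#     calc.get_average_ratio()    # -> 0.25 (total result / total original)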
class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
"""deflate-frame extension processor.
Specification:
http://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate
"""
_WINDOW_BITS_PARAM = 'max_window_bits'
_NO_CONTEXT_TAKEOVER_PARAM = 'no_context_takeover'
def __init__(self, request):
ExtensionProcessorInterface.__init__(self, request)
self._logger = util.get_class_logger(self)
self._response_window_bits = None
self._response_no_context_takeover = False
self._bfinal = False
# Calculates
# (Total outgoing bytes supplied to this filter) /
# (Total bytes sent to the network after applying this filter)
self._outgoing_average_ratio_calculator = _AverageRatioCalculator()
# Calculates
# (Total bytes received from the network) /
# (Total incoming bytes obtained after applying this filter)
self._incoming_average_ratio_calculator = _AverageRatioCalculator()
def name(self):
return common.DEFLATE_FRAME_EXTENSION
def _get_extension_response_internal(self):
# Any unknown parameter will be just ignored.
window_bits = None
if self._request.has_parameter(self._WINDOW_BITS_PARAM):
window_bits = self._request.get_parameter_value(
self._WINDOW_BITS_PARAM)
try:
window_bits = _parse_window_bits(window_bits)
except ValueError, e:
return None
no_context_takeover = self._request.has_parameter(
self._NO_CONTEXT_TAKEOVER_PARAM)
if (no_context_takeover and
self._request.get_parameter_value(
self._NO_CONTEXT_TAKEOVER_PARAM) is not None):
return None
self._rfc1979_deflater = util._RFC1979Deflater(
window_bits, no_context_takeover)
self._rfc1979_inflater = util._RFC1979Inflater()
self._compress_outgoing = True
response = common.ExtensionParameter(self._request.name())
if self._response_window_bits is not None:
response.add_parameter(
self._WINDOW_BITS_PARAM, str(self._response_window_bits))
if self._response_no_context_takeover:
response.add_parameter(
self._NO_CONTEXT_TAKEOVER_PARAM, None)
self._logger.debug(
'Enable %s extension ('
'request: window_bits=%s; no_context_takeover=%r, '
'response: window_wbits=%s; no_context_takeover=%r)' %
(self._request.name(),
window_bits,
no_context_takeover,
self._response_window_bits,
self._response_no_context_takeover))
return response
def _setup_stream_options_internal(self, stream_options):
class _OutgoingFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, frame):
self._parent._outgoing_filter(frame)
class _IncomingFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, frame):
self._parent._incoming_filter(frame)
stream_options.outgoing_frame_filters.append(
_OutgoingFilter(self))
stream_options.incoming_frame_filters.insert(
0, _IncomingFilter(self))
def set_response_window_bits(self, value):
self._response_window_bits = value
def set_response_no_context_takeover(self, value):
self._response_no_context_takeover = value
def set_bfinal(self, value):
self._bfinal = value
def enable_outgoing_compression(self):
self._compress_outgoing = True
def disable_outgoing_compression(self):
self._compress_outgoing = False
def _outgoing_filter(self, frame):
"""Transform outgoing frames. This method is called only by
an _OutgoingFilter instance.
"""
original_payload_size = len(frame.payload)
self._outgoing_average_ratio_calculator.add_original_bytes(
original_payload_size)
if (not self._compress_outgoing or
common.is_control_opcode(frame.opcode)):
self._outgoing_average_ratio_calculator.add_result_bytes(
original_payload_size)
return
frame.payload = self._rfc1979_deflater.filter(
frame.payload, bfinal=self._bfinal)
frame.rsv1 = 1
filtered_payload_size = len(frame.payload)
self._outgoing_average_ratio_calculator.add_result_bytes(
filtered_payload_size)
_log_outgoing_compression_ratio(
self._logger,
original_payload_size,
filtered_payload_size,
self._outgoing_average_ratio_calculator.get_average_ratio())
def _incoming_filter(self, frame):
"""Transform incoming frames. This method is called only by
an _IncomingFilter instance.
"""
received_payload_size = len(frame.payload)
self._incoming_average_ratio_calculator.add_result_bytes(
received_payload_size)
if frame.rsv1 != 1 or common.is_control_opcode(frame.opcode):
self._incoming_average_ratio_calculator.add_original_bytes(
received_payload_size)
return
frame.payload = self._rfc1979_inflater.filter(frame.payload)
frame.rsv1 = 0
filtered_payload_size = len(frame.payload)
self._incoming_average_ratio_calculator.add_original_bytes(
filtered_payload_size)
_log_incoming_compression_ratio(
self._logger,
received_payload_size,
filtered_payload_size,
self._incoming_average_ratio_calculator.get_average_ratio())
_available_processors[common.DEFLATE_FRAME_EXTENSION] = (
DeflateFrameExtensionProcessor)
_compression_extension_names.append(common.DEFLATE_FRAME_EXTENSION)
_available_processors[common.X_WEBKIT_DEFLATE_FRAME_EXTENSION] = (
DeflateFrameExtensionProcessor)
_compression_extension_names.append(common.X_WEBKIT_DEFLATE_FRAME_EXTENSION)
def _parse_compression_method(data):
"""Parses the value of "method" extension parameter."""
return common.parse_extensions(data)
def _create_accepted_method_desc(method_name, method_params):
"""Creates accepted-method-desc from given method name and parameters"""
extension = common.ExtensionParameter(method_name)
for name, value in method_params:
extension.add_parameter(name, value)
return common.format_extension(extension)
class CompressionExtensionProcessorBase(ExtensionProcessorInterface):
"""Base class for perframe-compress and permessage-compress extension."""
_METHOD_PARAM = 'method'
def __init__(self, request):
ExtensionProcessorInterface.__init__(self, request)
self._logger = util.get_class_logger(self)
self._compression_method_name = None
self._compression_processor = None
self._compression_processor_hook = None
def name(self):
return ''
def _lookup_compression_processor(self, method_desc):
return None
def _get_compression_processor_response(self):
"""Looks up the compression processor based on the self._request and
returns the compression processor's response.
"""
method_list = self._request.get_parameter_value(self._METHOD_PARAM)
if method_list is None:
return None
methods = _parse_compression_method(method_list)
if methods is None:
return None
compression_processor = None
# The current implementation tries only the first method that matches
# supported algorithm. Following methods aren't tried even if the
# first one is rejected.
# TODO(bashi): Need to clarify this behavior.
for method_desc in methods:
compression_processor = self._lookup_compression_processor(
method_desc)
if compression_processor is not None:
self._compression_method_name = method_desc.name()
break
if compression_processor is None:
return None
if self._compression_processor_hook:
self._compression_processor_hook(compression_processor)
processor_response = compression_processor.get_extension_response()
if processor_response is None:
return None
self._compression_processor = compression_processor
return processor_response
def _get_extension_response_internal(self):
processor_response = self._get_compression_processor_response()
if processor_response is None:
return None
response = common.ExtensionParameter(self._request.name())
accepted_method_desc = _create_accepted_method_desc(
self._compression_method_name,
processor_response.get_parameters())
response.add_parameter(self._METHOD_PARAM, accepted_method_desc)
self._logger.debug(
'Enable %s extension (method: %s)' %
(self._request.name(), self._compression_method_name))
return response
def _setup_stream_options_internal(self, stream_options):
if self._compression_processor is None:
return
self._compression_processor.setup_stream_options(stream_options)
def set_compression_processor_hook(self, hook):
self._compression_processor_hook = hook
def get_compression_processor(self):
return self._compression_processor
class PerMessageDeflateExtensionProcessor(ExtensionProcessorInterface):
"""permessage-deflate extension processor. It's also used for
permessage-compress extension when the deflate method is chosen.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression-08
"""
_SERVER_MAX_WINDOW_BITS_PARAM = 'server_max_window_bits'
_SERVER_NO_CONTEXT_TAKEOVER_PARAM = 'server_no_context_takeover'
_CLIENT_MAX_WINDOW_BITS_PARAM = 'client_max_window_bits'
_CLIENT_NO_CONTEXT_TAKEOVER_PARAM = 'client_no_context_takeover'
def __init__(self, request, draft08=True):
"""Construct PerMessageDeflateExtensionProcessor
Args:
draft08: Follow the constraints on the parameters that were not
specified for permessage-compress but are specified for
permessage-deflate as on
draft-ietf-hybi-permessage-compression-08.
"""
ExtensionProcessorInterface.__init__(self, request)
self._logger = util.get_class_logger(self)
self._preferred_client_max_window_bits = None
self._client_no_context_takeover = False
self._draft08 = draft08
def name(self):
return 'deflate'
def _get_extension_response_internal(self):
if self._draft08:
for name in self._request.get_parameter_names():
if name not in [self._SERVER_MAX_WINDOW_BITS_PARAM,
self._SERVER_NO_CONTEXT_TAKEOVER_PARAM,
self._CLIENT_MAX_WINDOW_BITS_PARAM]:
self._logger.debug('Unknown parameter: %r', name)
return None
else:
# Any unknown parameter will be just ignored.
pass
server_max_window_bits = None
if self._request.has_parameter(self._SERVER_MAX_WINDOW_BITS_PARAM):
server_max_window_bits = self._request.get_parameter_value(
self._SERVER_MAX_WINDOW_BITS_PARAM)
try:
server_max_window_bits = _parse_window_bits(
server_max_window_bits)
except ValueError, e:
self._logger.debug('Bad %s parameter: %r',
self._SERVER_MAX_WINDOW_BITS_PARAM,
e)
return None
server_no_context_takeover = self._request.has_parameter(
self._SERVER_NO_CONTEXT_TAKEOVER_PARAM)
if (server_no_context_takeover and
self._request.get_parameter_value(
self._SERVER_NO_CONTEXT_TAKEOVER_PARAM) is not None):
self._logger.debug('%s parameter must not have a value: %r',
self._SERVER_NO_CONTEXT_TAKEOVER_PARAM,
server_no_context_takeover)
return None
# client_max_window_bits from a client indicates whether the client can
# accept client_max_window_bits from a server or not.
client_client_max_window_bits = self._request.has_parameter(
self._CLIENT_MAX_WINDOW_BITS_PARAM)
if (self._draft08 and
client_client_max_window_bits and
self._request.get_parameter_value(
self._CLIENT_MAX_WINDOW_BITS_PARAM) is not None):
self._logger.debug('%s parameter must not have a value in a '
'client\'s opening handshake: %r',
self._CLIENT_MAX_WINDOW_BITS_PARAM,
client_client_max_window_bits)
return None
self._rfc1979_deflater = util._RFC1979Deflater(
server_max_window_bits, server_no_context_takeover)
# Note that we prepare for incoming messages compressed with window
# bits up to 15 regardless of the client_max_window_bits value to be
# sent to the client.
self._rfc1979_inflater = util._RFC1979Inflater()
self._framer = _PerMessageDeflateFramer(
server_max_window_bits, server_no_context_takeover)
self._framer.set_bfinal(False)
self._framer.set_compress_outgoing_enabled(True)
response = common.ExtensionParameter(self._request.name())
if server_max_window_bits is not None:
response.add_parameter(
self._SERVER_MAX_WINDOW_BITS_PARAM,
str(server_max_window_bits))
if server_no_context_takeover:
response.add_parameter(
self._SERVER_NO_CONTEXT_TAKEOVER_PARAM, None)
if self._preferred_client_max_window_bits is not None:
if self._draft08 and not client_client_max_window_bits:
self._logger.debug('Processor is configured to use %s but '
'the client cannot accept it',
self._CLIENT_MAX_WINDOW_BITS_PARAM)
return None
response.add_parameter(
self._CLIENT_MAX_WINDOW_BITS_PARAM,
str(self._preferred_client_max_window_bits))
if self._client_no_context_takeover:
response.add_parameter(
self._CLIENT_NO_CONTEXT_TAKEOVER_PARAM, None)
self._logger.debug(
'Enable %s extension ('
'request: server_max_window_bits=%s; '
'server_no_context_takeover=%r, '
'response: client_max_window_bits=%s; '
'client_no_context_takeover=%r)' %
(self._request.name(),
server_max_window_bits,
server_no_context_takeover,
self._preferred_client_max_window_bits,
self._client_no_context_takeover))
return response
def _setup_stream_options_internal(self, stream_options):
self._framer.setup_stream_options(stream_options)
def set_client_max_window_bits(self, value):
"""If this option is specified, this class adds the
client_max_window_bits extension parameter to the handshake response,
but doesn't reduce the LZ77 sliding window size of its inflater.
I.e., you can use this for testing client implementation but cannot
reduce memory usage of this class.
If this method has been called with True and an offer without the
client_max_window_bits extension parameter is received,
- (When processing the permessage-deflate extension) this processor
declines the request.
- (When processing the permessage-compress extension) this processor
accepts the request.
"""
self._preferred_client_max_window_bits = value
def set_client_no_context_takeover(self, value):
"""If this option is specified, this class adds the
client_no_context_takeover extension parameter to the handshake
response, but doesn't reset inflater for each message. I.e., you can
use this for testing client implementation but cannot reduce memory
usage of this class.
"""
self._client_no_context_takeover = value
def set_bfinal(self, value):
self._framer.set_bfinal(value)
def enable_outgoing_compression(self):
self._framer.set_compress_outgoing_enabled(True)
def disable_outgoing_compression(self):
self._framer.set_compress_outgoing_enabled(False)
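# Usage sketch (not part of the original module): how a server might tune the
# processor above before building the extension response. "extension_request"
# stands for an ExtensionParameter parsed from the client's offer (the name is
# illustrative); get_extension_processor() is defined at the bottom of this
# file.
#
#     processor = get_extension_processor(extension_request)
#     processor.set_client_max_window_bits(10)        # advertise a small window
#     processor.set_client_no_context_takeover(True)  # client resets per message
#     processor.set_bfinal(False)
#     processor.enable_outgoing_compression()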
class _PerMessageDeflateFramer(object):
"""A framer for extensions with per-message DEFLATE feature."""
def __init__(self, deflate_max_window_bits, deflate_no_context_takeover):
self._logger = util.get_class_logger(self)
self._rfc1979_deflater = util._RFC1979Deflater(
deflate_max_window_bits, deflate_no_context_takeover)
self._rfc1979_inflater = util._RFC1979Inflater()
self._bfinal = False
self._compress_outgoing_enabled = False
# True if a message is fragmented and compression is ongoing.
self._compress_ongoing = False
# Calculates
# (Total outgoing bytes supplied to this filter) /
# (Total bytes sent to the network after applying this filter)
self._outgoing_average_ratio_calculator = _AverageRatioCalculator()
# Calculates
# (Total bytes received from the network) /
# (Total incoming bytes obtained after applying this filter)
self._incoming_average_ratio_calculator = _AverageRatioCalculator()
def set_bfinal(self, value):
self._bfinal = value
def set_compress_outgoing_enabled(self, value):
self._compress_outgoing_enabled = value
def _process_incoming_message(self, message, decompress):
if not decompress:
return message
received_payload_size = len(message)
self._incoming_average_ratio_calculator.add_result_bytes(
received_payload_size)
message = self._rfc1979_inflater.filter(message)
filtered_payload_size = len(message)
self._incoming_average_ratio_calculator.add_original_bytes(
filtered_payload_size)
_log_incoming_compression_ratio(
self._logger,
received_payload_size,
filtered_payload_size,
self._incoming_average_ratio_calculator.get_average_ratio())
return message
def _process_outgoing_message(self, message, end, binary):
if not binary:
message = message.encode('utf-8')
if not self._compress_outgoing_enabled:
return message
original_payload_size = len(message)
self._outgoing_average_ratio_calculator.add_original_bytes(
original_payload_size)
message = self._rfc1979_deflater.filter(
message, end=end, bfinal=self._bfinal)
filtered_payload_size = len(message)
self._outgoing_average_ratio_calculator.add_result_bytes(
filtered_payload_size)
_log_outgoing_compression_ratio(
self._logger,
original_payload_size,
filtered_payload_size,
self._outgoing_average_ratio_calculator.get_average_ratio())
if not self._compress_ongoing:
self._outgoing_frame_filter.set_compression_bit()
self._compress_ongoing = not end
return message
def _process_incoming_frame(self, frame):
if frame.rsv1 == 1 and not common.is_control_opcode(frame.opcode):
self._incoming_message_filter.decompress_next_message()
frame.rsv1 = 0
def _process_outgoing_frame(self, frame, compression_bit):
if (not compression_bit or
common.is_control_opcode(frame.opcode)):
return
frame.rsv1 = 1
def setup_stream_options(self, stream_options):
"""Creates filters and sets them to the StreamOptions."""
class _OutgoingMessageFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, message, end=True, binary=False):
return self._parent._process_outgoing_message(
message, end, binary)
class _IncomingMessageFilter(object):
def __init__(self, parent):
self._parent = parent
self._decompress_next_message = False
def decompress_next_message(self):
self._decompress_next_message = True
def filter(self, message):
message = self._parent._process_incoming_message(
message, self._decompress_next_message)
self._decompress_next_message = False
return message
self._outgoing_message_filter = _OutgoingMessageFilter(self)
self._incoming_message_filter = _IncomingMessageFilter(self)
stream_options.outgoing_message_filters.append(
self._outgoing_message_filter)
stream_options.incoming_message_filters.append(
self._incoming_message_filter)
class _OutgoingFrameFilter(object):
def __init__(self, parent):
self._parent = parent
self._set_compression_bit = False
def set_compression_bit(self):
self._set_compression_bit = True
def filter(self, frame):
self._parent._process_outgoing_frame(
frame, self._set_compression_bit)
self._set_compression_bit = False
class _IncomingFrameFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, frame):
self._parent._process_incoming_frame(frame)
self._outgoing_frame_filter = _OutgoingFrameFilter(self)
self._incoming_frame_filter = _IncomingFrameFilter(self)
stream_options.outgoing_frame_filters.append(
self._outgoing_frame_filter)
stream_options.incoming_frame_filters.append(
self._incoming_frame_filter)
stream_options.encode_text_message_to_utf8 = False
_available_processors[common.PERMESSAGE_DEFLATE_EXTENSION] = (
PerMessageDeflateExtensionProcessor)
# TODO(tyoshino): Reorganize class names.
_compression_extension_names.append('deflate')
class PerMessageCompressExtensionProcessor(
CompressionExtensionProcessorBase):
"""permessage-compress extension processor.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression
"""
_DEFLATE_METHOD = 'deflate'
def __init__(self, request):
CompressionExtensionProcessorBase.__init__(self, request)
def name(self):
return common.PERMESSAGE_COMPRESSION_EXTENSION
def _lookup_compression_processor(self, method_desc):
if method_desc.name() == self._DEFLATE_METHOD:
return PerMessageDeflateExtensionProcessor(method_desc, False)
return None
_available_processors[common.PERMESSAGE_COMPRESSION_EXTENSION] = (
PerMessageCompressExtensionProcessor)
_compression_extension_names.append(common.PERMESSAGE_COMPRESSION_EXTENSION)
class MuxExtensionProcessor(ExtensionProcessorInterface):
"""WebSocket multiplexing extension processor."""
_QUOTA_PARAM = 'quota'
def __init__(self, request):
ExtensionProcessorInterface.__init__(self, request)
self._quota = 0
self._extensions = []
def name(self):
return common.MUX_EXTENSION
def check_consistency_with_other_processors(self, processors):
before_mux = True
for processor in processors:
name = processor.name()
if name == self.name():
before_mux = False
continue
if not processor.is_active():
continue
if before_mux:
# Mux extension cannot be used after extensions
# that depend on frame boundary, extension data field, or any
# reserved bits which are attributed to each frame.
if (name == common.DEFLATE_FRAME_EXTENSION or
name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION):
self.set_active(False)
return
else:
# Mux extension should not be applied before any history-based
# compression extension.
if (name == common.DEFLATE_FRAME_EXTENSION or
name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION or
name == common.PERMESSAGE_COMPRESSION_EXTENSION or
name == common.X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION):
self.set_active(False)
return
def _get_extension_response_internal(self):
self._active = False
quota = self._request.get_parameter_value(self._QUOTA_PARAM)
if quota is not None:
try:
quota = int(quota)
except ValueError, e:
return None
if quota < 0 or quota >= 2 ** 32:
return None
self._quota = quota
self._active = True
return common.ExtensionParameter(common.MUX_EXTENSION)
def _setup_stream_options_internal(self, stream_options):
pass
def set_quota(self, quota):
self._quota = quota
def quota(self):
return self._quota
def set_extensions(self, extensions):
self._extensions = extensions
def extensions(self):
return self._extensions
_available_processors[common.MUX_EXTENSION] = MuxExtensionProcessor
def get_extension_processor(extension_request):
"""Given an ExtensionParameter representing an extension offer received
from a client, configures and returns an instance of the corresponding
extension processor class.
"""
processor_class = _available_processors.get(extension_request.name())
if processor_class is None:
return None
return processor_class(extension_request)
def is_compression_extension(extension_name):
return extension_name in _compression_extension_names
# vi:sts=4 sw=4 et
|
mpl-2.0
|
asm666/sympy
|
examples/advanced/pyglet_plotting.py
|
92
|
7199
|
#!/usr/bin/env python
"""
Plotting Examples
Suggested Usage: python -i pyglet_plotting.py
"""
from sympy import symbols
from sympy.plotting.pygletplot import PygletPlot
from sympy import sin, cos, pi, sqrt, exp
from sympy.core.compatibility import range
from time import sleep, clock
def main():
x, y, z = symbols('x,y,z')
# toggle axes visibility with F5, colors with F6
axes_options = 'visible=false; colored=true; label_ticks=true; label_axes=true; overlay=true; stride=0.5'
# axes_options = 'colored=false; overlay=false; stride=(1.0, 0.5, 0.5)'
p = PygletPlot(
width=600,
height=500,
ortho=False,
invert_mouse_zoom=False,
axes=axes_options,
antialiasing=True)
examples = []
def example_wrapper(f):
examples.append(f)
return f
@example_wrapper
def mirrored_saddles():
p[5] = x**2 - y**2, [20], [20]
p[6] = y**2 - x**2, [20], [20]
@example_wrapper
def mirrored_saddles_saveimage():
p[5] = x**2 - y**2, [20], [20]
p[6] = y**2 - x**2, [20], [20]
p.wait_for_calculations()
# although the calculation is complete,
# we still need to wait for it to be
# rendered, so we'll sleep to be sure.
sleep(1)
p.saveimage("plot_example.png")
@example_wrapper
def mirrored_ellipsoids():
p[2] = x**2 + y**2, [40], [40], 'color=zfade'
p[3] = -x**2 - y**2, [40], [40], 'color=zfade'
@example_wrapper
def saddle_colored_by_derivative():
f = x**2 - y**2
p[1] = f, 'style=solid'
p[1].color = abs(f.diff(x)), abs(f.diff(x) + f.diff(y)), abs(f.diff(y))
@example_wrapper
def ding_dong_surface():
f = sqrt(1.0 - y)*y
p[1] = (f, [x, 0, 2*pi, 40], [y, -1, 4, 100],
'mode=cylindrical; style=solid; color=zfade4')
@example_wrapper
def polar_circle():
p[7] = 1, 'mode=polar'
@example_wrapper
def polar_flower():
p[8] = 1.5*sin(4*x), [160], 'mode=polar'
p[8].color = (z, x, y, (0.5, 0.5, 0.5),
(0.8, 0.8, 0.8), (x, y, None, z))  # z is used for t
@example_wrapper
def simple_cylinder():
p[9] = 1, 'mode=cylindrical'
@example_wrapper
def cylindrical_hyperbola():
# (note that polar is an alias for cylindrical)
p[10] = 1/y, 'mode=polar', [x], [y, -2, 2, 20]
@example_wrapper
def extruded_hyperbolas():
p[11] = 1/x, [x, -10, 10, 100], [1], 'style=solid'
p[12] = -1/x, [x, -10, 10, 100], [1], 'style=solid'
@example_wrapper
def torus():
a, b = 1, 0.5 # radius, thickness
p[13] = ((a + b*cos(x))*cos(y), (a + b*cos(x))*sin(y),
b*sin(x), [x, 0, pi*2, 40], [y, 0, pi*2, 40])
@example_wrapper
def warped_torus():
a, b = 2, 1 # radius, thickness
p[13] = ((a + b*cos(x))*cos(y), (a + b*cos(x))*sin(y),
b*sin(x) + 0.5*sin(4*y), [x, 0, pi*2, 40], [y, 0, pi*2, 40])
@example_wrapper
def parametric_spiral():
p[14] = cos(y), sin(y), y / 10.0, [y, -4*pi, 4*pi, 100]
p[14].color = x, (0.1, 0.9), y, (0.1, 0.9), z, (0.1, 0.9)
@example_wrapper
def multistep_gradient():
p[1] = 1, 'mode=spherical', 'style=both'
# p[1] = exp(-x**2-y**2+(x*y)/4), [-1.7,1.7,100], [-1.7,1.7,100], 'style=solid'
# p[1] = 5*x*y*exp(-x**2-y**2), [-2,2,100], [-2,2,100]
gradient = [0.0, (0.3, 0.3, 1.0),
0.30, (0.3, 1.0, 0.3),
0.55, (0.95, 1.0, 0.2),
0.65, (1.0, 0.95, 0.2),
0.85, (1.0, 0.7, 0.2),
1.0, (1.0, 0.3, 0.2)]
p[1].color = z, [None, None, z], gradient
# p[1].color = 'zfade'
# p[1].color = 'zfade3'
@example_wrapper
def lambda_vs_sympy_evaluation():
start = clock()
p[4] = x**2 + y**2, [100], [100], 'style=solid'
p.wait_for_calculations()
print("lambda-based calculation took %s seconds." % (clock() - start))
start = clock()
p[4] = x**2 + y**2, [100], [100], 'style=solid; use_sympy_eval'
p.wait_for_calculations()
print(
"sympy substitution-based calculation took %s seconds." %
(clock() - start))
@example_wrapper
def gradient_vectors():
def gradient_vectors_inner(f, i):
from sympy import lambdify
from sympy.plotting.plot_interval import PlotInterval
from pyglet.gl import glBegin, glColor3f
from pyglet.gl import glVertex3f, glEnd, GL_LINES
def draw_gradient_vectors(f, iu, iv):
"""
Create a function which draws vectors
representing the gradient of f.
"""
dx, dy, dz = f.diff(x), f.diff(y), 0
FF = lambdify([x, y], [x, y, f])
FG = lambdify([x, y], [dx, dy, dz])
iu.v_steps //= 5  # integer division keeps v_steps an int on Python 3
iv.v_steps //= 5
Gvl = list(list([FF(u, v), FG(u, v)]
for v in iv.frange())
for u in iu.frange())
def draw_arrow(p1, p2):
"""
Draw a single vector.
"""
glColor3f(0.4, 0.4, 0.9)
glVertex3f(*p1)
glColor3f(0.9, 0.4, 0.4)
glVertex3f(*p2)
def draw():
"""
Iterate through the calculated
vectors and draw them.
"""
glBegin(GL_LINES)
for u in Gvl:
for v in u:
point = [[v[0][0], v[0][1], v[0][2]],
[v[0][0] + v[1][0], v[0][1] + v[1][1], v[0][2] + v[1][2]]]
draw_arrow(point[0], point[1])
glEnd()
return draw
p[i] = f, [-0.5, 0.5, 25], [-0.5, 0.5, 25], 'style=solid'
iu = PlotInterval(p[i].intervals[0])
iv = PlotInterval(p[i].intervals[1])
p[i].postdraw.append(draw_gradient_vectors(f, iu, iv))
gradient_vectors_inner(x**2 + y**2, 1)
gradient_vectors_inner(-x**2 - y**2, 2)
def help_str():
s = ("\nPlot p has been created. Useful commands: \n"
" help(p), p[1] = x**2, print p, p.clear() \n\n"
"Available examples (see source in plotting.py):\n\n")
for i in range(len(examples)):
s += "(%i) %s\n" % (i, examples[i].__name__)
s += "\n"
s += "e.g. >>> example(2)\n"
s += " >>> ding_dong_surface()\n"
return s
def example(i):
if callable(i):
p.clear()
i()
elif i >= 0 and i < len(examples):
p.clear()
examples[i]()
else:
print("Not a valid example.\n")
print(p)
example(0) # 0 - 15 are defined above
print(help_str())
if __name__ == "__main__":
main()
|
bsd-3-clause
|
117111302/PyGithub
|
github/tests/Framework.py
|
25
|
11595
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import os
import sys
import unittest
import httplib
import traceback
import github
atLeastPython26 = sys.hexversion >= 0x02060000
atLeastPython3 = sys.hexversion >= 0x03000000
atMostPython32 = sys.hexversion < 0x03030000
if atLeastPython26:
import json
else: # pragma no cover (Covered by all tests with Python 2.5)
import simplejson as json # pragma no cover (Covered by all tests with Python 2.5)
def readLine(file):
if atLeastPython3:
return file.readline().decode("utf-8").strip()
else:
return file.readline().strip()
class FakeHttpResponse:
def __init__(self, status, headers, output):
self.status = status
self.__headers = headers
self.__output = output
def getheaders(self):
return self.__headers
def read(self):
return self.__output
def fixAuthorizationHeader(headers):
if "Authorization" in headers:
if headers["Authorization"].endswith("ZmFrZV9sb2dpbjpmYWtlX3Bhc3N3b3Jk"):
# This special case is here to test the real Authorization header
# sent by PyGithub. It would have avoided issue https://github.com/jacquev6/PyGithub/issues/153
# because we would have seen that Python 3 was not generating the same
# header as Python 2
pass
elif headers["Authorization"].startswith("token "):
headers["Authorization"] = "token private_token_removed"
elif headers["Authorization"].startswith("Basic "):
headers["Authorization"] = "Basic login_and_password_removed"
class RecordingConnection: # pragma no cover (Class useful only when recording new tests, not used during automated tests)
def __init__(self, file, protocol, host, port, *args, **kwds):
self.__file = file
self.__protocol = protocol
self.__host = host
self.__port = str(port)
self.__cnx = self._realConnection(host, port, *args, **kwds)
def request(self, verb, url, input, headers):
print verb, url, input, headers,
self.__cnx.request(verb, url, input, headers)
fixAuthorizationHeader(headers)
self.__writeLine(self.__protocol)
self.__writeLine(verb)
self.__writeLine(self.__host)
self.__writeLine(self.__port)
self.__writeLine(url)
self.__writeLine(str(headers))
self.__writeLine(input.replace('\n', '').replace('\r', ''))
def getresponse(self):
res = self.__cnx.getresponse()
status = res.status
print "=>", status
headers = res.getheaders()
output = res.read()
self.__writeLine(str(status))
self.__writeLine(str(headers))
self.__writeLine(str(output))
return FakeHttpResponse(status, headers, output)
def close(self):
self.__writeLine("")
return self.__cnx.close()
def __writeLine(self, line):
self.__file.write(line + "\n")
class RecordingHttpConnection(RecordingConnection): # pragma no cover (Class useful only when recording new tests, not used during automated tests)
_realConnection = httplib.HTTPConnection
def __init__(self, file, *args, **kwds):
RecordingConnection.__init__(self, file, "http", *args, **kwds)
class RecordingHttpsConnection(RecordingConnection): # pragma no cover (Class useful only when recording new tests, not used during automated tests)
_realConnection = httplib.HTTPSConnection
def __init__(self, file, *args, **kwds):
RecordingConnection.__init__(self, file, "https", *args, **kwds)
class ReplayingConnection:
def __init__(self, testCase, file, protocol, host, port, *args, **kwds):
self.__testCase = testCase
self.__file = file
self.__protocol = protocol
self.__host = host
self.__port = str(port)
def request(self, verb, url, input, headers):
fixAuthorizationHeader(headers)
self.__testCase.assertEqual(self.__protocol, readLine(self.__file))
self.__testCase.assertEqual(verb, readLine(self.__file))
self.__testCase.assertEqual(self.__host, readLine(self.__file))
self.__testCase.assertEqual(self.__port, readLine(self.__file))
self.__testCase.assertEqual(self.__splitUrl(url), self.__splitUrl(readLine(self.__file)))
self.__testCase.assertEqual(headers, eval(readLine(self.__file)))
expectedInput = readLine(self.__file)
if input.startswith("{"):
self.__testCase.assertEqual(json.loads(input.replace('\n', '').replace('\r', '')), json.loads(expectedInput))
elif atMostPython32: # @todo Test in all cases, including Python 3.3
# In Python 3.3, dicts are not output in the same order as in Python 2.5 -> 3.2.
# So, form-data encoding is not deterministic and is difficult to test.
self.__testCase.assertEqual(input.replace('\n', '').replace('\r', ''), expectedInput)
def __splitUrl(self, url):
splitedUrl = url.split("?")
if len(splitedUrl) == 1:
return splitedUrl
self.__testCase.assertEqual(len(splitedUrl), 2)
base, qs = splitedUrl
return (base, sorted(qs.split("&")))
def getresponse(self):
status = int(readLine(self.__file))
headers = eval(readLine(self.__file))
output = readLine(self.__file)
return FakeHttpResponse(status, headers, output)
def close(self):
readLine(self.__file)
def ReplayingHttpConnection(testCase, file, *args, **kwds):
return ReplayingConnection(testCase, file, "http", *args, **kwds)
def ReplayingHttpsConnection(testCase, file, *args, **kwds):
return ReplayingConnection(testCase, file, "https", *args, **kwds)
class BasicTestCase(unittest.TestCase):
recordMode = False
def setUp(self):
unittest.TestCase.setUp(self)
self.__fileName = ""
self.__file = None
if self.recordMode: # pragma no cover (Branch useful only when recording new tests, not used during automated tests)
github.Requester.Requester.injectConnectionClasses(
lambda ignored, *args, **kwds: RecordingHttpConnection(self.__openFile("wb"), *args, **kwds),
lambda ignored, *args, **kwds: RecordingHttpsConnection(self.__openFile("wb"), *args, **kwds)
)
import GithubCredentials
self.login = GithubCredentials.login
self.password = GithubCredentials.password
self.oauth_token = GithubCredentials.oauth_token
# @todo Remove client_id and client_secret from ReplayData (as we already remove login, password and oauth_token)
# self.client_id = GithubCredentials.client_id
# self.client_secret = GithubCredentials.client_secret
else:
github.Requester.Requester.injectConnectionClasses(
lambda ignored, *args, **kwds: ReplayingHttpConnection(self, self.__openFile("rb"), *args, **kwds),
lambda ignored, *args, **kwds: ReplayingHttpsConnection(self, self.__openFile("rb"), *args, **kwds)
)
self.login = "login"
self.password = "password"
self.oauth_token = "oauth_token"
self.client_id = "client_id"
self.client_secret = "client_secret"
def tearDown(self):
unittest.TestCase.tearDown(self)
self.__closeReplayFileIfNeeded()
github.Requester.Requester.resetConnectionClasses()
def __openFile(self, mode):
for (_, _, functionName, _) in traceback.extract_stack():
if functionName.startswith("test") or functionName == "setUp" or functionName == "tearDown":
if functionName != "test": # because in class Hook(Framework.TestCase), method testTest calls Hook.test
fileName = os.path.join(os.path.dirname(__file__), "ReplayData", self.__class__.__name__ + "." + functionName + ".txt")
if fileName != self.__fileName:
self.__closeReplayFileIfNeeded()
self.__fileName = fileName
self.__file = open(self.__fileName, mode)
return self.__file
def __closeReplayFileIfNeeded(self):
if self.__file is not None:
if not self.recordMode: # pragma no branch (Branch useful only when recording new tests, not used during automated tests)
self.assertEqual(readLine(self.__file), "")
self.__file.close()
def assertListKeyEqual(self, elements, key, expectedKeys):
realKeys = [key(element) for element in elements]
self.assertEqual(realKeys, expectedKeys)
def assertListKeyBegin(self, elements, key, expectedKeys):
realKeys = [key(element) for element in elements[: len(expectedKeys)]]
self.assertEqual(realKeys, expectedKeys)
class TestCase(BasicTestCase):
def doCheckFrame(self, obj, frame):
if obj._headers == {} and frame is None:
return
if obj._headers is None and frame == {}:
return
self.assertEqual(obj._headers, frame[2])
def getFrameChecker(self):
return lambda requester, obj, frame: self.doCheckFrame(obj, frame)
def setUp(self):
BasicTestCase.setUp(self)
# Set up frame debugging
github.GithubObject.GithubObject.setCheckAfterInitFlag(True)
github.Requester.Requester.setDebugFlag(True)
github.Requester.Requester.setOnCheckMe(self.getFrameChecker())
self.g = github.Github(self.login, self.password)
def activateRecordMode(): # pragma no cover (Function useful only when recording new tests, not used during automated tests)
BasicTestCase.recordMode = True
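# Usage sketch (hypothetical test module; the class and replay-file names are
# illustrative): tests derive from TestCase and replay canned HTTP exchanges
# read from ReplayData/<ClassName>.<testName>.txt.
#
#     class Repository(Framework.TestCase):
#         def testName(self):
#             repo = self.g.get_user().get_repo("PyGithub")
#             self.assertEqual(repo.name, "PyGithub")
#
# Calling Framework.activateRecordMode() before running (with a real
# GithubCredentials module on the path) records new replay files instead of
# replaying existing ones.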
|
gpl-3.0
|
pawelmhm/AutobahnPython
|
autobahn/autobahn/websocket/compress_snappy.py
|
18
|
15573
|
###############################################################################
##
## Copyright 2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ["PerMessageSnappyMixin",
"PerMessageSnappyOffer",
"PerMessageSnappyOfferAccept",
"PerMessageSnappyResponse",
"PerMessageSnappyResponseAccept",
"PerMessageSnappy"]
import snappy
from autobahn.websocket.compress_base import PerMessageCompressOffer, \
PerMessageCompressOfferAccept, \
PerMessageCompressResponse, \
PerMessageCompressResponseAccept, \
PerMessageCompress
class PerMessageSnappyMixin:
"""
Mixin class for this extension.
"""
EXTENSION_NAME = "permessage-snappy"
"""
Name of this WebSocket extension.
"""
class PerMessageSnappyOffer(PerMessageCompressOffer, PerMessageSnappyMixin):
"""
Set of extension parameters for `permessage-snappy` WebSocket extension
offered by a client to a server.
"""
@classmethod
def parse(Klass, params):
"""
Parses a WebSocket extension offer for `permessage-snappy` provided by a client to a server.
:param params: Output from :method:`autobahn.websocket.WebSocketProtocol._parseExtensionsHeader`.
:type params: list
:returns: object -- A new instance of :class:`autobahn.compress.PerMessageSnappyOffer`.
"""
## extension parameter defaults
##
acceptNoContextTakeover = False
requestNoContextTakeover = False
##
## verify/parse client ("client-to-server direction") parameters of permessage-snappy offer
##
for p in params:
if len(params[p]) > 1:
raise Exception("multiple occurence of extension parameter '%s' for extension '%s'" % (p, Klass.EXTENSION_NAME))
val = params[p][0]
if p == 'client_no_context_takeover':
if val != True:
raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, Klass.EXTENSION_NAME))
else:
acceptNoContextTakeover = True
elif p == 'server_no_context_takeover':
if val != True:
raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, Klass.EXTENSION_NAME))
else:
requestNoContextTakeover = True
else:
raise Exception("illegal extension parameter '%s' for extension '%s'" % (p, Klass.EXTENSION_NAME))
offer = Klass(acceptNoContextTakeover,
requestNoContextTakeover)
return offer
def __init__(self,
acceptNoContextTakeover = True,
requestNoContextTakeover = False):
"""
Constructor.
:param acceptNoContextTakeover: Iff true, client accepts "no context takeover" feature.
:type acceptNoContextTakeover: bool
:param requestNoContextTakeover: Iff true, client requests "no context takeover" feature.
:type requestNoContextTakeover: bool
"""
if type(acceptNoContextTakeover) != bool:
raise Exception("invalid type %s for acceptNoContextTakeover" % type(acceptNoContextTakeover))
self.acceptNoContextTakeover = acceptNoContextTakeover
if type(requestNoContextTakeover) != bool:
raise Exception("invalid type %s for requestNoContextTakeover" % type(requestNoContextTakeover))
self.requestNoContextTakeover = requestNoContextTakeover
def getExtensionString(self):
"""
Returns the WebSocket extension configuration string as sent to the server.
:returns: str -- PMCE configuration string.
"""
pmceString = self.EXTENSION_NAME
if self.acceptNoContextTakeover:
pmceString += "; client_no_context_takeover"
if self.requestNoContextTakeover:
pmceString += "; server_no_context_takeover"
return pmceString
def __json__(self):
"""
Returns a JSON serializable object representation.
:returns: object -- JSON serializable representation.
"""
return {'extension': self.EXTENSION_NAME,
'acceptNoContextTakeover': self.acceptNoContextTakeover,
'requestNoContextTakeover': self.requestNoContextTakeover}
def __repr__(self):
"""
Returns Python object representation that can be eval'ed to reconstruct the object.
:returns: str -- Python string representation.
"""
return "PerMessageSnappyOffer(acceptNoContextTakeover = %s, requestNoContextTakeover = %s)" % (self.acceptNoContextTakeover, self.requestNoContextTakeover)
class PerMessageSnappyOfferAccept(PerMessageCompressOfferAccept, PerMessageSnappyMixin):
"""
Set of parameters with which to accept a `permessage-snappy` offer
from a client by a server.
"""
def __init__(self,
offer,
requestNoContextTakeover = False,
noContextTakeover = None):
"""
Constructor.
:param offer: The offer being accepted.
:type offer: Instance of :class:`autobahn.compress.PerMessageSnappyOffer`.
:param requestNoContextTakeover: Iff true, server request "no context takeover" feature.
:type requestNoContextTakeover: bool
:param noContextTakeover: Override server ("server-to-client direction") context takeover (this must be compatible with offer).
:type noContextTakeover: bool
"""
if not isinstance(offer, PerMessageSnappyOffer):
raise Exception("invalid type %s for offer" % type(offer))
self.offer = offer
if type(requestNoContextTakeover) != bool:
raise Exception("invalid type %s for requestNoContextTakeover" % type(requestNoContextTakeover))
if requestNoContextTakeover and not offer.acceptNoContextTakeover:
raise Exception("invalid value %s for requestNoContextTakeover - feature unsupported by client" % requestNoContextTakeover)
self.requestNoContextTakeover = requestNoContextTakeover
if noContextTakeover is not None:
if type(noContextTakeover) != bool:
raise Exception("invalid type %s for noContextTakeover" % type(noContextTakeover))
if offer.requestNoContextTakeover and not noContextTakeover:
raise Exception("invalid value %s for noContextTakeover - client requested feature" % noContextTakeover)
self.noContextTakeover = noContextTakeover
def getExtensionString(self):
"""
Returns the WebSocket extension configuration string as sent to the client.
:returns: str -- PMCE configuration string.
"""
pmceString = self.EXTENSION_NAME
if self.offer.requestNoContextTakeover:
pmceString += "; server_no_context_takeover"
if self.requestNoContextTakeover:
pmceString += "; client_no_context_takeover"
return pmceString
def __json__(self):
"""
Returns a JSON serializable object representation.
:returns: object -- JSON serializable representation.
"""
return {'extension': self.EXTENSION_NAME,
'offer': self.offer.__json__(),
'requestNoContextTakeover': self.requestNoContextTakeover,
'noContextTakeover': self.noContextTakeover}
def __repr__(self):
"""
Returns Python object representation that can be eval'ed to reconstruct the object.
:returns: str -- Python string representation.
"""
return "PerMessageSnappyAccept(offer = %s, requestNoContextTakeover = %s, noContextTakeover = %s)" % (self.offer.__repr__(), self.requestNoContextTakeover, self.noContextTakeover)
class PerMessageSnappyResponse(PerMessageCompressResponse, PerMessageSnappyMixin):
"""
Set of parameters for `permessage-snappy` responded by server.
"""
@classmethod
def parse(Klass, params):
"""
Parses a WebSocket extension response for `permessage-snappy` provided by a server to a client.
:param params: Output from :method:`autobahn.websocket.WebSocketProtocol._parseExtensionsHeader`.
:type params: list
:returns: object -- A new instance of :class:`autobahn.compress.PerMessageSnappyResponse`.
"""
client_no_context_takeover = False
server_no_context_takeover = False
for p in params:
if len(params[p]) > 1:
raise Exception("multiple occurence of extension parameter '%s' for extension '%s'" % (p, Klass.EXTENSION_NAME))
val = params[p][0]
if p == 'client_no_context_takeover':
if val != True:
raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, Klass.EXTENSION_NAME))
else:
client_no_context_takeover = True
elif p == 'server_no_context_takeover':
if val != True:
raise Exception("illegal extension parameter value '%s' for parameter '%s' of extension '%s'" % (val, p, Klass.EXTENSION_NAME))
else:
server_no_context_takeover = True
else:
raise Exception("illegal extension parameter '%s' for extension '%s'" % (p, Klass.EXTENSION_NAME))
response = Klass(client_no_context_takeover,
server_no_context_takeover)
return response
def __init__(self,
client_no_context_takeover,
server_no_context_takeover):
self.client_no_context_takeover = client_no_context_takeover
self.server_no_context_takeover = server_no_context_takeover
def __json__(self):
"""
Returns a JSON serializable object representation.
:returns: object -- JSON serializable representation.
"""
return {'extension': self.EXTENSION_NAME,
'client_no_context_takeover': self.client_no_context_takeover,
'server_no_context_takeover': self.server_no_context_takeover}
def __repr__(self):
"""
Returns Python object representation that can be eval'ed to reconstruct the object.
:returns: str -- Python string representation.
"""
return "PerMessageSnappyResponse(client_no_context_takeover = %s, server_no_context_takeover = %s)" % (self.client_no_context_takeover, self.server_no_context_takeover)
class PerMessageSnappyResponseAccept(PerMessageCompressResponseAccept, PerMessageSnappyMixin):
"""
Set of parameters with which to accept a `permessage-snappy` response
from a server by a client.
"""
def __init__(self,
response,
noContextTakeover = None):
"""
Constructor.
:param response: The response being accepted.
:type response: Instance of :class:`autobahn.compress.PerMessageSnappyResponse`.
:param noContextTakeover: Override client ("client-to-server direction") context takeover (this must be compatible with response).
:type noContextTakeover: bool
"""
if not isinstance(response, PerMessageSnappyResponse):
raise Exception("invalid type %s for response" % type(response))
self.response = response
if noContextTakeover is not None:
if type(noContextTakeover) != bool:
raise Exception("invalid type %s for noContextTakeover" % type(noContextTakeover))
if response.client_no_context_takeover and not noContextTakeover:
raise Exception("invalid value %s for noContextTakeover - server requested feature" % noContextTakeover)
self.noContextTakeover = noContextTakeover
def __json__(self):
"""
Returns a JSON serializable object representation.
:returns: object -- JSON serializable representation.
"""
return {'extension': self.EXTENSION_NAME,
'response': self.response.__json__(),
'noContextTakeover': self.noContextTakeover}
def __repr__(self):
"""
Returns Python object representation that can be eval'ed to reconstruct the object.
:returns: str -- Python string representation.
"""
return "PerMessageSnappyResponseAccept(response = %s, noContextTakeover = %s)" % (self.response.__repr__(), self.noContextTakeover)
class PerMessageSnappy(PerMessageCompress, PerMessageSnappyMixin):
"""
`permessage-snappy` WebSocket extension processor.
"""
@classmethod
def createFromResponseAccept(Klass, isServer, accept):
pmce = Klass(isServer,
accept.response.server_no_context_takeover,
accept.noContextTakeover if accept.noContextTakeover is not None else accept.response.client_no_context_takeover)
return pmce
@classmethod
def createFromOfferAccept(Klass, isServer, accept):
pmce = Klass(isServer,
accept.noContextTakeover if accept.noContextTakeover is not None else accept.offer.requestNoContextTakeover,
accept.requestNoContextTakeover)
return pmce
def __init__(self,
isServer,
server_no_context_takeover,
client_no_context_takeover):
self._isServer = isServer
self.server_no_context_takeover = server_no_context_takeover
self.client_no_context_takeover = client_no_context_takeover
self._compressor = None
self._decompressor = None
def __json__(self):
return {'extension': self.EXTENSION_NAME,
'server_no_context_takeover': self.server_no_context_takeover,
'client_no_context_takeover': self.client_no_context_takeover}
def __repr__(self):
return "PerMessageSnappy(isServer = %s, server_no_context_takeover = %s, client_no_context_takeover = %s)" % (self._isServer, self.server_no_context_takeover, self.client_no_context_takeover)
def startCompressMessage(self):
if self._isServer:
if self._compressor is None or self.server_no_context_takeover:
self._compressor = snappy.StreamCompressor()
else:
if self._compressor is None or self.client_no_context_takeover:
self._compressor = snappy.StreamCompressor()
def compressMessageData(self, data):
return self._compressor.add_chunk(data)
def endCompressMessage(self):
return ""
def startDecompressMessage(self):
if self._isServer:
if self._decompressor is None or self.client_no_context_takeover:
self._decompressor = snappy.StreamDecompressor()
else:
if self._decompressor is None or self.server_no_context_takeover:
self._decompressor = snappy.StreamDecompressor()
def decompressMessageData(self, data):
return self._decompressor.decompress(data)
def endDecompressMessage(self):
pass
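# Negotiation sketch (client side; the handshake plumbing and the "params"
# variable are assumed, not part of this module): offer the extension, then
# turn the server's parsed response into a processor.
#
#     offer = PerMessageSnappyOffer(acceptNoContextTakeover=True)
#     # offer.getExtensionString() == "permessage-snappy; client_no_context_takeover"
#     response = PerMessageSnappyResponse.parse(params)  # params from the parser
#     accept = PerMessageSnappyResponseAccept(response)
#     pmce = PerMessageSnappy.createFromResponseAccept(isServer=False, accept=accept)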
|
apache-2.0
|
chromium/chromium
|
mojo/public/tools/bindings/concatenate-files.py
|
12
|
1556
|
#!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This utility concatenates several files into one. On Unix-like systems
# it is equivalent to:
# cat file1 file2 file3 ...files... > target
#
# The reason for writing a separate utility is that 'cat' is not available
# on all supported build platforms, but Python is, and hence this provides
# us with an easy and uniform way of doing this on all platforms.
# for py2/py3 compatibility
from __future__ import print_function
import optparse
def Concatenate(filenames):
"""Concatenate files.
Args:
files: Array of file names.
The last name is the target; all earlier ones are sources.
Returns:
True, if the operation was successful.
"""
if len(filenames) < 2:
print("An error occurred generating %s:\nNothing to do." % filenames[-1])
return False
try:
with open(filenames[-1], "wb") as target:
for filename in filenames[:-1]:
with open(filename, "rb") as current:
target.write(current.read())
return True
except IOError as e:
print("An error occurred when writing %s:\n%s" % (filenames[-1], e))
return False
def main():
parser = optparse.OptionParser()
parser.set_usage("""Concatenate several files into one.
Equivalent to: cat file1 ... > target.""")
(_options, args) = parser.parse_args()
exit(0 if Concatenate(args) else 1)
if __name__ == "__main__":
main()
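# Example invocation (file names are illustrative); everything before the last
# argument is read in order and written to the last one:
#
#     python concatenate-files.py one.js two.js three.js bundle.js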
|
bsd-3-clause
|
hosseinmh/Django_learning
|
djmod/.venv/lib/python3.5/site-packages/pip/_vendor/requests/packages/urllib3/poolmanager.py
|
359
|
13092
|
from __future__ import absolute_import
import collections
import functools
import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
from .request import RequestMethods
from .util.url import parse_url
from .util.retry import Retry
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version', 'ca_cert_dir')
# The base fields to use when determining what pool to get a connection from;
# these do not rely on the ``connection_pool_kw`` and can be determined by the
# URL and potentially the ``urllib3.connection.port_by_scheme`` dictionary.
#
# All custom key schemes should include the fields in this key at a minimum.
BasePoolKey = collections.namedtuple('BasePoolKey', ('scheme', 'host', 'port'))
# The fields to use when determining what pool to get a HTTP and HTTPS
# connection from. All additional fields must be present in the PoolManager's
# ``connection_pool_kw`` instance variable.
HTTPPoolKey = collections.namedtuple(
'HTTPPoolKey', BasePoolKey._fields + ('timeout', 'retries', 'strict',
'block', 'source_address')
)
HTTPSPoolKey = collections.namedtuple(
'HTTPSPoolKey', HTTPPoolKey._fields + SSL_KEYWORDS
)
def _default_key_normalizer(key_class, request_context):
"""
Create a pool key of type ``key_class`` for a request.
According to RFC 3986, both the scheme and host are case-insensitive.
Therefore, this function normalizes both before constructing the pool
key for an HTTPS request. If you wish to change this behaviour, provide
alternate callables to ``key_fn_by_scheme``.
:param key_class:
The class to use when constructing the key. This should be a namedtuple
with the ``scheme`` and ``host`` keys at a minimum.
:param request_context:
A dictionary-like object that contains the context for a request.
It should contain a key for each field in the :class:`HTTPPoolKey`.
"""
context = {}
for key in key_class._fields:
context[key] = request_context.get(key)
context['scheme'] = context['scheme'].lower()
context['host'] = context['host'].lower()
return key_class(**context)
# A dictionary that maps a scheme to a callable that creates a pool key.
# This can be used to alter the way pool keys are constructed, if desired.
# Each PoolManager makes a copy of this dictionary so they can be configured
# globally here, or individually on the instance.
key_fn_by_scheme = {
'http': functools.partial(_default_key_normalizer, HTTPPoolKey),
'https': functools.partial(_default_key_normalizer, HTTPSPoolKey),
}
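# Sketch of customizing pool keys per the comment above (not part of the
# original module): a manager that also partitions HTTP pools by ``maxsize``
# (a real HTTPConnectionPool argument, so it is safe to leave in
# ``connection_pool_kw``) could install its own normalizer like this.
#
#     MaxsizePoolKey = collections.namedtuple('MaxsizePoolKey',
#                                             HTTPPoolKey._fields + ('maxsize',))
#     manager = PoolManager(10, maxsize=4)
#     manager.key_fn_by_scheme['http'] = functools.partial(
#         _default_key_normalizer, MaxsizePoolKey)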
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
# Locally set the pool classes and keys so other PoolManagers can
# override them.
self.pool_classes_by_scheme = pool_classes_by_scheme
self.key_fn_by_scheme = key_fn_by_scheme.copy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.clear()
# Return False to re-raise any potential exceptions
return False
def _new_pool(self, scheme, host, port):
"""
Create a new :class:`ConnectionPool` based on host, port and scheme.
This method is used to actually create the connection pools handed out
by :meth:`connection_from_url` and companion methods. It is intended
to be overridden for customization.
"""
pool_cls = self.pool_classes_by_scheme[scheme]
kwargs = self.connection_pool_kw
if scheme == 'http':
kwargs = self.connection_pool_kw.copy()
for kw in SSL_KEYWORDS:
kwargs.pop(kw, None)
return pool_cls(host, port, **kwargs)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme='http'):
"""
Get a :class:`ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``.
"""
if not host:
raise LocationValueError("No host specified.")
request_context = self.connection_pool_kw.copy()
request_context['scheme'] = scheme or 'http'
if not port:
port = port_by_scheme.get(request_context['scheme'].lower(), 80)
request_context['port'] = port
request_context['host'] = host
return self.connection_from_context(request_context)
def connection_from_context(self, request_context):
"""
Get a :class:`ConnectionPool` based on the request context.
``request_context`` must at least contain the ``scheme`` key and its
value must be a key in ``key_fn_by_scheme`` instance variable.
"""
scheme = request_context['scheme'].lower()
pool_key_constructor = self.key_fn_by_scheme[scheme]
pool_key = pool_key_constructor(request_context)
return self.connection_from_pool_key(pool_key)
def connection_from_pool_key(self, pool_key):
"""
Get a :class:`ConnectionPool` based on the provided pool key.
``pool_key`` should be a namedtuple that only contains immutable
objects. At a minimum it must have the ``scheme``, ``host``, and
``port`` fields.
"""
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(pool_key.scheme, pool_key.host, pool_key.port)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url` but
doesn't pass any additional parameters to the
:class:`urllib3.connectionpool.ConnectionPool` constructor.
Additional parameters are taken from the :class:`.PoolManager`
constructor.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
if self.proxy is not None and u.scheme == "http":
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 7231, Section 6.4.4
if response.status == 303:
method = 'GET'
retries = kw.get('retries')
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
try:
retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
if retries.raise_on_redirect:
raise
return response
kw['retries'] = retries
kw['redirect'] = redirect
log.info("Redirecting %s -> %s", url, redirect_location)
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
A dictionary containing headers that will be sent to the proxy. In case
of HTTP they are being sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(self, proxy_url, num_pools=10, headers=None,
proxy_headers=None, **connection_pool_kw):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
proxy_url.port)
proxy = parse_url(proxy_url)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
if proxy.scheme not in ("http", "https"):
raise ProxySchemeUnknown(proxy.scheme)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
connection_pool_kw['_proxy'] = self.proxy
connection_pool_kw['_proxy_headers'] = self.proxy_headers
super(ProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme='http'):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme)
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {'Accept': '*/*'}
netloc = parse_url(url).netloc
if netloc:
headers_['Host'] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if u.scheme == "http":
# For proxied HTTPS requests, httplib sets the necessary headers
# on the CONNECT to the proxy. For HTTP, we'll definitely
# need to set 'Host' at the very least.
headers = kw.get('headers', self.headers)
kw['headers'] = self._set_proxy_headers(url, headers)
return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
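# Usage sketch (URLs are illustrative):
#
#     manager = PoolManager(num_pools=5, headers={'User-Agent': 'example/1'})
#     r = manager.request('GET', 'http://example.com/')   # pool created lazily
#     proxied = proxy_from_url('http://localhost:3128/')
#     r2 = proxied.request('GET', 'http://example.com/')  # sent via the proxy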
|
mit
|
kirca/odoo
|
openerp/report/pyPdf/filters.py
|
123
|
9847
|
# vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2006, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Implementation of stream filters for PDF.
"""
__author__ = "Mathieu Fenniak"
__author_email__ = "[email protected]"
from utils import PdfReadError
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import zlib
def decompress(data):
return zlib.decompress(data)
def compress(data):
return zlib.compress(data)
except ImportError:
# Unable to import zlib. Attempt to use the System.IO.Compression
# library from the .NET framework. (IronPython only)
import System
from System import IO, Collections, Array
def _string_to_bytearr(buf):
retval = Array.CreateInstance(System.Byte, len(buf))
for i in range(len(buf)):
retval[i] = ord(buf[i])
return retval
def _bytearr_to_string(bytes):
retval = ""
for i in range(bytes.Length):
retval += chr(bytes[i])
return retval
def _read_bytes(stream):
ms = IO.MemoryStream()
buf = Array.CreateInstance(System.Byte, 2048)
while True:
bytes = stream.Read(buf, 0, buf.Length)
if bytes == 0:
break
else:
ms.Write(buf, 0, bytes)
retval = ms.ToArray()
ms.Close()
return retval
def decompress(data):
bytes = _string_to_bytearr(data)
ms = IO.MemoryStream()
ms.Write(bytes, 0, bytes.Length)
ms.Position = 0 # fseek 0
gz = IO.Compression.DeflateStream(ms, IO.Compression.CompressionMode.Decompress)
bytes = _read_bytes(gz)
retval = _bytearr_to_string(bytes)
gz.Close()
return retval
def compress(data):
bytes = _string_to_bytearr(data)
ms = IO.MemoryStream()
gz = IO.Compression.DeflateStream(ms, IO.Compression.CompressionMode.Compress, True)
gz.Write(bytes, 0, bytes.Length)
gz.Close()
ms.Position = 0 # fseek 0
bytes = ms.ToArray()
retval = _bytearr_to_string(bytes)
ms.Close()
return retval
class FlateDecode(object):
def decode(data, decodeParms):
data = decompress(data)
predictor = 1
if decodeParms:
predictor = decodeParms.get("/Predictor", 1)
# predictor 1 == no predictor
if predictor != 1:
columns = decodeParms["/Columns"]
# PNG prediction:
if 10 <= predictor <= 15:
output = StringIO()
# PNG prediction can vary from row to row
rowlength = columns + 1
assert len(data) % rowlength == 0
prev_rowdata = (0,) * rowlength
for row in xrange(len(data) / rowlength):
rowdata = [ord(x) for x in data[(row*rowlength):((row+1)*rowlength)]]
filterByte = rowdata[0]
if filterByte == 0:
pass
elif filterByte == 1:
for i in range(2, rowlength):
rowdata[i] = (rowdata[i] + rowdata[i-1]) % 256
elif filterByte == 2:
for i in range(1, rowlength):
rowdata[i] = (rowdata[i] + prev_rowdata[i]) % 256
else:
# unsupported PNG filter
raise PdfReadError("Unsupported PNG filter %r" % filterByte)
prev_rowdata = rowdata
output.write(''.join([chr(x) for x in rowdata[1:]]))
data = output.getvalue()
else:
# unsupported predictor
raise PdfReadError("Unsupported flatedecode predictor %r" % predictor)
return data
decode = staticmethod(decode)
def encode(data):
return compress(data)
encode = staticmethod(encode)
class ASCIIHexDecode(object):
def decode(data, decodeParms=None):
retval = ""
char = ""
x = 0
while True:
c = data[x]
if c == ">":
break
elif c.isspace():
x += 1
continue
char += c
if len(char) == 2:
retval += chr(int(char, base=16))
char = ""
x += 1
assert char == ""
return retval
decode = staticmethod(decode)
class ASCII85Decode(object):
def decode(data, decodeParms=None):
retval = ""
group = []
x = 0
hitEod = False
# remove all whitespace from data
data = [y for y in data if not (y in ' \n\r\t')]
while not hitEod:
c = data[x]
if len(retval) == 0 and c == "<" and data[x+1] == "~":
x += 2
continue
#elif c.isspace():
# x += 1
# continue
elif c == 'z':
assert len(group) == 0
retval += '\x00\x00\x00\x00'
x += 1  # advance past 'z'; without this the loop would never terminate
continue
elif c == "~" and data[x+1] == ">":
if len(group) != 0:
# cannot have a final group of just 1 char
assert len(group) > 1
cnt = len(group) - 1
group += [ 85, 85, 85 ]
hitEod = cnt
else:
break
else:
c = ord(c) - 33
assert 0 <= c < 85
group += [ c ]
if len(group) >= 5:
b = group[0] * (85**4) + \
group[1] * (85**3) + \
group[2] * (85**2) + \
group[3] * 85 + \
group[4]
assert b <= (2**32 - 1)  # a group may encode the maximum 32-bit value
c4 = chr((b >> 0) % 256)
c3 = chr((b >> 8) % 256)
c2 = chr((b >> 16) % 256)
c1 = chr(b >> 24)
retval += (c1 + c2 + c3 + c4)
if hitEod:
retval = retval[:-4+hitEod]
group = []
x += 1
return retval
decode = staticmethod(decode)
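# Worked example of the base-85 arithmetic above (using the first group of the
# test string at the bottom of this file): "9jqo^" has ordinals
# (57, 106, 113, 111, 94); subtracting 33 gives (24, 73, 80, 78, 61), so
#     b = 24*85**4 + 73*85**3 + 80*85**2 + 78*85 + 61 = 1298230816
# whose big-endian bytes 0x4d 0x61 0x6e 0x20 spell "Man ".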
def decodeStreamData(stream):
from generic import NameObject
filters = stream.get("/Filter", ())
if len(filters) and not isinstance(filters[0], NameObject):
# we have a single filter instance
filters = (filters,)
data = stream._data
for filterType in filters:
if filterType == "/FlateDecode":
data = FlateDecode.decode(data, stream.get("/DecodeParms"))
elif filterType == "/ASCIIHexDecode":
data = ASCIIHexDecode.decode(data)
elif filterType == "/ASCII85Decode":
data = ASCII85Decode.decode(data)
elif filterType == "/Crypt":
decodeParams = stream.get("/DecodeParams", {})
if "/Name" not in decodeParams and "/Type" not in decodeParams:
pass
else:
raise NotImplementedError("/Crypt filter with /Name or /Type not supported yet")
else:
# unsupported filter
raise NotImplementedError("unsupported filter %s" % filterType)
return data
if __name__ == "__main__":
assert "abc" == ASCIIHexDecode.decode('61\n626\n3>')
ascii85Test = """
<~9jqo^BlbD-BleB1DJ+*+F(f,q/0JhKF<GL>Cj@.4Gp$d7F!,L7@<6@)/0JDEF<G%<+EV:2F!,
O<DJ+*.@<*K0@<6L(Df-\\0Ec5e;DffZ(EZee.Bl.9pF"AGXBPCsi+DGm>@3BB/F*&OCAfu2/AKY
i(DIb:@FD,*)+C]U=@3BN#EcYf8ATD3s@q?d$AftVqCh[NqF<G:8+EV:.+Cf>-FD5W8ARlolDIa
l(DId<j@<?3r@:F%a+D58'ATD4$Bl@l3De:,-DJs`8ARoFb/0JMK@qB4^F!,R<AKZ&-DfTqBG%G
>uD.RTpAKYo'+CT/5+Cei#DII?(E,9)oF*2M7/c~>
"""
ascii85_originalText="Man is distinguished, not only by his reason, but by this singular passion from other animals, which is a lust of the mind, that by a perseverance of delight in the continued and indefatigable generation of knowledge, exceeds the short vehemence of any carnal pleasure."
assert ASCII85Decode.decode(ascii85Test) == ascii85_originalText
|
agpl-3.0
|
jango2015/three.js
|
utils/converters/msgpack/msgpack/__init__.py
|
659
|
1385
|
# coding: utf-8
from msgpack._version import version
from msgpack.exceptions import *
from collections import namedtuple
class ExtType(namedtuple('ExtType', 'code data')):
"""ExtType represents ext type in msgpack."""
def __new__(cls, code, data):
if not isinstance(code, int):
raise TypeError("code must be int")
if not isinstance(data, bytes):
raise TypeError("data must be bytes")
if not 0 <= code <= 127:
raise ValueError("code must be 0~127")
return super(ExtType, cls).__new__(cls, code, data)
import os
if os.environ.get('MSGPACK_PUREPYTHON'):
from msgpack.fallback import Packer, unpack, unpackb, Unpacker
else:
try:
from msgpack._packer import Packer
from msgpack._unpacker import unpack, unpackb, Unpacker
except ImportError:
from msgpack.fallback import Packer, unpack, unpackb, Unpacker
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
def packb(o, **kwargs):
"""
Pack object `o` and return packed bytes
See :class:`Packer` for options.
"""
return Packer(**kwargs).pack(o)
# alias for compatibility to simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
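# A minimal round-trip sketch of the aliases defined above (illustrative only,
# not part of the upstream package). ExtType instances survive unchanged:
#
#   payload = packb([1, ExtType(42, b'\x00\x01')])
#   assert unpackb(payload) == [1, ExtType(42, b'\x00\x01')]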
|
mit
|
popazerty/zde-e2
|
lib/python/Components/ActionMap.py
|
45
|
2560
|
from enigma import eActionMap
class ActionMap:
def __init__(self, contexts = [ ], actions = { }, prio=0):
self.actions = actions
self.contexts = contexts
self.prio = prio
self.p = eActionMap.getInstance()
self.bound = False
self.exec_active = False
self.enabled = True
def setEnabled(self, enabled):
self.enabled = enabled
self.checkBind()
def doBind(self):
if not self.bound:
for ctx in self.contexts:
self.p.bindAction(ctx, self.prio, self.action)
self.bound = True
def doUnbind(self):
if self.bound:
for ctx in self.contexts:
self.p.unbindAction(ctx, self.action)
self.bound = False
def checkBind(self):
if self.exec_active and self.enabled:
self.doBind()
else:
self.doUnbind()
def execBegin(self):
self.exec_active = True
self.checkBind()
def execEnd(self):
self.exec_active = False
self.checkBind()
def action(self, context, action):
print " ".join(("action -> ", context, action))
if self.actions.has_key(action):
res = self.actions[action]()
if res is not None:
return res
return 1
else:
print "unknown action %s/%s! typo in keymap?" % (context, action)
return 0
def destroy(self):
pass
class NumberActionMap(ActionMap):
def action(self, contexts, action):
numbers = ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")
if (action in numbers and self.actions.has_key(action)):
res = self.actions[action](int(action))
if res is not None:
return res
return 1
else:
return ActionMap.action(self, contexts, action)
class HelpableActionMap(ActionMap):
"""An Actionmap which automatically puts the actions into the helpList.
Note that you can only use ONE context here!"""
# sorry for this complicated code.
# it's not more than converting a "documented" actionmap
# (where the values are possibly (function, help)-tuples)
# into a "classic" actionmap, where values are just functions.
	# the classic actionmap is then passed to the ActionMap constructor,
	# and the collected help strings (with the correct context and action)
	# are added to the screen's "helpList", which will be picked up by
	# the "HelpableScreen".
def __init__(self, parent, context, actions = { }, prio=0):
alist = [ ]
adict = { }
for (action, funchelp) in actions.iteritems():
# check if this is a tuple
if isinstance(funchelp, tuple):
alist.append((action, funchelp[1]))
adict[action] = funchelp[0]
else:
adict[action] = funchelp
ActionMap.__init__(self, [context], adict, prio)
parent.helpList.append((self, context, alist))
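# A sketch of typical HelpableActionMap wiring inside a Screen (the names
# below are hypothetical, and this only runs inside an Enigma2 session):
#
#     self["okActions"] = HelpableActionMap(self, "OkCancelActions",
#         {
#             "ok": (self.keyOk, "Confirm selection"),  # (function, help) tuple
#             "cancel": self.keyCancel,                 # plain callable, no help text
#         }, prio=0)
#
# The tuple form lands in parent.helpList; the plain callable is bound as-is.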
|
gpl-2.0
|
alivecor/tensorflow
|
tensorflow/contrib/data/python/framework/function.py
|
10
|
10870
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An experimental fork of the Python TensorFlow-function library.
NOTE: functions are currently experimental and subject to change!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import tf_inspect
# NOTE(mrry): This is an experimental extension of a core class that wasn't
# designed to be extended, so we disable protected access checks for the
# whole file.
# pylint: disable=protected-access
class _ExperimentalFuncGraph(function._FuncGraph):
"""A helper for construction a function (supporting capture-by-value).
_ExperimentalFuncGraph overrides ops.Graph's create_op() so that we can keep
track of every inputs into every op created inside the function. If
any input is from other graphs, we keep track of it in self.capture
and substitute the input with a place holder.
Each captured input's corresponding place holder is converted into a
function argument and the caller passes in the captured tensor.
"""
def __init__(self, capture_by_value, *args, **kwargs):
super(_ExperimentalFuncGraph, self).__init__(*args, **kwargs)
self._capture_by_value = capture_by_value
self._building_function = True
self._outer_graph = ops.get_default_graph()
self._vscope = vs.get_variable_scope()
self._old_custom_getter = self._vscope.custom_getter
self._captured = {}
self.extra_inputs = []
self.extra_args = []
self.extra_vars = []
def create_op(self, op_type, inputs, data_types, **kwargs):
for i, x in enumerate(inputs):
if x.graph is not self:
# Referring to a tensor from other graph.
if x in self._captured:
# Captured already.
inputs[i] = self._captured[x]
elif self._capture_by_value:
inputs[i] = self._add_tensor_and_parents(x)
else:
# Substitute with a placeholder.
self.extra_inputs.append(x)
ph = array_ops.placeholder(x.dtype, shape=x.get_shape())
# pylint: disable=protected-access
ph._handle_data = x._handle_data
# pylint: enable=protected-access
inputs[i] = ph
self._captured[x] = ph
self.extra_args.append(ph)
return super(_ExperimentalFuncGraph, self).create_op(op_type, inputs,
data_types, **kwargs)
def _add_tensor_and_parents(self, tensor):
op = self._add_op_and_parents(tensor.op)
return op.outputs[tensor.value_index]
def _add_op_and_parents(self, op):
op_def = graph_to_function_def._get_op_def(op)
if op_def.is_stateful:
raise ValueError("Cannot capture a stateful node (name:%s, type:%s) "
"by value." % (op.name, op.type))
elif op.type in ("Placeholder", "PlaceholderV2"):
raise ValueError("Cannot capture a placeholder (name:%s, type:%s) "
"by value." % (op.name, op.type))
captured_inputs = [self._add_tensor_and_parents(x) for x in op.inputs]
captured_op = self.create_op(op.type, captured_inputs,
[o.dtype for o in op.outputs],
name=op.name, attrs=op.node_def.attr,
op_def=op_def)
for t, captured_t in zip(op.outputs, captured_op.outputs):
self._captured[t] = captured_t
return captured_op
class _ExperimentalDefinedFunction(function._DefinedFunction):
"""Overrides _DefinedFunction with support for capture-by-value."""
def __init__(self,
func,
argnames,
input_types,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
shape_func=None,
capture_by_value=False,
**kwargs):
"""Creates an _ExperimentalDefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
      input_types: The function's argument types. Can be a tuple or list of
        tf data types.
      func_name: The function name. Defaults to None, in which case it is
        derived from 'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: An optional list of strings for the function return value
names.
shape_func: An optional function mapping an op to a list of static
output shapes.
capture_by_value: Boolean (defaults to False). If True, captured values
will be copied into the function body.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
super(_ExperimentalDefinedFunction, self).__init__(
func, argnames, input_types, func_name, grad_func, python_grad_func,
out_names, shape_func, **kwargs)
self._capture_by_value = capture_by_value
def _create_definition_if_needed(self):
"""Creates the function definition if it's not created yet."""
if self._definition is not None:
return
# Create the func_def object.
temp_graph = _ExperimentalFuncGraph(capture_by_value=self._capture_by_value)
with temp_graph.as_default():
# List of placeholders for the function_def.
inputs = []
for (argname, argtype) in self._args:
argholder = array_ops.placeholder(argtype, name=argname)
inputs.append(argholder)
# Call func and gather the output tensors.
with vs.variable_scope("", custom_getter=temp_graph.getvar):
outputs = self._func(*inputs)
# If func only returned one value, make it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
if any([_ is None for _ in outputs]):
raise ValueError("Function can not return None.")
# Ensures each output is a Tensor.
outputs = [ops.convert_to_tensor(_) for _ in outputs]
self._extra_inputs = temp_graph.extra_inputs
inputs.extend(temp_graph.extra_args)
self._sub_functions = temp_graph._functions
# Build the FunctionDef
self._definition = graph_to_function_def.graph_to_function_def(
temp_graph, temp_graph.get_operations(), inputs, outputs,
out_names=self._out_names)
# Extra kwargs are treated as attrs on the function def.
sig_pre_func_name = self._func_name or function._get_func_name(self._func)
kwargs_attr = function._parse_kwargs_as_attrs(
sig_pre_func_name, **self._extra_kwargs)
for k in kwargs_attr:
self._definition.attr[k].CopyFrom(kwargs_attr[k])
# Hash the definition and its dependencies.
self._hash_str = self._create_hash_str(
self._definition.signature.input_arg,
self._definition.signature.output_arg,
self._definition.node_def)
# Finally, we decide the function name to use. If not specified,
# make up something which is almost certainly unique (but deterministic).
if not self._func_name:
self._func_name = "_".join([function._get_func_name(self._func),
self._hash_str])
self._definition.signature.name = self._func_name
if self._func.__doc__:
self._definition.signature.description = self._func.__doc__
class Defun(function.Defun):
"""Experimental version of Defun supporting capture-by-value."""
def __init__(self, *input_types, **kwargs):
"""Create an experimental `Defun` decorator.
Args:
*input_types: A list of `tf.DType`
**kwargs: Optional keyword arguments (see `function.Defun`) plus:
capture_by_value - Boolean (defaults to False). If True, captured values
will be copied into the function body.
"""
super(Defun, self).__init__(*input_types, **kwargs)
def __call__(self, func):
# Various sanity checks on the callable func.
if not callable(func):
raise ValueError("func %s must be callable" % func)
# Func should not use kwargs and defaults.
argspec = tf_inspect.getargspec(func)
if argspec.keywords or argspec.defaults:
raise ValueError("Functions with argument defaults or keyword "
"arguments are not supported.")
# Computes how many arguments 'func' has.
min_args = len(argspec.args)
max_args = min_args
if argspec.varargs:
max_args = 1000000
argnames = argspec.args
if tf_inspect.ismethod(func):
# 1st argument is the "class" type.
min_args -= 1
argnames = argnames[1:]
if self._input_types:
# If Defun is given a list of types for the inputs, the number
# of input types should be compatible with 'func'.
num = len(self._input_types)
if num < min_args or num > max_args:
        raise ValueError(
            "The number of specified input types (%d) is incompatible with "
            "the number of arguments the function accepts." % num)
return _ExperimentalDefinedFunction(
func, argnames, self._input_types, self._func_name, self._grad_func,
self._python_grad_func, out_names=self._out_names,
**self._extra_kwargs)
# 'func' expects no arguments and input types is an empty list.
if min_args == 0 and max_args == 0:
return _ExperimentalDefinedFunction(
func, [], [], self._func_name, self._grad_func,
self._python_grad_func, out_names=self._out_names,
**self._extra_kwargs)
# Input types are unknown. It's an overloaded function and hence
# its definition needs to be deferred until it's called.
return function._OverloadedFunction(
func, argnames, self._func_name, self._grad_func,
self._python_grad_func, out_names=self._out_names, **self._extra_kwargs)
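# A sketch of capture-by-value (assumes the TF 1.x graph-mode API this module
# extends; illustrative only, not part of the module):
#
#   import tensorflow as tf
#   factor = tf.constant(2.0)
#
#   @Defun(tf.float32, capture_by_value=True)
#   def scale(x):
#     return x * factor   # `factor` is copied into the function body
#
# With the default capture_by_value=False, `factor` would instead become an
# extra placeholder argument fed from the outer graph (see extra_inputs above).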
|
apache-2.0
|
sanandrea/ofnic_R3.3
|
src/nox/coreapps/coretests/test_packet.py
|
10
|
2714
|
from nox.lib.core import *
from nox.coreapps.testharness.testdefs import *
from nox.lib.packet.t.dhcp_parse_test import test_fullDHCPPacket
from nox.lib.packet.t.dns_parse_test import test_dns_1
from nox.lib.packet.t.dns_parse_test import test_dns_2
from nox.lib.packet.t.eap_parse_test import testEAPOL
from nox.lib.packet.t.ethernet_parse_test import testEthernet
from nox.lib.packet.t.ethernet_parse_test import testEthernetConstruct
from nox.lib.packet.t.icmp_parse_test import testICMPEcho
from nox.lib.packet.t.icmp_parse_test import testICMPEchoConstruct
from nox.lib.packet.t.icmp_parse_test import testICMPUnreach
from nox.lib.packet.t.icmp_parse_test import testICMPUnreachConstruct
from nox.lib.packet.t.ipv4_parse_test import testIPv4Edge
from nox.lib.packet.t.ipv4_parse_test import testIPv4Construct
from nox.lib.packet.t.ipv4_parse_test import testIPv4
from nox.lib.packet.t.lldp_parse_test import testLLDP
from nox.lib.packet.t.lldp_parse_test import testLLDPConstruct
from nox.lib.packet.t.lldp_parse_test import testLLDPConstruct2
from nox.lib.packet.t.tcp_parse_test import testTCP
from nox.lib.packet.t.tcp_parse_test import fullTCPPacket
from nox.lib.packet.t.udp_parse_test import fullUDPPacket
from nox.lib.packet.t.vlan_parse_test import testVlan
from nox.lib.packet.t.vlan_parse_test import testVlanAdd
from nox.lib.packet.t.vlan_parse_test import testVlanARP
from nox.lib.packet.t.vlan_parse_test import testVlanRemove
import logging
import os, sys
lg = logging.getLogger('test_packet')
class test_packet(Component):
def __init__(self, ctxt):
Component.__init__(self, ctxt)
def configure(self, configuration):
pass
def getInterface(self):
return str(test_packet)
def bootstrap_complete_callback(self, *args):
test_fullDHCPPacket()
test_dns_1()
test_dns_2()
        testEAPOL()
testEthernet()
testEthernetConstruct()
testICMPUnreachConstruct()
testICMPUnreach()
testICMPEchoConstruct()
testICMPEcho()
testIPv4()
testIPv4Construct()
testIPv4Edge()
testLLDP()
testLLDPConstruct()
testLLDPConstruct2()
testTCP()
fullTCPPacket()
fullUDPPacket()
testVlan()
testVlanARP()
testVlanAdd()
testVlanRemove()
sys.stdout.flush() # XXX handle in component::exit
os._exit(0) # XXX push to component
def install(self):
self.register_for_bootstrap_complete(self.bootstrap_complete_callback)
def getFactory():
class Factory:
def instance(self, ctxt):
return test_packet(ctxt)
return Factory()
|
gpl-3.0
|
gtara/or-tools
|
examples/python/magic_square_and_cards.py
|
32
|
2950
|
# Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Magic squares and cards problem in Google CP Solver.
Martin Gardner (July 1971)
'''
Allowing duplicates values, what is the largest constant sum for an order-3
magic square that can be formed with nine cards from the deck.
'''
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import string
import sys
from ortools.constraint_solver import pywrapcp
def main(n=3):
# Create the solver.
  solver = pywrapcp.Solver("magic square and cards")
#
# data
#
# n = 3
#
# declare variables
#
x = {}
for i in range(n):
for j in range(n):
x[(i, j)] = solver.IntVar(1, 13, "x(%i,%i)" % (i, j))
x_flat = [x[(i, j)] for i in range(n) for j in range(n)]
s = solver.IntVar(1, 13 * 4, "s")
counts = [solver.IntVar(0, 4, "counts(%i)" % i) for i in range(14)]
#
# constraints
#
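  # Distribute(x_flat, range(14), counts) is a global cardinality constraint:
  # counts[v] equals the number of cells in x_flat taking value v, so the
  # 0..4 domain of counts enforces "at most four cards of each rank".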
solver.Add(solver.Distribute(x_flat, range(14), counts))
# the standard magic square constraints (sans all_different)
[solver.Add(solver.Sum([x[(i, j)] for j in range(n)]) == s) for i in range(n)]
[solver.Add(solver.Sum([x[(i, j)] for i in range(n)]) == s) for j in range(n)]
solver.Add(solver.Sum([x[(i, i)] for i in range(n)]) == s) # diag 1
solver.Add(solver.Sum([x[(i, n - i - 1)] for i in range(n)]) == s) # diag 2
# redundant constraint
solver.Add(solver.Sum(counts) == n * n)
# objective
objective = solver.Maximize(s, 1)
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x_flat)
solution.Add(s)
solution.Add(counts)
# db: DecisionBuilder
db = solver.Phase(x_flat,
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MAX_VALUE)
solver.NewSearch(db, [objective])
num_solutions = 0
while solver.NextSolution():
print "s:", s.Value()
print "counts:", [counts[i].Value() for i in range(14)]
for i in range(n):
for j in range(n):
print x[(i, j)].Value(),
print
print
num_solutions += 1
solver.EndSearch()
print
print "num_solutions:", num_solutions
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
n = 3
if __name__ == "__main__":
if len(sys.argv) > 1:
n = string.atoi(sys.argv[1])
main(n)
|
apache-2.0
|
undoware/neutron-drive
|
google_appengine/google/appengine/ext/ndb/prospective_search.py
|
20
|
6294
|
"""Prospective Search for NDB.
This reimplements all of the standard APIs with the following changes:
- A document_class argument must be an NDB Model class.
- A document must be an NDB Model instance.
- get_document() always returns an NDB Model instance.
The exceptions and public constants exported by the standard module
are re-exported here.
"""
# TODO: Ideally prospective search would support NDB natively, or
# support protobufs natively (in addition to ENTITY and MODEL).
# TODO: Should we try to support async calls as well? That can't be
# done without rewriting the standard prospective_search API module.
import base64
from .google_imports import datastore
from .google_imports import datastore_types
from .google_imports import prospective_search
from .google_imports import prospective_search_pb
from .google_imports import entity_pb
from . import model
from . import tasklets
# Re-export constants and exceptions from prospective_search.
DEFAULT_RESULT_BATCH_SIZE = prospective_search.DEFAULT_RESULT_BATCH_SIZE
DEFAULT_LEASE_DURATION_SEC = prospective_search.DEFAULT_LEASE_DURATION_SEC
DEFAULT_LIST_SUBSCRIPTIONS_MAX_RESULTS = \
prospective_search.DEFAULT_LIST_SUBSCRIPTIONS_MAX_RESULTS
DEFAULT_LIST_TOPICS_MAX_RESULTS = \
prospective_search.DEFAULT_LIST_TOPICS_MAX_RESULTS
Error = prospective_search.Error
DocumentTypeError = prospective_search.DocumentTypeError
QuerySyntaxError = prospective_search.QuerySyntaxError
SchemaError = prospective_search.SchemaError
SubscriptionDoesNotExist = prospective_search.SubscriptionDoesNotExist
TopicNotSpecified = prospective_search.TopicNotSpecified
SubscriptionState = prospective_search.SubscriptionState
subscription_state_name = prospective_search.subscription_state_name
__all__ = ['get_document',
'get_subscription',
'list_subscriptions',
'list_topics',
'match',
'unsubscribe',
'subscribe',
'subscription_state_name',
'DEFAULT_RESULT_BATCH_SIZE',
'DEFAULT_LEASE_DURATION_SEC',
'DEFAULT_LIST_SUBSCRIPTIONS_MAX_RESULTS',
'DEFAULT_LIST_TOPICS_MAX_RESULTS',
'DocumentTypeError',
'Error',
'QuerySyntaxError',
'SchemaError',
'SubscriptionDoesNotExist',
'SubscriptionState',
'TopicNotSpecified']
_doc_class = prospective_search_pb.MatchRequest # For testing get_document().
_MODEL_TYPE_TO_PYTHON_TYPE = {
model.StringProperty: str,
model.IntegerProperty: int,
model.BooleanProperty: bool,
model.FloatProperty: float,
model.TextProperty: str,
}
def _add_schema_entry(prop_class, name, schema):
"""Add single entry to SchemaEntries by invoking add_entry."""
python_type = _MODEL_TYPE_TO_PYTHON_TYPE.get(prop_class, None)
if not python_type:
return
if python_type not in schema:
schema[python_type] = [name]
else:
schema[python_type].append(name)
def _model_to_entity_schema(document_class):
"""Produce schema from NDB Model class."""
schema = {}
for name, prop in document_class._properties.iteritems():
_add_schema_entry(prop.__class__, name, schema)
return schema
def _get_document_topic(document_class, topic):
assert issubclass(document_class, model.Model)
if topic:
return topic
return document_class._get_kind()
def subscribe(document_class,
query,
sub_id,
schema=None,
topic=None,
lease_duration_sec=DEFAULT_LEASE_DURATION_SEC):
"""Subscribe a query."""
assert schema is None
topic = _get_document_topic(document_class, topic)
schema = _model_to_entity_schema(document_class)
return prospective_search.subscribe(
datastore.Entity,
query,
sub_id,
schema=schema,
topic=topic,
lease_duration_sec=lease_duration_sec)
def unsubscribe(document_class, sub_id, topic=None):
topic = _get_document_topic(document_class, topic)
prospective_search.unsubscribe(datastore.Entity, sub_id, topic=topic)
def get_subscription(document_class, sub_id, topic=None):
"""Get subscription information."""
topic = _get_document_topic(document_class, topic)
return prospective_search.get_subscription(datastore.Entity, sub_id,
topic=topic)
def list_subscriptions(document_class,
sub_id_start='',
topic=None,
max_results=DEFAULT_LIST_SUBSCRIPTIONS_MAX_RESULTS,
expires_before=None):
"""List subscriptions on a topic."""
topic = _get_document_topic(document_class, topic)
return prospective_search.list_subscriptions(
datastore.Entity,
sub_id_start=sub_id_start,
topic=topic,
max_results=max_results,
expires_before=expires_before)
list_topics = prospective_search.list_topics
def match(document,
topic=None,
result_key=None,
result_relative_url='/_ah/prospective_search',
result_task_queue='default',
result_batch_size=DEFAULT_RESULT_BATCH_SIZE,
result_return_document=True):
"""Match document with all subscribed queries on specified topic."""
# Convert document to datastore.Entity.
topic = _get_document_topic(document.__class__, topic)
pb = document._to_pb()
entity = datastore.Entity('temp-kind').FromPb(pb)
return prospective_search.match(
entity,
topic=topic,
result_key=result_key,
result_relative_url=result_relative_url,
result_task_queue=result_task_queue,
result_batch_size=result_batch_size,
result_return_document=result_return_document)
def get_document(request):
"""Decodes document from prospective_search result POST request.
Args:
request: received POST request
Returns:
document: original NDB Model document from match call.
Raises:
DocumentTypeError: if document class is not recognized.
"""
doc_class = request.get('python_document_class')
if not doc_class:
return None
entity = entity_pb.EntityProto()
entity.ParseFromString(base64.urlsafe_b64decode(
request.get('document').encode('utf-8')))
doc_class = int(doc_class)
ctx = tasklets.get_context()
adapter = ctx._conn.adapter
return adapter.pb_to_entity(entity)
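# A sketch of the intended call pattern (the model below is hypothetical,
# for illustration only):
#
#   class Article(model.Model):
#     title = model.StringProperty()
#     words = model.IntegerProperty()
#
#   subscribe(Article, 'words > 1000', 'long-articles')
#   match(Article(title='NDB', words=2000))  # notifies matching subscriptions
#   # ...later, inside the result POST handler:
#   doc = get_document(request)              # back to an NDB entity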
|
bsd-3-clause
|
rmboggs/django
|
django/utils/tree.py
|
372
|
4883
|
"""
A class for storing a tree graph. Primarily used for filter constructs in the
ORM.
"""
import copy
class Node(object):
"""
A single internal node in the tree graph. A Node should be viewed as a
connection (the root) with the children being either leaf nodes or other
Node instances.
"""
# Standard connector type. Clients usually won't use this at all and
# subclasses will usually override the value.
default = 'DEFAULT'
def __init__(self, children=None, connector=None, negated=False):
"""
Constructs a new Node. If no connector is given, the default will be
used.
"""
self.children = children[:] if children else []
self.connector = connector or self.default
self.negated = negated
    # We need this because of django.db.models.query_utils.Q. Q.__init__() is
    # problematic, but it is a natural Node subclass in all other respects.
@classmethod
def _new_instance(cls, children=None, connector=None, negated=False):
"""
This is called to create a new instance of this class when we need new
Nodes (or subclasses) in the internal code in this class. Normally, it
just shadows __init__(). However, subclasses with an __init__ signature
that is not an extension of Node.__init__ might need to implement this
method to allow a Node to create a new instance of them (if they have
any extra setting up to do).
"""
obj = Node(children, connector, negated)
obj.__class__ = cls
return obj
def __str__(self):
if self.negated:
return '(NOT (%s: %s))' % (self.connector, ', '.join(str(c) for c
in self.children))
return '(%s: %s)' % (self.connector, ', '.join(str(c) for c in
self.children))
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def __deepcopy__(self, memodict):
"""
Utility method used by copy.deepcopy().
"""
obj = Node(connector=self.connector, negated=self.negated)
obj.__class__ = self.__class__
obj.children = copy.deepcopy(self.children, memodict)
return obj
def __len__(self):
"""
        The size of a node is the number of children it has.
"""
return len(self.children)
def __bool__(self):
"""
For truth value testing.
"""
return bool(self.children)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __contains__(self, other):
"""
        Returns True if 'other' is a direct child of this instance.
"""
return other in self.children
def add(self, data, conn_type, squash=True):
"""
Combines this tree and the data represented by data using the
connector conn_type. The combine is done by squashing the node other
away if possible.
This tree (self) will never be pushed to a child node of the
combined tree, nor will the connector or negated properties change.
        The function returns a node which can be used in place of data
        regardless of whether the other node was squashed or not.
If `squash` is False the data is prepared and added as a child to
this tree without further logic.
"""
if data in self.children:
return data
if not squash:
self.children.append(data)
return data
if self.connector == conn_type:
# We can reuse self.children to append or squash the node other.
if (isinstance(data, Node) and not data.negated
and (data.connector == conn_type or len(data) == 1)):
# We can squash the other node's children directly into this
# node. We are just doing (AB)(CD) == (ABCD) here, with the
# addition that if the length of the other node is 1 the
# connector doesn't matter. However, for the len(self) == 1
# case we don't want to do the squashing, as it would alter
# self.connector.
self.children.extend(data.children)
return self
else:
# We could use perhaps additional logic here to see if some
# children could be used for pushdown here.
self.children.append(data)
return data
else:
obj = self._new_instance(self.children, self.connector,
self.negated)
self.connector = conn_type
self.children = [obj, data]
return data
def negate(self):
"""
Negate the sense of the root connector.
"""
self.negated = not self.negated
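if __name__ == '__main__':
    # A small demonstration of the squashing behaviour documented in add()
    # (illustrative only, not part of Django itself).
    root = Node(['a', 'b'])                   # (DEFAULT: a, b)
    root.add(Node(['c', 'd']), Node.default)  # same connector: children squashed
    assert root.children == ['a', 'b', 'c', 'd']
    root.add('e', 'OTHER')                    # new connector: old tree pushed down
    assert root.connector == 'OTHER' and len(root) == 2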
|
bsd-3-clause
|
jaraco/aspen
|
aspen/http/__init__.py
|
1
|
1364
|
"""
aspen.http
++++++++++
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
status_strings = {
100 : "Continue",
101 : "Switching Protocols",
200 : "OK",
201 : "Created",
202 : "Accepted",
203 : "Non-Authoritative Information",
204 : "No Content",
205 : "Reset Content",
206 : "Partial Content",
300 : "Multiple Choices",
301 : "Moved Permanently",
302 : "Found",
303 : "See Other",
304 : "Not Modified",
305 : "Use Proxy",
307 : "Temporary Redirect",
400 : "Bad Request",
401 : "Unauthorized",
402 : "Payment Required",
403 : "Forbidden",
404 : "Not Found",
405 : "Method Not Allowed",
406 : "Not Acceptable",
407 : "Proxy Authentication Required",
408 : "Request Time-out",
409 : "Conflict",
410 : "Gone",
411 : "Length Required",
412 : "Precondition Failed",
413 : "Request Entity Too Large",
414 : "Request-URI Too Large",
415 : "Unsupported Media Type",
416 : "Requested range not satisfiable",
417 : "Expectation Failed",
500 : "Internal Server Error",
501 : "Not Implemented",
502 : "Bad Gateway",
503 : "Service Unavailable",
504 : "Gateway Time-out",
505 : "HTTP Version not supported",
}
|
mit
|
doot/CouchPotatoServer
|
couchpotato/core/media/__init__.py
|
11
|
3572
|
import os
import traceback
from couchpotato import CPLog, md5
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getExt
from couchpotato.core.plugins.base import Plugin
import six
log = CPLog(__name__)
class MediaBase(Plugin):
_type = None
def initType(self):
addEvent('media.types', self.getType)
def getType(self):
return self._type
def createOnComplete(self, media_id):
def onComplete():
try:
media = fireEvent('media.get', media_id, single = True)
if media:
event_name = '%s.searcher.single' % media.get('type')
fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id), manual = True)
except:
log.error('Failed creating onComplete: %s', traceback.format_exc())
return onComplete
def createNotifyFront(self, media_id):
def notifyFront():
try:
media = fireEvent('media.get', media_id, single = True)
if media:
event_name = '%s.update' % media.get('type')
fireEvent('notify.frontend', type = event_name, data = media)
except:
                log.error('Failed creating notifyFront: %s', traceback.format_exc())
return notifyFront
    def getDefaultTitle(self, info):
# Set default title
default_title = toUnicode(info.get('title'))
titles = info.get('titles', [])
counter = 0
def_title = None
for title in titles:
if (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title):
def_title = toUnicode(title)
break
counter += 1
if not def_title:
def_title = toUnicode(titles[0])
return def_title or 'UNKNOWN'
def getPoster(self, media, image_urls):
if 'files' not in media:
media['files'] = {}
existing_files = media['files']
image_type = 'poster'
file_type = 'image_%s' % image_type
# Make existing unique
unique_files = list(set(existing_files.get(file_type, [])))
# Remove files that can't be found
for ef in unique_files:
if not os.path.isfile(ef):
unique_files.remove(ef)
# Replace new files list
existing_files[file_type] = unique_files
        if len(unique_files) == 0:  # nothing survived the existence check above
            del existing_files[file_type]
# Loop over type
for image in image_urls.get(image_type, []):
if not isinstance(image, (str, unicode)):
continue
# Check if it has top image
filename = '%s.%s' % (md5(image), getExt(image))
existing = existing_files.get(file_type, [])
has_latest = False
for x in existing:
if filename in x:
has_latest = True
if not has_latest or file_type not in existing_files or len(existing_files.get(file_type, [])) == 0:
file_path = fireEvent('file.download', url = image, single = True)
if file_path:
existing_files[file_type] = [toUnicode(file_path)]
break
else:
break
|
gpl-3.0
|
tjcsl/ion
|
intranet/apps/groups/views.py
|
1
|
1335
|
import logging
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from ..auth.decorators import admin_required, deny_restricted
from .forms import GroupForm
from .models import Group
logger = logging.getLogger(__name__)
# this is the one that students see
# has add/remove from groups form
# students can only add themselves to non-admin groups unless they are already an admin
@login_required
@deny_restricted
def groups_view(request):
group_admin = request.user.has_admin_permission("groups")
if group_admin and "user" in request.GET:
user = get_user_model().objects.get(id=request.GET.get("user"))
else:
user = request.user
return render(request, "groups/groups.html", {"user": user, "all_groups": Group.objects.all(), "group_admin": group_admin})
# Create individual views for each form action
@admin_required("groups")
@deny_restricted
def add_group_view(request):
success = False
if request.method == "POST":
form = GroupForm(request.POST)
if form.is_valid():
form.save()
success = True
else:
form = GroupForm()
context = {"form": form, "action": "add", "success": success}
return render(request, "groups/addmodify.html", context)
|
gpl-2.0
|
Lab603/PicEncyclopedias
|
jni-build/jni/include/tensorflow/python/kernel_tests/sparse_reorder_op_test.py
|
8
|
3294
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReorder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class SparseReorderTest(tf.test.TestCase):
def _SparseTensorPlaceholder(self):
return tf.SparseTensor(
tf.placeholder(tf.int64),
tf.placeholder(tf.float64),
tf.placeholder(tf.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.float64)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return tf.SparseTensorValue(ind, val, shape)
def testAlreadyInOrder(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6(np.arange(6))
sp_output = tf.sparse_reorder(sp_input)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.shape, input_val.shape)
def testOutOfOrder(self):
expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
with self.test_session(use_gpu=False) as sess:
for _ in range(5): # To test various random permutations
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
sp_output = tf.sparse_reorder(sp_input)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, expected_output_val.indices)
self.assertAllEqual(output_val.values, expected_output_val.values)
self.assertAllEqual(output_val.shape, expected_output_val.shape)
def testGradients(self):
with self.test_session(use_gpu=False):
for _ in range(5): # To test various random permutations
input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
sp_input = tf.SparseTensor(
input_val.indices, input_val.values, input_val.shape)
sp_output = tf.sparse_reorder(sp_input)
err = tf.test.compute_gradient_error(
sp_input.values,
input_val.values.shape,
sp_output.values,
input_val.values.shape,
x_init_value=input_val.values)
self.assertLess(err, 1e-11)
if __name__ == "__main__":
tf.test.main()
|
mit
|
cntnboys/410Lab6
|
v1/lib/python2.7/site-packages/django/contrib/admin/__init__.py
|
72
|
1175
|
# ACTION_CHECKBOX_NAME is unused, but should stay since its import from here
# has been referenced in documentation.
from django.contrib.admin.decorators import register
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.options import (HORIZONTAL, VERTICAL,
ModelAdmin, StackedInline, TabularInline)
from django.contrib.admin.filters import (ListFilter, SimpleListFilter,
FieldListFilter, BooleanFieldListFilter, RelatedFieldListFilter,
ChoicesFieldListFilter, DateFieldListFilter, AllValuesFieldListFilter)
from django.contrib.admin.sites import AdminSite, site
from django.utils.module_loading import autodiscover_modules
__all__ = [
"register", "ACTION_CHECKBOX_NAME", "ModelAdmin", "HORIZONTAL", "VERTICAL",
"StackedInline", "TabularInline", "AdminSite", "site", "ListFilter",
"SimpleListFilter", "FieldListFilter", "BooleanFieldListFilter",
"RelatedFieldListFilter", "ChoicesFieldListFilter", "DateFieldListFilter",
"AllValuesFieldListFilter", "autodiscover",
]
def autodiscover():
autodiscover_modules('admin', register_to=site)
default_app_config = 'django.contrib.admin.apps.AdminConfig'
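# A sketch of typical usage (MyModel is hypothetical): register a ModelAdmin
# with the default site imported above; autodiscover() then pulls in every
# installed app's admin module at startup.
#
#   from django.contrib import admin
#
#   @admin.register(MyModel)
#   class MyModelAdmin(admin.ModelAdmin):
#       list_display = ("name",)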
|
apache-2.0
|
lixt/lily2-gem5
|
configs/common/Simulation.py
|
2
|
19873
|
# Copyright (c) 2012-2013 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
import sys
from os import getcwd
from os.path import join as joinpath
import CpuConfig
import MemConfig
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import *
addToPath('../common')
def getCPUClass(cpu_type):
"""Returns the required cpu class and the mode of operation."""
cls = CpuConfig.get(cpu_type)
return cls, cls.memory_mode()
def setCPUClass(options):
"""Returns two cpu classes and the initial mode of operation.
Restoring from a checkpoint or fast forwarding through a benchmark
can be done using one type of cpu, and then the actual
simulation can be carried out using another type. This function
returns these two types of cpus and the initial mode of operation
depending on the options provided.
"""
TmpClass, test_mem_mode = getCPUClass(options.cpu_type)
CPUClass = None
if TmpClass.require_caches() and \
not options.caches and not options.ruby:
fatal("%s must be used with caches" % options.cpu_type)
if options.checkpoint_restore != None:
if options.restore_with_cpu != options.cpu_type:
CPUClass = TmpClass
TmpClass, test_mem_mode = getCPUClass(options.restore_with_cpu)
elif options.fast_forward:
CPUClass = TmpClass
TmpClass = AtomicSimpleCPU
test_mem_mode = 'atomic'
return (TmpClass, test_mem_mode, CPUClass)
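# A note on the scheme above (option values are illustrative, not taken from
# a real run): with --cpu-type=arm_detailed --fast-forward=1000000, this
# returns (AtomicSimpleCPU, 'atomic', <detailed class>); the atomic CPU
# fast-forwards the first million instructions and the detailed CPU takes
# over at the switch point.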
def setMemClass(options):
"""Returns a memory controller class."""
return MemConfig.get(options.mem_type)
def setWorkCountOptions(system, options):
if options.work_item_id != None:
system.work_item_id = options.work_item_id
if options.work_begin_cpu_id_exit != None:
system.work_begin_cpu_id_exit = options.work_begin_cpu_id_exit
if options.work_end_exit_count != None:
system.work_end_exit_count = options.work_end_exit_count
if options.work_end_checkpoint_count != None:
system.work_end_ckpt_count = options.work_end_checkpoint_count
if options.work_begin_exit_count != None:
system.work_begin_exit_count = options.work_begin_exit_count
if options.work_begin_checkpoint_count != None:
system.work_begin_ckpt_count = options.work_begin_checkpoint_count
if options.work_cpus_checkpoint_count != None:
system.work_cpus_ckpt_count = options.work_cpus_checkpoint_count
def findCptDir(options, maxtick, cptdir, testsys):
"""Figures out the directory from which the checkpointed state is read.
There are two different ways in which the directories holding checkpoints
can be named --
1. cpt.<benchmark name>.<instruction count when the checkpoint was taken>
2. cpt.<some number, usually the tick value when the checkpoint was taken>
This function parses through the options to figure out which one of the
above should be used for selecting the checkpoint, and then figures out
the appropriate directory.
It also sets the value of the maximum tick value till which the simulation
will run.
"""
from os.path import isdir, exists
from os import listdir
import re
if not isdir(cptdir):
fatal("checkpoint dir %s does not exist!", cptdir)
if options.at_instruction or options.simpoint:
inst = options.checkpoint_restore
if options.simpoint:
# assume workload 0 has the simpoint
if testsys.cpu[0].workload[0].simpoint == 0:
fatal('Unable to find simpoint')
inst += int(testsys.cpu[0].workload[0].simpoint)
checkpoint_dir = joinpath(cptdir, "cpt.%s.%s" % (options.bench, inst))
if not exists(checkpoint_dir):
fatal("Unable to find checkpoint directory %s", checkpoint_dir)
else:
dirs = listdir(cptdir)
expr = re.compile('cpt\.([0-9]*)')
cpts = []
for dir in dirs:
match = expr.match(dir)
if match:
cpts.append(match.group(1))
cpts.sort(lambda a,b: cmp(long(a), long(b)))
cpt_num = options.checkpoint_restore
if cpt_num > len(cpts):
fatal('Checkpoint %d not found', cpt_num)
maxtick = maxtick - int(cpts[cpt_num - 1])
checkpoint_dir = joinpath(cptdir, "cpt.%s" % cpts[cpt_num - 1])
return maxtick, checkpoint_dir
def scriptCheckpoints(options, maxtick, cptdir):
if options.at_instruction or options.simpoint:
checkpoint_inst = int(options.take_checkpoints)
# maintain correct offset if we restored from some instruction
if options.checkpoint_restore != None:
checkpoint_inst += options.checkpoint_restore
print "Creating checkpoint at inst:%d" % (checkpoint_inst)
exit_event = m5.simulate()
exit_cause = exit_event.getCause()
print "exit cause = %s" % exit_cause
# skip checkpoint instructions should they exist
while exit_cause == "checkpoint":
exit_event = m5.simulate()
exit_cause = exit_event.getCause()
if exit_cause == "a thread reached the max instruction count":
m5.checkpoint(joinpath(cptdir, "cpt.%s.%d" % \
(options.bench, checkpoint_inst)))
print "Checkpoint written."
else:
when, period = options.take_checkpoints.split(",", 1)
when = int(when)
period = int(period)
num_checkpoints = 0
exit_event = m5.simulate(when - m5.curTick())
exit_cause = exit_event.getCause()
while exit_cause == "checkpoint":
exit_event = m5.simulate(when - m5.curTick())
exit_cause = exit_event.getCause()
if exit_cause == "simulate() limit reached":
m5.checkpoint(joinpath(cptdir, "cpt.%d"))
num_checkpoints += 1
sim_ticks = when
max_checkpoints = options.max_checkpoints
while num_checkpoints < max_checkpoints and \
exit_cause == "simulate() limit reached":
if (sim_ticks + period) > maxtick:
exit_event = m5.simulate(maxtick - sim_ticks)
exit_cause = exit_event.getCause()
break
else:
exit_event = m5.simulate(period)
exit_cause = exit_event.getCause()
sim_ticks += period
while exit_event.getCause() == "checkpoint":
exit_event = m5.simulate(sim_ticks - m5.curTick())
if exit_event.getCause() == "simulate() limit reached":
m5.checkpoint(joinpath(cptdir, "cpt.%d"))
num_checkpoints += 1
return exit_event
def benchCheckpoints(options, maxtick, cptdir):
exit_event = m5.simulate(maxtick - m5.curTick())
exit_cause = exit_event.getCause()
num_checkpoints = 0
max_checkpoints = options.max_checkpoints
while exit_cause == "checkpoint":
m5.checkpoint(joinpath(cptdir, "cpt.%d"))
num_checkpoints += 1
if num_checkpoints == max_checkpoints:
exit_cause = "maximum %d checkpoints dropped" % max_checkpoints
break
exit_event = m5.simulate(maxtick - m5.curTick())
exit_cause = exit_event.getCause()
return exit_event
def repeatSwitch(testsys, repeat_switch_cpu_list, maxtick, switch_freq):
print "starting switch loop"
while True:
exit_event = m5.simulate(switch_freq)
exit_cause = exit_event.getCause()
if exit_cause != "simulate() limit reached":
return exit_event
m5.switchCpus(testsys, repeat_switch_cpu_list)
tmp_cpu_list = []
for old_cpu, new_cpu in repeat_switch_cpu_list:
tmp_cpu_list.append((new_cpu, old_cpu))
repeat_switch_cpu_list = tmp_cpu_list
if (maxtick - m5.curTick()) <= switch_freq:
exit_event = m5.simulate(maxtick - m5.curTick())
return exit_event
def run(options, root, testsys, cpu_class):
if options.maxtick:
maxtick = options.maxtick
elif options.maxtime:
        simtime = m5.ticks.seconds(options.maxtime)
print "simulating for: ", simtime
maxtick = simtime
else:
maxtick = m5.MaxTick
if options.checkpoint_dir:
cptdir = options.checkpoint_dir
elif m5.options.outdir:
cptdir = m5.options.outdir
else:
cptdir = getcwd()
if options.fast_forward and options.checkpoint_restore != None:
fatal("Can't specify both --fast-forward and --checkpoint-restore")
if options.standard_switch and not options.caches:
fatal("Must specify --caches when using --standard-switch")
if options.standard_switch and options.repeat_switch:
fatal("Can't specify both --standard-switch and --repeat-switch")
if options.repeat_switch and options.take_checkpoints:
fatal("Can't specify both --repeat-switch and --take-checkpoints")
np = options.num_cpus
switch_cpus = None
if options.prog_interval:
for i in xrange(np):
testsys.cpu[i].progress_interval = options.prog_interval
if options.maxinsts:
for i in xrange(np):
testsys.cpu[i].max_insts_any_thread = options.maxinsts
if cpu_class:
switch_cpus = [cpu_class(switched_out=True, cpu_id=(i))
for i in xrange(np)]
for i in xrange(np):
if options.fast_forward:
testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
switch_cpus[i].system = testsys
switch_cpus[i].workload = testsys.cpu[i].workload
switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain
# simulation period
if options.maxinsts:
switch_cpus[i].max_insts_any_thread = options.maxinsts
# Add checker cpu if selected
if options.checker:
switch_cpus[i].addCheckerCpu()
testsys.switch_cpus = switch_cpus
switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
if options.repeat_switch:
switch_class = getCPUClass(options.cpu_type)[0]
if switch_class.require_caches() and \
not options.caches:
print "%s: Must be used with caches" % str(switch_class)
sys.exit(1)
if not switch_class.support_take_over():
print "%s: CPU switching not supported" % str(switch_class)
sys.exit(1)
repeat_switch_cpus = [switch_class(switched_out=True, \
cpu_id=(i)) for i in xrange(np)]
for i in xrange(np):
repeat_switch_cpus[i].system = testsys
repeat_switch_cpus[i].workload = testsys.cpu[i].workload
repeat_switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain
if options.maxinsts:
repeat_switch_cpus[i].max_insts_any_thread = options.maxinsts
if options.checker:
repeat_switch_cpus[i].addCheckerCpu()
testsys.repeat_switch_cpus = repeat_switch_cpus
if cpu_class:
repeat_switch_cpu_list = [(switch_cpus[i], repeat_switch_cpus[i])
for i in xrange(np)]
else:
repeat_switch_cpu_list = [(testsys.cpu[i], repeat_switch_cpus[i])
for i in xrange(np)]
if options.standard_switch:
switch_cpus = [TimingSimpleCPU(switched_out=True, cpu_id=(i))
for i in xrange(np)]
switch_cpus_1 = [DerivO3CPU(switched_out=True, cpu_id=(i))
for i in xrange(np)]
for i in xrange(np):
switch_cpus[i].system = testsys
switch_cpus_1[i].system = testsys
switch_cpus[i].workload = testsys.cpu[i].workload
switch_cpus_1[i].workload = testsys.cpu[i].workload
switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain
switch_cpus_1[i].clk_domain = testsys.cpu[i].clk_domain
# if restoring, make atomic cpu simulate only a few instructions
if options.checkpoint_restore != None:
testsys.cpu[i].max_insts_any_thread = 1
# Fast forward to specified location if we are not restoring
elif options.fast_forward:
testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
# Fast forward to a simpoint (warning: time consuming)
elif options.simpoint:
if testsys.cpu[i].workload[0].simpoint == 0:
fatal('simpoint not found')
testsys.cpu[i].max_insts_any_thread = \
testsys.cpu[i].workload[0].simpoint
# No distance specified, just switch
else:
testsys.cpu[i].max_insts_any_thread = 1
# warmup period
if options.warmup_insts:
switch_cpus[i].max_insts_any_thread = options.warmup_insts
# simulation period
if options.maxinsts:
switch_cpus_1[i].max_insts_any_thread = options.maxinsts
# attach the checker cpu if selected
if options.checker:
switch_cpus[i].addCheckerCpu()
switch_cpus_1[i].addCheckerCpu()
testsys.switch_cpus = switch_cpus
testsys.switch_cpus_1 = switch_cpus_1
switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
switch_cpu_list1 = [(switch_cpus[i], switch_cpus_1[i]) for i in xrange(np)]
# set the checkpoint in the cpu before m5.instantiate is called
if options.take_checkpoints != None and \
(options.simpoint or options.at_instruction):
offset = int(options.take_checkpoints)
# Set an instruction break point
if options.simpoint:
for i in xrange(np):
if testsys.cpu[i].workload[0].simpoint == 0:
fatal('no simpoint for testsys.cpu[%d].workload[0]', i)
checkpoint_inst = int(testsys.cpu[i].workload[0].simpoint) + offset
testsys.cpu[i].max_insts_any_thread = checkpoint_inst
# used for output below
options.take_checkpoints = checkpoint_inst
else:
options.take_checkpoints = offset
# Set all test cpus with the right number of instructions
# for the upcoming simulation
for i in xrange(np):
testsys.cpu[i].max_insts_any_thread = offset
checkpoint_dir = None
if options.checkpoint_restore != None:
maxtick, checkpoint_dir = findCptDir(options, maxtick, cptdir, testsys)
m5.instantiate(checkpoint_dir)
if options.standard_switch or cpu_class:
if options.standard_switch:
print "Switch at instruction count:%s" % \
str(testsys.cpu[0].max_insts_any_thread)
exit_event = m5.simulate()
elif cpu_class and options.fast_forward:
print "Switch at instruction count:%s" % \
str(testsys.cpu[0].max_insts_any_thread)
exit_event = m5.simulate()
else:
print "Switch at curTick count:%s" % str(10000)
exit_event = m5.simulate(10000)
print "Switched CPUS @ tick %s" % (m5.curTick())
m5.switchCpus(testsys, switch_cpu_list)
if options.standard_switch:
print "Switch at instruction count:%d" % \
(testsys.switch_cpus[0].max_insts_any_thread)
#warmup instruction count may have already been set
if options.warmup_insts:
exit_event = m5.simulate()
else:
exit_event = m5.simulate(options.standard_switch)
print "Switching CPUS @ tick %s" % (m5.curTick())
print "Simulation ends instruction count:%d" % \
(testsys.switch_cpus_1[0].max_insts_any_thread)
m5.switchCpus(testsys, switch_cpu_list1)
# If we're taking and restoring checkpoints, use checkpoint_dir
# option only for finding the checkpoints to restore from. This
# lets us test checkpointing by restoring from one set of
# checkpoints, generating a second set, and then comparing them.
if options.take_checkpoints and options.checkpoint_restore:
if m5.options.outdir:
cptdir = m5.options.outdir
else:
cptdir = getcwd()
if options.take_checkpoints != None :
# Checkpoints being taken via the command line at <when> and at
# subsequent periods of <period>. Checkpoint instructions
# received from the benchmark running are ignored and skipped in
# favor of command line checkpoint instructions.
exit_event = scriptCheckpoints(options, maxtick, cptdir)
else:
if options.fast_forward:
m5.stats.reset()
print "**** REAL SIMULATION ****"
# If checkpoints are being taken, then the checkpoint instruction
# will occur in the benchmark code it self.
if options.repeat_switch and maxtick > options.repeat_switch:
exit_event = repeatSwitch(testsys, repeat_switch_cpu_list,
maxtick, options.repeat_switch)
else:
exit_event = benchCheckpoints(options, maxtick, cptdir)
print 'Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())
if options.checkpoint_at_end:
m5.checkpoint(joinpath(cptdir, "cpt.%d"))
if not m5.options.interactive:
sys.exit(exit_event.getCode())
|
bsd-3-clause
|
dchilds7/Deysha-Star-Formation
|
vispy/scene/widgets/colorbar.py
|
10
|
5184
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from .widget import Widget
from ...visuals import ColorBarVisual
class ColorBarWidget(Widget):
    """Widget containing a ColorBar
    Parameters
    ----------
    cmap : str | vispy.color.ColorMap
        Either the name of the ColorMap to be used from the standard
        set of names (refer to `vispy.color.get_colormap`),
        or a custom ColorMap object.
        The ColorMap is used to apply a gradient on the colorbar.
    orientation : {'left', 'right', 'top', 'bottom'}
        The orientation of the colorbar, used for rendering. The
        orientation can be thought of as the position of the label
        relative to the color bar.
        When the orientation is 'left' or 'right', the colorbar is
        vertically placed. When it is 'top' or 'bottom', the colorbar is
        horizontally placed. The colorbar automatically resizes when its
        container's dimension changes.
        * 'top': the colorbar is horizontal.
          Color is applied from left to right.
          Minimum corresponds to left and maximum to right.
          Label is to the top of the colorbar
        * 'bottom': Same as top, except that
          label is to the bottom of the colorbar
        * 'left': the colorbar is vertical.
          Color is applied from bottom to top.
          Minimum corresponds to bottom and maximum to top.
          Label is to the left of the colorbar
        * 'right': Same as left, except that the
          label is placed to the right of the colorbar
    label : str
        The label that is to be drawn with the colorbar
        that provides information about the colorbar.
    clim : tuple (min, max)
        the minimum and maximum values of the data that
        is given to the colorbar. This is used to draw the scale
        on the side of the colorbar.
    border_width : float (in px)
        The width of the border the colormap should have. This measurement
        is given in pixels
    border_color : str | vispy.color.Color
        The color of the border of the colormap. This can either be a
        str with the color's name or an actual instance of a vispy.color.Color
    """
    # padding with respect to the major and minor axis
    # units are normalized [0, 1] with 1 representing occupying
    # all of the length along the given axis
    major_axis_padding = 0.1
    minor_axis_padding = 0.8
    # ratio of minor axis to major axis
    minor_axis_ratio = 0.05
def __init__(self, cmap, orientation,
label="", clim=("", ""),
border_width=0.0, border_color="black", **kwargs):
dummy_size = (1, 1)
self._colorbar = ColorBarVisual(size=dummy_size, cmap=cmap,
orientation=orientation,
label=label, clim=clim,
border_width=border_width,
border_color=border_color, **kwargs)
Widget.__init__(self)
self.add_subvisual(self._colorbar)
self._update_colorbar()
def on_resize(self, event):
"""Resize event handler
Parameters
----------
event : instance of Event
The event.
"""
self._update_colorbar()
def _update_colorbar(self):
self._colorbar.pos = self.rect.center
self._colorbar.size = \
ColorBarWidget.calc_size(self.rect, self._colorbar.orientation)
@staticmethod
def calc_size(rect, orientation):
(total_halfx, total_halfy) = rect.center
if orientation in ["bottom", "top"]:
(total_major_axis, total_minor_axis) = (total_halfx, total_halfy)
else:
(total_major_axis, total_minor_axis) = (total_halfy, total_halfx)
major_axis = total_major_axis * (1.0 -
ColorBarWidget.major_axis_padding)
minor_axis = major_axis * ColorBarWidget.minor_axis_ratio
# if the minor axis is "leaking" from the padding, then clamp
minor_axis = np.minimum(minor_axis,
total_minor_axis *
(1.0 - ColorBarWidget.minor_axis_padding))
return (major_axis, minor_axis)
@property
def cmap(self):
return self._colorbar.cmap
@cmap.setter
def cmap(self, cmap):
self._colorbar.cmap = cmap
@property
def label(self):
return self._colorbar.label
@label.setter
def label(self, label):
self._colorbar.label = label
@property
def clim(self):
return self._colorbar.clim
@clim.setter
def clim(self, clim):
self._colorbar.clim = clim
@property
def orientation(self):
return self._colorbar.orientation
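# Hedged usage sketch (added for illustration; not part of the original
# module). It assumes the public vispy.scene API -- SceneCanvas,
# central_widget.add_grid() and grid.add_widget() -- and "cool" is an
# assumed standard colormap name; treat the layout as illustrative.
if __name__ == '__main__':
    from vispy import app, scene
    canvas = scene.SceneCanvas(keys='interactive', show=True)
    grid = canvas.central_widget.add_grid()
    # A vertical colorbar; it resizes with the canvas because on_resize()
    # recomputes the underlying visual's size via calc_size().
    cbar = ColorBarWidget(cmap="cool", orientation="right",
                          label="intensity", clim=(0.0, 1.0),
                          border_width=1.0, border_color="black")
    grid.add_widget(cbar, row=0, col=0)
    app.run()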
|
bsd-3-clause
|
alexm92/sentry
|
src/sentry/south_migrations/0057_auto__add_field_group_active_at.py
|
36
|
18071
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Group.active_at'
db.add_column('sentry_groupedmessage', 'active_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Group.active_at'
db.delete_column('sentry_groupedmessage', 'active_at')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
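# Hedged usage note (added for illustration; not part of the original
# migration): with South installed, a schema migration like this one is
# applied and rolled back from the command line. The migration numbers
# below match this file and its predecessor; the manage.py location is an
# assumption about the project layout.
#
#   python manage.py migrate sentry 0057   # runs forwards(): adds active_at
#   python manage.py migrate sentry 0056   # runs backwards(): drops it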
|
bsd-3-clause
|
kreatorkodi/repository.torrentbr
|
plugin.video.elementum/resources/site-packages/bjsonrpc/jsonlib.py
|
11
|
2695
|
"""
bjson/jsonlib.py
Asynchronous Bidirectional JSON-RPC protocol implementation over TCP/IP
Copyright (c) 2010 David Martinez Marti
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of copyright holders nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
try:
    import simplejson as j
except ImportError:
    try:
        import json as j
    except ImportError:
        print("FATAL: No suitable json library found!")
        raise
from pprint import pprint
def dumps(argobj, conn):
"""
dumps json object using loaded json library and forwards unknown objects
    to *Connection.dump_object* function.
"""
ret = None
try:
ret = j.dumps(argobj, separators=(',', ':'), default=conn.dump_object)
except TypeError:
pprint(argobj)
raise
# raise TypeError("The Python object is not serializable to JSON!")
return ret
def loads(argobj, conn):
"""
loads json object using *Connection.load_object* to convert json hinted
objects to real objects.
"""
ret = None
try:
ret = j.loads(argobj, object_hook=conn.load_object)
except ValueError:
pprint(argobj)
raise
# raise ValueError("The String object is not a valid JSON data!")
return ret
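# Hedged usage sketch (added for illustration; not part of the original
# module). bjsonrpc normally supplies the Connection object; the minimal
# stand-in below only implements the dump_object/load_object hooks that
# dumps()/loads() actually call, so a plain-JSON round trip can be
# exercised in isolation.
if __name__ == '__main__':
    class _FakeConnection(object):
        @staticmethod
        def dump_object(obj):
            # Called only for objects the json library cannot serialize.
            raise TypeError("not serializable: %r" % (obj,))
        @staticmethod
        def load_object(dct):
            # Identity hook: leave plain dicts untouched.
            return dct
    text = dumps({"method": "ping", "id": 1}, _FakeConnection)
    assert loads(text, _FakeConnection) == {"method": "ping", "id": 1}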
|
gpl-2.0
|
atupal/ffbird
|
werkzeug/useragents.py
|
317
|
5300
|
# -*- coding: utf-8 -*-
"""
werkzeug.useragents
~~~~~~~~~~~~~~~~~~~
This module provides a helper to inspect user agent strings. This module
is far from complete but should work for most of the currently available
browsers.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
class UserAgentParser(object):
"""A simple user agent parser. Used by the `UserAgent`."""
platforms = (
('iphone|ios', 'iphone'),
('ipad', 'ipad'),
(r'darwin|mac|os\s*x', 'macos'),
('win', 'windows'),
(r'android', 'android'),
(r'x11|lin(\b|ux)?', 'linux'),
('(sun|i86)os', 'solaris'),
(r'nintendo\s+wii', 'wii'),
('irix', 'irix'),
('hp-?ux', 'hpux'),
('aix', 'aix'),
('sco|unix_sv', 'sco'),
('bsd', 'bsd'),
('amiga', 'amiga'),
('blackberry|playbook', 'blackberry')
)
browsers = (
('googlebot', 'google'),
('msnbot', 'msn'),
('yahoo', 'yahoo'),
('ask jeeves', 'ask'),
(r'aol|america\s+online\s+browser', 'aol'),
('opera', 'opera'),
('chrome', 'chrome'),
('firefox|firebird|phoenix|iceweasel', 'firefox'),
('galeon', 'galeon'),
('safari', 'safari'),
('webkit', 'webkit'),
('camino', 'camino'),
('konqueror', 'konqueror'),
('k-meleon', 'kmeleon'),
('netscape', 'netscape'),
(r'msie|microsoft\s+internet\s+explorer', 'msie'),
('lynx', 'lynx'),
('links', 'links'),
('seamonkey|mozilla', 'seamonkey')
)
    _browser_version_re = r'(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?'
_language_re = re.compile(
r'(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|'
r'(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)'
)
def __init__(self):
self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
        # Compile with re.I instead of a trailing inline (?i) flag, which
        # newer Python versions reject when not at the start of the pattern.
        self.browsers = [(b, re.compile(self._browser_version_re % a, re.I))
                         for a, b in self.browsers]
def __call__(self, user_agent):
for platform, regex in self.platforms:
match = regex.search(user_agent)
if match is not None:
break
else:
platform = None
for browser, regex in self.browsers:
match = regex.search(user_agent)
if match is not None:
version = match.group(1)
break
else:
browser = version = None
match = self._language_re.search(user_agent)
if match is not None:
language = match.group(1) or match.group(2)
else:
language = None
return platform, browser, version, language
class UserAgent(object):
"""Represents a user agent. Pass it a WSGI environment or a user agent
string and you can inspect some of the details from the user agent
string via the attributes. The following attributes exist:
.. attribute:: string
the raw user agent string
.. attribute:: platform
the browser platform. The following platforms are currently
recognized:
- `aix`
- `amiga`
- `android`
- `bsd`
- `hpux`
- `iphone`
- `ipad`
- `irix`
- `linux`
- `macos`
- `sco`
- `solaris`
- `wii`
- `windows`
.. attribute:: browser
the name of the browser. The following browsers are currently
recognized:
- `aol` *
- `ask` *
- `camino`
- `chrome`
- `firefox`
- `galeon`
- `google` *
- `kmeleon`
- `konqueror`
- `links`
- `lynx`
- `msie`
- `msn`
- `netscape`
- `opera`
- `safari`
- `seamonkey`
- `webkit`
- `yahoo` *
    (Browsers marked with a star (``*``) are crawlers.)
.. attribute:: version
the version of the browser
.. attribute:: language
the language of the browser
"""
_parser = UserAgentParser()
def __init__(self, environ_or_string):
if isinstance(environ_or_string, dict):
environ_or_string = environ_or_string.get('HTTP_USER_AGENT', '')
self.string = environ_or_string
self.platform, self.browser, self.version, self.language = \
self._parser(environ_or_string)
def to_header(self):
return self.string
def __str__(self):
return self.string
def __nonzero__(self):
return bool(self.browser)
__bool__ = __nonzero__
def __repr__(self):
return '<%s %r/%s>' % (
self.__class__.__name__,
self.browser,
self.version
)
# conceptionally this belongs in this module but because we want to lazily
# load the user agent module (which happens in wrappers.py) we have to import
# it afterwards. The class itself has the module set to this module so
# pickle, inspect and similar modules treat the object as if it was really
# implemented here.
from werkzeug.wrappers import UserAgentMixin
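# Hedged usage sketch (added for illustration; not part of the original
# module). The user agent string below is fabricated but representative;
# the expected outputs follow from the platform/browser tables above.
if __name__ == '__main__':
    ua = UserAgent('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 '
                   '(KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36')
    print(ua.platform)  # 'windows'
    print(ua.browser)   # 'chrome'  (listed before the 'safari' pattern)
    print(ua.version)   # '28.0.1500.71'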
|
apache-2.0
|
gskachkov/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/resultsjsonparser.py
|
123
|
7067
|
# Copyright (c) 2010, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
from webkitpy.common.memoized import memoized
# FIXME: common should never import from new-run-webkit-tests, one of these files needs to move.
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations, test_results, test_failures
from webkitpy.layout_tests.models.test_expectations import TestExpectations
_log = logging.getLogger(__name__)
# These are helper functions for navigating the results json structure.
def for_each_test(tree, handler, prefix=''):
for key in tree:
new_prefix = (prefix + '/' + key) if prefix else key
if 'actual' not in tree[key]:
for_each_test(tree[key], handler, new_prefix)
else:
handler(new_prefix, tree[key])
def result_for_test(tree, test):
parts = test.split('/')
for part in parts:
tree = tree[part]
return tree
# Wrapper around the dictionaries returned from the json.
# Eventually the .json should just serialize the TestFailure objects
# directly and we won't need this.
class JSONTestResult(object):
def __init__(self, test_name, result_dict):
self._test_name = test_name
self._result_dict = result_dict
def did_pass_or_run_as_expected(self):
return self.did_pass() or self.did_run_as_expected()
def did_pass(self):
return test_expectations.PASS in self._actual_as_tokens()
def did_run_as_expected(self):
actual_results = self._actual_as_tokens()
expected_results = self._expected_as_tokens()
# FIXME: We should only call remove_pixel_failures when this JSONResult
# came from a test run without pixel tests!
if not TestExpectations.has_pixel_failures(actual_results):
expected_results = TestExpectations.remove_pixel_failures(expected_results)
for actual_result in actual_results:
if not TestExpectations.result_was_expected(actual_result, expected_results, False, False):
return False
return True
def _tokenize(self, results_string):
tokens = map(TestExpectations.expectation_from_string, results_string.split(' '))
if None in tokens:
_log.warning("Unrecognized result in %s" % results_string)
return set(tokens)
@memoized
def _actual_as_tokens(self):
actual_results = self._result_dict['actual']
return self._tokenize(actual_results)
@memoized
def _expected_as_tokens(self):
        expected_results = self._result_dict['expected']
        return self._tokenize(expected_results)
def _failure_types_from_actual_result(self, actual):
# FIXME: There doesn't seem to be a full list of all possible values of
# 'actual' anywhere. However JSONLayoutResultsGenerator.FAILURE_TO_CHAR
# is a useful reference as that's for "old" style results.json files
#
# FIXME: TEXT, IMAGE_PLUS_TEXT, and AUDIO are obsolete but we keep them for
# now so that we can parse old results.json files.
if actual == test_expectations.PASS:
return []
elif actual == test_expectations.FAIL:
return [test_failures.FailureTextMismatch(), test_failures.FailureImageHashMismatch(), test_failures.FailureAudioMismatch()]
elif actual == test_expectations.TEXT:
return [test_failures.FailureTextMismatch()]
elif actual == test_expectations.IMAGE:
return [test_failures.FailureImageHashMismatch()]
elif actual == test_expectations.IMAGE_PLUS_TEXT:
return [test_failures.FailureImageHashMismatch(), test_failures.FailureTextMismatch()]
elif actual == test_expectations.AUDIO:
return [test_failures.FailureAudioMismatch()]
elif actual == test_expectations.TIMEOUT:
return [test_failures.FailureTimeout()]
elif actual == test_expectations.CRASH:
# NOTE: We don't know what process crashed from the json, just that a process crashed.
return [test_failures.FailureCrash()]
elif actual == test_expectations.MISSING:
return [test_failures.FailureMissingResult(), test_failures.FailureMissingImageHash(), test_failures.FailureMissingImage()]
else:
_log.warning("Failed to handle: %s" % self._result_dict['actual'])
return []
def _failures(self):
if self.did_pass():
return []
return sum(map(self._failure_types_from_actual_result, self._actual_as_tokens()), [])
def test_result(self):
# FIXME: Optionally pull in the test runtime from times_ms.json.
return test_results.TestResult(self._test_name, self._failures())
class ResultsJSONParser(object):
@classmethod
def parse_results_json(cls, json_string):
if not json_results_generator.has_json_wrapper(json_string):
return None
content_string = json_results_generator.strip_json_wrapper(json_string)
json_dict = json.loads(content_string)
json_results = []
for_each_test(json_dict['tests'], lambda test, result: json_results.append(JSONTestResult(test, result)))
# FIXME: What's the short sexy python way to filter None?
# I would use [foo.bar() for foo in foos if foo.bar()] but bar() is expensive.
unexpected_failures = [result.test_result() for result in json_results if not result.did_pass_or_run_as_expected()]
return filter(lambda a: a, unexpected_failures)
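# Hedged usage sketch (added for illustration; not part of the original
# module). Results files are wrapped in a JavaScript call before being
# served; the exact "ADD_RESULTS(...);" wrapper text is an assumption
# based on json_results_generator, and the one-test payload is fabricated.
if __name__ == '__main__':
    sample = ('ADD_RESULTS({"tests": {"fast": {"a.html": '
              '{"actual": "PASS", "expected": "PASS"}}}});')
    unexpected = ResultsJSONParser.parse_results_json(sample)
    print(unexpected)  # empty when every test ran as expected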
|
bsd-3-clause
|
benfinkelcbt/CPD200
|
CPD200-Lab12-Python/googleapiclient/discovery_cache/appengine_memcache.py
|
50
|
1562
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App Engine memcache based cache for the discovery document."""
import logging
# This is only an optional dependency because we only import this
# module when google.appengine.api.memcache is available.
from google.appengine.api import memcache
from . import base
from ..discovery_cache import DISCOVERY_DOC_MAX_AGE
NAMESPACE = 'google-api-client'
class Cache(base.Cache):
"""A cache with app engine memcache API."""
def __init__(self, max_age):
"""Constructor.
Args:
max_age: Cache expiration in seconds.
"""
self._max_age = max_age
def get(self, url):
try:
return memcache.get(url, namespace=NAMESPACE)
except Exception as e:
logging.warning(e, exc_info=True)
def set(self, url, content):
try:
memcache.set(url, content, time=int(self._max_age), namespace=NAMESPACE)
except Exception as e:
logging.warning(e, exc_info=True)
cache = Cache(max_age=DISCOVERY_DOC_MAX_AGE)
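# Hedged usage note (added for illustration; not part of the original
# module): inside an App Engine request the module-level `cache` acts as a
# read-through cache; fetch_discovery_doc below is a hypothetical helper,
# not part of this package.
#
#   content = cache.get(url)
#   if content is None:
#       content = fetch_discovery_doc(url)
#       cache.set(url, content)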
|
gpl-3.0
|
305120262/ArcGISServerManageTools
|
GetServerLog.py
|
1
|
3475
|
#coding=utf-8
"""
-------------------------------------------------------------------------------
Name: getsvrlog.py
Purpose: Collect ArcGIS Server Site Logs
Author: Sean.L ([email protected])
Created: 8/25/16
Copyright: (c) Sean.L 2016
-------------------------------------------------------------------------------
"""
from __future__ import print_function
# 3rd-party library: requests
# http://docs.python-requests.org/en/latest/
# pip install requests
import requests
import json
import pandas as pd
SITE_LIST = ["http://192.168.1.75:6080/arcgis",""]
USER = "siteadmin"
PASSWORD = "esri"
LOGPATH = r"D:\\"
# note that services are suffixed by type when passed to admin REST API
SERVICES = [r"MajorCity.MapServer"]
class AGSRestError(Exception): pass
class ServerError(Exception): pass
def _validate_response(response):
""" Tests response for HTTP 200 code, tests that response is json,
and searches for typical AGS error indicators in json.
Raises an exception if response does not validate successfully.
"""
if not response.ok:
raise ServerError("Server Error: {}".format(response.text))
try:
response_json = response.json()
if "error" in response_json:
raise AGSRestError(response_json["error"])
if "status" in response_json and response_json["status"] != "success":
error = response_json["status"]
if "messages" in response_json:
for message in response_json["messages"]:
error += "\n" + message
raise AGSRestError(error)
except ValueError:
print(response.text)
raise ServerError("Server returned HTML: {}".format(response.text))
def _get_token(site, username, password):
""" Returns token from server """
token_url = "{host}/tokens/".format(
host=site)
data = { "f": "json",
"username": username,
"password": password,
"client": "requestip",
"expiration": 5 }
    response = requests.post(token_url, data, verify=False)
_validate_response(response)
token = response.json()['token']
return token
def _get_log(site, token):
    getlog_url = "{host}/admin/logs/query?f=json".format(
        host=site)
data = { "token": token,
"startTime":'',
"endTime":'',
"level":'SEVERE',
"filterType":'json',
"filter":'{\"server\": \"*\",\
\"services\": \"*\",\
\"machines":\"*\" }',
"pageSize":10000}
    response = requests.post(getlog_url, data, verify=False)
_validate_response(response)
response_json = response.json()
#print (response_json['logMessages'])
    myFrame = pd.DataFrame(response_json['logMessages'])
    sitewaip = site[site.index("/")+2:site.index(":",7)]
    myFrame["sitewaip"] = sitewaip
    file_name = sitewaip.replace(".", "_")
myFrame.to_csv(r"{root}{site_name}.csv".format(root=LOGPATH,site_name=file_name), index=False)
return myFrame
if __name__ == "__main__":
frames = []
for site in SITE_LIST:
print("Retrieving token...")
        token = _get_token(site, USER, PASSWORD)
print("Retrieved: {}".format(token))
        df = _get_log(site, token)
print( df.columns)
frames.append(df)
all_frame = pd.concat(frames)
all_frame.to_csv(r"{root}allsites.csv".format(root=LOGPATH),index=False)
|
apache-2.0
|
apporc/neutron
|
neutron/db/address_scope_db.py
|
1
|
5406
|
# Copyright (c) 2015 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy.orm import exc
from neutron._i18n import _
from neutron.api.v2 import attributes as attr
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import address_scope as ext_address_scope
LOG = logging.getLogger(__name__)
class AddressScope(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a neutron address scope."""
__tablename__ = "address_scopes"
name = sa.Column(sa.String(attr.NAME_MAX_LEN), nullable=False)
shared = sa.Column(sa.Boolean, nullable=False)
class AddressScopeDbMixin(ext_address_scope.AddressScopePluginBase):
"""Mixin class to add address scope to db_base_plugin_v2."""
__native_bulk_support = True
def _make_address_scope_dict(self, address_scope, fields=None):
res = {'id': address_scope['id'],
'name': address_scope['name'],
'tenant_id': address_scope['tenant_id'],
'shared': address_scope['shared']}
return self._fields(res, fields)
def _get_address_scope(self, context, id):
try:
return self._get_by_id(context, AddressScope, id)
except exc.NoResultFound:
raise ext_address_scope.AddressScopeNotFound(address_scope_id=id)
def is_address_scope_owned_by_tenant(self, context, id):
"""Check if address scope id is owned by the tenant or not.
AddressScopeNotFound is raised if the
- address scope id doesn't exist or
- if the (unshared) address scope id is not owned by this tenant.
@return Returns true if the user is admin or tenant is owner
Returns false if the address scope id is shared and not
owned by the tenant.
"""
address_scope = self._get_address_scope(context, id)
return context.is_admin or (
address_scope.tenant_id == context.tenant_id)
def create_address_scope(self, context, address_scope):
"""Create a address scope."""
a_s = address_scope['address_scope']
tenant_id = self._get_tenant_id_for_create(context, a_s)
address_scope_id = a_s.get('id') or uuidutils.generate_uuid()
with context.session.begin(subtransactions=True):
pool_args = {'tenant_id': tenant_id,
'id': address_scope_id,
'name': a_s['name'],
'shared': a_s['shared']}
address_scope = AddressScope(**pool_args)
context.session.add(address_scope)
return self._make_address_scope_dict(address_scope)
def update_address_scope(self, context, id, address_scope):
a_s = address_scope['address_scope']
with context.session.begin(subtransactions=True):
address_scope = self._get_address_scope(context, id)
if address_scope.shared and not a_s.get('shared', True):
reason = _("Shared address scope can't be unshared")
raise ext_address_scope.AddressScopeUpdateError(
address_scope_id=id, reason=reason)
address_scope.update(a_s)
return self._make_address_scope_dict(address_scope)
def get_address_scope(self, context, id, fields=None):
address_scope = self._get_address_scope(context, id)
return self._make_address_scope_dict(address_scope, fields)
def get_address_scopes(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'addrscope', limit, marker)
collection = self._get_collection(context, AddressScope,
self._make_address_scope_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
return collection
def get_address_scopes_count(self, context, filters=None):
return self._get_collection_count(context, AddressScope,
filters=filters)
def delete_address_scope(self, context, id):
with context.session.begin(subtransactions=True):
if self._get_subnetpools_by_address_scope_id(context, id):
raise ext_address_scope.AddressScopeInUse(address_scope_id=id)
address_scope = self._get_address_scope(context, id)
context.session.delete(address_scope)
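# Hedged usage note (added for illustration; not part of the original
# module): as the class docstring says, this is a mixin for
# db_base_plugin_v2, so a plugin typically inherits from it alongside the
# base plugin class; the plugin name below is hypothetical.
#
#   class MyPlugin(AddressScopeDbMixin, db_base_plugin_v2.NeutronDbPluginV2):
#       ...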
|
apache-2.0
|
anythingrandom/eclcli
|
eclcli/interconnectivity/interconnectivityclient/common/command.py
|
16
|
1059
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from cliff import command
class OpenStackCommand(command.Command):
"""Base class for OpenStack commands."""
api = None
def run(self, parsed_args):
if not self.api:
return
else:
return super(OpenStackCommand, self).run(parsed_args)
def get_data(self, parsed_args):
pass
def take_action(self, parsed_args):
return self.get_data(parsed_args)
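# Hedged usage sketch (added for illustration; not part of the original
# module): concrete commands set `api` and override get_data(); the class
# name, api value and helper below are hypothetical.
#
#   class ListThings(OpenStackCommand):
#       api = 'interconnectivity'
#       def get_data(self, parsed_args):
#           return fetch_things(parsed_args)  # hypothetical helper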
|
apache-2.0
|
allevato/swift
|
utils/update_checkout/tests/scheme_mock.py
|
12
|
5419
|
# ===--- SchemeMock.py ----------------------------------------------------===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===----------------------------------------------------------------------===#
"""This file defines objects for mocking an update-checkout scheme. It creates
a json .config file and a series of .git repos with "fake commits".
"""
import json
import os
import subprocess
import unittest
# For now we only use a config with a single scheme. We should add support for
# handling multiple schemes.
MOCK_REMOTE = {
'repo1': [
# This is a series of changes to repo1. (File, NewContents)
('A.txt', 'A'),
('B.txt', 'B'),
('A.txt', 'a'),
],
'repo2': [
        # This is a series of changes to repo2. (File, NewContents)
('X.txt', 'X'),
('Y.txt', 'Y'),
('X.txt', 'z'),
],
}
MOCK_CONFIG = {
# This is here just b/c we expect it. We should consider consolidating
    # clone-patterns into a dictionary where we map protocols (i.e. ['ssh',
# 'https'] to patterns). Then we can define this issue.
'ssh-clone-pattern': 'DO_NOT_USE',
# We reset this value with our remote path when we process
'https-clone-pattern': '',
'repos': {
'repo1': {
'remote': {'id': 'repo1'},
},
'repo2': {
'remote': {'id': 'repo2'},
},
},
'default-branch-scheme': 'master',
'branch-schemes': {
'master': {
'aliases': ['master'],
'repos': {
'repo1': 'master',
'repo2': 'master',
}
}
}
}
def call_quietly(*args, **kwargs):
with open(os.devnull, 'w') as f:
kwargs['stdout'] = f
kwargs['stderr'] = f
subprocess.check_call(*args, **kwargs)
def create_dir(d):
if not os.path.isdir(d):
os.makedirs(d)
def teardown_mock_remote(base_dir):
call_quietly(['rm', '-rf', base_dir])
def get_config_path(base_dir):
return os.path.join(base_dir, 'test-config.json')
def setup_mock_remote(base_dir):
create_dir(base_dir)
# We use local as a workspace for creating commits.
LOCAL_PATH = os.path.join(base_dir, 'local')
# We use remote as a directory that simulates our remote unchecked out
# repo.
REMOTE_PATH = os.path.join(base_dir, 'remote')
create_dir(REMOTE_PATH)
create_dir(LOCAL_PATH)
for (k, v) in MOCK_REMOTE.items():
local_repo_path = os.path.join(LOCAL_PATH, k)
remote_repo_path = os.path.join(REMOTE_PATH, k)
create_dir(remote_repo_path)
create_dir(local_repo_path)
call_quietly(['git', 'init', '--bare', remote_repo_path])
call_quietly(['git', 'clone', '-l', remote_repo_path, local_repo_path])
for (i, (filename, contents)) in enumerate(v):
filename_path = os.path.join(local_repo_path, filename)
with open(filename_path, 'w') as f:
f.write(contents)
call_quietly(['git', 'add', filename], cwd=local_repo_path)
call_quietly(['git', 'commit', '-m', 'Commit %d' % i],
cwd=local_repo_path)
call_quietly(['git', 'push', 'origin', 'master'],
cwd=local_repo_path)
base_config = MOCK_CONFIG
https_clone_pattern = os.path.join('file://%s' % REMOTE_PATH, '%s')
base_config['https-clone-pattern'] = https_clone_pattern
with open(get_config_path(base_dir), 'w') as f:
json.dump(base_config, f)
return (LOCAL_PATH, REMOTE_PATH)
BASEDIR_ENV_VAR = 'UPDATECHECKOUT_TEST_WORKSPACE_DIR'
CURRENT_FILE_DIR = os.path.dirname(os.path.abspath(__file__))
UPDATE_CHECKOUT_PATH = os.path.abspath(os.path.join(CURRENT_FILE_DIR,
os.path.pardir,
os.path.pardir,
'update-checkout'))
class SchemeMockTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(SchemeMockTestCase, self).__init__(*args, **kwargs)
self.workspace = os.getenv(BASEDIR_ENV_VAR)
if self.workspace is None:
raise RuntimeError('Misconfigured test suite! Environment '
'variable %s must be set!' % BASEDIR_ENV_VAR)
self.config_path = get_config_path(self.workspace)
self.update_checkout_path = UPDATE_CHECKOUT_PATH
if not os.access(self.update_checkout_path, os.X_OK):
raise RuntimeError('Error! Could not find executable '
'update-checkout at path: %s'
% self.update_checkout_path)
self.source_root = os.path.join(self.workspace, 'source_root')
def setUp(self):
create_dir(self.source_root)
(self.local_path, self.remote_path) = setup_mock_remote(self.workspace)
def tearDown(self):
teardown_mock_remote(self.workspace)
def call(self, *args, **kwargs):
kwargs['cwd'] = self.source_root
call_quietly(*args, **kwargs)
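# Hedged usage sketch (added for illustration; not part of the original
# module): a minimal test built on the mock scheme. The command-line flags
# are assumptions about update-checkout's CLI, and the
# UPDATECHECKOUT_TEST_WORKSPACE_DIR environment variable must point at a
# scratch directory, as required above.
class ExampleCloneTestCase(SchemeMockTestCase):
    def test_clone(self):
        # Drive update-checkout against the mocked remotes and config.
        self.call([self.update_checkout_path,
                   '--config', self.config_path,
                   '--source-root', self.source_root,
                   '--clone'])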
|
apache-2.0
|
filipenf/ansible
|
test/units/playbook/test_attribute.py
|
219
|
1824
|
# (c) 2015, Marius Gedminas <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.compat.tests import unittest
from ansible.playbook.attribute import Attribute
class TestAttribute(unittest.TestCase):
def setUp(self):
self.one = Attribute(priority=100)
self.two = Attribute(priority=0)
def test_eq(self):
self.assertTrue(self.one == self.one)
self.assertFalse(self.one == self.two)
def test_ne(self):
self.assertFalse(self.one != self.one)
self.assertTrue(self.one != self.two)
def test_lt(self):
self.assertFalse(self.one < self.one)
self.assertTrue(self.one < self.two)
self.assertFalse(self.two < self.one)
def test_gt(self):
self.assertFalse(self.one > self.one)
self.assertFalse(self.one > self.two)
self.assertTrue(self.two > self.one)
def test_le(self):
self.assertTrue(self.one <= self.one)
self.assertTrue(self.one <= self.two)
self.assertFalse(self.two <= self.one)
def test_ge(self):
self.assertTrue(self.one >= self.one)
self.assertFalse(self.one >= self.two)
self.assertTrue(self.two >= self.one)
|
gpl-3.0
|
swastikit/swastikcoin
|
contrib/testgen/gen_base58_test_vectors.py
|
1064
|
4344
|
#!/usr/bin/env python
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode, b58decode, b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
# key types
PUBKEY_ADDRESS = 48
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PRIVKEY = 176
PRIVKEY_TEST = 239
metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)),
((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)),
((PRIVKEY,), 32, (), (True, False, None, False)),
((PRIVKEY,), 32, (1,), (True, False, None, True)),
((PRIVKEY_TEST,), 32, (), (True, True, None, False)),
((PRIVKEY_TEST,), 32, (1,), (True, True, None, True))
]
def is_valid(v):
'''Check vector v for validity'''
result = b58decode_chk(v)
if result is None:
return False
valid = False
for template in templates:
prefix = str(bytearray(template[0]))
suffix = str(bytearray(template[2]))
if result.startswith(prefix) and result.endswith(suffix):
if (len(result) - len(prefix) - len(suffix)) == template[1]:
return True
return False
def gen_valid_vectors():
'''Generate valid test vectors'''
while True:
for template in templates:
prefix = str(bytearray(template[0]))
payload = os.urandom(template[1])
suffix = str(bytearray(template[2]))
rv = b58encode_chk(prefix + payload + suffix)
assert is_valid(rv)
metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None])
yield (rv, b2a_hex(payload), metadata)
def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
'''Generate possibly invalid vector'''
if corrupt_prefix:
prefix = os.urandom(1)
else:
prefix = str(bytearray(template[0]))
if randomize_payload_size:
payload = os.urandom(max(int(random.expovariate(0.5)), 50))
else:
payload = os.urandom(template[1])
if corrupt_suffix:
suffix = os.urandom(len(template[2]))
else:
suffix = str(bytearray(template[2]))
return b58encode_chk(prefix + payload + suffix)
def randbool(p = 0.5):
'''Return True with P(p)'''
return random.random() < p
def gen_invalid_vectors():
'''Generate invalid test vectors'''
# start with some manual edge-cases
yield "",
yield "x",
while True:
# kinds of invalid vectors:
# invalid prefix
# invalid payload length
# invalid (randomized) suffix (add random data)
# corrupt checksum
for template in templates:
val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2))
if random.randint(0,10)<1: # line corruption
if randbool(): # add random character to end
val += random.choice(b58chars)
else: # replace random character in the middle
n = random.randint(0, len(val))
val = val[0:n] + random.choice(b58chars) + val[n+1:]
if not is_valid(val):
yield val,
if __name__ == '__main__':
import sys, json
iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
try:
uiter = iters[sys.argv[1]]
except IndexError:
uiter = gen_valid_vectors
try:
count = int(sys.argv[2])
except IndexError:
count = 0
data = list(islice(uiter(), count))
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
|
mit
|
gustawho/faba-colors
|
render-verd.py
|
2
|
6308
|
#!/usr/bin/python3
#
import os
import sys
import xml.sax
import subprocess
INKSCAPE = '/usr/bin/inkscape'
OPTIPNG = '/usr/bin/optipng'
MAINDIR = 'Faba-Verd'
SRC = os.path.join('.', 'src/Verd')
inkscape_process = None
def main(SRC):
def optimize_png(png_file):
if os.path.exists(OPTIPNG):
process = subprocess.Popen([OPTIPNG, '-quiet', '-o7', png_file])
process.wait()
def wait_for_prompt(process, command=None):
if command is not None:
process.stdin.write((command+'\n').encode('utf-8'))
# This is kinda ugly ...
# Wait for just a '>', or '\n>' if some other char appearead first
output = process.stdout.read(1)
if output == b'>':
return
output += process.stdout.read(1)
while output != b'\n>':
output += process.stdout.read(1)
output = output[1:]
def start_inkscape():
process = subprocess.Popen([INKSCAPE, '--shell'], bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
wait_for_prompt(process)
return process
def inkscape_render_rect(icon_file, rect, output_file):
global inkscape_process
if inkscape_process is None:
inkscape_process = start_inkscape()
wait_for_prompt(inkscape_process, '%s -i %s -e %s' % (icon_file, rect, output_file))
optimize_png(output_file)
class ContentHandler(xml.sax.ContentHandler):
ROOT = 0
SVG = 1
LAYER = 2
OTHER = 3
TEXT = 4
def __init__(self, path, force=False, filter=None):
self.stack = [self.ROOT]
self.inside = [self.ROOT]
self.path = path
self.rects = []
self.state = self.ROOT
self.chars = ""
self.force = force
self.filter = filter
def endDocument(self):
pass
def startElement(self, name, attrs):
if self.inside[-1] == self.ROOT:
if name == "svg":
self.stack.append(self.SVG)
self.inside.append(self.SVG)
return
elif self.inside[-1] == self.SVG:
if (name == "g" and ('inkscape:groupmode' in attrs) and ('inkscape:label' in attrs)
and attrs['inkscape:groupmode'] == 'layer' and attrs['inkscape:label'].startswith('Baseplate')):
self.stack.append(self.LAYER)
self.inside.append(self.LAYER)
self.context = None
self.icon_name = None
self.rects = []
return
elif self.inside[-1] == self.LAYER:
if name == "text" and ('inkscape:label' in attrs) and attrs['inkscape:label'] == 'context':
self.stack.append(self.TEXT)
self.inside.append(self.TEXT)
self.text='context'
self.chars = ""
return
elif name == "text" and ('inkscape:label' in attrs) and attrs['inkscape:label'] == 'icon-name':
self.stack.append(self.TEXT)
self.inside.append(self.TEXT)
self.text='icon-name'
self.chars = ""
return
elif name == "rect":
self.rects.append(attrs)
self.stack.append(self.OTHER)
def endElement(self, name):
stacked = self.stack.pop()
if self.inside[-1] == stacked:
self.inside.pop()
if stacked == self.TEXT and self.text is not None:
assert self.text in ['context', 'icon-name']
if self.text == 'context':
self.context = self.chars
elif self.text == 'icon-name':
self.icon_name = self.chars
self.text = None
elif stacked == self.LAYER:
assert self.icon_name
assert self.context
if self.filter is not None and not self.icon_name in self.filter:
return
print (self.context, self.icon_name)
for rect in self.rects:
width = rect['width']
height = rect['height']
id = rect['id']
dir = os.path.join(MAINDIR, "%sx%s" % (width, height), self.context)
outfile = os.path.join(dir, self.icon_name+'.png')
if not os.path.exists(dir):
os.makedirs(dir)
# Do a time based check!
if self.force or not os.path.exists(outfile):
inkscape_render_rect(self.path, id, outfile)
sys.stdout.write('.')
else:
stat_in = os.stat(self.path)
stat_out = os.stat(outfile)
if stat_in.st_mtime > stat_out.st_mtime:
inkscape_render_rect(self.path, id, outfile)
sys.stdout.write('.')
else:
sys.stdout.write('-')
sys.stdout.flush()
sys.stdout.write('\n')
sys.stdout.flush()
def characters(self, chars):
self.chars += chars.strip()
if len(sys.argv) == 1:
if not os.path.exists(MAINDIR):
os.mkdir(MAINDIR)
print ('')
print ('Rendering from SVGs in', SRC)
print ('')
for file in os.listdir(SRC):
if file[-4:] == '.svg':
file = os.path.join(SRC, file)
handler = ContentHandler(file)
xml.sax.parse(open(file), handler)
print ('')
else:
file = os.path.join(SRC, sys.argv[1] + '.svg')
if len(sys.argv) > 2:
icons = sys.argv[2:]
else:
icons = None
if os.path.exists(os.path.join(file)):
handler = ContentHandler(file, True, filter=icons)
xml.sax.parse(open(file), handler)
else:
print ("Error: No such file", file)
sys.exit(1)
main(SRC)
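# Hedged usage note (added for illustration; not part of the original
# script), summarising the sys.argv handling inside main():
#
#   ./render-verd.py                  # render every SVG found under SRC
#   ./render-verd.py actions icon-a   # render only 'icon-a' from
#                                     # src/Verd/actions.svg (names here
#                                     # are hypothetical)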
|
gpl-3.0
|
rahul-c1/scikit-learn
|
benchmarks/bench_lasso.py
|
297
|
3305
|
"""
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
heeraj123/oh-mainline
|
vendor/packages/twisted/twisted/web/http.py
|
18
|
59989
|
# -*- test-case-name: twisted.web.test.test_http -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
HyperText Transfer Protocol implementation.
This is the basic server-side protocol implementation used by the Twisted
Web server. It can parse HTTP 1.0 requests and supports many HTTP 1.1
features as well. Additionally, some functionality implemented here is
also useful for HTTP clients (such as the chunked encoding parser).
"""
# system imports
from cStringIO import StringIO
import tempfile
import base64, binascii
import cgi
import socket
import math
import time
import calendar
import warnings
import os
from urlparse import urlparse as _urlparse
from zope.interface import implements
# twisted imports
from twisted.internet import interfaces, reactor, protocol, address
from twisted.internet.defer import Deferred
from twisted.protocols import policies, basic
from twisted.python import log
from urllib import unquote
from twisted.web.http_headers import _DictHeaders, Headers
protocol_version = "HTTP/1.1"
_CONTINUE = 100
SWITCHING = 101
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
MULTIPLE_CHOICE = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTH_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE_SPACE = 507
NOT_EXTENDED = 510
RESPONSES = {
# 100
_CONTINUE: "Continue",
SWITCHING: "Switching Protocols",
# 200
OK: "OK",
CREATED: "Created",
ACCEPTED: "Accepted",
NON_AUTHORITATIVE_INFORMATION: "Non-Authoritative Information",
NO_CONTENT: "No Content",
RESET_CONTENT: "Reset Content.",
PARTIAL_CONTENT: "Partial Content",
MULTI_STATUS: "Multi-Status",
# 300
MULTIPLE_CHOICE: "Multiple Choices",
MOVED_PERMANENTLY: "Moved Permanently",
FOUND: "Found",
SEE_OTHER: "See Other",
NOT_MODIFIED: "Not Modified",
USE_PROXY: "Use Proxy",
# 306 not defined??
TEMPORARY_REDIRECT: "Temporary Redirect",
# 400
BAD_REQUEST: "Bad Request",
UNAUTHORIZED: "Unauthorized",
PAYMENT_REQUIRED: "Payment Required",
FORBIDDEN: "Forbidden",
NOT_FOUND: "Not Found",
NOT_ALLOWED: "Method Not Allowed",
NOT_ACCEPTABLE: "Not Acceptable",
PROXY_AUTH_REQUIRED: "Proxy Authentication Required",
REQUEST_TIMEOUT: "Request Time-out",
CONFLICT: "Conflict",
GONE: "Gone",
LENGTH_REQUIRED: "Length Required",
PRECONDITION_FAILED: "Precondition Failed",
REQUEST_ENTITY_TOO_LARGE: "Request Entity Too Large",
REQUEST_URI_TOO_LONG: "Request-URI Too Long",
UNSUPPORTED_MEDIA_TYPE: "Unsupported Media Type",
REQUESTED_RANGE_NOT_SATISFIABLE: "Requested Range not satisfiable",
EXPECTATION_FAILED: "Expectation Failed",
# 500
INTERNAL_SERVER_ERROR: "Internal Server Error",
NOT_IMPLEMENTED: "Not Implemented",
BAD_GATEWAY: "Bad Gateway",
SERVICE_UNAVAILABLE: "Service Unavailable",
GATEWAY_TIMEOUT: "Gateway Time-out",
HTTP_VERSION_NOT_SUPPORTED: "HTTP Version not supported",
INSUFFICIENT_STORAGE_SPACE: "Insufficient Storage Space",
NOT_EXTENDED: "Not Extended"
}
CACHED = """Magic constant returned by http.Request methods to set cache
validation headers when the request is conditional and the value fails
the condition."""
# backwards compatibility
responses = RESPONSES
# datetime parsing and formatting
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
weekdayname_lower = [name.lower() for name in weekdayname]
monthname_lower = [name and name.lower() for name in monthname]
def urlparse(url):
"""
    Parse a URL into six components.
This is similar to L{urlparse.urlparse}, but rejects C{unicode} input
and always produces C{str} output.
@type url: C{str}
@raise TypeError: The given url was a C{unicode} string instead of a
C{str}.
@rtype: six-tuple of str
@return: The scheme, net location, path, params, query string, and fragment
of the URL.
"""
if isinstance(url, unicode):
raise TypeError("url must be str, not unicode")
scheme, netloc, path, params, query, fragment = _urlparse(url)
if isinstance(scheme, unicode):
scheme = scheme.encode('ascii')
netloc = netloc.encode('ascii')
path = path.encode('ascii')
query = query.encode('ascii')
fragment = fragment.encode('ascii')
return scheme, netloc, path, params, query, fragment
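# Illustrative example (added; not upstream code): urlparse always returns
# byte strings and rejects unicode input outright.
def _example_urlparse():
    scheme, netloc, path, params, query, fragment = urlparse(
        'http://example.com/a?b=1#frag')
    assert (scheme, netloc, path) == ('http', 'example.com', '/a')
    try:
        urlparse(u'http://example.com/')
    except TypeError:
        pass  # unicode input is rejected by design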
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, unquote=unquote):
"""
    Like cgi.parse_qs, but with a custom unquote function.
"""
d = {}
items = [s2 for s1 in qs.split("&") for s2 in s1.split(";")]
for item in items:
try:
k, v = item.split("=", 1)
except ValueError:
if strict_parsing:
raise
continue
if v or keep_blank_values:
k = unquote(k.replace("+", " "))
v = unquote(v.replace("+", " "))
if k in d:
d[k].append(v)
else:
d[k] = [v]
return d
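# Illustrative example (added; not upstream code): repeated keys accumulate
# into lists, and '+' decodes to a space before unquoting.
def _example_parse_qs():
    d = parse_qs('a=1&a=2&name=hello+world')
    assert d == {'a': ['1', '2'], 'name': ['hello world']}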
def datetimeToString(msSinceEpoch=None):
"""
Convert seconds since epoch to HTTP datetime string.
"""
    if msSinceEpoch is None:
msSinceEpoch = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(msSinceEpoch)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
weekdayname[wd],
day, monthname[month], year,
hh, mm, ss)
return s
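# Illustrative example (added; not upstream code): formatting a known epoch
# value back to an RFC 1123 HTTP date.
def _example_datetimeToString():
    assert datetimeToString(784111777) == 'Sun, 06 Nov 1994 08:49:37 GMT'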
def datetimeToLogString(msSinceEpoch=None):
"""
Convert seconds since epoch to log datetime string.
"""
    if msSinceEpoch is None:
msSinceEpoch = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(msSinceEpoch)
s = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
day, monthname[month], year,
hh, mm, ss)
return s
def timegm(year, month, day, hour, minute, second):
"""
Convert time tuple in GMT to seconds since epoch, GMT
"""
EPOCH = 1970
if year < EPOCH:
raise ValueError("Years prior to %d not supported" % (EPOCH,))
assert 1 <= month <= 12
days = 365*(year-EPOCH) + calendar.leapdays(EPOCH, year)
for i in range(1, month):
days = days + calendar.mdays[i]
if month > 2 and calendar.isleap(year):
days = days + 1
days = days + day - 1
hours = days*24 + hour
minutes = hours*60 + minute
seconds = minutes*60 + second
return seconds
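# Worked example (added for illustration): the Unix epoch itself and one day
# later, computed with the module's own timegm.
def _example_timegm():
    assert timegm(1970, 1, 1, 0, 0, 0) == 0
    assert timegm(1970, 1, 2, 0, 0, 0) == 86400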
def stringToDatetime(dateString):
"""
Convert an HTTP date string (one of three formats) to seconds since epoch.
"""
parts = dateString.split()
if not parts[0][0:3].lower() in weekdayname_lower:
# Weekday is stupid. Might have been omitted.
try:
return stringToDatetime("Sun, "+dateString)
except ValueError:
# Guess not.
pass
partlen = len(parts)
if (partlen == 5 or partlen == 6) and parts[1].isdigit():
# 1st date format: Sun, 06 Nov 1994 08:49:37 GMT
# (Note: "GMT" is literal, not a variable timezone)
# (also handles without "GMT")
# This is the normal format
day = parts[1]
month = parts[2]
year = parts[3]
time = parts[4]
elif (partlen == 3 or partlen == 4) and parts[1].find('-') != -1:
# 2nd date format: Sunday, 06-Nov-94 08:49:37 GMT
# (Note: "GMT" is literal, not a variable timezone)
        # (also handles without "GMT")
# Two digit year, yucko.
day, month, year = parts[1].split('-')
time = parts[2]
        year = int(year)
if year < 69:
year = year + 2000
elif year < 100:
year = year + 1900
elif len(parts) == 5:
# 3rd date format: Sun Nov 6 08:49:37 1994
# ANSI C asctime() format.
day = parts[2]
month = parts[1]
year = parts[4]
time = parts[3]
else:
raise ValueError("Unknown datetime format %r" % dateString)
day = int(day)
month = int(monthname_lower.index(month.lower()))
year = int(year)
hour, min, sec = map(int, time.split(':'))
return int(timegm(year, month, day, hour, min, sec))
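# Illustrative example (added; not upstream code): the three supported date
# formats all parse to the same epoch value.
def _example_stringToDatetime():
    rfc1123 = stringToDatetime('Sun, 06 Nov 1994 08:49:37 GMT')
    rfc850 = stringToDatetime('Sunday, 06-Nov-94 08:49:37 GMT')
    asctime = stringToDatetime('Sun Nov  6 08:49:37 1994')
    assert rfc1123 == rfc850 == asctime == 784111777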
def toChunk(data):
"""
Convert string to a chunk.
@returns: a tuple of strings representing the chunked encoding of data
"""
return ("%x\r\n" % len(data), data, "\r\n")
def fromChunk(data):
"""
Convert chunk to string.
@returns: tuple (result, remaining), may raise ValueError.
"""
prefix, rest = data.split('\r\n', 1)
length = int(prefix, 16)
if length < 0:
raise ValueError("Chunk length must be >= 0, not %d" % (length,))
if not rest[length:length + 2] == '\r\n':
        raise ValueError("chunk must end with CRLF")
return rest[:length], rest[length + 2:]
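# Illustrative round-trip (added; not upstream code): toChunk and fromChunk
# are inverses for a single chunk.
def _example_chunk_roundtrip():
    wire = ''.join(toChunk('hello'))   # '5\r\nhello\r\n'
    body, rest = fromChunk(wire)
    assert body == 'hello' and rest == ''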
def parseContentRange(header):
"""
Parse a content-range header into (start, end, realLength).
realLength might be None if real length is not known ('*').
"""
kind, other = header.strip().split()
if kind.lower() != "bytes":
        raise ValueError("a range of type %r is not supported" % (kind,))
startend, realLength = other.split("/")
start, end = map(int, startend.split("-"))
if realLength == "*":
realLength = None
else:
realLength = int(realLength)
return (start, end, realLength)
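# Illustrative example (added; not upstream code): a satisfiable byte range
# and an unknown total length ('*') both parse cleanly.
def _example_parseContentRange():
    assert parseContentRange('bytes 0-499/1234') == (0, 499, 1234)
    assert parseContentRange('bytes 500-999/*') == (500, 999, None)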
class StringTransport:
"""
    I am a StringIO wrapper that conforms to the transport API. I support
the `writeSequence' method.
"""
def __init__(self):
self.s = StringIO()
def writeSequence(self, seq):
self.s.write(''.join(seq))
def __getattr__(self, attr):
return getattr(self.__dict__['s'], attr)
class HTTPClient(basic.LineReceiver):
"""
A client for HTTP 1.0.
Notes:
You probably want to send a 'Host' header with the name of the site you're
connecting to, in order to not break name based virtual hosting.
@ivar length: The length of the request body in bytes.
@type length: C{int}
@ivar firstLine: Are we waiting for the first header line?
@type firstLine: C{bool}
@ivar __buffer: The buffer that stores the response to the HTTP request.
@type __buffer: A C{StringIO} object.
@ivar _header: Part or all of an HTTP request header.
@type _header: C{str}
"""
length = None
firstLine = True
__buffer = None
_header = ""
def sendCommand(self, command, path):
self.transport.write('%s %s HTTP/1.0\r\n' % (command, path))
def sendHeader(self, name, value):
self.transport.write('%s: %s\r\n' % (name, value))
def endHeaders(self):
self.transport.write('\r\n')
def extractHeader(self, header):
"""
Given a complete HTTP header, extract the field name and value and
process the header.
@param header: a complete HTTP request header of the form
'field-name: value'.
@type header: C{str}
"""
key, val = header.split(':', 1)
val = val.lstrip()
self.handleHeader(key, val)
if key.lower() == 'content-length':
self.length = int(val)
def lineReceived(self, line):
"""
Parse the status line and headers for an HTTP request.
@param line: Part of an HTTP request header. Request bodies are parsed
in L{rawDataReceived}.
@type line: C{str}
"""
if self.firstLine:
self.firstLine = False
l = line.split(None, 2)
version = l[0]
status = l[1]
try:
message = l[2]
except IndexError:
# sometimes there is no message
message = ""
self.handleStatus(version, status, message)
return
if not line:
if self._header != "":
# Only extract headers if there are any
self.extractHeader(self._header)
self.__buffer = StringIO()
self.handleEndHeaders()
self.setRawMode()
return
if line.startswith('\t') or line.startswith(' '):
# This line is part of a multiline header. According to RFC 822, in
# "unfolding" multiline headers you do not strip the leading
# whitespace on the continuing line.
self._header = self._header + line
elif self._header:
# This line starts a new header, so process the previous one.
self.extractHeader(self._header)
self._header = line
else: # First header
self._header = line
def connectionLost(self, reason):
self.handleResponseEnd()
def handleResponseEnd(self):
"""
The response has been completely received.
This callback may be invoked more than once per request.
"""
if self.__buffer is not None:
b = self.__buffer.getvalue()
self.__buffer = None
self.handleResponse(b)
def handleResponsePart(self, data):
self.__buffer.write(data)
def connectionMade(self):
pass
def handleStatus(self, version, status, message):
"""
Called when the status-line is received.
@param version: e.g. 'HTTP/1.0'
@param status: e.g. '200'
@type status: C{str}
@param message: e.g. 'OK'
"""
def handleHeader(self, key, val):
"""
Called every time a header is received.
"""
def handleEndHeaders(self):
"""
Called when all headers have been received.
"""
def rawDataReceived(self, data):
if self.length is not None:
data, rest = data[:self.length], data[self.length:]
self.length -= len(data)
else:
rest = ''
self.handleResponsePart(data)
if self.length == 0:
self.handleResponseEnd()
self.setLineMode(rest)
# response codes that must have empty bodies
NO_BODY_CODES = (204, 304)
class Request:
"""
    An HTTP request.
Subclasses should override the process() method to determine how
the request will be processed.
@ivar method: The HTTP method that was used.
@ivar uri: The full URI that was requested (includes arguments).
@ivar path: The path only (arguments not included).
@ivar args: All of the arguments, including URL and POST arguments.
@type args: A mapping of strings (the argument names) to lists of values.
i.e., ?foo=bar&foo=baz&quux=spam results in
{'foo': ['bar', 'baz'], 'quux': ['spam']}.
@type requestHeaders: L{http_headers.Headers}
@ivar requestHeaders: All received HTTP request headers.
@ivar received_headers: Backwards-compatibility access to
C{requestHeaders}. Use C{requestHeaders} instead. C{received_headers}
behaves mostly like a C{dict} and does not provide access to all header
values.
@type responseHeaders: L{http_headers.Headers}
@ivar responseHeaders: All HTTP response headers to be sent.
@ivar headers: Backwards-compatibility access to C{responseHeaders}. Use
C{responseHeaders} instead. C{headers} behaves mostly like a C{dict}
and does not provide access to all header values nor does it allow
multiple values for one header to be set.
@ivar notifications: A C{list} of L{Deferred}s which are waiting for
notification that the response to this request has been finished
(successfully or with an error). Don't use this attribute directly,
instead use the L{Request.notifyFinish} method.
@ivar _disconnected: A flag which is C{False} until the connection over
which this request was received is closed and which is C{True} after
that.
@type _disconnected: C{bool}
"""
implements(interfaces.IConsumer)
producer = None
finished = 0
code = OK
code_message = RESPONSES[OK]
method = "(no method yet)"
clientproto = "(no clientproto yet)"
uri = "(no uri yet)"
startedWriting = 0
chunked = 0
sentLength = 0 # content-length of response, or total bytes sent via chunking
etag = None
lastModified = None
args = None
path = None
content = None
_forceSSL = 0
_disconnected = False
def __init__(self, channel, queued):
"""
@param channel: the channel we're connected to.
@param queued: are we in the request queue, or can we start writing to
the transport?
"""
self.notifications = []
self.channel = channel
self.queued = queued
self.requestHeaders = Headers()
self.received_cookies = {}
self.responseHeaders = Headers()
self.cookies = [] # outgoing cookies
if queued:
self.transport = StringTransport()
else:
self.transport = self.channel.transport
def __setattr__(self, name, value):
"""
Support assignment of C{dict} instances to C{received_headers} for
backwards-compatibility.
"""
if name == 'received_headers':
# A property would be nice, but Request is classic.
self.requestHeaders = headers = Headers()
for k, v in value.iteritems():
headers.setRawHeaders(k, [v])
elif name == 'requestHeaders':
self.__dict__[name] = value
self.__dict__['received_headers'] = _DictHeaders(value)
elif name == 'headers':
self.responseHeaders = headers = Headers()
for k, v in value.iteritems():
headers.setRawHeaders(k, [v])
elif name == 'responseHeaders':
self.__dict__[name] = value
self.__dict__['headers'] = _DictHeaders(value)
else:
self.__dict__[name] = value
def _cleanup(self):
"""
Called when have finished responding and are no longer queued.
"""
if self.producer:
log.err(RuntimeError("Producer was not unregistered for %s" % self.uri))
self.unregisterProducer()
self.channel.requestDone(self)
del self.channel
try:
self.content.close()
except OSError:
# win32 suckiness, no idea why it does this
pass
del self.content
for d in self.notifications:
d.callback(None)
self.notifications = []
# methods for channel - end users should not use these
def noLongerQueued(self):
"""
Notify the object that it is no longer queued.
We start writing whatever data we have to the transport, etc.
This method is not intended for users.
"""
if not self.queued:
            raise RuntimeError("noLongerQueued() got called unnecessarily.")
self.queued = 0
# set transport to real one and send any buffer data
data = self.transport.getvalue()
self.transport = self.channel.transport
if data:
self.transport.write(data)
# if we have producer, register it with transport
if (self.producer is not None) and not self.finished:
self.transport.registerProducer(self.producer, self.streamingProducer)
# if we're finished, clean up
if self.finished:
self._cleanup()
def gotLength(self, length):
"""
Called when HTTP channel got length of content in this request.
This method is not intended for users.
@param length: The length of the request body, as indicated by the
request headers. C{None} if the request headers do not indicate a
length.
"""
if length is not None and length < 100000:
self.content = StringIO()
else:
self.content = tempfile.TemporaryFile()
def parseCookies(self):
"""
Parse cookie headers.
This method is not intended for users.
"""
cookieheaders = self.requestHeaders.getRawHeaders("cookie")
if cookieheaders is None:
return
for cookietxt in cookieheaders:
if cookietxt:
for cook in cookietxt.split(';'):
cook = cook.lstrip()
try:
k, v = cook.split('=', 1)
self.received_cookies[k] = v
except ValueError:
pass
def handleContentChunk(self, data):
"""
Write a chunk of data.
This method is not intended for users.
"""
self.content.write(data)
def requestReceived(self, command, path, version):
"""
Called by channel when all data has been received.
This method is not intended for users.
@type command: C{str}
@param command: The HTTP verb of this request. This has the case
            supplied by the client (e.g., it may be "get" rather than "GET").
@type path: C{str}
@param path: The URI of this request.
@type version: C{str}
@param version: The HTTP version of this request.
"""
self.content.seek(0,0)
self.args = {}
self.stack = []
self.method, self.uri = command, path
self.clientproto = version
x = self.uri.split('?', 1)
if len(x) == 1:
self.path = self.uri
else:
self.path, argstring = x
self.args = parse_qs(argstring, 1)
# cache the client and server information, we'll need this later to be
# serialized and sent with the request so CGIs will work remotely
self.client = self.channel.transport.getPeer()
self.host = self.channel.transport.getHost()
# Argument processing
args = self.args
ctype = self.requestHeaders.getRawHeaders('content-type')
if ctype is not None:
ctype = ctype[0]
if self.method == "POST" and ctype:
mfd = 'multipart/form-data'
key, pdict = cgi.parse_header(ctype)
if key == 'application/x-www-form-urlencoded':
args.update(parse_qs(self.content.read(), 1))
elif key == mfd:
try:
args.update(cgi.parse_multipart(self.content, pdict))
except KeyError, e:
if e.args[0] == 'content-disposition':
# Parse_multipart can't cope with missing
                        # content-disposition headers in multipart/form-data
# parts, so we catch the exception and tell the client
# it was a bad request.
self.channel.transport.write(
"HTTP/1.1 400 Bad Request\r\n\r\n")
self.channel.transport.loseConnection()
return
raise
self.content.seek(0, 0)
self.process()
def __repr__(self):
return '<%s %s %s>'% (self.method, self.uri, self.clientproto)
def process(self):
"""
Override in subclasses.
This method is not intended for users.
"""
pass
# consumer interface
def registerProducer(self, producer, streaming):
"""
Register a producer.
"""
if self.producer:
            raise ValueError(
                "registering producer %s before previous one (%s) was "
                "unregistered" % (producer, self.producer))
self.streamingProducer = streaming
self.producer = producer
if self.queued:
if streaming:
producer.pauseProducing()
else:
self.transport.registerProducer(producer, streaming)
def unregisterProducer(self):
"""
Unregister the producer.
"""
if not self.queued:
self.transport.unregisterProducer()
self.producer = None
# private http response methods
def _sendError(self, code, resp=''):
self.transport.write('%s %s %s\r\n\r\n' % (self.clientproto, code, resp))
# The following is the public interface that people should be
# writing to.
def getHeader(self, key):
"""
Get an HTTP request header.
@type key: C{str}
@param key: The name of the header to get the value of.
@rtype: C{str} or C{NoneType}
@return: The value of the specified header, or C{None} if that header
was not present in the request.
"""
value = self.requestHeaders.getRawHeaders(key)
if value is not None:
return value[-1]
def getCookie(self, key):
"""
Get a cookie that was sent from the network.
"""
return self.received_cookies.get(key)
def notifyFinish(self):
"""
Notify when the response to this request has finished.
@rtype: L{Deferred}
@return: A L{Deferred} which will be triggered when the request is
finished -- with a C{None} value if the request finishes
successfully or with an error if the request is interrupted by an
error (for example, the client closing the connection prematurely).
"""
self.notifications.append(Deferred())
return self.notifications[-1]
def finish(self):
"""
Indicate that all response data has been written to this L{Request}.
"""
if self._disconnected:
raise RuntimeError(
"Request.finish called on a request after its connection was lost; "
"use Request.notifyFinish to keep track of this.")
if self.finished:
warnings.warn("Warning! request.finish called twice.", stacklevel=2)
return
if not self.startedWriting:
# write headers
self.write('')
if self.chunked:
# write last chunk and closing CRLF
self.transport.write("0\r\n\r\n")
# log request
if hasattr(self.channel, "factory"):
self.channel.factory.log(self)
self.finished = 1
if not self.queued:
self._cleanup()
def write(self, data):
"""
Write some data as a result of an HTTP request. The first
time this is called, it writes out response data.
@type data: C{str}
@param data: Some bytes to be sent as part of the response body.
"""
if self.finished:
raise RuntimeError('Request.write called on a request after '
'Request.finish was called.')
if not self.startedWriting:
self.startedWriting = 1
version = self.clientproto
l = []
l.append('%s %s %s\r\n' % (version, self.code,
self.code_message))
# if we don't have a content length, we send data in
# chunked mode, so that we can support pipelining in
# persistent connections.
if ((version == "HTTP/1.1") and
(self.responseHeaders.getRawHeaders('content-length') is None) and
self.method != "HEAD" and self.code not in NO_BODY_CODES):
l.append("%s: %s\r\n" % ('Transfer-Encoding', 'chunked'))
self.chunked = 1
if self.lastModified is not None:
if self.responseHeaders.hasHeader('last-modified'):
log.msg("Warning: last-modified specified both in"
" header list and lastModified attribute.")
else:
self.responseHeaders.setRawHeaders(
'last-modified',
[datetimeToString(self.lastModified)])
if self.etag is not None:
self.responseHeaders.setRawHeaders('ETag', [self.etag])
for name, values in self.responseHeaders.getAllRawHeaders():
for value in values:
l.append("%s: %s\r\n" % (name, value))
for cookie in self.cookies:
l.append('%s: %s\r\n' % ("Set-Cookie", cookie))
l.append("\r\n")
self.transport.writeSequence(l)
# if this is a "HEAD" request, we shouldn't return any data
if self.method == "HEAD":
self.write = lambda data: None
return
# for certain result codes, we should never return any data
if self.code in NO_BODY_CODES:
self.write = lambda data: None
return
self.sentLength = self.sentLength + len(data)
if data:
if self.chunked:
self.transport.writeSequence(toChunk(data))
else:
self.transport.write(data)
def addCookie(self, k, v, expires=None, domain=None, path=None, max_age=None, comment=None, secure=None):
"""
Set an outgoing HTTP cookie.
In general, you should consider using sessions instead of cookies, see
L{twisted.web.server.Request.getSession} and the
L{twisted.web.server.Session} class for details.
"""
cookie = '%s=%s' % (k, v)
if expires is not None:
cookie = cookie +"; Expires=%s" % expires
if domain is not None:
cookie = cookie +"; Domain=%s" % domain
if path is not None:
cookie = cookie +"; Path=%s" % path
if max_age is not None:
cookie = cookie +"; Max-Age=%s" % max_age
if comment is not None:
cookie = cookie +"; Comment=%s" % comment
if secure:
cookie = cookie +"; Secure"
self.cookies.append(cookie)
def setResponseCode(self, code, message=None):
"""
Set the HTTP response code.
"""
if not isinstance(code, (int, long)):
raise TypeError("HTTP response code must be int or long")
self.code = code
if message:
self.code_message = message
else:
self.code_message = RESPONSES.get(code, "Unknown Status")
def setHeader(self, name, value):
"""
Set an HTTP response header. Overrides any previously set values for
this header.
@type name: C{str}
@param name: The name of the header for which to set the value.
@type value: C{str}
@param value: The value to set for the named header.
"""
self.responseHeaders.setRawHeaders(name, [value])
def redirect(self, url):
"""
Utility function that does a redirect.
The request should have finish() called after this.
"""
self.setResponseCode(FOUND)
self.setHeader("location", url)
def setLastModified(self, when):
"""
Set the C{Last-Modified} time for the response to this request.
If I am called more than once, I ignore attempts to set
Last-Modified earlier, only replacing the Last-Modified time
if it is to a later value.
If I am a conditional request, I may modify my response code
to L{NOT_MODIFIED} if appropriate for the time given.
@param when: The last time the resource being returned was
modified, in seconds since the epoch.
@type when: number
@return: If I am a C{If-Modified-Since} conditional request and
the time given is not newer than the condition, I return
L{http.CACHED<CACHED>} to indicate that you should write no
body. Otherwise, I return a false value.
"""
# time.time() may be a float, but the HTTP-date strings are
# only good for whole seconds.
when = long(math.ceil(when))
if (not self.lastModified) or (self.lastModified < when):
self.lastModified = when
modifiedSince = self.getHeader('if-modified-since')
if modifiedSince:
firstPart = modifiedSince.split(';', 1)[0]
try:
modifiedSince = stringToDatetime(firstPart)
except ValueError:
return None
if modifiedSince >= when:
self.setResponseCode(NOT_MODIFIED)
return CACHED
return None
def setETag(self, etag):
"""
Set an C{entity tag} for the outgoing response.
That's \"entity tag\" as in the HTTP/1.1 C{ETag} header, \"used
for comparing two or more entities from the same requested
resource.\"
If I am a conditional request, I may modify my response code
to L{NOT_MODIFIED} or L{PRECONDITION_FAILED}, if appropriate
for the tag given.
@param etag: The entity tag for the resource being returned.
@type etag: string
@return: If I am a C{If-None-Match} conditional request and
the tag matches one in the request, I return
L{http.CACHED<CACHED>} to indicate that you should write
no body. Otherwise, I return a false value.
"""
if etag:
self.etag = etag
tags = self.getHeader("if-none-match")
if tags:
tags = tags.split()
if (etag in tags) or ('*' in tags):
self.setResponseCode(((self.method in ("HEAD", "GET"))
and NOT_MODIFIED)
or PRECONDITION_FAILED)
return CACHED
return None
def getAllHeaders(self):
"""
Return dictionary mapping the names of all received headers to the last
value received for each.
Since this method does not return all header information,
C{self.requestHeaders.getAllRawHeaders()} may be preferred.
"""
headers = {}
for k, v in self.requestHeaders.getAllRawHeaders():
headers[k.lower()] = v[-1]
return headers
def getRequestHostname(self):
"""
Get the hostname that the user passed in to the request.
This will either use the Host: header (if it is available) or the
host we are listening on if the header is unavailable.
@returns: the requested hostname
@rtype: C{str}
"""
# XXX This method probably has no unit tests. I changed it a ton and
# nothing failed.
host = self.getHeader('host')
if host:
return host.split(':', 1)[0]
return self.getHost().host
def getHost(self):
"""
Get my originally requesting transport's host.
Don't rely on the 'transport' attribute, since Request objects may be
copied remotely. For information on this method's return value, see
twisted.internet.tcp.Port.
"""
return self.host
def setHost(self, host, port, ssl=0):
"""
Change the host and port the request thinks it's using.
This method is useful for working with reverse HTTP proxies (e.g.
both Squid and Apache's mod_proxy can do this), when the address
the HTTP client is using is different than the one we're listening on.
        For example, Apache may be listening on https://www.example.com and
        forwarding requests to http://localhost:8080, but we don't want HTML
        produced by Twisted to say 'http://localhost:8080'; it should say
        'https://www.example.com', so we do::
request.setHost('www.example.com', 443, ssl=1)
@type host: C{str}
@param host: The value to which to change the host header.
@type ssl: C{bool}
@param ssl: A flag which, if C{True}, indicates that the request is
considered secure (if C{True}, L{isSecure} will return C{True}).
"""
self._forceSSL = ssl # set first so isSecure will work
if self.isSecure():
default = 443
else:
default = 80
if port == default:
hostHeader = host
else:
hostHeader = '%s:%d' % (host, port)
self.requestHeaders.setRawHeaders("host", [hostHeader])
self.host = address.IPv4Address("TCP", host, port)
def getClientIP(self):
"""
Return the IP address of the client who submitted this request.
@returns: the client IP address
@rtype: C{str}
"""
if isinstance(self.client, address.IPv4Address):
return self.client.host
else:
return None
def isSecure(self):
"""
Return True if this request is using a secure transport.
Normally this method returns True if this request's HTTPChannel
instance is using a transport that implements ISSLTransport.
This will also return True if setHost() has been called
with ssl=True.
@returns: True if this request is secure
@rtype: C{bool}
"""
if self._forceSSL:
return True
transport = getattr(getattr(self, 'channel', None), 'transport', None)
if interfaces.ISSLTransport(transport, None) is not None:
return True
return False
def _authorize(self):
        # Authorization (mostly) per the RFC
try:
authh = self.getHeader("Authorization")
if not authh:
self.user = self.password = ''
return
bas, upw = authh.split()
if bas.lower() != "basic":
raise ValueError
upw = base64.decodestring(upw)
self.user, self.password = upw.split(':', 1)
except (binascii.Error, ValueError):
self.user = self.password = ""
except:
log.err()
self.user = self.password = ""
def getUser(self):
"""
Return the HTTP user sent with this request, if any.
If no user was supplied, return the empty string.
@returns: the HTTP user, if any
@rtype: C{str}
"""
try:
return self.user
        except AttributeError:
pass
self._authorize()
return self.user
def getPassword(self):
"""
Return the HTTP password sent with this request, if any.
If no password was supplied, return the empty string.
@returns: the HTTP password, if any
@rtype: C{str}
"""
try:
return self.password
        except AttributeError:
pass
self._authorize()
return self.password
def getClient(self):
if self.client.type != 'TCP':
return None
host = self.client.host
try:
name, names, addresses = socket.gethostbyaddr(host)
except socket.error:
return host
names.insert(0, name)
for name in names:
if '.' in name:
return name
return names[0]
def connectionLost(self, reason):
"""
There is no longer a connection for this request to respond over.
Clean up anything which can't be useful anymore.
"""
self._disconnected = True
self.channel = None
if self.content is not None:
self.content.close()
for d in self.notifications:
d.errback(reason)
self.notifications = []
class _DataLoss(Exception):
"""
L{_DataLoss} indicates that not all of a message body was received. This
is only one of several possible exceptions which may indicate that data
    was lost. Because of this, it should not be checked for
specifically; any unexpected exception should be treated as having
caused data loss.
"""
class PotentialDataLoss(Exception):
"""
L{PotentialDataLoss} may be raised by a transfer encoding decoder's
C{noMoreData} method to indicate that it cannot be determined if the
entire response body has been delivered. This only occurs when making
requests to HTTP servers which do not set I{Content-Length} or a
I{Transfer-Encoding} in the response because in this case the end of the
response is indicated by the connection being closed, an event which may
also be due to a transient network problem or other error.
"""
class _IdentityTransferDecoder(object):
"""
Protocol for accumulating bytes up to a specified length. This handles the
case where no I{Transfer-Encoding} is specified.
@ivar contentLength: Counter keeping track of how many more bytes there are
to receive.
@ivar dataCallback: A one-argument callable which will be invoked each
time application data is received.
@ivar finishCallback: A one-argument callable which will be invoked when
the terminal chunk is received. It will be invoked with all bytes
which were delivered to this protocol which came after the terminal
chunk.
"""
def __init__(self, contentLength, dataCallback, finishCallback):
self.contentLength = contentLength
self.dataCallback = dataCallback
self.finishCallback = finishCallback
def dataReceived(self, data):
"""
Interpret the next chunk of bytes received. Either deliver them to the
data callback or invoke the finish callback if enough bytes have been
received.
@raise RuntimeError: If the finish callback has already been invoked
            during a previous call to this method.
"""
if self.dataCallback is None:
raise RuntimeError(
"_IdentityTransferDecoder cannot decode data after finishing")
if self.contentLength is None:
self.dataCallback(data)
elif len(data) < self.contentLength:
self.contentLength -= len(data)
self.dataCallback(data)
else:
# Make the state consistent before invoking any code belonging to
# anyone else in case noMoreData ends up being called beneath this
# stack frame.
contentLength = self.contentLength
dataCallback = self.dataCallback
finishCallback = self.finishCallback
self.dataCallback = self.finishCallback = None
self.contentLength = 0
dataCallback(data[:contentLength])
finishCallback(data[contentLength:])
def noMoreData(self):
"""
All data which will be delivered to this decoder has been. Check to
make sure as much data as was expected has been received.
@raise PotentialDataLoss: If the content length is unknown.
@raise _DataLoss: If the content length is known and fewer than that
many bytes have been delivered.
@return: C{None}
"""
finishCallback = self.finishCallback
self.dataCallback = self.finishCallback = None
if self.contentLength is None:
finishCallback('')
raise PotentialDataLoss()
elif self.contentLength != 0:
raise _DataLoss()
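# Usage sketch (added; not part of the module): an identity decoder splits a
# byte stream at the declared content length and hands the surplus to the
# finish callback.
def _example_identity_decoder():
    received, leftover = [], []
    decoder = _IdentityTransferDecoder(5, received.append, leftover.append)
    decoder.dataReceived('helloEXTRA')
    assert received == ['hello'] and leftover == ['EXTRA']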
class _ChunkedTransferDecoder(object):
"""
Protocol for decoding I{chunked} Transfer-Encoding, as defined by RFC 2616,
section 3.6.1. This protocol can interpret the contents of a request or
response body which uses the I{chunked} Transfer-Encoding. It cannot
interpret any of the rest of the HTTP protocol.
It may make sense for _ChunkedTransferDecoder to be an actual IProtocol
implementation. Currently, the only user of this class will only ever
call dataReceived on it. However, it might be an improvement if the
user could connect this to a transport and deliver connection lost
notification. This way, `dataCallback` becomes `self.transport.write`
and perhaps `finishCallback` becomes `self.transport.loseConnection()`
(although I'm not sure where the extra data goes in that case). This
could also allow this object to indicate to the receiver of data that
the stream was not completely received, an error case which should be
noticed. -exarkun
@ivar dataCallback: A one-argument callable which will be invoked each
time application data is received.
@ivar finishCallback: A one-argument callable which will be invoked when
the terminal chunk is received. It will be invoked with all bytes
which were delivered to this protocol which came after the terminal
chunk.
@ivar length: Counter keeping track of how many more bytes in a chunk there
are to receive.
@ivar state: One of C{'chunk-length'}, C{'trailer'}, C{'body'}, or
C{'finished'}. For C{'chunk-length'}, data for the chunk length line
is currently being read. For C{'trailer'}, the CR LF pair which
follows each chunk is being read. For C{'body'}, the contents of a
chunk are being read. For C{'finished'}, the last chunk has been
completely read and no more input is valid.
@ivar finish: A flag indicating that the last chunk has been started. When
it finishes, the state will change to C{'finished'} and no more data
will be accepted.
"""
state = 'chunk-length'
finish = False
def __init__(self, dataCallback, finishCallback):
self.dataCallback = dataCallback
self.finishCallback = finishCallback
self._buffer = ''
def dataReceived(self, data):
"""
Interpret data from a request or response body which uses the
I{chunked} Transfer-Encoding.
"""
data = self._buffer + data
self._buffer = ''
while data:
if self.state == 'chunk-length':
if '\r\n' in data:
line, rest = data.split('\r\n', 1)
parts = line.split(';')
self.length = int(parts[0], 16)
if self.length == 0:
self.state = 'trailer'
self.finish = True
else:
self.state = 'body'
data = rest
else:
self._buffer = data
data = ''
elif self.state == 'trailer':
if data.startswith('\r\n'):
data = data[2:]
if self.finish:
self.state = 'finished'
self.finishCallback(data)
data = ''
else:
self.state = 'chunk-length'
else:
self._buffer = data
data = ''
elif self.state == 'body':
if len(data) >= self.length:
chunk, data = data[:self.length], data[self.length:]
self.dataCallback(chunk)
self.state = 'trailer'
elif len(data) < self.length:
self.length -= len(data)
self.dataCallback(data)
data = ''
elif self.state == 'finished':
raise RuntimeError(
"_ChunkedTransferDecoder.dataReceived called after last "
"chunk was processed")
def noMoreData(self):
"""
Verify that all data has been received. If it has not been, raise
L{_DataLoss}.
"""
if self.state != 'finished':
raise _DataLoss(
"Chunked decoder in %r state, still expecting more data to "
"get to finished state." % (self.state,))
class HTTPChannel(basic.LineReceiver, policies.TimeoutMixin):
"""
A receiver for HTTP requests.
@ivar _transferDecoder: C{None} or an instance of
L{_ChunkedTransferDecoder} if the request body uses the I{chunked}
Transfer-Encoding.
"""
maxHeaders = 500 # max number of headers allowed per request
length = 0
persistent = 1
__header = ''
__first_line = 1
__content = None
# set in instances or subclasses
requestFactory = Request
_savedTimeOut = None
_receivedHeaderCount = 0
def __init__(self):
# the request queue
self.requests = []
self._transferDecoder = None
def connectionMade(self):
self.setTimeout(self.timeOut)
def lineReceived(self, line):
self.resetTimeout()
if self.__first_line:
# if this connection is not persistent, drop any data which
# the client (illegally) sent after the last request.
if not self.persistent:
self.dataReceived = self.lineReceived = lambda *args: None
return
# IE sends an extraneous empty line (\r\n) after a POST request;
# eat up such a line, but only ONCE
if not line and self.__first_line == 1:
self.__first_line = 2
return
# create a new Request object
request = self.requestFactory(self, len(self.requests))
self.requests.append(request)
self.__first_line = 0
parts = line.split()
if len(parts) != 3:
self.transport.write("HTTP/1.1 400 Bad Request\r\n\r\n")
self.transport.loseConnection()
return
command, request, version = parts
self._command = command
self._path = request
self._version = version
elif line == '':
if self.__header:
self.headerReceived(self.__header)
self.__header = ''
self.allHeadersReceived()
if self.length == 0:
self.allContentReceived()
else:
self.setRawMode()
elif line[0] in ' \t':
self.__header = self.__header+'\n'+line
else:
if self.__header:
self.headerReceived(self.__header)
self.__header = line
def _finishRequestBody(self, data):
self.allContentReceived()
self.setLineMode(data)
def headerReceived(self, line):
"""
Do pre-processing (for content-length) and store this header away.
Enforce the per-request header limit.
@type line: C{str}
@param line: A line from the header section of a request, excluding the
line delimiter.
"""
header, data = line.split(':', 1)
header = header.lower()
data = data.strip()
if header == 'content-length':
self.length = int(data)
self._transferDecoder = _IdentityTransferDecoder(
self.length, self.requests[-1].handleContentChunk, self._finishRequestBody)
elif header == 'transfer-encoding' and data.lower() == 'chunked':
self.length = None
self._transferDecoder = _ChunkedTransferDecoder(
self.requests[-1].handleContentChunk, self._finishRequestBody)
reqHeaders = self.requests[-1].requestHeaders
values = reqHeaders.getRawHeaders(header)
if values is not None:
values.append(data)
else:
reqHeaders.setRawHeaders(header, [data])
self._receivedHeaderCount += 1
if self._receivedHeaderCount > self.maxHeaders:
self.transport.write("HTTP/1.1 400 Bad Request\r\n\r\n")
self.transport.loseConnection()
def allContentReceived(self):
command = self._command
path = self._path
version = self._version
# reset ALL state variables, so we don't interfere with next request
self.length = 0
self._receivedHeaderCount = 0
self.__first_line = 1
self._transferDecoder = None
del self._command, self._path, self._version
# Disable the idle timeout, in case this request takes a long
# time to finish generating output.
if self.timeOut:
self._savedTimeOut = self.setTimeout(None)
req = self.requests[-1]
req.requestReceived(command, path, version)
def rawDataReceived(self, data):
self.resetTimeout()
self._transferDecoder.dataReceived(data)
def allHeadersReceived(self):
req = self.requests[-1]
req.parseCookies()
self.persistent = self.checkPersistence(req, self._version)
req.gotLength(self.length)
def checkPersistence(self, request, version):
"""
Check if the channel should close or not.
@param request: The request most recently received over this channel
against which checks will be made to determine if this connection
can remain open after a matching response is returned.
@type version: C{str}
@param version: The version of the request.
@rtype: C{bool}
@return: A flag which, if C{True}, indicates that this connection may
remain open to receive another request; if C{False}, the connection
must be closed in order to indicate the completion of the response
to C{request}.
"""
connection = request.requestHeaders.getRawHeaders('connection')
if connection:
tokens = map(str.lower, connection[0].split(' '))
else:
tokens = []
# HTTP 1.0 persistent connection support is currently disabled,
# since we need a way to disable pipelining. HTTP 1.0 can't do
# pipelining since we can't know in advance if we'll have a
        # content-length header; if we don't have the header, we need to close the
# connection. In HTTP 1.1 this is not an issue since we use chunked
# encoding if content-length is not available.
#if version == "HTTP/1.0":
# if 'keep-alive' in tokens:
# request.setHeader('connection', 'Keep-Alive')
# return 1
# else:
# return 0
if version == "HTTP/1.1":
if 'close' in tokens:
request.responseHeaders.setRawHeaders('connection', ['close'])
return False
else:
return True
else:
return False
def requestDone(self, request):
"""
Called by first request in queue when it is done.
"""
        if request != self.requests[0]:
            raise TypeError("requestDone called for a request that is not "
                            "at the head of the queue")
del self.requests[0]
if self.persistent:
# notify next request it can start writing
if self.requests:
self.requests[0].noLongerQueued()
else:
if self._savedTimeOut:
self.setTimeout(self._savedTimeOut)
else:
self.transport.loseConnection()
def timeoutConnection(self):
log.msg("Timing out client: %s" % str(self.transport.getPeer()))
policies.TimeoutMixin.timeoutConnection(self)
def connectionLost(self, reason):
self.setTimeout(None)
for request in self.requests:
request.connectionLost(reason)
class HTTPFactory(protocol.ServerFactory):
"""
Factory for HTTP server.
@ivar _logDateTime: A cached datetime string for log messages, updated by
C{_logDateTimeCall}.
@type _logDateTime: L{str}
@ivar _logDateTimeCall: A delayed call for the next update to the cached log
datetime string.
    @type _logDateTimeCall: L{IDelayedCall} provider
"""
protocol = HTTPChannel
logPath = None
timeOut = 60 * 60 * 12
def __init__(self, logPath=None, timeout=60*60*12):
if logPath is not None:
logPath = os.path.abspath(logPath)
self.logPath = logPath
self.timeOut = timeout
# For storing the cached log datetime and the callback to update it
self._logDateTime = None
self._logDateTimeCall = None
def _updateLogDateTime(self):
"""
Update log datetime periodically, so we aren't always recalculating it.
"""
self._logDateTime = datetimeToLogString()
self._logDateTimeCall = reactor.callLater(1, self._updateLogDateTime)
def buildProtocol(self, addr):
p = protocol.ServerFactory.buildProtocol(self, addr)
        # timeOut needs to be on the Protocol instance because
        # TimeoutMixin expects it there
p.timeOut = self.timeOut
return p
def startFactory(self):
"""
Set up request logging if necessary.
"""
if self._logDateTimeCall is None:
self._updateLogDateTime()
if self.logPath:
self.logFile = self._openLogFile(self.logPath)
else:
self.logFile = log.logfile
def stopFactory(self):
if hasattr(self, "logFile"):
if self.logFile != log.logfile:
self.logFile.close()
del self.logFile
if self._logDateTimeCall is not None and self._logDateTimeCall.active():
self._logDateTimeCall.cancel()
self._logDateTimeCall = None
def _openLogFile(self, path):
"""
Override in subclasses, e.g. to use twisted.python.logfile.
"""
f = open(path, "a", 1)
return f
def _escape(self, s):
# pain in the ass. Return a string like python repr, but always
# escaped as if surrounding quotes were "".
r = repr(s)
if r[0] == "'":
return r[1:-1].replace('"', '\\"').replace("\\'", "'")
return r[1:-1]
def log(self, request):
"""
Log a request's result to the logfile, by default in combined log format.
"""
if hasattr(self, "logFile"):
line = '%s - - %s "%s" %d %s "%s" "%s"\n' % (
request.getClientIP(),
# request.getUser() or "-", # the remote user is almost never important
self._logDateTime,
'%s %s %s' % (self._escape(request.method),
self._escape(request.uri),
self._escape(request.clientproto)),
request.code,
request.sentLength or "-",
self._escape(request.getHeader("referer") or "-"),
self._escape(request.getHeader("user-agent") or "-"))
self.logFile.write(line)
|
agpl-3.0
|
r4vi/open-ihm
|
src/openihm/gui/interface/frmhousehold_addincome_employment.py
|
3
|
4830
|
#!/usr/bin/env python
"""
This file is part of open-ihm.
open-ihm is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
open-ihm is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with open-ihm. If not, see <http://www.gnu.org/licenses/>.
"""
# imports from PyQt4 package
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import uic
from data.config import Config
Ui_AddHouseholdIncomeEmployment, base_class = uic.loadUiType("gui/designs/ui_household_income_employment.ui")
from mixins import MDIDialogMixin, MySQLMixin
class FrmHouseholdEmploymentIncome(QDialog, Ui_AddHouseholdIncomeEmployment, MySQLMixin, MDIDialogMixin):
''' Form to add or edit a Household Employment Income '''
def __init__(self, parent, hhid, hhname, incomeid = 0 ):
''' Set up the dialog box interface '''
QDialog.__init__(self)
self.setupUi(self)
self.parent = parent
self.hhid = hhid
self.pid = parent.parent.projectid
self.incomeid = incomeid
self.config = Config.dbinfo().copy()
self.getEmploymentTypes()
self.getFoodTypes()
if ( incomeid != 0 ):
self.displayIncomeDetails()
self.setWindowTitle( "Edit Income Item" )
# display household name
self.lblHouseholdName.setText(hhname)
def getEmploymentTypes(self):
''' Retrieve Employment Types and display them in a combobox '''
# select query to Employment Types
query = '''SELECT incomesource FROM setup_employment'''
rows = self.executeResultsQuery(query)
for row in rows:
employmenttype = row[0]
self.cboEmploymentType.addItem(employmenttype)
def getFoodTypes(self):
''' Retrieve Food Types and display them in a combobox listing type of food payments '''
self.cboFoodType.addItem("None")
# select query to Food Types
query = '''SELECT name FROM setup_foods_crops '''
rows = self.executeResultsQuery(query)
for row in rows:
foodtype = row[0]
self.cboFoodType.addItem(foodtype)
def displayIncomeDetails(self):
''' Retrieve and display Household Income details '''
query = '''SELECT *
FROM employmentincome WHERE hhid=%s AND pid=%s AND id=%s ''' % ( self.hhid, self.pid, self.incomeid )
rows = self.executeResultsQuery(query)
for row in rows:
employmenttype = row[2]
self.cboEmploymentType.setCurrentIndex( self.cboEmploymentType.findText( employmenttype ) )
foodtype = row[3]
self.cboFoodType.setCurrentIndex( self.cboFoodType.findText( foodtype ) )
unitofmeasure = row[4]
self.txtUnitOfMeasure.setText( str(unitofmeasure) )
unitspaid = row[5]
self.txtUnitsPaid.setText( str(unitspaid) )
incomekcal = row[6]
self.txtTotalEnergyValue.setText( str(incomekcal) )
cashincome = row[7]
self.txtCashPaid.setText( str(cashincome) )
def saveIncome(self):
''' Saves employment income to database '''
# get the data entered by user
employmenttype = self.cboEmploymentType.currentText()
foodtype = self.cboFoodType.currentText()
if (foodtype != "None"):
unitofmeasure = self.txtUnitOfMeasure.text() if self.txtUnitOfMeasure.text() != "" else "n/a"
unitspaid = self.txtUnitsPaid.text() if self.txtUnitsPaid.text() != "" else "0"
incomekcal = self.txtTotalEnergyValue.text() if self.txtTotalEnergyValue.text() != "" else "0"
else:
unitofmeasure = "n/a"
unitspaid = "0"
incomekcal = "0"
cashpaid = self.txtCashPaid.text() if self.txtCashPaid.text() != "" else "0"
        # create INSERT or UPDATE query
if (self.incomeid == 0):
query = '''INSERT INTO employmentincome (hhid, incomesource, foodtypepaid, unitofmeasure, unitspaid,
incomekcal, cashincome, pid ) VALUES(%s,'%s','%s','%s',%s,%s,%s,%s)
''' % (self.hhid, employmenttype, foodtype, unitofmeasure, unitspaid, incomekcal, cashpaid, self.pid)
else:
query = ''' UPDATE employmentincome
SET incomesource='%s', foodtypepaid='%s', unitofmeasure='%s', unitspaid=%s, incomekcal=%s,
cashincome=%s
WHERE hhid=%s AND pid=%s AND id=%s
''' % ( employmenttype, foodtype, unitofmeasure, unitspaid, incomekcal, cashpaid, self.hhid, self.pid, self.incomeid)
self.executeUpdateQuery(query)
        # refresh the parent's income list and close this dialog
self.parent.retrieveHouseholdEmploymentIncome()
self.mdiClose()
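# Note (added for illustration): the queries above splice user-entered values
# directly into SQL strings, which is fragile against quoting errors and SQL
# injection. A hedged sketch of the parameterized form, assuming access to the
# underlying MySQL cursor (executeUpdateQuery is a mixin helper; the cursor
# access shown here is hypothetical):
#
#   query = ("UPDATE employmentincome SET incomesource=%s, foodtypepaid=%s "
#            "WHERE hhid=%s AND pid=%s AND id=%s")
#   cursor.execute(query, (employmenttype, foodtype, hhid, pid, incomeid))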
|
lgpl-3.0
|
matrix65536/kosmosfs
|
scripts.solaris/kfssetup.py
|
15
|
16413
|
#!/usr/bin/env python
#
# $Id: kfssetup.py 36 2007-11-12 02:43:36Z sriramsrao $
#
# Copyright 2007 Kosmix Corp.
#
# This file is part of Kosmos File System (KFS).
#
# Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Script to set up KFS servers on a set of nodes.
# This script reads a machines.cfg file that describes the meta/chunk
# server configurations, installs the binaries/scripts, and creates
# the necessary directory hierarchy.
#
import os,sys,os.path,getopt
import socket,threading,popen2
import md5
from ConfigParser import ConfigParser
# Use the python config parser to parse out machines setup
# Input file format for machines.cfg
# [metaserver]
# type: metaserver
# clusterkey: <cluster name>
# node: <value>
# rundir: <dir>
# baseport: <port>
#
# [chunkserver1]
# node: <value>
# rundir: <dir>
# baseport: <port>
# space: <space exported by the server> (n m/g)
# {chunkdir: <dir>}
# [chunkserver2]
# ...
# [chunkserverN]
# ...
#
# where, space is expressed in units of MB/GB or bytes.
#
# Install on each machine with the following directory hierarchy:
# rundir/
# bin/ -- binaries, config file, kfscp/kfslog/kfschunk dirs
# logs/ -- log output from running the binary
# scripts/ -- all the helper scripts
# If a path for storing the chunks isn't specified, then it defaults to bin
#
unitsScale = {'g' : 1 << 30, 'm' : 1 << 20, 'k' : 1 << 10, 'b' : 1}
tarProg = 'gtar'
maxConcurrent = 25
chunkserversOnly = 0
md5String = ""
def setupMeta(section, config, outputFn, packageFn):
""" Setup the metaserver binaries/config files on a node. """
global chunkserversOnly
if chunkserversOnly > 0:
print "Chunkservers only is set; not doing meta"
return
key = config.get(section, 'clusterkey')
baseport = config.getint(section, 'baseport')
rundir = config.get(section, 'rundir')
fh = open(outputFn, 'w')
print >> fh, "metaServer.clientPort = %d" % baseport
print >> fh, "metaServer.chunkServerPort = %d" % (baseport + 100)
print >> fh, "metaServer.clusterKey = %s" % (key)
print >> fh, "metaServer.cpDir = %s/bin/kfscp" % rundir
print >> fh, "metaServer.logDir = %s/bin/kfslog" % rundir
if config.has_option(section, 'loglevel'):
print >> fh, "metaServer.loglevel = %s" % config.get(section, 'loglevel')
if config.has_option(section, 'worm'):
print >> fh, "metaServer.wormMode = 1"
if config.has_option(section, 'numservers'):
print >> fh, "metaServer.minChunkservers = %s" % config.get(section, 'numservers')
if config.has_option(section, 'md5sumfilename'):
print >> fh, "metaServer.md5sumFilename = %s" % config.get(section, 'md5sumfilename')
fh.close()
if config.has_option(section, 'webuiConfFile'):
confFile = config.get(section, 'webuiConfFile')
fh = open(confFile, 'w')
print >> fh, "[webserver]"
print >> fh, "webServer.metaserverPort = %d" % baseport
print >> fh, "webServer.port = %d" % (baseport + 50)
print >> fh, "webServer.allMachinesFn = %s/webui/all-machines.txt" % rundir
print >> fh, "webServer.docRoot = %s/webui/files" % rundir
fh.close()
cmd = "%s -zcf %s bin/logcompactor bin/metaserver %s lib webui scripts/*" % (tarProg, packageFn, outputFn)
os.system(cmd)
installArgs = "-r %s -d %s -m" % (tarProg, rundir)
return installArgs
def setupChunkConfig(section, config, outputFn):
    """ Write the chunkserver config file for a node. """
metaNode = config.get('metaserver', 'node')
metaToChunkPort = config.getint('metaserver', 'baseport') + 100
hostname = config.get(section, 'node')
# for rack-aware replication, we assume that nodes on different racks are on different subnets
s = socket.gethostbyname(hostname)
ipoctets = s.split('.')
rackId = int(ipoctets[2])
#
fh = open (outputFn, 'w')
print >> fh, "chunkServer.metaServer.hostname = %s" % metaNode
print >> fh, "chunkServer.metaServer.port = %d" % metaToChunkPort
print >> fh, "chunkServer.clientPort = %d" % config.getint(section, 'baseport')
print >> fh, "chunkServer.clusterKey = %s" % config.get('metaserver', 'clusterkey')
print >> fh, "chunkServer.rackId = %d" % (rackId)
print >> fh, "chunkServer.md5sum = %s" % (md5String)
space = config.get(section, 'space')
s = space.split()
if (len(s) >= 2):
units = s[1].lower()
else:
units = 'b'
value = int(s[0]) * unitsScale[ units[0] ]
print >> fh, "chunkServer.totalSpace = %d" % value
rundir = config.get(section, 'rundir')
if config.has_option(section, 'chunkdir'):
chunkDir = config.get(section, 'chunkdir')
else:
chunkDir = "%s/bin/kfschunk" % (rundir)
print >> fh, "chunkServer.chunkDir = %s" % (chunkDir)
print >> fh, "chunkServer.logDir = %s/bin/kfslog" % (rundir)
if config.has_option(section, 'loglevel'):
print >> fh, "chunkServer.loglevel = %s" % config.get(section, 'loglevel')
fh.close()
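# Worked example (added; not in the original script): how a space declaration
# such as "30 G" becomes a byte count via unitsScale, mirroring the parsing in
# setupChunkConfig above. The helper name is hypothetical.
def _exampleSpaceToBytes():
    space = "30 G"
    s = space.split()
    if len(s) >= 2:
        units = s[1].lower()
    else:
        units = 'b'
    return int(s[0]) * unitsScale[units[0]]   # 30 << 30 == 32212254720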
def setupChunk(section, config, outputFn, packageFn):
""" Setup the chunkserver binaries/config files on a node. """
setupChunkConfig(section, config, outputFn)
cmd = "%s -zcf %s bin/chunkscrubber bin/chunkserver %s lib scripts/*" % (tarProg, packageFn, outputFn)
os.system(cmd)
rundir = config.get(section, 'rundir')
if config.has_option(section, 'chunkdir'):
chunkDir = config.get(section, 'chunkdir')
else:
chunkDir = "%s/bin/kfschunk" % (rundir)
installArgs = "-r %s -d %s -c \"%s\" " % (tarProg, rundir, chunkDir)
return installArgs
def usage():
""" Print out the usage for this program. """
print "%s [-f, --file <server.cfg>] [-m , --machines <chunkservers.txt>] [-r, --tar <tar|gtar>] \
[-w, --webui <webui dir>] [ [-b, --bin <dir with binaries>] {-u, --upgrade} | [-U, --uninstall] ]\n" % sys.argv[0]
return
def copyDir(srcDir, dstDir):
""" Copy files from src to dest"""
cmd = "cp -r %s %s" % (srcDir, dstDir)
os.system(cmd)
def computeMD5(datadir, digest):
"""Update the MD5 digest using the MD5 of all the files in a directory"""
files = os.listdir(datadir)
for f in sorted(files):
path = os.path.join(datadir, f)
if os.path.isdir(path):
continue
fh = open(path, 'r')
while 1:
buf = fh.read(4096)
if buf == "":
break
digest.update(buf)
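# Minimal usage sketch for computeMD5 (hypothetical paths), mirroring how
# getFiles() combines the digests of every regular file in two directories:
#   digest = md5.new()
#   computeMD5('./bin', digest)
#   computeMD5('./lib', digest)
#   print digest.hexdigest()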
def getFiles(buildDir, webuidir):
""" Copy files from buildDir/bin, buildDir/lib and . to ./bin, ./lib, and ./scripts
respectively."""
global md5String
cmd = "mkdir -p ./scripts; cp ./* scripts; chmod u+w scripts/*"
os.system(cmd)
s = "%s/bin" % buildDir
if (os.path.exists(s + "/amd64")):
s += "/amd64"
copyDir(s, './bin')
digest = md5.new()
computeMD5('./bin', digest)
s = "%s/lib" % buildDir
if (os.path.exists(s + "/amd64")):
s += "/amd64"
copyDir(s, './lib')
computeMD5('./lib', digest)
md5String = digest.hexdigest()
copyDir(webuidir, './webui')
def cleanup(fn):
""" Cleanout the dirs we created. """
cmd = "rm -rf ./scripts ./bin ./lib ./webui %s " % fn
os.system(cmd)
class InstallWorker(threading.Thread):
"""InstallWorker thread that runs a command on remote node"""
def __init__(self, sec, conf, tmpdir, i, m):
threading.Thread.__init__(self)
self.section = sec
self.config = conf
self.tmpdir = tmpdir
self.id = i
self.mode = m
self.doBuildPkg = 1
def singlePackageForAll(self, packageFn, installArgs):
self.doBuildPkg = 0
self.packageFn = packageFn
self.installArgs = installArgs
def buildPackage(self):
if (self.section == 'metaserver'):
self.installArgs = setupMeta(self.section, self.config, self.configOutputFn, self.packageFn)
else:
self.installArgs = setupChunk(self.section, self.config, self.configOutputFn, self.packageFn)
def doInstall(self):
fn = os.path.basename(self.packageFn)
if (self.section == 'metaserver'):
if chunkserversOnly > 0:
return
c = "scp -pr -o StrictHostKeyChecking=no -q %s kfsinstall.sh %s:/tmp/; ssh -o StrictHostKeyChecking=no %s 'mv /tmp/%s /tmp/kfspkg.tgz; sh /tmp/kfsinstall.sh %s %s ' " % \
(self.packageFn, self.dest, self.dest, fn, self.mode, self.installArgs)
else:
# chunkserver
configFn = os.path.basename(self.configOutputFn)
c = "scp -pr -o StrictHostKeyChecking=no -q %s kfsinstall.sh %s %s:/tmp/; ssh -o StrictHostKeyChecking=no %s 'mv /tmp/%s /tmp/kfspkg.tgz; mv /tmp/%s /tmp/ChunkServer.prp; sh /tmp/kfsinstall.sh %s %s ' " % \
(self.packageFn, self.configOutputFn, self.dest, self.dest, fn, configFn, self.mode, self.installArgs)
p = popen2.Popen3(c, True)
for out in p.fromchild:
if len(out) > 1:
print '[%s]: %s' % (self.dest, out[:-1])
def cleanup(self):
if self.doBuildPkg > 0:
# if we built the package, nuke it
c = "rm -f %s %s" % (self.configOutputFn, self.packageFn)
else:
c = "rm -f %s" % (self.configOutputFn)
os.system(c)
c = "ssh -o StrictHostKeyChecking=no %s 'rm -f /tmp/install.sh /tmp/kfspkg.tgz' " % self.dest
popen2.Popen3(c, True)
def run(self):
self.configOutputFn = "%s/fn.%d" % (self.tmpdir, self.id)
if self.doBuildPkg > 0:
self.packageFn = "%s/kfspkg.%d.tgz" % (self.tmpdir, self.id)
self.buildPackage()
else:
setupChunkConfig(self.section, self.config, self.configOutputFn)
self.dest = config.get(self.section, 'node')
self.doInstall()
self.cleanup()
def doInstall(config, builddir, tmpdir, webuidir, upgrade, serialMode):
if not config.has_section('metaserver'):
# NoSectionError lives on the ConfigParser module, not on the instance
raise Exception, "No metaserver section"
if not os.path.exists(builddir):
print "%s : directory doesn't exist\n" % builddir
sys.exit(-1)
getFiles(builddir, webuidir)
if os.path.exists('webui'):
webuiconfFile = os.path.join(webuidir, "server.conf")
config.set('metaserver', 'webuiConfFile', webuiconfFile)
workers = []
i = 0
sections = config.sections()
if upgrade == 1:
mode = "-u"
else:
mode = "-i"
chunkPkgFn = ""
cleanupFn = ""
for s in sections:
w = InstallWorker(s, config, tmpdir, i, mode)
workers.append(w)
if serialMode == 1:
w.start()
w.join()
else:
# same package for all chunkservers
if (s != 'metaserver'):
if chunkPkgFn == "":
configOutputFn = "%s/fn.common" % (tmpdir)
chunkPkgFn = "kfspkg-chunk.tgz"
cleanupFn = "%s %s" % (configOutputFn, chunkPkgFn)
installArgs = setupChunk(s, config, configOutputFn, chunkPkgFn)
w.singlePackageForAll(chunkPkgFn, installArgs)
i = i + 1
if serialMode == 0:
for i in xrange(0, len(workers), maxConcurrent):
#start a bunch
for j in xrange(maxConcurrent):
idx = i + j
if idx >= len(workers):
break
workers[idx].start()
#wait for each one to finish
for j in xrange(maxConcurrent):
idx = i + j
if idx >= len(workers):
break
workers[idx].join()
print "Done with %d workers" % idx
for i in xrange(len(workers)):
workers[i].join(120.0)
cleanup(cleanupFn)
class UnInstallWorker(threading.Thread):
"""UnInstallWorker thread that runs a command on remote node"""
def __init__(self, c, n):
threading.Thread.__init__(self)
self.cmd = c
self.node = n
def run(self):
# capture stderr and ignore the "host key has changed" message
p = popen2.Popen3(self.cmd, True)
for out in p.fromchild:
if len(out) > 1:
print '[%s]: %s' % (self.node, out[:-1])
def doUninstall(config):
sections = config.sections()
workers = []
for s in sections:
rundir = config.get(s, 'rundir')
node = config.get(s, 'node')
if (s == 'metaserver'):
otherArgs = '-m'
else:
# This is a chunkserver; so nuke out chunk dir as well
if config.has_option(s, 'chunkdir'):
chunkDir = config.get(s, 'chunkdir')
else:
chunkDir = "%s/bin/kfschunk" % (rundir)
otherArgs = "-c \"%s\"" % (chunkDir)
cmd = "ssh -o StrictHostKeyChecking=no %s 'cd %s; sh scripts/kfsinstall.sh -U -d %s %s' " % \
(node, rundir, rundir, otherArgs)
# print "Uninstall cmd: %s\n" % cmd
# os.system(cmd)
w = UnInstallWorker(cmd, node)
workers.append(w)
w.start()
print "Started all the workers..waiting for them to finish"
for i in xrange(len(workers)):
workers[i].join(120.0)
sys.exit(0)
def readChunkserversFile(machinesFn):
'''Given a file of chunkserver node names, one per line, construct a config
section for each chunkserver and add it to the config based on the defaults'''
global config
defaultChunkOptions = config.options("chunkserver_defaults")
for l in open(machinesFn, 'r'):
line = l.strip()
if (line.startswith('#')):
# ignore commented out node names
continue
section_name = "chunkserver_" + line
config.add_section(section_name)
config.set(section_name, "node", line)
for o in defaultChunkOptions:
config.set(section_name, o, config.get("chunkserver_defaults", o))
config.remove_section("chunkserver_defaults")
if __name__ == '__main__':
(opts, args) = getopt.getopt(sys.argv[1:], "cb:f:m:r:t:w:hsUu",
["chunkserversOnly", "build=", "file=", "machines=", "tar=", "tmpdir=",
"webui=", "help", "serialMode", "uninstall", "upgrade"])
filename = ""
builddir = ""
uninstall = 0
upgrade = 0
serialMode = 0
machines = ""
webuidir = ""
chunkserversOnly = 0
# The script probably won't work correctly if tmpdir is moved away from /tmp
tmpdir = "/tmp"
for (o, a) in opts:
if o in ("-h", "--help"):
usage()
sys.exit(2)
if o in ("-f", "--file"):
filename = a
elif o in ("-b", "--build"):
builddir = a
elif o in ("-c", "--chunkserversOnly"):
chunkserversOnly = 1
elif o in ("-m", "--machines"):
machines = a
elif o in ("-r", "--tar"):
tarProg = a
elif o in ("-w", "--webuidir"):
webuidir = a
elif o in ("-t", "--tmpdir"):
tmpdir = a
elif o in ("-U", "--uninstall"):
uninstall = 1
elif o in ("-u", "--upgrade"):
upgrade = 1
elif o in ("-s", "--serialMode"):
serialMode = 1
if not os.path.exists(filename):
print "%s : directory doesn't exist\n" % filename
sys.exit(-1)
config = ConfigParser()
config.readfp(open(filename, 'r'))
if machines != "":
readChunkserversFile(machines)
if uninstall == 1:
doUninstall(config)
else:
doInstall(config, builddir, tmpdir, webuidir, upgrade, serialMode)
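# Example server.cfg (hypothetical values) covering the options this script
# reads; chunkserver sections can also be generated from a machines file via
# the [chunkserver_defaults] section:
#   [metaserver]
#   node = meta.example.com
#   baseport = 20000
#   clusterkey = kfs-test-cluster
#   rundir = /home/kfs/meta
#   loglevel = INFO
#
#   [chunkserver_defaults]
#   baseport = 30000
#   space = 60 G
#   rundir = /home/kfs/chunk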
|
apache-2.0
|
mhugo/QGIS
|
python/plugins/processing/algs/qgis/ExtentFromLayer.py
|
7
|
5806
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ExtentFromLayer.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from math import floor, ceil
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsApplication,
QgsField,
QgsFeatureSink,
QgsGeometry,
QgsFeature,
QgsWkbTypes,
QgsProcessing,
QgsProcessingException,
QgsProcessingParameterMapLayer,
QgsProcessingParameterDistance,
QgsProcessingParameterDefinition,
QgsProcessingParameterFeatureSink,
QgsFields)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class ExtentFromLayer(QgisAlgorithm):
INPUT = 'INPUT'
BY_FEATURE = 'BY_FEATURE'
ROUND_TO = 'ROUND_TO'
OUTPUT = 'OUTPUT'
def icon(self):
return QgsApplication.getThemeIcon("/algorithms/mAlgorithmExtractLayerExtent.svg")
def svgIconPath(self):
return QgsApplication.iconPath("/algorithms/mAlgorithmExtractLayerExtent.svg")
def tags(self):
return self.tr('polygon,vector,raster,extent,envelope,bounds,bounding,boundary,layer,round,rounded').split(',')
def group(self):
return self.tr('Layer tools')
def groupId(self):
return 'layertools'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterMapLayer(self.INPUT, self.tr('Input layer')))
round_to_parameter = QgsProcessingParameterDistance(self.ROUND_TO,
self.tr('Round values to'),
parentParameterName=self.INPUT,
minValue=0,
defaultValue=0)
round_to_parameter.setFlags(round_to_parameter.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(round_to_parameter)
self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Extent'), type=QgsProcessing.TypeVectorPolygon))
def name(self):
return 'polygonfromlayerextent'
def displayName(self):
return self.tr('Extract layer extent')
def processAlgorithm(self, parameters, context, feedback):
layer = self.parameterAsLayer(parameters, self.INPUT, context)
round_to = self.parameterAsDouble(parameters, self.ROUND_TO, context)
fields = QgsFields()
fields.append(QgsField('MINX', QVariant.Double))
fields.append(QgsField('MINY', QVariant.Double))
fields.append(QgsField('MAXX', QVariant.Double))
fields.append(QgsField('MAXY', QVariant.Double))
fields.append(QgsField('CNTX', QVariant.Double))
fields.append(QgsField('CNTY', QVariant.Double))
fields.append(QgsField('AREA', QVariant.Double))
fields.append(QgsField('PERIM', QVariant.Double))
fields.append(QgsField('HEIGHT', QVariant.Double))
fields.append(QgsField('WIDTH', QVariant.Double))
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
fields, QgsWkbTypes.Polygon, layer.crs())
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
try:
# may not be possible
layer.updateExtents()
except:
pass
rect = layer.extent()
if round_to > 0:
rect.setXMinimum(floor(rect.xMinimum() / round_to) * round_to)
rect.setYMinimum(floor(rect.yMinimum() / round_to) * round_to)
rect.setXMaximum(ceil(rect.xMaximum() / round_to) * round_to)
rect.setYMaximum(ceil(rect.yMaximum() / round_to) * round_to)
geometry = QgsGeometry.fromRect(rect)
minx = rect.xMinimum()
miny = rect.yMinimum()
maxx = rect.xMaximum()
maxy = rect.yMaximum()
height = rect.height()
width = rect.width()
cntx = minx + width / 2.0
cnty = miny + height / 2.0
area = width * height
perim = 2 * width + 2 * height
feat = QgsFeature()
feat.setGeometry(geometry)
attrs = [
minx,
miny,
maxx,
maxy,
cntx,
cnty,
area,
perim,
height,
width,
]
feat.setAttributes(attrs)
sink.addFeature(feat, QgsFeatureSink.FastInsert)
return {self.OUTPUT: dest_id}
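# Worked example of the round_to snapping above (hypothetical numbers): with
# round_to = 100 and an x-range of [123.4, 567.8], the extent grows outward
# to [100.0, 600.0]:
#   floor(123.4 / 100) * 100 == 100.0
#   ceil(567.8 / 100) * 100 == 600.0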
|
gpl-2.0
|
michaldaniel/Ebook-Viewer
|
src/main.py
|
1
|
3097
|
#!/usr/bin/env python3
# Easy eBook Viewer by Michal Daniel
# Easy eBook Viewer is free software; you can redistribute it and/or modify it under the terms
# of the GNU General Public Licence as published by the Free Software Foundation.
# Easy eBook Viewer is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public Licence for more details.
# You should have received a copy of the GNU General Public Licence along with
# Easy eBook Viewer; if not, write to the Free Software Foundation, Inc., 51 Franklin Street,
# Fifth Floor, Boston, MA 02110-1301, USA.
import os, sys, gettext
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import GLib, Gio, Gtk, GObject, Gdk
from main_window import MainWindow
from components import about_dialog
class Application(Gtk.Application):
def __init__(self, *args, **kwargs):
super().__init__(*args, application_id="easy-ebook-viewer",
flags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE,
**kwargs)
self.window = None
self.file_path = None
GLib.set_application_name('Easy eBook Viewer')
GLib.set_prgname('easy-ebook-viewer')
GLib.setenv('PULSE_PROP_application.icon_name', 'easy-ebook-viewer', True)
def do_startup(self):
Gtk.Application.do_startup(self)
action = Gio.SimpleAction.new("about", None)
action.connect("activate", self.on_about)
self.add_action(action)
action = Gio.SimpleAction.new("quit", None)
action.connect("activate", self.on_quit)
self.add_action(action)
def do_activate(self):
GObject.threads_init()
gettext.install('easy-ebook-viewer', '/usr/share/easy-ebook-viewer/locale')
# We only allow a single window and raise any existing ones
if not self.window:
# Windows are associated with the application
# when the last one is closed the application shuts down
self.window = MainWindow(file_path=self.file_path)
self.window.connect("delete-event", self.on_quit)
self.window.set_wmclass("easy-ebook-viewer", "easy-ebook-viewer")
self.window.show_all()
if not self.window.book_loaded:
self.window.header_bar_component.hide_jumping_navigation()
Gtk.main()
def do_command_line(self, command_line):
# If the book came from arguments, i.e. it was opened using the "Open with..." method etc.
if len(sys.argv) > 1:
# Check if that file really exists
if os.path.exists(sys.argv[1]):
self.file_path = sys.argv[1]
self.activate()
return 0
def on_about(self, action, param):
dialog = about_dialog.AboutDialog()
dialog.show_all()
def on_quit(self, action, param):
Gdk.threads_leave()
Gtk.main_quit()
self.quit()
if __name__ == "__main__":
app = Application()
app.run(sys.argv)
|
gpl-3.0
|
empireryan/director
|
src/python/ddapp/tasks/descriptions/drillTask.py
|
6
|
5717
|
from ddapp.tasks import robottasks as rt
side = 'Left'
mirror = 1 if side == 'Right' else -1
drillTaskRight = [
['drill wall', [
['fit wall targets', [
[rt.SnapshotMultisensePointcloud, {}],
[rt.UserAnnotatePointCloud, {'Annotation name':'wall annotation', 'Number of points':3}],
[rt.FitWallFrameFromAnnotation, {'Annotation input name':'wall annotation'}],
]],
['walk to wall', [
[rt.ComputeRobotFootFrame, {}],
[rt.ProjectAffordanceToGround, {'Affordance name':'wall', 'Ground frame name':'robot foot frame', 'Frame output name':'wall ground frame'}],
[rt.TransformFrame, {'Frame input name':'wall ground frame', 'Frame output name':'wall stance frame', 'Translation':[-0.7, -0.3, 0.0]}],
[rt.RequestFootstepPlan, {'Stance frame name':'wall stance frame'}],
[rt.RequestWalkingPlan, {'Footstep plan name':'wall stance frame footstep plan'}],
[rt.UserPromptTask, {'Message':'Commit footstep plan?'}],
[rt.CommitFootstepPlan, {'Plan name':'wall stance frame footstep plan'}],
[rt.WaitForWalkExecution, {}]
]],
['plan drill trajectory', [
[rt.TransformFrame, {'Frame input name':'wall frame', 'Frame output name':'wall gaze frame', 'Translation':[0.0, 0.0, 0.0], 'Rotation':[0, 0, 90]}],
[rt.PlanGazeTrajectory, {'Target frame name':'wall gaze frame', 'Annotation input name':'wall annotation'}],
]],
]],
['drill pickup', [
['fit drill on table', [
[rt.SnapshotMultisensePointcloud, {}],
[rt.UserAnnotatePointCloud, {'Annotation name':'rotary drill annotation', 'Number of points':1}],
[rt.FindRotaryDrillByAnnotation, {}],
]],
# ['user fit table', [
# [rt.SnapshotMultisensePointcloud, {}],
# [rt.UserAnnotatePointCloud, {'Annotation name':'drill on table annotation', 'Number of points':1}],
# [rt.FindDrillOnTableByAnnotation, {}],
# [rt.UserSelectAffordanceCandidate, {'Candidate name prefix':'drill', 'New name':'drill'}],
# ]],
['plan walk to drill', [
[rt.ComputeRobotFootFrame, {}],
[rt.ProjectAffordanceToGround, {'Affordance name':'drill', 'Ground frame name':'robot foot frame', 'Frame output name':'drill ground frame'}],
[rt.TransformFrame, {'Frame input name':'drill ground frame', 'Frame output name':'drill stance frame', 'Translation':[-0.7, 0.25*mirror, 0.0]}],
[rt.RequestFootstepPlan, {'Stance frame name':'drill stance frame'}],
[rt.RequestWalkingPlan, {'Footstep plan name':'drill stance frame footstep plan'}],
[rt.UserPromptTask, {'Message':'Commit footstep plan?'}],
[rt.CommitFootstepPlan, {'Plan name':'drill stance frame footstep plan'}],
[rt.WaitForWalkExecution, {}]
]],
['refit drill on table', [
[rt.SnapshotMultisensePointcloud, {}],
[rt.UserAnnotatePointCloud, {'Annotation name':'rotary drill annotation', 'Number of points':1}],
[rt.FindRotaryDrillByAnnotation, {}],
]],
['pick up drill on table', [
['compute manipulation frames', [
[rt.TransformFrame, {'Frame input name':'drill frame', 'Frame output name':'drill grasp frame', 'Translation':[-0.045, 0.0, 0.0275], 'Rotation':[0, -90*mirror, -90]}],
[rt.TransformFrame, {'Frame input name':'drill grasp frame', 'Frame output name':'drill reach frame', 'Translation':[0.0, -0.17, 0.0]}],
[rt.TransformFrame, {'Frame input name':'drill grasp frame', 'Frame output name':'drill lift frame', 'Translation':[0.1*mirror, -0.05, 0.0]}],
]],
['raise arm', [
[rt.PlanPostureGoal, {'Posture group':'General', 'Posture name':'arm up pregrasp', 'Side':side}],
[rt.UserPromptTask, {'Message':'Commit manipulation plan?'}],
[rt.CommitManipulationPlan, {'Plan name':'arm up pregrasp posture plan'}],
[rt.WaitForManipulationPlanExecution, {}],
]],
['pre reach', [
[rt.PlanReachToFrame, {'Frame input name':'drill reach frame', 'Side':side}],
[rt.UserPromptTask, {'Message':'Commit manipulation plan?'}],
[rt.CommitManipulationPlan, {'Plan name':'drill reach frame reach plan'}],
[rt.WaitForManipulationPlanExecution, {}],
]],
['reach', [
[rt.PlanReachToFrame, {'Frame input name':'drill grasp frame', 'Side':side}],
[rt.UserPromptTask, {'Message':'Commit manipulation plan?'}],
[rt.CommitManipulationPlan, {'Plan name':'drill grasp frame reach plan'}],
[rt.WaitForManipulationPlanExecution, {}],
]],
['close gripper', [
[rt.UserPromptTask, {'Message':'Close gripper?'}],
[rt.CloseHand, {'Side':side}],
[rt.DelayTask, {'Delay time':2.0}],
]],
['lift', [
[rt.PlanReachToFrame, {'Frame input name':'drill lift frame', 'Side':side}],
[rt.UserPromptTask, {'Message':'Commit manipulation plan?'}],
[rt.CommitManipulationPlan, {'Plan name':'drill lift frame reach plan'}],
[rt.WaitForManipulationPlanExecution, {}],
]],
['extract', [
[rt.PlanPostureGoal, {'Posture group':'General', 'Posture name':'arm up pregrasp', 'Side':side}],
[rt.UserPromptTask, {'Message':'Commit manipulation plan?'}],
[rt.CommitManipulationPlan, {'Plan name':'arm up pregrasp posture plan'}],
[rt.WaitForManipulationPlanExecution, {}],
]],
]],
]],
]
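# Minimal sketch (hypothetical helper, not part of the task description):
# walk the nested structure, where a group is [name, children] and a leaf
# is [TaskClass, params], and print the task tree.
def _printTaskTree(entries, depth=0):
    for name, payload in entries:
        if isinstance(payload, dict):
            # leaf: [TaskClass, params]
            print(' ' * depth + name.__name__)
        else:
            # group: [name, children]
            print(' ' * depth + name)
            _printTaskTree(payload, depth + 2)

if __name__ == '__main__':
    _printTaskTree(drillTaskRight)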
|
bsd-3-clause
|
jarshwah/django
|
tests/template_tests/syntax_tests/test_comment.py
|
521
|
3667
|
from django.test import SimpleTestCase
from ..utils import setup
class CommentSyntaxTests(SimpleTestCase):
@setup({'comment-syntax01': '{# this is hidden #}hello'})
def test_comment_syntax01(self):
output = self.engine.render_to_string('comment-syntax01')
self.assertEqual(output, 'hello')
@setup({'comment-syntax02': '{# this is hidden #}hello{# foo #}'})
def test_comment_syntax02(self):
output = self.engine.render_to_string('comment-syntax02')
self.assertEqual(output, 'hello')
@setup({'comment-syntax03': 'foo{# {% if %} #}'})
def test_comment_syntax03(self):
output = self.engine.render_to_string('comment-syntax03')
self.assertEqual(output, 'foo')
@setup({'comment-syntax04': 'foo{# {% endblock %} #}'})
def test_comment_syntax04(self):
output = self.engine.render_to_string('comment-syntax04')
self.assertEqual(output, 'foo')
@setup({'comment-syntax05': 'foo{# {% somerandomtag %} #}'})
def test_comment_syntax05(self):
output = self.engine.render_to_string('comment-syntax05')
self.assertEqual(output, 'foo')
@setup({'comment-syntax06': 'foo{# {% #}'})
def test_comment_syntax06(self):
output = self.engine.render_to_string('comment-syntax06')
self.assertEqual(output, 'foo')
@setup({'comment-syntax07': 'foo{# %} #}'})
def test_comment_syntax07(self):
output = self.engine.render_to_string('comment-syntax07')
self.assertEqual(output, 'foo')
@setup({'comment-syntax08': 'foo{# %} #}bar'})
def test_comment_syntax08(self):
output = self.engine.render_to_string('comment-syntax08')
self.assertEqual(output, 'foobar')
@setup({'comment-syntax09': 'foo{# {{ #}'})
def test_comment_syntax09(self):
output = self.engine.render_to_string('comment-syntax09')
self.assertEqual(output, 'foo')
@setup({'comment-syntax10': 'foo{# }} #}'})
def test_comment_syntax10(self):
output = self.engine.render_to_string('comment-syntax10')
self.assertEqual(output, 'foo')
@setup({'comment-syntax11': 'foo{# { #}'})
def test_comment_syntax11(self):
output = self.engine.render_to_string('comment-syntax11')
self.assertEqual(output, 'foo')
@setup({'comment-syntax12': 'foo{# } #}'})
def test_comment_syntax12(self):
output = self.engine.render_to_string('comment-syntax12')
self.assertEqual(output, 'foo')
@setup({'comment-tag01': '{% comment %}this is hidden{% endcomment %}hello'})
def test_comment_tag01(self):
output = self.engine.render_to_string('comment-tag01')
self.assertEqual(output, 'hello')
@setup({'comment-tag02': '{% comment %}this is hidden{% endcomment %}'
'hello{% comment %}foo{% endcomment %}'})
def test_comment_tag02(self):
output = self.engine.render_to_string('comment-tag02')
self.assertEqual(output, 'hello')
@setup({'comment-tag03': 'foo{% comment %} {% if %} {% endcomment %}'})
def test_comment_tag03(self):
output = self.engine.render_to_string('comment-tag03')
self.assertEqual(output, 'foo')
@setup({'comment-tag04': 'foo{% comment %} {% endblock %} {% endcomment %}'})
def test_comment_tag04(self):
output = self.engine.render_to_string('comment-tag04')
self.assertEqual(output, 'foo')
@setup({'comment-tag05': 'foo{% comment %} {% somerandomtag %} {% endcomment %}'})
def test_comment_tag05(self):
output = self.engine.render_to_string('comment-tag05')
self.assertEqual(output, 'foo')
|
bsd-3-clause
|
supermanheng21/twilio-python
|
twilio/rest/resources/task_router/workspaces.py
|
38
|
3347
|
from .. import NextGenInstanceResource, NextGenListResource
from .statistics import Statistics
class Workspace(NextGenInstanceResource):
"""
A Workspace resource.
See the `TaskRouter API reference
<https://www.twilio.com/docs/taskrouter/workspaces>`_
for more information.
.. attribute:: sid
The unique ID of the Workspace
.. attribute:: account_sid
The ID of the account that owns this Workspace
.. attribute:: friendly_name
Human readable description of this workspace (for example "Sales Call
Center" or "Customer Support Team")
.. attribute:: default_activity_sid
The ID of the default :class:`Activity` that will be used when new
Workers are created in this Workspace.
.. attribute:: default_activity_name
The human readable name of the default activity. Read only.
.. attribute:: timeout_activity_sid
The ID of the Activity that will be assigned to a Worker when a
:class:`Task` reservation times out without a response.
.. attribute:: timeout_activity_name
The human readable name of the timeout activity. Read only.
.. attribute:: event_callback_url
An optional URL where the Workspace will publish events. You can use
this to gather data for reporting.
.. attribute:: date_created
The time the Workspace was created, given as UTC in ISO 8601 format.
.. attribute:: date_updated
The time the Workspace was last updated, given as UTC in ISO 8601
format.
"""
subresources = [
Statistics,
]
def delete(self):
"""
Delete a workspace.
"""
return self.parent.delete_instance(self.name)
def update(self, **kwargs):
"""
Update a workspace.
"""
return self.parent.update_instance(self.name, kwargs)
class Workspaces(NextGenListResource):
""" A list of Workspace resources """
name = "Workspaces"
instance = Workspace
def create(self, friendly_name, **kwargs):
"""
Create a Workspace.
:param friendly_name: Human readable description of this workspace (for
example "Customer Support" or "2014 Election Campaign").
:param event_callback_url: If provided, the Workspace will publish
events to this URL. You can use this to gather data for reporting.
See Workspace Events for more information.
:param template: One of the available template names. Will
pre-configure this Workspace with the Workflow and Activities
specified in the template. Currently "FIFO" is the only available
template, which will configure Work Distribution Service with a set
of default activities and a single queue for first-in, first-out
distribution.
"""
kwargs['friendly_name'] = friendly_name
return self.create_instance(kwargs)
def delete(self, sid):
"""
Delete the given workspace
"""
return self.delete_instance(sid)
def update(self, sid, **kwargs):
"""
Update a :class:`Workspace` with the given parameters.
All the parameters are describe above in :meth:`create`
"""
return self.update_instance(sid, kwargs)
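# Usage sketch (hypothetical wiring; constructor arguments depend on how the
# surrounding client builds list resources):
#   workspaces = Workspaces(base_uri, auth)
#   ws = workspaces.create('Customer Support', template='FIFO')
#   workspaces.update(ws.sid, friendly_name='Support Team')
#   workspaces.delete(ws.sid)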
|
mit
|
Arvedui/i3pystatus
|
i3pystatus/gpu_temp.py
|
10
|
1150
|
from i3pystatus import IntervalModule
from .utils import gpu
class GPUTemperature(IntervalModule):
"""
Shows GPU temperature
Currently Nvidia only and nvidia-smi required
.. rubric:: Available formatters
* `{temp}` — the temperature in integer degrees celsius
"""
settings = (
("format", "format string used for output. {temp} is the temperature in integer degrees celsius"),
("display_if", "snippet that gets evaluated. if true, displays the module output"),
("gpu_number", "set the gpu number when you have several GPU"),
"color",
"alert_temp",
"alert_color",
)
format = "{temp} °C"
color = "#FFFFFF"
alert_temp = 90
alert_color = "#FF0000"
display_if = 'True'
gpu_number = 0
def run(self):
temp = gpu.query_nvidia_smi(self.gpu_number).temp
temp_alert = temp is None or temp >= self.alert_temp
if eval(self.display_if):
self.output = {
"full_text": self.format.format(temp=temp),
"color": self.color if not temp_alert else self.alert_color,
}
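# Registration sketch (assumes the usual i3pystatus config layout):
#   from i3pystatus import Status
#   status = Status()
#   status.register("gpu_temp", format="GPU {temp}°C", alert_temp=85)
#   status.run()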
|
mit
|
gersteinlab/AlleleDB
|
alleledb_pipeline/fasta.py
|
1
|
1219
|
import sys
class fastaReader:
def __init__(self, fn):
self.done=False
self.ifile=open(fn)
self.hdr=self.ifile.readline().rstrip()
def __iter__(self):
return self
def next(self):
if self.done:
raise StopIteration
body=''
while True:
l=self.ifile.readline().rstrip()
if not l:
# an empty line (or EOF) terminates both the current record and the file
self.done=True
if not l or l[0]=='>':
hdr=self.hdr
self.hdr=l
return (hdr, body)
else:
body+=l
class fastaWriter:
def __init__(self, fn, linelen=60):
self.ofile=open(fn, 'w')
self.linelen=linelen
def close(self):
self.ofile.close()
def writeFA(self, hdr, body):
pos=0
stop=len(body)
self.ofile.write(hdr)
self.ofile.write('\n')
while pos<stop:
self.ofile.write(body[pos:pos+self.linelen])
self.ofile.write('\n')
pos+=self.linelen
if __name__=='__main__':
rdr=fastaReader(sys.argv[1])
wrter=fastaWriter(sys.argv[2], 10)
for hdr, body in rdr:
wrter.writeFA(hdr, body)
wrter.close()
|
cc0-1.0
|
k3nnyfr/s2a_fr-nsis
|
s2a/Python/Lib/modulefinder.py
|
31
|
24283
|
"""Find modules used by a script, using introspection."""
# This module should be kept compatible with Python 2.2, see PEP 291.
from __future__ import generators
import dis
import imp
import marshal
import os
import sys
import types
import struct
if hasattr(sys.__stdout__, "newlines"):
READ_MODE = "U" # universal line endings
else:
# remain compatible with Python < 2.3
READ_MODE = "r"
LOAD_CONST = chr(dis.opname.index('LOAD_CONST'))
IMPORT_NAME = chr(dis.opname.index('IMPORT_NAME'))
STORE_NAME = chr(dis.opname.index('STORE_NAME'))
STORE_GLOBAL = chr(dis.opname.index('STORE_GLOBAL'))
STORE_OPS = [STORE_NAME, STORE_GLOBAL]
HAVE_ARGUMENT = chr(dis.HAVE_ARGUMENT)
# Modulefinder does a good job of simulating Python's import mechanism, but
# it cannot handle __path__ modifications that packages make at runtime.
# Therefore there is a mechanism whereby you can register extra paths in
# this map for a package, and they will be honored.
# Note this is a mapping of package names to lists of paths.
packagePathMap = {}
# A Public interface
def AddPackagePath(packagename, path):
paths = packagePathMap.get(packagename, [])
paths.append(path)
packagePathMap[packagename] = paths
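# Example (hypothetical package/path): register an extra search path for a
# package that extends __path__ at runtime, before running ModuleFinder.
#   AddPackagePath("mypkg", "/opt/plugins/mypkg")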
replacePackageMap = {}
# This ReplacePackage mechanism allows modulefinder to work around the
# way the _xmlplus package injects itself under the name "xml" into
# sys.modules at runtime by calling ReplacePackage("_xmlplus", "xml")
# before running ModuleFinder.
def ReplacePackage(oldname, newname):
replacePackageMap[oldname] = newname
class Module:
def __init__(self, name, file=None, path=None):
self.__name__ = name
self.__file__ = file
self.__path__ = path
self.__code__ = None
# The set of global names that are assigned to in the module.
# This includes those names imported through starimports of
# Python modules.
self.globalnames = {}
# The set of starimports this module did that could not be
# resolved, i.e. a starimport from a non-Python module.
self.starimports = {}
def __repr__(self):
s = "Module(%r" % (self.__name__,)
if self.__file__ is not None:
s = s + ", %r" % (self.__file__,)
if self.__path__ is not None:
s = s + ", %r" % (self.__path__,)
s = s + ")"
return s
class ModuleFinder:
def __init__(self, path=None, debug=0, excludes=[], replace_paths=[]):
if path is None:
path = sys.path
self.path = path
self.modules = {}
self.badmodules = {}
self.debug = debug
self.indent = 0
self.excludes = excludes
self.replace_paths = replace_paths
self.processed_paths = [] # Used in debugging only
def msg(self, level, str, *args):
if level <= self.debug:
for i in range(self.indent):
print " ",
print str,
for arg in args:
print repr(arg),
print
def msgin(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent + 1
self.msg(*args)
def msgout(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent - 1
self.msg(*args)
def run_script(self, pathname):
self.msg(2, "run_script", pathname)
fp = open(pathname, READ_MODE)
stuff = ("", "r", imp.PY_SOURCE)
self.load_module('__main__', fp, pathname, stuff)
def load_file(self, pathname):
dir, name = os.path.split(pathname)
name, ext = os.path.splitext(name)
fp = open(pathname, READ_MODE)
stuff = (ext, "r", imp.PY_SOURCE)
self.load_module(name, fp, pathname, stuff)
def import_hook(self, name, caller=None, fromlist=None, level=-1):
self.msg(3, "import_hook", name, caller, fromlist, level)
parent = self.determine_parent(caller, level=level)
q, tail = self.find_head_package(parent, name)
m = self.load_tail(q, tail)
if not fromlist:
return q
if m.__path__:
self.ensure_fromlist(m, fromlist)
return None
def determine_parent(self, caller, level=-1):
self.msgin(4, "determine_parent", caller, level)
if not caller or level == 0:
self.msgout(4, "determine_parent -> None")
return None
pname = caller.__name__
if level >= 1: # relative import
if caller.__path__:
level -= 1
if level == 0:
parent = self.modules[pname]
assert parent is caller
self.msgout(4, "determine_parent ->", parent)
return parent
if pname.count(".") < level:
raise ImportError, "relative importpath too deep"
pname = ".".join(pname.split(".")[:-level])
parent = self.modules[pname]
self.msgout(4, "determine_parent ->", parent)
return parent
if caller.__path__:
parent = self.modules[pname]
assert caller is parent
self.msgout(4, "determine_parent ->", parent)
return parent
if '.' in pname:
i = pname.rfind('.')
pname = pname[:i]
parent = self.modules[pname]
assert parent.__name__ == pname
self.msgout(4, "determine_parent ->", parent)
return parent
self.msgout(4, "determine_parent -> None")
return None
def find_head_package(self, parent, name):
self.msgin(4, "find_head_package", parent, name)
if '.' in name:
i = name.find('.')
head = name[:i]
tail = name[i+1:]
else:
head = name
tail = ""
if parent:
qname = "%s.%s" % (parent.__name__, head)
else:
qname = head
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
if parent:
qname = head
parent = None
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
self.msgout(4, "raise ImportError: No module named", qname)
raise ImportError, "No module named " + qname
def load_tail(self, q, tail):
self.msgin(4, "load_tail", q, tail)
m = q
while tail:
i = tail.find('.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = self.import_module(head, mname, m)
if not m:
self.msgout(4, "raise ImportError: No module named", mname)
raise ImportError, "No module named " + mname
self.msgout(4, "load_tail ->", m)
return m
def ensure_fromlist(self, m, fromlist, recursive=0):
self.msg(4, "ensure_fromlist", m, fromlist, recursive)
for sub in fromlist:
if sub == "*":
if not recursive:
all = self.find_all_submodules(m)
if all:
self.ensure_fromlist(m, all, 1)
elif not hasattr(m, sub):
subname = "%s.%s" % (m.__name__, sub)
submod = self.import_module(sub, subname, m)
if not submod:
raise ImportError, "No module named " + subname
def find_all_submodules(self, m):
if not m.__path__:
return
modules = {}
# 'suffixes' used to be a list hardcoded to [".py", ".pyc", ".pyo"].
# But we must also collect Python extension modules - although
# we cannot separate normal dlls from Python extensions.
suffixes = []
for triple in imp.get_suffixes():
suffixes.append(triple[0])
for dir in m.__path__:
try:
names = os.listdir(dir)
except os.error:
self.msg(2, "can't list directory", dir)
continue
for name in names:
mod = None
for suff in suffixes:
n = len(suff)
if name[-n:] == suff:
mod = name[:-n]
break
if mod and mod != "__init__":
modules[mod] = mod
return modules.keys()
def import_module(self, partname, fqname, parent):
self.msgin(3, "import_module", partname, fqname, parent)
try:
m = self.modules[fqname]
except KeyError:
pass
else:
self.msgout(3, "import_module ->", m)
return m
if fqname in self.badmodules:
self.msgout(3, "import_module -> None")
return None
if parent and parent.__path__ is None:
self.msgout(3, "import_module -> None")
return None
try:
fp, pathname, stuff = self.find_module(partname,
parent and parent.__path__, parent)
except ImportError:
self.msgout(3, "import_module ->", None)
return None
try:
m = self.load_module(fqname, fp, pathname, stuff)
finally:
if fp: fp.close()
if parent:
setattr(parent, partname, m)
self.msgout(3, "import_module ->", m)
return m
def load_module(self, fqname, fp, pathname, file_info):
suffix, mode, type = file_info
self.msgin(2, "load_module", fqname, fp and "fp", pathname)
if type == imp.PKG_DIRECTORY:
m = self.load_package(fqname, pathname)
self.msgout(2, "load_module ->", m)
return m
if type == imp.PY_SOURCE:
co = compile(fp.read()+'\n', pathname, 'exec')
elif type == imp.PY_COMPILED:
if fp.read(4) != imp.get_magic():
self.msgout(2, "raise ImportError: Bad magic number", pathname)
raise ImportError, "Bad magic number in %s" % pathname
fp.read(4)
co = marshal.load(fp)
else:
co = None
m = self.add_module(fqname)
m.__file__ = pathname
if co:
if self.replace_paths:
co = self.replace_paths_in_code(co)
m.__code__ = co
self.scan_code(co, m)
self.msgout(2, "load_module ->", m)
return m
def _add_badmodule(self, name, caller):
if name not in self.badmodules:
self.badmodules[name] = {}
if caller:
self.badmodules[name][caller.__name__] = 1
else:
self.badmodules[name]["-"] = 1
def _safe_import_hook(self, name, caller, fromlist, level=-1):
# wrapper for self.import_hook() that won't raise ImportError
if name in self.badmodules:
self._add_badmodule(name, caller)
return
try:
self.import_hook(name, caller, level=level)
except ImportError, msg:
self.msg(2, "ImportError:", str(msg))
self._add_badmodule(name, caller)
else:
if fromlist:
for sub in fromlist:
if sub in self.badmodules:
self._add_badmodule(sub, caller)
continue
try:
self.import_hook(name, caller, [sub], level=level)
except ImportError, msg:
self.msg(2, "ImportError:", str(msg))
fullname = name + "." + sub
self._add_badmodule(fullname, caller)
def scan_opcodes(self, co,
unpack = struct.unpack):
# Scan the code, and yield 'interesting' opcode combinations
# Version for Python 2.4 and older
code = co.co_code
names = co.co_names
consts = co.co_consts
while code:
c = code[0]
if c in STORE_OPS:
oparg, = unpack('<H', code[1:3])
yield "store", (names[oparg],)
code = code[3:]
continue
if c == LOAD_CONST and code[3] == IMPORT_NAME:
oparg_1, oparg_2 = unpack('<xHxH', code[:6])
yield "import", (consts[oparg_1], names[oparg_2])
code = code[6:]
continue
if c >= HAVE_ARGUMENT:
code = code[3:]
else:
code = code[1:]
def scan_opcodes_25(self, co,
unpack = struct.unpack):
# Scan the code, and yield 'interesting' opcode combinations
# Python 2.5 version (has absolute and relative imports)
code = co.co_code
names = co.co_names
consts = co.co_consts
LOAD_LOAD_AND_IMPORT = LOAD_CONST + LOAD_CONST + IMPORT_NAME
while code:
c = code[0]
if c in STORE_OPS:
oparg, = unpack('<H', code[1:3])
yield "store", (names[oparg],)
code = code[3:]
continue
if code[:9:3] == LOAD_LOAD_AND_IMPORT:
oparg_1, oparg_2, oparg_3 = unpack('<xHxHxH', code[:9])
level = consts[oparg_1]
if level == -1: # normal import
yield "import", (consts[oparg_2], names[oparg_3])
elif level == 0: # absolute import
yield "absolute_import", (consts[oparg_2], names[oparg_3])
else: # relative import
yield "relative_import", (level, consts[oparg_2], names[oparg_3])
code = code[9:]
continue
if c >= HAVE_ARGUMENT:
code = code[3:]
else:
code = code[1:]
def scan_code(self, co, m):
code = co.co_code
if sys.version_info >= (2, 5):
scanner = self.scan_opcodes_25
else:
scanner = self.scan_opcodes
for what, args in scanner(co):
if what == "store":
name, = args
m.globalnames[name] = 1
elif what in ("import", "absolute_import"):
fromlist, name = args
have_star = 0
if fromlist is not None:
if "*" in fromlist:
have_star = 1
fromlist = [f for f in fromlist if f != "*"]
if what == "absolute_import": level = 0
else: level = -1
self._safe_import_hook(name, m, fromlist, level=level)
if have_star:
# We've encountered an "import *". If it is a Python module,
# the code has already been parsed and we can suck out the
# global names.
mm = None
if m.__path__:
# At this point we don't know whether 'name' is a
# submodule of 'm' or a global module. Let's just try
# the full name first.
mm = self.modules.get(m.__name__ + "." + name)
if mm is None:
mm = self.modules.get(name)
if mm is not None:
m.globalnames.update(mm.globalnames)
m.starimports.update(mm.starimports)
if mm.__code__ is None:
m.starimports[name] = 1
else:
m.starimports[name] = 1
elif what == "relative_import":
level, fromlist, name = args
if name:
self._safe_import_hook(name, m, fromlist, level=level)
else:
parent = self.determine_parent(m, level=level)
self._safe_import_hook(parent.__name__, None, fromlist, level=0)
else:
# We don't expect anything else from the generator.
raise RuntimeError(what)
for c in co.co_consts:
if isinstance(c, type(co)):
self.scan_code(c, m)
def load_package(self, fqname, pathname):
self.msgin(2, "load_package", fqname, pathname)
newname = replacePackageMap.get(fqname)
if newname:
fqname = newname
m = self.add_module(fqname)
m.__file__ = pathname
m.__path__ = [pathname]
# As per comment at top of file, simulate runtime __path__ additions.
m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
fp, buf, stuff = self.find_module("__init__", m.__path__)
self.load_module(fqname, fp, buf, stuff)
self.msgout(2, "load_package ->", m)
return m
def add_module(self, fqname):
if fqname in self.modules:
return self.modules[fqname]
self.modules[fqname] = m = Module(fqname)
return m
def find_module(self, name, path, parent=None):
if parent is not None:
# assert path is not None
fullname = parent.__name__+'.'+name
else:
fullname = name
if fullname in self.excludes:
self.msgout(3, "find_module -> Excluded", fullname)
raise ImportError, name
if path is None:
if name in sys.builtin_module_names:
return (None, None, ("", "", imp.C_BUILTIN))
path = self.path
return imp.find_module(name, path)
def report(self):
"""Print a report to stdout, listing the found modules with their
paths, as well as modules that are missing, or seem to be missing.
"""
print
print " %-25s %s" % ("Name", "File")
print " %-25s %s" % ("----", "----")
# Print modules found
keys = self.modules.keys()
keys.sort()
for key in keys:
m = self.modules[key]
if m.__path__:
print "P",
else:
print "m",
print "%-25s" % key, m.__file__ or ""
# Print missing modules
missing, maybe = self.any_missing_maybe()
if missing:
print
print "Missing modules:"
for name in missing:
mods = self.badmodules[name].keys()
mods.sort()
print "?", name, "imported from", ', '.join(mods)
# Print modules that may be missing, but then again, maybe not...
if maybe:
print
print "Submodules that appear to be missing, but could also be",
print "global names in the parent package:"
for name in maybe:
mods = self.badmodules[name].keys()
mods.sort()
print "?", name, "imported from", ', '.join(mods)
def any_missing(self):
"""Return a list of modules that appear to be missing. Use
any_missing_maybe() if you want to know which modules are
certain to be missing, and which *may* be missing.
"""
missing, maybe = self.any_missing_maybe()
return missing + maybe
def any_missing_maybe(self):
"""Return two lists, one with modules that are certainly missing
and one with modules that *may* be missing. The latter names could
either be submodules *or* just global names in the package.
The reason it can't always be determined is that it's impossible to
tell which names are imported when "from module import *" is done
with an extension module, short of actually importing it.
"""
missing = []
maybe = []
for name in self.badmodules:
if name in self.excludes:
continue
i = name.rfind(".")
if i < 0:
missing.append(name)
continue
subname = name[i+1:]
pkgname = name[:i]
pkg = self.modules.get(pkgname)
if pkg is not None:
if pkgname in self.badmodules[name]:
# The package tried to import this module itself and
# failed. It's definitely missing.
missing.append(name)
elif subname in pkg.globalnames:
# It's a global in the package: definitely not missing.
pass
elif pkg.starimports:
# It could be missing, but the package did an "import *"
# from a non-Python module, so we simply can't be sure.
maybe.append(name)
else:
# It's not a global in the package, the package didn't
# do funny star imports, it's very likely to be missing.
# The symbol could be inserted into the package from the
# outside, but since that's not good style we simply list
# it missing.
missing.append(name)
else:
missing.append(name)
missing.sort()
maybe.sort()
return missing, maybe
def replace_paths_in_code(self, co):
new_filename = original_filename = os.path.normpath(co.co_filename)
for f, r in self.replace_paths:
if original_filename.startswith(f):
new_filename = r + original_filename[len(f):]
break
if self.debug and original_filename not in self.processed_paths:
if new_filename != original_filename:
self.msgout(2, "co_filename %r changed to %r" \
% (original_filename,new_filename,))
else:
self.msgout(2, "co_filename %r remains unchanged" \
% (original_filename,))
self.processed_paths.append(original_filename)
consts = list(co.co_consts)
for i in range(len(consts)):
if isinstance(consts[i], type(co)):
consts[i] = self.replace_paths_in_code(consts[i])
return types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize,
co.co_flags, co.co_code, tuple(consts), co.co_names,
co.co_varnames, new_filename, co.co_name,
co.co_firstlineno, co.co_lnotab,
co.co_freevars, co.co_cellvars)
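# Minimal usage sketch (the canonical pattern for this module):
#   mf = ModuleFinder()
#   mf.run_script('hello.py')
#   mf.report()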
def test():
# Parse command line
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
except getopt.error, msg:
print msg
return
# Process options
debug = 1
domods = 0
addpath = []
exclude = []
for o, a in opts:
if o == '-d':
debug = debug + 1
if o == '-m':
domods = 1
if o == '-p':
addpath = addpath + a.split(os.pathsep)
if o == '-q':
debug = 0
if o == '-x':
exclude.append(a)
# Provide default arguments
if not args:
script = "hello.py"
else:
script = args[0]
# Set the path based on sys.path and the script directory
path = sys.path[:]
path[0] = os.path.dirname(script)
path = addpath + path
if debug > 1:
print "path:"
for item in path:
print " ", repr(item)
# Create the module finder and turn its crank
mf = ModuleFinder(path, debug, exclude)
for arg in args[1:]:
if arg == '-m':
domods = 1
continue
if domods:
if arg[-2:] == '.*':
mf.import_hook(arg[:-2], None, ["*"])
else:
mf.import_hook(arg)
else:
mf.load_file(arg)
mf.run_script(script)
mf.report()
return mf # for -i debugging
if __name__ == '__main__':
try:
mf = test()
except KeyboardInterrupt:
print "\n[interrupt]"
|
gpl-3.0
|
atomic83/youtube-dl
|
youtube_dl/extractor/cbs.py
|
19
|
2818
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
sanitized_Request,
smuggle_url,
)
class CBSIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?:cbs\.com/shows/[^/]+/(?:video|artist)|colbertlateshow\.com/(?:video|podcasts))/[^/]+/(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.cbs.com/shows/garth-brooks/video/_u7W953k6la293J7EPTd9oHkSPs6Xn6_/connect-chat-feat-garth-brooks/',
'info_dict': {
'id': '4JUVEwq3wUT7',
'display_id': 'connect-chat-feat-garth-brooks',
'ext': 'flv',
'title': 'Connect Chat feat. Garth Brooks',
'description': 'Connect with country music singer Garth Brooks, as he chats with fans on Wednesday November 27, 2013. Be sure to tune in to Garth Brooks: Live from Las Vegas, Friday November 29, at 9/8c on CBS!',
'duration': 1495,
},
'params': {
# rtmp download
'skip_download': True,
},
'_skip': 'Blocked outside the US',
}, {
'url': 'http://www.cbs.com/shows/liveonletterman/artist/221752/st-vincent/',
'info_dict': {
'id': 'WWF_5KqY3PK1',
'display_id': 'st-vincent',
'ext': 'flv',
'title': 'Live on Letterman - St. Vincent',
'description': 'Live On Letterman: St. Vincent in concert from New York\'s Ed Sullivan Theater on Tuesday, July 16, 2014.',
'duration': 3221,
},
'params': {
# rtmp download
'skip_download': True,
},
'_skip': 'Blocked outside the US',
}, {
'url': 'http://colbertlateshow.com/video/8GmB0oY0McANFvp2aEffk9jZZZ2YyXxy/the-colbeard/',
'only_matching': True,
}, {
'url': 'http://www.colbertlateshow.com/podcasts/dYSwjqPs_X1tvbV_P2FcPWRa_qT6akTC/in-the-bad-room-with-stephen/',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
request = sanitized_Request(url)
# Android UA is served with higher quality (720p) streams (see
# https://github.com/rg3/youtube-dl/issues/7490)
request.add_header('User-Agent', 'Mozilla/5.0 (Linux; Android 4.4; Nexus 5)')
webpage = self._download_webpage(request, display_id)
real_id = self._search_regex(
[r"video\.settings\.pid\s*=\s*'([^']+)';", r"cbsplayer\.pid\s*=\s*'([^']+)';"],
webpage, 'real video ID')
return {
'_type': 'url_transparent',
'ie_key': 'ThePlatform',
'url': smuggle_url(
'http://link.theplatform.com/s/dJ5BDC/%s?mbr=true&manifest=m3u' % real_id,
{'force_smil_url': True}),
'display_id': display_id,
}
|
unlicense
|
mishravikas/geonode-cas
|
geonode/api/api.py
|
1
|
6609
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
from geonode.base.models import TopicCategory
from geonode.layers.models import Layer
from geonode.maps.models import Map
from geonode.documents.models import Document
from geonode.people.models import Profile
from geonode.contrib.groups.models import Group
from taggit.models import Tag
from tastypie import fields
from tastypie.resources import ModelResource
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from .authorization import perms
FILTER_TYPES = {
'layer': Layer,
'map': Map,
'document': Document
}
class TypeFilteredResource(ModelResource):
""" Common resource used to apply faceting to categories and keywords
based on the type passed as query parameter in the form type:layer/map/document"""
count = fields.IntegerField()
type_filter = None
def dehydrate_count(self, bundle):
raise Exception('dehydrate_count not implemented in the child class')
def build_filters(self, filters={}):
orm_filters = super(TypeFilteredResource, self).build_filters(filters)
if 'type' in filters and filters['type'] in FILTER_TYPES.keys():
self.type_filter = FILTER_TYPES[filters['type']]
else:
self.type_filter = None
return orm_filters
def filter_security(self, obj, user):
""" Used to check whether the item should be included in the counts or not"""
return user.has_perm(perms[obj.class_name]['view'], obj)
class TagResource(TypeFilteredResource):
"""Tags api"""
def dehydrate_count(self, bundle):
count = 0
if self.type_filter:
for tagged in bundle.obj.taggit_taggeditem_items.all():
if tagged.content_object and tagged.content_type.model_class() == self.type_filter and \
self.filter_security(tagged.content_object, bundle.request.user):
count += 1
else:
count = bundle.obj.taggit_taggeditem_items.count()
return count
class Meta:
queryset = Tag.objects.all()
resource_name = 'keywords'
allowed_methods = ['get',]
filtering = {
'slug': ALL,
}
class TopicCategoryResource(TypeFilteredResource):
"""Category api"""
def dehydrate_count(self, bundle):
count = 0
resources = bundle.obj.resourcebase_set.instance_of(self.type_filter) if \
self.type_filter else bundle.obj.resourcebase_set.all()
for resource in resources:
if self.filter_security(resource, bundle.request.user):
count += 1
return count
class Meta:
queryset = TopicCategory.objects.all()
resource_name = 'categories'
allowed_methods = ['get',]
filtering = {
'identifier': ALL,
}
class UserResource(ModelResource):
"""User api"""
class Meta:
queryset = User.objects.all()
resource_name = 'users'
allowed_methods = ['get',]
excludes = ['is_staff', 'password', 'is_superuser',
'is_active', 'date_joined', 'last_login']
filtering = {
'username': ALL,
}
class GroupResource(ModelResource):
"""Groups api"""
detail_url = fields.CharField()
member_count = fields.IntegerField()
manager_count = fields.IntegerField()
def dehydrate_member_count(self, bundle):
return bundle.obj.member_queryset().count()
def dehydrate_manager_count(self, bundle):
return bundle.obj.get_managers().count()
def dehydrate_detail_url(self, bundle):
return reverse('group_detail', args=[bundle.obj.slug,])
class Meta:
queryset = Group.objects.all()
resource_name = 'groups'
allowed_methods = ['get',]
filtering = {
'name': ALL
}
ordering = ['title', 'last_modified',]
class ProfileResource(ModelResource):
"""Profile api"""
user = fields.ToOneField(UserResource, 'user')
avatar_100 = fields.CharField(null=True)
profile_detail_url = fields.CharField()
email = fields.CharField(default='')
layers_count = fields.IntegerField(default=0)
maps_count = fields.IntegerField(default=0)
documents_count = fields.IntegerField(default=0)
current_user = fields.BooleanField(default=False)
activity_stream_url = fields.CharField(null=True)
def build_filters(self, filters={}):
"""adds filtering by group functionality"""
orm_filters = super(ProfileResource, self).build_filters(filters)
if 'group' in filters:
orm_filters['group'] = filters['group']
return orm_filters
def apply_filters(self, request, applicable_filters):
"""filter by group if applicable by group functionality"""
group = applicable_filters.pop('group', None)
semi_filtered = super(ProfileResource, self).apply_filters(request, applicable_filters)
if group is not None:
semi_filtered = semi_filtered.filter(user__groupmember__group__slug=group)
return semi_filtered
def dehydrate_email(self, bundle):
email = ''
if bundle.request.user.is_authenticated():
email = bundle.obj.email
return email
def dehydrate_layers_count(self, bundle):
return bundle.obj.user.resourcebase_set.instance_of(Layer).count()
def dehydrate_maps_count(self, bundle):
return bundle.obj.user.resourcebase_set.instance_of(Map).count()
def dehydrate_documents_count(self, bundle):
return bundle.obj.user.resourcebase_set.instance_of(Document).count()
def dehydrate_avatar_100(self, bundle):
avatars = bundle.obj.user.avatar_set.filter(primary=True)
return avatars[0].avatar_url(100) if avatars.count() > 0 else ''
def dehydrate_profile_detail_url(self, bundle):
return bundle.obj.get_absolute_url()
def dehydrate_current_user(self, bundle):
return bundle.request.user.username == bundle.obj.user.username
def dehydrate_activity_stream_url(self, bundle):
return reverse('actstream_actor', kwargs={
'content_type_id': ContentType.objects.get_for_model(bundle.obj.user).pk,
'object_id': bundle.obj.user.pk})
class Meta:
queryset = Profile.objects.all()
resource_name = 'profiles'
allowed_methods = ['get',]
ordering = ['user','name']
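# Example requests against these resources (hypothetical URL prefix; the
# actual mount point depends on how the api is registered):
#   /api/keywords/?type=layer    -> keyword counts faceted to layers only
#   /api/categories/?type=map    -> category counts faceted to maps only
#   /api/profiles/?group=boston  -> profiles filtered by group slug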
|
gpl-3.0
|
bikong2/django
|
tests/middleware_exceptions/tests.py
|
119
|
44368
|
import sys
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from django.core.signals import got_request_exception
from django.http import HttpResponse
from django.template import engines
from django.template.response import TemplateResponse
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import patch_logger
class TestException(Exception):
pass
# A middleware base class that tracks which methods have been called
class TestMiddleware(object):
def __init__(self):
self.process_request_called = False
self.process_view_called = False
self.process_response_called = False
self.process_template_response_called = False
self.process_exception_called = False
def process_request(self, request):
self.process_request_called = True
def process_view(self, request, view_func, view_args, view_kwargs):
self.process_view_called = True
def process_template_response(self, request, response):
self.process_template_response_called = True
return response
def process_response(self, request, response):
self.process_response_called = True
return response
def process_exception(self, request, exception):
self.process_exception_called = True
# Middleware examples that do the right thing
class RequestMiddleware(TestMiddleware):
def process_request(self, request):
super(RequestMiddleware, self).process_request(request)
return HttpResponse('Request Middleware')
class ViewMiddleware(TestMiddleware):
def process_view(self, request, view_func, view_args, view_kwargs):
super(ViewMiddleware, self).process_view(request, view_func, view_args, view_kwargs)
return HttpResponse('View Middleware')
class ResponseMiddleware(TestMiddleware):
def process_response(self, request, response):
super(ResponseMiddleware, self).process_response(request, response)
return HttpResponse('Response Middleware')
class TemplateResponseMiddleware(TestMiddleware):
def process_template_response(self, request, response):
super(TemplateResponseMiddleware, self).process_template_response(request, response)
template = engines['django'].from_string('Template Response Middleware')
return TemplateResponse(request, template)
class ExceptionMiddleware(TestMiddleware):
def process_exception(self, request, exception):
super(ExceptionMiddleware, self).process_exception(request, exception)
return HttpResponse('Exception Middleware')
# Sample middlewares that raise exceptions
class BadRequestMiddleware(TestMiddleware):
def process_request(self, request):
super(BadRequestMiddleware, self).process_request(request)
raise TestException('Test Request Exception')
class BadViewMiddleware(TestMiddleware):
def process_view(self, request, view_func, view_args, view_kwargs):
super(BadViewMiddleware, self).process_view(request, view_func, view_args, view_kwargs)
raise TestException('Test View Exception')
class BadTemplateResponseMiddleware(TestMiddleware):
def process_template_response(self, request, response):
super(BadTemplateResponseMiddleware, self).process_template_response(request, response)
raise TestException('Test Template Response Exception')
class BadResponseMiddleware(TestMiddleware):
def process_response(self, request, response):
super(BadResponseMiddleware, self).process_response(request, response)
raise TestException('Test Response Exception')
class BadExceptionMiddleware(TestMiddleware):
def process_exception(self, request, exception):
super(BadExceptionMiddleware, self).process_exception(request, exception)
raise TestException('Test Exception Exception')
# Sample middlewares that fail to return an HttpResponse
class NoTemplateResponseMiddleware(TestMiddleware):
def process_template_response(self, request, response):
super(NoTemplateResponseMiddleware, self).process_template_response(request, response)
class NoResponseMiddleware(TestMiddleware):
def process_response(self, request, response):
super(NoResponseMiddleware, self).process_response(request, response)
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class BaseMiddlewareExceptionTest(SimpleTestCase):
def setUp(self):
self.exceptions = []
got_request_exception.connect(self._on_request_exception)
self.client.handler.load_middleware()
def tearDown(self):
got_request_exception.disconnect(self._on_request_exception)
self.exceptions = []
def _on_request_exception(self, sender, request, **kwargs):
self.exceptions.append(sys.exc_info())
def _add_middleware(self, middleware):
self.client.handler._request_middleware.insert(0, middleware.process_request)
self.client.handler._view_middleware.insert(0, middleware.process_view)
self.client.handler._template_response_middleware.append(middleware.process_template_response)
self.client.handler._response_middleware.append(middleware.process_response)
self.client.handler._exception_middleware.append(middleware.process_exception)
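# Note: the request and view hooks are inserted at the front of the
# handler's chains while the template-response, response and exception
# hooks are appended, mirroring Django's rule that response-phase
# middleware runs in the reverse order of the request phase.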
def assert_exceptions_handled(self, url, errors, extra_error=None):
try:
self.client.get(url)
except TestException:
# The test client intentionally re-raises any exception raised during
# request handling, so whether an exception was handled properly is
# verified through the got_request_exception signal rather than the
# response itself.
pass
except Exception as e:
if type(extra_error) != type(e):
self.fail("Unexpected exception: %s" % e)
self.assertEqual(len(self.exceptions), len(errors))
for i, error in enumerate(errors):
exception, value, tb = self.exceptions[i]
self.assertEqual(value.args, (error, ))
def assert_middleware_usage(self, middleware, request, view, template_response, response, exception):
self.assertEqual(middleware.process_request_called, request)
self.assertEqual(middleware.process_view_called, view)
self.assertEqual(middleware.process_template_response_called, template_response)
self.assertEqual(middleware.process_response_called, response)
self.assertEqual(middleware.process_exception_called, exception)
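# The five booleans passed to assert_middleware_usage follow the hook order:
# process_request, process_view, process_template_response, process_response,
# process_exception.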
class MiddlewareTests(BaseMiddlewareExceptionTest):
def test_process_request_middleware(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_template_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = TemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/template_response/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, True, True, False)
self.assert_middleware_usage(middleware, True, True, True, True, False)
self.assert_middleware_usage(post_middleware, True, True, True, True, False)
def test_process_exception_middleware(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_template_response_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = TemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_response_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Error in view'], Exception())
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_template_response_error(self):
middleware = TestMiddleware()
self._add_middleware(middleware)
self.assert_exceptions_handled('/middleware_exceptions/template_response_error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(middleware, True, True, True, True, False)
@override_settings(
MIDDLEWARE_CLASSES=['middleware_exceptions.middleware.ProcessExceptionMiddleware'],
)
def test_exception_in_render_passed_to_process_exception(self):
# Repopulate the list of middlewares since it's already been populated
# by setUp() before the MIDDLEWARE_CLASSES setting got overridden
self.client.handler.load_middleware()
response = self.client.get('/middleware_exceptions/exception_in_render/')
self.assertEqual(response.content, b'Exception caught')
class BadMiddlewareTests(BaseMiddlewareExceptionTest):
def test_process_request_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_template_response_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadTemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/template_response/',
['Test Template Response Exception']
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, True, True, False)
self.assert_middleware_usage(post_middleware, True, True, True, True, False)
def test_process_response_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Error in view', 'Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead.",
'Test Response Exception'
]
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_response_no_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = NoResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/view/', [
"NoResponseMiddleware.process_response didn't return an "
"HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_template_response_no_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = NoTemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/template_response/', [
"NoTemplateResponseMiddleware.process_template_response didn't "
"return an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, True, True, False)
self.assert_middleware_usage(post_middleware, True, True, True, True, False)
_missing = object()
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class RootUrlconfTests(SimpleTestCase):
@override_settings(ROOT_URLCONF=None)
def test_missing_root_urlconf(self):
# Removing ROOT_URLCONF is safe, as override_settings will restore
# the previously defined settings.
del settings.ROOT_URLCONF
self.assertRaises(AttributeError, self.client.get, "/middleware_exceptions/view/")
class MyMiddleware(object):
def __init__(self):
raise MiddlewareNotUsed
def process_request(self, request):
pass
class MyMiddlewareWithExceptionMessage(object):
def __init__(self):
raise MiddlewareNotUsed('spam eggs')
def process_request(self, request):
pass
@override_settings(
DEBUG=True,
ROOT_URLCONF='middleware_exceptions.urls',
)
class MiddlewareNotUsedTests(SimpleTestCase):
rf = RequestFactory()
def test_raise_exception(self):
request = self.rf.get('middleware_exceptions/view/')
with self.assertRaises(MiddlewareNotUsed):
MyMiddleware().process_request(request)
@override_settings(MIDDLEWARE_CLASSES=[
'middleware_exceptions.tests.MyMiddleware',
])
def test_log(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 1)
self.assertEqual(
calls[0],
"MiddlewareNotUsed: 'middleware_exceptions.tests.MyMiddleware'"
)
@override_settings(MIDDLEWARE_CLASSES=[
'middleware_exceptions.tests.MyMiddlewareWithExceptionMessage',
])
def test_log_custom_message(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 1)
self.assertEqual(
calls[0],
"MiddlewareNotUsed('middleware_exceptions.tests.MyMiddlewareWithExceptionMessage'): spam eggs"
)
@override_settings(DEBUG=False)
def test_do_not_log_when_debug_is_false(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 0)
|
bsd-3-clause
|
sudheesh001/oh-mainline
|
vendor/packages/twisted/twisted/web/test/test_resource.py
|
20
|
4660
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.resource}.
"""
from twisted.trial.unittest import TestCase
from twisted.web import error
from twisted.web.http import NOT_FOUND, FORBIDDEN
from twisted.web.resource import ErrorPage, NoResource, ForbiddenResource
from twisted.web.test.test_web import DummyRequest
class ErrorPageTests(TestCase):
"""
Tests for L{ErrorPage}, L{NoResource}, and L{ForbiddenResource}.
"""
errorPage = ErrorPage
noResource = NoResource
forbiddenResource = ForbiddenResource
def test_getChild(self):
"""
The C{getChild} method of L{ErrorPage} returns the L{ErrorPage} it is
called on.
"""
page = self.errorPage(321, "foo", "bar")
self.assertIdentical(page.getChild("name", object()), page)
def _pageRenderingTest(self, page, code, brief, detail):
request = DummyRequest([''])
self.assertEqual(
page.render(request),
"\n"
"<html>\n"
" <head><title>%s - %s</title></head>\n"
" <body>\n"
" <h1>%s</h1>\n"
" <p>%s</p>\n"
" </body>\n"
"</html>\n" % (code, brief, brief, detail))
self.assertEqual(request.responseCode, code)
self.assertEqual(
request.outgoingHeaders,
{'content-type': 'text/html; charset=utf-8'})
def test_errorPageRendering(self):
"""
L{ErrorPage.render} returns a C{str} describing the error defined by
the response code and message passed to L{ErrorPage.__init__}. It also
uses that response code to set the response code on the L{Request}
passed in.
"""
code = 321
brief = "brief description text"
detail = "much longer text might go here"
page = self.errorPage(code, brief, detail)
self._pageRenderingTest(page, code, brief, detail)
def test_noResourceRendering(self):
"""
L{NoResource} sets the HTTP I{NOT FOUND} code.
"""
detail = "long message"
page = self.noResource(detail)
self._pageRenderingTest(page, NOT_FOUND, "No Such Resource", detail)
def test_forbiddenResourceRendering(self):
"""
L{ForbiddenResource} sets the HTTP I{FORBIDDEN} code.
"""
detail = "longer message"
page = self.forbiddenResource(detail)
self._pageRenderingTest(page, FORBIDDEN, "Forbidden Resource", detail)
class DeprecatedErrorPageTests(ErrorPageTests):
"""
Tests for L{error.ErrorPage}, L{error.NoResource}, and
L{error.ForbiddenResource}.
"""
def errorPage(self, *args):
return error.ErrorPage(*args)
def noResource(self, *args):
return error.NoResource(*args)
def forbiddenResource(self, *args):
return error.ForbiddenResource(*args)
def _assertWarning(self, name, offendingFunction):
warnings = self.flushWarnings([offendingFunction])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
'twisted.web.error.%s is deprecated since Twisted 9.0. '
'See twisted.web.resource.%s.' % (name, name))
def test_getChild(self):
"""
Like L{ErrorPageTests.test_getChild}, but flush the deprecation warning
emitted by instantiating L{error.ErrorPage}.
"""
ErrorPageTests.test_getChild(self)
self._assertWarning('ErrorPage', self.errorPage)
def test_errorPageRendering(self):
"""
Like L{ErrorPageTests.test_errorPageRendering}, but flush the
deprecation warning emitted by instantiating L{error.ErrorPage}.
"""
ErrorPageTests.test_errorPageRendering(self)
self._assertWarning('ErrorPage', self.errorPage)
def test_noResourceRendering(self):
"""
Like L{ErrorPageTests.test_noResourceRendering}, but flush the
deprecation warning emitted by instantiating L{error.NoResource}.
"""
ErrorPageTests.test_noResourceRendering(self)
self._assertWarning('NoResource', self.noResource)
def test_forbiddenResourceRendering(self):
"""
Like L{ErrorPageTests.test_forbiddenResourceRendering}, but flush the
deprecation warning emitted by instantiating
L{error.ForbiddenResource}.
"""
ErrorPageTests.test_forbiddenResourceRendering(self)
self._assertWarning('ForbiddenResource', self.forbiddenResource)
|
agpl-3.0
|
thezbyg/gpick
|
tools/gpick.py
|
1
|
5990
|
import os, time, re, string, glob, subprocess
from .gettext import *
from .resource_template import *
from .ragel import *
from .template import *
from SCons.Script import Chmod, Flatten
from SCons.Util import NodeList
from SCons.Script.SConscript import SConsEnvironment
def MatchFiles(files, path, repath, dir_exclude_pattern, file_exclude_pattern):
for filename in os.listdir(path):
fullname = os.path.join(path, filename)
repath_file = os.path.join(repath, filename)
if os.path.isdir(fullname):
if not dir_exclude_pattern.search(repath_file):
MatchFiles(files, fullname, repath_file, dir_exclude_pattern, file_exclude_pattern)
else:
if not file_exclude_pattern.search(filename):
files.append(fullname)
def CheckPKG(context, name):
context.Message('Checking for library %s... ' % name)
ret = context.TryAction('pkg-config --exists "%s"' % name)[0]
context.Result(ret)
return ret
def CheckProgram(context, env, name, member_name):
context.Message('Checking for program %s... ' % name)
if env[member_name]:
context.Result(True)
return True
else:
context.Result(False)
return False
def CompareVersions(a, b):
# Compare version component lists numerically; the components arrive as
# strings, so convert them to int (plain string comparison would mis-order
# double-digit components such as '10' vs '9'). Note the inverted sign
# convention: 1 means a < b, -1 means a > b, 0 means the shared components
# are equal; CheckBoost below relies on ">= 0" meaning the found version
# is at least the required one.
for i in range(0, min(len(a), len(b))):
if int(a[i]) < int(b[i]):
return 1
if int(a[i]) > int(b[i]):
return -1
return 0
def CheckBoost(context, version):
context.Message('Checking for library boost >= %s... ' % (version))
result, boost_version = context.TryRun("""
#include <boost/version.hpp>
#include <iostream>
int main(){
std::cout << BOOST_LIB_VERSION << std::endl;
return 0;
}
""", '.cpp')
if result:
found_version = boost_version.strip('\r\n\t ').split('_')
required_version = version.strip('\r\n\t ').split('.')
result = CompareVersions(required_version, found_version) >= 0
context.Result(result)
return result
class GpickLibrary(NodeList):
include_dirs = []
class GpickEnvironment(SConsEnvironment):
extern_libs = {}
def AddCustomBuilders(self):
addGettextBuilder(self)
addResourceTemplateBuilder(self)
addTemplateBuilder(self)
addRagelBuilder(self)
def DefineLibrary(self, library_name, library):
self.extern_libs[library_name] = library
def UseLibrary(self, library_name):
lib = self.extern_libs[library_name]
for i in lib:
lib_include_path = os.path.split(i.path)[0]
self.PrependUnique(LIBS = [library_name], LIBPATH = ['#' + lib_include_path])
self.PrependUnique(CPPPATH = lib.include_dirs)
return lib
def ConfirmPrograms(self, conf, programs):
conf.AddTests({'CheckProgram': CheckProgram})
for evar, args in programs.items():
found = False
for name, member_name in args['checks'].items():
if conf.CheckProgram(self, name, member_name):
found = True
break
if not found:
if args.get('required', True):
self.Exit(1)
def ConfirmLibs(self, conf, libs):
conf.AddTests({'CheckPKG': CheckPKG})
for evar, args in libs.items():
found = False
for name, version in args['checks'].items():
if conf.CheckPKG(name + ' ' + version):
self[evar] = name
found = True
break
if not found:
if args.get('required', True):
self.Exit(1)
def ConfirmBoost(self, conf, version):
conf.AddTests({'CheckBoost': CheckBoost})
if conf.CheckBoost(version):
return
else:
self.Exit(1)
def InstallPerm(self, dir, source, perm):
obj = self.Install(dir, source)
for i in obj:
self.AddPostAction(i, Chmod(i, perm))
return dir
def InstallPermAutoDir(self, dir, relative_dir, source, perm):
for f in Flatten(source):
path = dir
if str(f.get_dir()).startswith(relative_dir):
path = os.path.join(path, str(f.get_dir())[len(relative_dir):])
else:
path = os.path.join(path, str(f.get_dir()))
obj = self.Install(path, f)
for i in obj:
self.AddPostAction(i, Chmod(i, perm))
return dir
InstallProgram = lambda self, dir, source: GpickEnvironment.InstallPerm(self, dir, source, 0o755)
InstallData = lambda self, dir, source: GpickEnvironment.InstallPerm(self, dir, source, 0o644)
InstallDataAutoDir = lambda self, dir, relative_dir, source: GpickEnvironment.InstallPermAutoDir(self, dir, relative_dir, source, 0o644)
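# Illustrative usage (paths and names here are hypothetical):
# env.InstallProgram('/usr/bin', program) installs executables with mode
# 0o755, while env.InstallData('/usr/share/gpick', data_files) installs
# data files with mode 0o644.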
def GetSourceFiles(self, dir_exclude_pattern, file_exclude_pattern):
dir_exclude_prog = re.compile(dir_exclude_pattern)
file_exclude_prog = re.compile(file_exclude_pattern)
files = []
MatchFiles(files, self.GetLaunchDir(), os.sep, dir_exclude_prog, file_exclude_prog)
return files
def GetVersionInfo(self):
try:
# With shell=False the quotes would be passed to git verbatim, so use a
# plain format string; the pattern is a raw string to avoid invalid-escape
# warnings on Python 3.
revision = subprocess.Popen(['git', 'show', '--no-patch', '--format=%H %ct'], shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0]
match = re.search(r'([\d\w]+) (\d+)', str(revision))
rev_hash = match.group(1)
commit_date = time.gmtime(int(match.group(2)))
rev_date = time.strftime("%Y-%m-%d", commit_date)
rev_time = time.strftime("%H:%M:%S", commit_date)
except:
try:
with open("../version.txt", "r") as version_file:
lines = version_file.read().splitlines()
rev_hash = lines[0]
rev_date = lines[1]
rev_time = lines[2]
except:
rev_hash = 'unknown'
commit_date = time.gmtime(int(os.environ.get('SOURCE_DATE_EPOCH', time.time())))
rev_date = time.strftime("%Y-%m-%d", commit_date)
rev_time = time.strftime("%H:%M:%S", commit_date)
self.Replace(
GPICK_BUILD_REVISION = rev_hash[0:10],
GPICK_BUILD_DATE = rev_date,
GPICK_BUILD_TIME = rev_time,
)
def RegexEscape(str):
return str.replace('\\', '\\\\')
def WriteNsisVersion(target, source, env):
for t in target:
for s in source:
with open(str(t), "w") as version_file:
version_file.writelines('!define VERSION "' + str(env['GPICK_BUILD_VERSION']) + '"')
return 0
def Glob(path):
files = []
for f in glob.glob(os.path.join(path, '*')):
if os.path.isdir(str(f)):
files.extend(Glob(str(f)))
else:
files.append(str(f))
return files
|
bsd-3-clause
|
halfcrazy/sqlalchemy
|
test/orm/test_bind.py
|
25
|
14697
|
from sqlalchemy.testing import assert_raises_message
from sqlalchemy import MetaData, Integer, ForeignKey
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.schema import Column
from sqlalchemy.orm import mapper, create_session
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, eq_, engines, is_
from sqlalchemy.orm import relationship, Session, backref, sessionmaker
from test.orm import _fixtures
from sqlalchemy.testing.mock import Mock
class BindIntegrationTest(_fixtures.FixtureTest):
run_inserts = None
def test_mapped_binds(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
# ensure tables are unbound
m2 = sa.MetaData()
users_unbound = users.tometadata(m2)
addresses_unbound = addresses.tometadata(m2)
mapper(Address, addresses_unbound)
mapper(User, users_unbound, properties={
'addresses': relationship(Address,
backref=backref("user", cascade="all"),
cascade="all")})
sess = Session(binds={User: self.metadata.bind,
Address: self.metadata.bind})
u1 = User(id=1, name='ed')
sess.add(u1)
eq_(sess.query(User).filter(User.id == 1).all(),
[User(id=1, name='ed')])
# test expression binding
sess.execute(users_unbound.insert(), params=dict(id=2,
name='jack'))
eq_(sess.execute(users_unbound.select(users_unbound.c.id
== 2)).fetchall(), [(2, 'jack')])
eq_(sess.execute(users_unbound.select(User.id == 2)).fetchall(),
[(2, 'jack')])
sess.execute(users_unbound.delete())
eq_(sess.execute(users_unbound.select()).fetchall(), [])
sess.close()
def test_table_binds(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
# ensure tables are unbound
m2 = sa.MetaData()
users_unbound = users.tometadata(m2)
addresses_unbound = addresses.tometadata(m2)
mapper(Address, addresses_unbound)
mapper(User, users_unbound, properties={
'addresses': relationship(Address,
backref=backref("user", cascade="all"),
cascade="all")})
Session = sessionmaker(binds={users_unbound: self.metadata.bind,
addresses_unbound: self.metadata.bind})
sess = Session()
u1 = User(id=1, name='ed')
sess.add(u1)
eq_(sess.query(User).filter(User.id == 1).all(),
[User(id=1, name='ed')])
sess.execute(users_unbound.insert(), params=dict(id=2, name='jack'))
eq_(sess.execute(users_unbound.select(users_unbound.c.id
== 2)).fetchall(), [(2, 'jack')])
eq_(sess.execute(users_unbound.select(User.id == 2)).fetchall(),
[(2, 'jack')])
sess.execute(users_unbound.delete())
eq_(sess.execute(users_unbound.select()).fetchall(), [])
sess.close()
def test_bind_from_metadata(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
session = create_session()
session.execute(users.insert(), dict(name='Johnny'))
assert len(session.query(User).filter_by(name='Johnny').all()) == 1
session.execute(users.delete())
assert len(session.query(User).filter_by(name='Johnny').all()) == 0
session.close()
def test_bind_arguments(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses)
e1 = engines.testing_engine()
e2 = engines.testing_engine()
e3 = engines.testing_engine()
sess = Session(e3)
sess.bind_mapper(User, e1)
sess.bind_mapper(Address, e2)
assert sess.connection().engine is e3
assert sess.connection(bind=e1).engine is e1
assert sess.connection(mapper=Address, bind=e1).engine is e1
assert sess.connection(mapper=Address).engine is e2
assert sess.connection(clause=addresses.select()).engine is e2
assert sess.connection(mapper=User,
clause=addresses.select()).engine is e1
assert sess.connection(mapper=User,
clause=addresses.select(),
bind=e2).engine is e2
sess.close()
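# The assertions above establish the bind resolution precedence: an explicit
# bind= argument beats the mapper argument, which beats the bind derived from
# a clause's tables, with the Session's own bind (e3) as the fallback.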
@engines.close_open_connections
def test_bound_connection(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
c = testing.db.connect()
sess = create_session(bind=c)
sess.begin()
transaction = sess.transaction
u = User(name='u1')
sess.add(u)
sess.flush()
assert transaction._connection_for_bind(testing.db, None) \
is transaction._connection_for_bind(c, None) is c
assert_raises_message(sa.exc.InvalidRequestError,
'Session already has a Connection '
'associated',
transaction._connection_for_bind,
testing.db.connect(), None)
transaction.rollback()
assert len(sess.query(User).all()) == 0
sess.close()
def test_bound_connection_transactional(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
c = testing.db.connect()
sess = create_session(bind=c, autocommit=False)
u = User(name='u1')
sess.add(u)
sess.flush()
sess.close()
assert not c.in_transaction()
assert c.scalar("select count(1) from users") == 0
sess = create_session(bind=c, autocommit=False)
u = User(name='u2')
sess.add(u)
sess.flush()
sess.commit()
assert not c.in_transaction()
assert c.scalar("select count(1) from users") == 1
c.execute("delete from users")
assert c.scalar("select count(1) from users") == 0
c = testing.db.connect()
trans = c.begin()
sess = create_session(bind=c, autocommit=True)
u = User(name='u3')
sess.add(u)
sess.flush()
assert c.in_transaction()
trans.commit()
assert not c.in_transaction()
assert c.scalar("select count(1) from users") == 1
class SessionBindTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('test_table', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', Integer))
@classmethod
def setup_classes(cls):
class Foo(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
test_table, Foo = cls.tables.test_table, cls.classes.Foo
meta = MetaData()
test_table.tometadata(meta)
assert meta.tables['test_table'].bind is None
mapper(Foo, meta.tables['test_table'])
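# Foo is deliberately mapped against a copy of test_table on an unbound
# MetaData, so the tests below must supply the bind through the Session.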
def test_session_bind(self):
Foo = self.classes.Foo
engine = self.metadata.bind
for bind in (engine, engine.connect()):
try:
sess = create_session(bind=bind)
assert sess.bind is bind
f = Foo()
sess.add(f)
sess.flush()
assert sess.query(Foo).get(f.id) is f
finally:
if hasattr(bind, 'close'):
bind.close()
def test_session_unbound(self):
Foo = self.classes.Foo
sess = create_session()
sess.add(Foo())
assert_raises_message(
sa.exc.UnboundExecutionError,
('Could not locate a bind configured on Mapper|Foo|test_table '
'or this Session'),
sess.flush)
class GetBindTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
'base_table', metadata,
Column('id', Integer, primary_key=True)
)
Table(
'w_mixin_table', metadata,
Column('id', Integer, primary_key=True)
)
Table(
'joined_sub_table', metadata,
Column('id', ForeignKey('base_table.id'), primary_key=True)
)
Table(
'concrete_sub_table', metadata,
Column('id', Integer, primary_key=True)
)
@classmethod
def setup_classes(cls):
class MixinOne(cls.Basic):
pass
class BaseClass(cls.Basic):
pass
class ClassWMixin(MixinOne, cls.Basic):
pass
class JoinedSubClass(BaseClass):
pass
class ConcreteSubClass(BaseClass):
pass
@classmethod
def setup_mappers(cls):
mapper(cls.classes.ClassWMixin, cls.tables.w_mixin_table)
mapper(cls.classes.BaseClass, cls.tables.base_table)
mapper(
cls.classes.JoinedSubClass,
cls.tables.joined_sub_table, inherits=cls.classes.BaseClass)
mapper(
cls.classes.ConcreteSubClass,
cls.tables.concrete_sub_table, inherits=cls.classes.BaseClass,
concrete=True)
def _fixture(self, binds):
return Session(binds=binds)
def test_fallback_table_metadata(self):
session = self._fixture({})
is_(
session.get_bind(self.classes.BaseClass),
testing.db
)
def test_bind_base_table_base_class(self):
base_class_bind = Mock()
session = self._fixture({
self.tables.base_table: base_class_bind
})
is_(
session.get_bind(self.classes.BaseClass),
base_class_bind
)
def test_bind_base_table_joined_sub_class(self):
base_class_bind = Mock()
session = self._fixture({
self.tables.base_table: base_class_bind
})
is_(
session.get_bind(self.classes.BaseClass),
base_class_bind
)
is_(
session.get_bind(self.classes.JoinedSubClass),
base_class_bind
)
def test_bind_joined_sub_table_joined_sub_class(self):
base_class_bind = Mock(name='base')
joined_class_bind = Mock(name='joined')
session = self._fixture({
self.tables.base_table: base_class_bind,
self.tables.joined_sub_table: joined_class_bind
})
is_(
session.get_bind(self.classes.BaseClass),
base_class_bind
)
# joined table inheritance has to query based on the base
# table, so this is what we expect
is_(
session.get_bind(self.classes.JoinedSubClass),
base_class_bind
)
def test_bind_base_table_concrete_sub_class(self):
base_class_bind = Mock()
session = self._fixture({
self.tables.base_table: base_class_bind
})
is_(
session.get_bind(self.classes.ConcreteSubClass),
testing.db
)
def test_bind_sub_table_concrete_sub_class(self):
base_class_bind = Mock(name='base')
concrete_sub_bind = Mock(name='concrete')
session = self._fixture({
self.tables.base_table: base_class_bind,
self.tables.concrete_sub_table: concrete_sub_bind
})
is_(
session.get_bind(self.classes.BaseClass),
base_class_bind
)
is_(
session.get_bind(self.classes.ConcreteSubClass),
concrete_sub_bind
)
def test_bind_base_class_base_class(self):
base_class_bind = Mock()
session = self._fixture({
self.classes.BaseClass: base_class_bind
})
is_(
session.get_bind(self.classes.BaseClass),
base_class_bind
)
def test_bind_mixin_class_simple_class(self):
base_class_bind = Mock()
session = self._fixture({
self.classes.MixinOne: base_class_bind
})
is_(
session.get_bind(self.classes.ClassWMixin),
base_class_bind
)
def test_bind_base_class_joined_sub_class(self):
base_class_bind = Mock()
session = self._fixture({
self.classes.BaseClass: base_class_bind
})
is_(
session.get_bind(self.classes.JoinedSubClass),
base_class_bind
)
def test_bind_joined_sub_class_joined_sub_class(self):
base_class_bind = Mock(name='base')
joined_class_bind = Mock(name='joined')
session = self._fixture({
self.classes.BaseClass: base_class_bind,
self.classes.JoinedSubClass: joined_class_bind
})
is_(
session.get_bind(self.classes.BaseClass),
base_class_bind
)
is_(
session.get_bind(self.classes.JoinedSubClass),
joined_class_bind
)
def test_bind_base_class_concrete_sub_class(self):
base_class_bind = Mock()
session = self._fixture({
self.classes.BaseClass: base_class_bind
})
is_(
session.get_bind(self.classes.ConcreteSubClass),
base_class_bind
)
def test_bind_sub_class_concrete_sub_class(self):
base_class_bind = Mock(name='base')
concrete_sub_bind = Mock(name='concrete')
session = self._fixture({
self.classes.BaseClass: base_class_bind,
self.classes.ConcreteSubClass: concrete_sub_bind
})
is_(
session.get_bind(self.classes.BaseClass),
base_class_bind
)
is_(
session.get_bind(self.classes.ConcreteSubClass),
concrete_sub_bind
)
|
mit
|
mindbender-studio/core
|
avalon/vendor/jsonschema/tests/test_jsonschema_test_suite.py
|
62
|
9644
|
"""
Test runner for the JSON Schema official test suite
Tests comprehensive correctness of each draft's validator.
See https://github.com/json-schema/JSON-Schema-Test-Suite for details.
"""
from contextlib import closing
from decimal import Decimal
import glob
import json
import io
import itertools
import os
import re
import subprocess
import sys
try:
from sys import pypy_version_info
except ImportError:
pypy_version_info = None
from jsonschema import (
FormatError, SchemaError, ValidationError, Draft3Validator,
Draft4Validator, FormatChecker, draft3_format_checker,
draft4_format_checker, validate,
)
from jsonschema.compat import PY3
from jsonschema.tests.compat import mock, unittest
import jsonschema
REPO_ROOT = os.path.join(os.path.dirname(jsonschema.__file__), os.path.pardir)
SUITE = os.getenv("JSON_SCHEMA_TEST_SUITE", os.path.join(REPO_ROOT, "json"))
if not os.path.isdir(SUITE):
raise ValueError(
"Can't find the JSON-Schema-Test-Suite directory. Set the "
"'JSON_SCHEMA_TEST_SUITE' environment variable or run the tests from "
"alongside a checkout of the suite."
)
TESTS_DIR = os.path.join(SUITE, "tests")
JSONSCHEMA_SUITE = os.path.join(SUITE, "bin", "jsonschema_suite")
remotes_stdout = subprocess.Popen(
["python", JSONSCHEMA_SUITE, "remotes"], stdout=subprocess.PIPE,
).stdout
with closing(remotes_stdout):
if PY3:
remotes_stdout = io.TextIOWrapper(remotes_stdout)
REMOTES = json.load(remotes_stdout)
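# REMOTES maps each remote ref path to its schema document, as emitted by the
# suite's bin/jsonschema_suite helper; the mocked resolver further below
# serves remote $refs from this mapping.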
def make_case(schema, data, valid, name):
if valid:
def test_case(self):
kwargs = getattr(self, "validator_kwargs", {})
validate(data, schema, cls=self.validator_class, **kwargs)
else:
def test_case(self):
kwargs = getattr(self, "validator_kwargs", {})
with self.assertRaises(ValidationError):
validate(data, schema, cls=self.validator_class, **kwargs)
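    # unittest reports each generated method by __name__, so give the
    # closure a distinct name; on Python 2 method names must be bytestrings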
if not PY3:
name = name.encode("utf-8")
test_case.__name__ = name
return test_case
def maybe_skip(skip, test_case, case, test):
if skip is not None:
reason = skip(case, test)
if reason is not None:
test_case = unittest.skip(reason)(test_case)
return test_case
def load_json_cases(tests_glob, ignore_glob="", basedir=TESTS_DIR, skip=None):
if ignore_glob:
ignore_glob = os.path.join(basedir, ignore_glob)
def add_test_methods(test_class):
ignored = set(glob.iglob(ignore_glob))
for filename in glob.iglob(os.path.join(basedir, tests_glob)):
if filename in ignored:
continue
validating, _ = os.path.splitext(os.path.basename(filename))
id = itertools.count(1)
with open(filename) as test_file:
for case in json.load(test_file):
for test in case["tests"]:
name = "test_%s_%s_%s" % (
validating,
next(id),
re.sub(r"[\W ]+", "_", test["description"]),
)
assert not hasattr(test_class, name), name
test_case = make_case(
data=test["data"],
schema=case["schema"],
valid=test["valid"],
name=name,
)
test_case = maybe_skip(skip, test_case, case, test)
setattr(test_class, name, test_case)
return test_class
return add_test_methods
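# Illustrative use of the decorator factory above (hypothetical filename,
# not part of the original suite):
#
#   @load_json_cases("draft3/type.json")
#   class TestTypes(unittest.TestCase):
#       validator_class = Draft3Validator
#
# would attach one generated method per (case, test) pair, named e.g.
# "test_type_1_integer_type_matches_integers".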
class TypesMixin(object):
@unittest.skipIf(PY3, "In Python 3 json.load always produces unicode")
def test_string_a_bytestring_is_a_string(self):
self.validator_class({"type" : "string"}).validate(b"foo")
class DecimalMixin(object):
def test_it_can_validate_with_decimals(self):
schema = {"type" : "number"}
validator = self.validator_class(
schema, types={"number" : (int, float, Decimal)}
)
for valid in [1, 1.1, Decimal(1) / Decimal(8)]:
validator.validate(valid)
for invalid in ["foo", {}, [], True, None]:
with self.assertRaises(ValidationError):
validator.validate(invalid)
def missing_format(checker):
def missing_format(case, test):
format = case["schema"].get("format")
if format not in checker.checkers:
return "Format checker {0!r} not found.".format(format)
elif (
format == "date-time" and
pypy_version_info is not None and
pypy_version_info[:2] <= (1, 9)
):
# datetime.datetime is overzealous about typechecking in <=1.9
return "datetime.datetime is broken on this version of PyPy."
return missing_format
class FormatMixin(object):
def test_it_returns_true_for_formats_it_does_not_know_about(self):
validator = self.validator_class(
{"format" : "carrot"}, format_checker=FormatChecker(),
)
validator.validate("bugs")
def test_it_does_not_validate_formats_by_default(self):
validator = self.validator_class({})
self.assertIsNone(validator.format_checker)
def test_it_validates_formats_if_a_checker_is_provided(self):
checker = mock.Mock(spec=FormatChecker)
validator = self.validator_class(
{"format" : "foo"}, format_checker=checker,
)
validator.validate("bar")
checker.check.assert_called_once_with("bar", "foo")
cause = ValueError()
checker.check.side_effect = FormatError('aoeu', cause=cause)
with self.assertRaises(ValidationError) as cm:
validator.validate("bar")
# Make sure original cause is attached
self.assertIs(cm.exception.cause, cause)
def test_it_validates_formats_of_any_type(self):
checker = mock.Mock(spec=FormatChecker)
validator = self.validator_class(
{"format" : "foo"}, format_checker=checker,
)
validator.validate([1, 2, 3])
checker.check.assert_called_once_with([1, 2, 3], "foo")
cause = ValueError()
checker.check.side_effect = FormatError('aoeu', cause=cause)
with self.assertRaises(ValidationError) as cm:
validator.validate([1, 2, 3])
# Make sure original cause is attached
self.assertIs(cm.exception.cause, cause)
if sys.maxunicode == 2 ** 16 - 1: # This is a narrow build.
def narrow_unicode_build(case, test):
if "supplementary Unicode" in test["description"]:
return "Not running surrogate Unicode case, this Python is narrow."
else:
def narrow_unicode_build(case, test): # This isn't, skip nothing.
return
@load_json_cases(
"draft3/*.json",
skip=narrow_unicode_build,
ignore_glob="draft3/refRemote.json",
)
@load_json_cases(
"draft3/optional/format.json", skip=missing_format(draft3_format_checker)
)
@load_json_cases("draft3/optional/bignum.json")
@load_json_cases("draft3/optional/zeroTerminatedFloats.json")
class TestDraft3(unittest.TestCase, TypesMixin, DecimalMixin, FormatMixin):
validator_class = Draft3Validator
validator_kwargs = {"format_checker" : draft3_format_checker}
def test_any_type_is_valid_for_type_any(self):
validator = self.validator_class({"type" : "any"})
validator.validate(mock.Mock())
# TODO: we're in need of more meta schema tests
def test_invalid_properties(self):
with self.assertRaises(SchemaError):
validate({}, {"properties": {"test": True}},
cls=self.validator_class)
def test_minItems_invalid_string(self):
with self.assertRaises(SchemaError):
# needs to be an integer
validate([1], {"minItems" : "1"}, cls=self.validator_class)
@load_json_cases(
"draft4/*.json",
skip=narrow_unicode_build,
ignore_glob="draft4/refRemote.json",
)
@load_json_cases(
"draft4/optional/format.json", skip=missing_format(draft4_format_checker)
)
@load_json_cases("draft4/optional/bignum.json")
@load_json_cases("draft4/optional/zeroTerminatedFloats.json")
class TestDraft4(unittest.TestCase, TypesMixin, DecimalMixin, FormatMixin):
validator_class = Draft4Validator
validator_kwargs = {"format_checker" : draft4_format_checker}
# TODO: we're in need of more meta schema tests
def test_invalid_properties(self):
with self.assertRaises(SchemaError):
validate({}, {"properties": {"test": True}},
cls=self.validator_class)
def test_minItems_invalid_string(self):
with self.assertRaises(SchemaError):
# needs to be an integer
validate([1], {"minItems" : "1"}, cls=self.validator_class)
class RemoteRefResolutionMixin(object):
def setUp(self):
patch = mock.patch("jsonschema.validators.requests")
requests = patch.start()
requests.get.side_effect = self.resolve
self.addCleanup(patch.stop)
def resolve(self, reference):
_, _, reference = reference.partition("http://localhost:1234/")
return mock.Mock(**{"json.return_value" : REMOTES.get(reference)})
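# RemoteRefResolutionMixin patches jsonschema.validators.requests so every
# remote $ref under http://localhost:1234/ is answered from the REMOTES
# mapping instead of the network.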
@load_json_cases("draft3/refRemote.json")
class Draft3RemoteResolution(RemoteRefResolutionMixin, unittest.TestCase):
validator_class = Draft3Validator
@load_json_cases("draft4/refRemote.json")
class Draft4RemoteResolution(RemoteRefResolutionMixin, unittest.TestCase):
validator_class = Draft4Validator
|
mit
|
illicitonion/givabit
|
lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_1_2/django/contrib/localflavor/pl/pl_administrativeunits.py
|
433
|
13194
|
# -*- coding: utf-8 -*-
"""
Polish administrative units as in http://pl.wikipedia.org/wiki/Podzia%C5%82_administracyjny_Polski
"""
ADMINISTRATIVE_UNIT_CHOICES = (
('wroclaw', u'Wrocław'),
('jeleniagora', u'Jelenia Góra'),
('legnica', u'Legnica'),
('boleslawiecki', u'bolesławiecki'),
('dzierzoniowski', u'dzierżoniowski'),
('glogowski', u'głogowski'),
('gorowski', u'górowski'),
('jaworski', u'jaworski'),
('jeleniogorski', u'jeleniogórski'),
('kamiennogorski', u'kamiennogórski'),
('klodzki', u'kłodzki'),
('legnicki', u'legnicki'),
('lubanski', u'lubański'),
('lubinski', u'lubiński'),
('lwowecki', u'lwówecki'),
('milicki', u'milicki'),
('olesnicki', u'oleśnicki'),
('olawski', u'oławski'),
('polkowicki', u'polkowicki'),
('strzelinski', u'strzeliński'),
('sredzki', u'średzki'),
('swidnicki', u'świdnicki'),
('trzebnicki', u'trzebnicki'),
('walbrzyski', u'wałbrzyski'),
('wolowski', u'wołowski'),
('wroclawski', u'wrocławski'),
('zabkowicki', u'ząbkowicki'),
('zgorzelecki', u'zgorzelecki'),
('zlotoryjski', u'złotoryjski'),
('bydgoszcz', u'Bydgoszcz'),
('torun', u'Toruń'),
('wloclawek', u'Włocławek'),
('grudziadz', u'Grudziądz'),
('aleksandrowski', u'aleksandrowski'),
('brodnicki', u'brodnicki'),
('bydgoski', u'bydgoski'),
('chelminski', u'chełmiński'),
('golubsko-dobrzynski', u'golubsko-dobrzyński'),
('grudziadzki', u'grudziądzki'),
('inowroclawski', u'inowrocławski'),
('lipnowski', u'lipnowski'),
('mogilenski', u'mogileński'),
('nakielski', u'nakielski'),
('radziejowski', u'radziejowski'),
('rypinski', u'rypiński'),
('sepolenski', u'sępoleński'),
('swiecki', u'świecki'),
('torunski', u'toruński'),
('tucholski', u'tucholski'),
('wabrzeski', u'wąbrzeski'),
    ('wloclawski', u'włocławski'),
    ('zninski', u'żniński'),
('lublin', u'Lublin'),
('biala-podlaska', u'Biała Podlaska'),
('chelm', u'Chełm'),
('zamosc', u'Zamość'),
('bialski', u'bialski'),
('bilgorajski', u'biłgorajski'),
('chelmski', u'chełmski'),
('hrubieszowski', u'hrubieszowski'),
('janowski', u'janowski'),
('krasnostawski', u'krasnostawski'),
('krasnicki', u'kraśnicki'),
('lubartowski', u'lubartowski'),
('lubelski', u'lubelski'),
('leczynski', u'łęczyński'),
('lukowski', u'łukowski'),
('opolski', u'opolski'),
('parczewski', u'parczewski'),
('pulawski', u'puławski'),
('radzynski', u'radzyński'),
('rycki', u'rycki'),
('swidnicki', u'świdnicki'),
('tomaszowski', u'tomaszowski'),
('wlodawski', u'włodawski'),
('zamojski', u'zamojski'),
('gorzow-wielkopolski', u'Gorzów Wielkopolski'),
('zielona-gora', u'Zielona Góra'),
('gorzowski', u'gorzowski'),
('krosnienski', u'krośnieński'),
('miedzyrzecki', u'międzyrzecki'),
('nowosolski', u'nowosolski'),
('slubicki', u'słubicki'),
('strzelecko-drezdenecki', u'strzelecko-drezdenecki'),
    ('sulecinski', u'sulęciński'),
('swiebodzinski', u'świebodziński'),
('wschowski', u'wschowski'),
('zielonogorski', u'zielonogórski'),
('zaganski', u'żagański'),
('zarski', u'żarski'),
('lodz', u'Łódź'),
('piotrkow-trybunalski', u'Piotrków Trybunalski'),
('skierniewice', u'Skierniewice'),
('belchatowski', u'bełchatowski'),
('brzezinski', u'brzeziński'),
('kutnowski', u'kutnowski'),
('laski', u'łaski'),
('leczycki', u'łęczycki'),
('lowicki', u'łowicki'),
('lodzki wschodni', u'łódzki wschodni'),
('opoczynski', u'opoczyński'),
('pabianicki', u'pabianicki'),
('pajeczanski', u'pajęczański'),
('piotrkowski', u'piotrkowski'),
('poddebicki', u'poddębicki'),
('radomszczanski', u'radomszczański'),
('rawski', u'rawski'),
('sieradzki', u'sieradzki'),
('skierniewicki', u'skierniewicki'),
('tomaszowski', u'tomaszowski'),
('wielunski', u'wieluński'),
('wieruszowski', u'wieruszowski'),
('zdunskowolski', u'zduńskowolski'),
('zgierski', u'zgierski'),
('krakow', u'Kraków'),
('tarnow', u'Tarnów'),
('nowy-sacz', u'Nowy Sącz'),
('bochenski', u'bocheński'),
('brzeski', u'brzeski'),
('chrzanowski', u'chrzanowski'),
('dabrowski', u'dąbrowski'),
('gorlicki', u'gorlicki'),
('krakowski', u'krakowski'),
('limanowski', u'limanowski'),
('miechowski', u'miechowski'),
('myslenicki', u'myślenicki'),
('nowosadecki', u'nowosądecki'),
('nowotarski', u'nowotarski'),
('olkuski', u'olkuski'),
('oswiecimski', u'oświęcimski'),
('proszowicki', u'proszowicki'),
('suski', u'suski'),
('tarnowski', u'tarnowski'),
('tatrzanski', u'tatrzański'),
('wadowicki', u'wadowicki'),
('wielicki', u'wielicki'),
('warszawa', u'Warszawa'),
('ostroleka', u'Ostrołęka'),
('plock', u'Płock'),
('radom', u'Radom'),
('siedlce', u'Siedlce'),
('bialobrzeski', u'białobrzeski'),
('ciechanowski', u'ciechanowski'),
('garwolinski', u'garwoliński'),
('gostyninski', u'gostyniński'),
('grodziski', u'grodziski'),
('grojecki', u'grójecki'),
    ('kozienicki', u'kozienicki'),
('legionowski', u'legionowski'),
('lipski', u'lipski'),
('losicki', u'łosicki'),
('makowski', u'makowski'),
('minski', u'miński'),
('mlawski', u'mławski'),
('nowodworski', u'nowodworski'),
('ostrolecki', u'ostrołęcki'),
('ostrowski', u'ostrowski'),
('otwocki', u'otwocki'),
('piaseczynski', u'piaseczyński'),
('plocki', u'płocki'),
('plonski', u'płoński'),
('pruszkowski', u'pruszkowski'),
('przasnyski', u'przasnyski'),
('przysuski', u'przysuski'),
('pultuski', u'pułtuski'),
('radomski', u'radomski'),
('siedlecki', u'siedlecki'),
('sierpecki', u'sierpecki'),
('sochaczewski', u'sochaczewski'),
('sokolowski', u'sokołowski'),
('szydlowiecki', u'szydłowiecki'),
('warszawski-zachodni', u'warszawski zachodni'),
('wegrowski', u'węgrowski'),
('wolominski', u'wołomiński'),
('wyszkowski', u'wyszkowski'),
('zwolenski', u'zwoleński'),
('zurominski', u'żuromiński'),
('zyrardowski', u'żyrardowski'),
('opole', u'Opole'),
('brzeski', u'brzeski'),
    ('glubczycki', u'głubczycki'),
    ('kedzierzynsko-kozielski', u'kędzierzyńsko-kozielski'),
('kluczborski', u'kluczborski'),
('krapkowicki', u'krapkowicki'),
('namyslowski', u'namysłowski'),
('nyski', u'nyski'),
('oleski', u'oleski'),
('opolski', u'opolski'),
('prudnicki', u'prudnicki'),
('strzelecki', u'strzelecki'),
('rzeszow', u'Rzeszów'),
('krosno', u'Krosno'),
('przemysl', u'Przemyśl'),
('tarnobrzeg', u'Tarnobrzeg'),
('bieszczadzki', u'bieszczadzki'),
('brzozowski', u'brzozowski'),
('debicki', u'dębicki'),
('jaroslawski', u'jarosławski'),
('jasielski', u'jasielski'),
('kolbuszowski', u'kolbuszowski'),
('krosnienski', u'krośnieński'),
('leski', u'leski'),
('lezajski', u'leżajski'),
('lubaczowski', u'lubaczowski'),
('lancucki', u'łańcucki'),
('mielecki', u'mielecki'),
('nizanski', u'niżański'),
('przemyski', u'przemyski'),
('przeworski', u'przeworski'),
('ropczycko-sedziszowski', u'ropczycko-sędziszowski'),
('rzeszowski', u'rzeszowski'),
('sanocki', u'sanocki'),
('stalowowolski', u'stalowowolski'),
('strzyzowski', u'strzyżowski'),
('tarnobrzeski', u'tarnobrzeski'),
('bialystok', u'Białystok'),
('lomza', u'Łomża'),
('suwalki', u'Suwałki'),
('augustowski', u'augustowski'),
('bialostocki', u'białostocki'),
('bielski', u'bielski'),
('grajewski', u'grajewski'),
('hajnowski', u'hajnowski'),
('kolnenski', u'kolneński'),
    ('lomzynski', u'łomżyński'),
('moniecki', u'moniecki'),
('sejnenski', u'sejneński'),
    ('siemiatycki', u'siemiatycki'),
('sokolski', u'sokólski'),
('suwalski', u'suwalski'),
('wysokomazowiecki', u'wysokomazowiecki'),
('zambrowski', u'zambrowski'),
('gdansk', u'Gdańsk'),
('gdynia', u'Gdynia'),
('slupsk', u'Słupsk'),
('sopot', u'Sopot'),
('bytowski', u'bytowski'),
('chojnicki', u'chojnicki'),
('czluchowski', u'człuchowski'),
('kartuski', u'kartuski'),
('koscierski', u'kościerski'),
('kwidzynski', u'kwidzyński'),
('leborski', u'lęborski'),
('malborski', u'malborski'),
('nowodworski', u'nowodworski'),
('gdanski', u'gdański'),
('pucki', u'pucki'),
('slupski', u'słupski'),
('starogardzki', u'starogardzki'),
('sztumski', u'sztumski'),
('tczewski', u'tczewski'),
    ('wejherowski', u'wejherowski'),
('katowice', u'Katowice'),
('bielsko-biala', u'Bielsko-Biała'),
('bytom', u'Bytom'),
('chorzow', u'Chorzów'),
('czestochowa', u'Częstochowa'),
('dabrowa-gornicza', u'Dąbrowa Górnicza'),
('gliwice', u'Gliwice'),
('jastrzebie-zdroj', u'Jastrzębie Zdrój'),
('jaworzno', u'Jaworzno'),
('myslowice', u'Mysłowice'),
('piekary-slaskie', u'Piekary Śląskie'),
('ruda-slaska', u'Ruda Śląska'),
('rybnik', u'Rybnik'),
('siemianowice-slaskie', u'Siemianowice Śląskie'),
('sosnowiec', u'Sosnowiec'),
('swietochlowice', u'Świętochłowice'),
('tychy', u'Tychy'),
('zabrze', u'Zabrze'),
('zory', u'Żory'),
('bedzinski', u'będziński'),
('bielski', u'bielski'),
('bierunsko-ledzinski', u'bieruńsko-lędziński'),
('cieszynski', u'cieszyński'),
('czestochowski', u'częstochowski'),
('gliwicki', u'gliwicki'),
('klobucki', u'kłobucki'),
('lubliniecki', u'lubliniecki'),
('mikolowski', u'mikołowski'),
('myszkowski', u'myszkowski'),
('pszczynski', u'pszczyński'),
('raciborski', u'raciborski'),
('rybnicki', u'rybnicki'),
('tarnogorski', u'tarnogórski'),
('wodzislawski', u'wodzisławski'),
('zawiercianski', u'zawierciański'),
('zywiecki', u'żywiecki'),
('kielce', u'Kielce'),
('buski', u'buski'),
('jedrzejowski', u'jędrzejowski'),
('kazimierski', u'kazimierski'),
('kielecki', u'kielecki'),
('konecki', u'konecki'),
('opatowski', u'opatowski'),
('ostrowiecki', u'ostrowiecki'),
('pinczowski', u'pińczowski'),
('sandomierski', u'sandomierski'),
('skarzyski', u'skarżyski'),
('starachowicki', u'starachowicki'),
('staszowski', u'staszowski'),
('wloszczowski', u'włoszczowski'),
('olsztyn', u'Olsztyn'),
('elblag', u'Elbląg'),
('bartoszycki', u'bartoszycki'),
('braniewski', u'braniewski'),
('dzialdowski', u'działdowski'),
('elblaski', u'elbląski'),
('elcki', u'ełcki'),
('gizycki', u'giżycki'),
('goldapski', u'gołdapski'),
('ilawski', u'iławski'),
('ketrzynski', u'kętrzyński'),
('lidzbarski', u'lidzbarski'),
('mragowski', u'mrągowski'),
('nidzicki', u'nidzicki'),
('nowomiejski', u'nowomiejski'),
('olecki', u'olecki'),
('olsztynski', u'olsztyński'),
('ostrodzki', u'ostródzki'),
('piski', u'piski'),
('szczycienski', u'szczycieński'),
('wegorzewski', u'węgorzewski'),
('poznan', u'Poznań'),
('kalisz', u'Kalisz'),
('konin', u'Konin'),
('leszno', u'Leszno'),
    ('chodzieski', u'chodzieski'),
('czarnkowsko-trzcianecki', u'czarnkowsko-trzcianecki'),
('gnieznienski', u'gnieźnieński'),
('gostynski', u'gostyński'),
('grodziski', u'grodziski'),
('jarocinski', u'jarociński'),
('kaliski', u'kaliski'),
('kepinski', u'kępiński'),
('kolski', u'kolski'),
('koninski', u'koniński'),
('koscianski', u'kościański'),
('krotoszynski', u'krotoszyński'),
('leszczynski', u'leszczyński'),
('miedzychodzki', u'międzychodzki'),
('nowotomyski', u'nowotomyski'),
('obornicki', u'obornicki'),
('ostrowski', u'ostrowski'),
('ostrzeszowski', u'ostrzeszowski'),
('pilski', u'pilski'),
('pleszewski', u'pleszewski'),
('poznanski', u'poznański'),
('rawicki', u'rawicki'),
('slupecki', u'słupecki'),
('szamotulski', u'szamotulski'),
('sredzki', u'średzki'),
('sremski', u'śremski'),
('turecki', u'turecki'),
('wagrowiecki', u'wągrowiecki'),
('wolsztynski', u'wolsztyński'),
('wrzesinski', u'wrzesiński'),
('zlotowski', u'złotowski'),
('bialogardzki', u'białogardzki'),
('choszczenski', u'choszczeński'),
('drawski', u'drawski'),
('goleniowski', u'goleniowski'),
('gryficki', u'gryficki'),
('gryfinski', u'gryfiński'),
('kamienski', u'kamieński'),
('kolobrzeski', u'kołobrzeski'),
('koszalinski', u'koszaliński'),
('lobeski', u'łobeski'),
('mysliborski', u'myśliborski'),
('policki', u'policki'),
('pyrzycki', u'pyrzycki'),
('slawienski', u'sławieński'),
('stargardzki', u'stargardzki'),
('szczecinecki', u'szczecinecki'),
('swidwinski', u'świdwiński'),
('walecki', u'wałecki'),
)
|
apache-2.0
|
mikewiebe-ansible/ansible
|
test/lib/ansible_test/_internal/cloud/nios.py
|
17
|
5475
|
"""NIOS plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..util import (
find_executable,
display,
)
from ..docker_util import (
docker_run,
docker_rm,
docker_inspect,
docker_pull,
get_docker_container_id,
)
class NiosProvider(CloudProvider):
"""Nios plugin.
    Sets up a NIOS mock server for tests.
"""
DOCKER_SIMULATOR_NAME = 'nios-simulator'
DOCKER_IMAGE = 'quay.io/ansible/nios-test-container:1.3.0'
"""Default image to run the nios simulator.
The simulator must be pinned to a specific version
to guarantee CI passes with the version used.
    Its source resides at:
https://github.com/ansible/nios-test-container
"""
def __init__(self, args):
"""Set up container references for provider.
:type args: TestConfig
"""
super(NiosProvider, self).__init__(args)
self.__container_from_env = os.getenv('ANSIBLE_NIOSSIM_CONTAINER')
"""Overrides target container, might be used for development.
Use ANSIBLE_NIOSSIM_CONTAINER=whatever_you_want if you want
to use other image. Omit/empty otherwise.
"""
self.image = self.__container_from_env or self.DOCKER_IMAGE
self.container_name = ''
def filter(self, targets, exclude):
"""Filter out the tests with the necessary config and res unavailable.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
docker_cmd = 'docker'
docker = find_executable(docker_cmd, required=False)
if docker:
return
skip = 'cloud/%s/' % self.platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning(
'Excluding tests marked "%s" '
'which require the "%s" command: %s'
% (skip.rstrip('/'), docker_cmd, ', '.join(skipped))
)
def setup(self):
"""Setup cloud resource before delegation and reg cleanup callback."""
super(NiosProvider, self).setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def get_docker_run_options(self):
"""Get additional options needed when delegating tests to a container.
:rtype: list[str]
"""
return ['--link', self.DOCKER_SIMULATOR_NAME] if self.managed else []
def cleanup(self):
"""Clean up the resource and temporary configs files after tests."""
if self.container_name:
docker_rm(self.args, self.container_name)
super(NiosProvider, self).cleanup()
def _setup_dynamic(self):
"""Spawn a NIOS simulator within docker container."""
nios_port = 443
container_id = get_docker_container_id()
if container_id:
display.info(
'Running in docker container: %s' % container_id,
verbosity=1,
)
self.container_name = self.DOCKER_SIMULATOR_NAME
results = docker_inspect(self.args, self.container_name)
if results and not results[0].get('State', {}).get('Running'):
docker_rm(self.args, self.container_name)
results = []
display.info(
'%s NIOS simulator docker container.'
% ('Using the existing' if results else 'Starting a new'),
verbosity=1,
)
if not results:
if self.args.docker or container_id:
publish_ports = []
else:
# publish the simulator ports when not running inside docker
publish_ports = [
'-p', ':'.join((str(nios_port), ) * 2),
]
if not self.__container_from_env:
docker_pull(self.args, self.image)
docker_run(
self.args,
self.image,
['-d', '--name', self.container_name] + publish_ports,
)
if self.args.docker:
nios_host = self.DOCKER_SIMULATOR_NAME
elif container_id:
nios_host = self._get_simulator_address()
display.info(
'Found NIOS simulator container address: %s'
% nios_host, verbosity=1
)
else:
nios_host = 'localhost'
self._set_cloud_config('NIOS_HOST', nios_host)
def _get_simulator_address(self):
results = docker_inspect(self.args, self.container_name)
ip_address = results[0]['NetworkSettings']['IPAddress']
return ip_address
def _setup_static(self):
raise NotImplementedError
class NiosEnvironment(CloudEnvironment):
"""NIOS environment plugin.
Updates integration test environment after delegation.
"""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
ansible_vars = dict(
nios_provider=dict(
host=self._get_cloud_config('NIOS_HOST'),
username='admin',
password='infoblox',
),
)
return CloudEnvironmentConfig(
ansible_vars=ansible_vars,
)
|
gpl-3.0
|
browseinfo/odoo_saas3_nicolas
|
addons/gamification_sale_crm/__openerp__.py
|
67
|
1369
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'CRM Gamification',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'hidden',
    'depends': ['gamification', 'sale_crm'],
'description': """Example of goal definitions and challenges that can be used related to the usage of the CRM Sale module.""",
'data': ['sale_crm_goals.xml'],
'demo': ['sale_crm_goals_demo.xml'],
'auto_install': True,
}
|
agpl-3.0
|