repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
cyberden/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/collegehumor.py | 150 | 3638 | from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import int_or_none
class CollegeHumorIE(InfoExtractor):
_VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$'
_TESTS = [
{
'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd',
'info_dict': {
'id': '6902724',
'ext': 'mp4',
'title': 'Comic-Con Cosplay Catastrophe',
'description': "Fans get creative this year at San Diego. Too creative. And yes, that's really Joss Whedon.",
'age_limit': 13,
'duration': 187,
},
}, {
'url': 'http://www.collegehumor.com/video/3505939/font-conference',
'md5': '72fa701d8ef38664a4dbb9e2ab721816',
'info_dict': {
'id': '3505939',
'ext': 'mp4',
'title': 'Font Conference',
'description': "This video wasn't long enough, so we made it double-spaced.",
'age_limit': 10,
'duration': 179,
},
}, {
# embedded youtube video
'url': 'http://www.collegehumor.com/embed/6950306',
'info_dict': {
'id': 'Z-bao9fg6Yc',
'ext': 'mp4',
'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!',
'uploader': 'Mark Dice',
'uploader_id': 'MarkDice',
'description': 'md5:62c3dab9351fac7bb44b53b69511d87f',
'upload_date': '20140127',
},
'params': {
'skip_download': True,
},
'add_ie': ['Youtube'],
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('videoid')
jsonUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id + '.json'
data = json.loads(self._download_webpage(
jsonUrl, video_id, 'Downloading info JSON'))
vdata = data['video']
if vdata.get('youtubeId') is not None:
return {
'_type': 'url',
'url': vdata['youtubeId'],
'ie_key': 'Youtube',
}
AGE_LIMITS = {'nc17': 18, 'r': 18, 'pg13': 13, 'pg': 10, 'g': 0}
rating = vdata.get('rating')
if rating:
age_limit = AGE_LIMITS.get(rating.lower())
else:
age_limit = None # None = No idea
PREFS = {'high_quality': 2, 'low_quality': 0}
formats = []
for format_key in ('mp4', 'webm'):
for qname, qurl in vdata.get(format_key, {}).items():
formats.append({
'format_id': format_key + '_' + qname,
'url': qurl,
'format': format_key,
'preference': PREFS.get(qname),
})
self._sort_formats(formats)
duration = int_or_none(vdata.get('duration'), 1000)
like_count = int_or_none(vdata.get('likes'))
return {
'id': video_id,
'title': vdata['title'],
'description': vdata.get('description'),
'thumbnail': vdata.get('thumbnail'),
'formats': formats,
'age_limit': age_limit,
'duration': duration,
'like_count': like_count,
}
| gpl-3.0 | 3,071,952,220,327,779,300 | 35.019802 | 127 | 0.481858 | false |
wooga/airflow | tests/providers/amazon/aws/sensors/test_sagemaker_base.py | 1 | 4679 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.sensors.sagemaker_base import SageMakerBaseSensor
class TestSagemakerBaseSensor(unittest.TestCase):
def test_execute(self):
class SageMakerBaseSensorSubclass(SageMakerBaseSensor):
def non_terminal_states(self):
return ['PENDING', 'RUNNING', 'CONTINUE']
def failed_states(self):
return ['FAILED']
def get_sagemaker_response(self):
return {
'SomeKey': {'State': 'COMPLETED'},
'ResponseMetadata': {'HTTPStatusCode': 200}
}
def state_from_response(self, response):
return response['SomeKey']['State']
sensor = SageMakerBaseSensorSubclass(
task_id='test_task',
poke_interval=2,
aws_conn_id='aws_test'
)
sensor.execute(None)
def test_poke_with_unfinished_job(self):
class SageMakerBaseSensorSubclass(SageMakerBaseSensor):
def non_terminal_states(self):
return ['PENDING', 'RUNNING', 'CONTINUE']
def failed_states(self):
return ['FAILED']
def get_sagemaker_response(self):
return {
'SomeKey': {'State': 'PENDING'},
'ResponseMetadata': {'HTTPStatusCode': 200}
}
def state_from_response(self, response):
return response['SomeKey']['State']
sensor = SageMakerBaseSensorSubclass(
task_id='test_task',
poke_interval=2,
aws_conn_id='aws_test'
)
self.assertEqual(sensor.poke(None), False)
def test_poke_with_not_implemented_method(self):
class SageMakerBaseSensorSubclass(SageMakerBaseSensor):
def non_terminal_states(self):
return ['PENDING', 'RUNNING', 'CONTINUE']
def failed_states(self):
return ['FAILED']
sensor = SageMakerBaseSensorSubclass(
task_id='test_task',
poke_interval=2,
aws_conn_id='aws_test'
)
self.assertRaises(NotImplementedError, sensor.poke, None)
def test_poke_with_bad_response(self):
class SageMakerBaseSensorSubclass(SageMakerBaseSensor):
def non_terminal_states(self):
return ['PENDING', 'RUNNING', 'CONTINUE']
def failed_states(self):
return ['FAILED']
def get_sagemaker_response(self):
return {
'SomeKey': {'State': 'COMPLETED'},
'ResponseMetadata': {'HTTPStatusCode': 400}
}
def state_from_response(self, response):
return response['SomeKey']['State']
sensor = SageMakerBaseSensorSubclass(
task_id='test_task',
poke_interval=2,
aws_conn_id='aws_test'
)
self.assertEqual(sensor.poke(None), False)
def test_poke_with_job_failure(self):
class SageMakerBaseSensorSubclass(SageMakerBaseSensor):
def non_terminal_states(self):
return ['PENDING', 'RUNNING', 'CONTINUE']
def failed_states(self):
return ['FAILED']
def get_sagemaker_response(self):
return {
'SomeKey': {'State': 'FAILED'},
'ResponseMetadata': {'HTTPStatusCode': 200}
}
def state_from_response(self, response):
return response['SomeKey']['State']
sensor = SageMakerBaseSensorSubclass(
task_id='test_task',
poke_interval=2,
aws_conn_id='aws_test'
)
self.assertRaises(AirflowException, sensor.poke, None)
| apache-2.0 | -5,071,910,991,025,993,000 | 32.421429 | 83 | 0.584527 | false |
Timurdov/bionic | bionic/Lib/site-packages/django/db/backends/sqlite3/base.py | 52 | 23862 | """
SQLite3 backend for django.
Works with either the pysqlite2 module or the sqlite3 module in the
standard library.
"""
from __future__ import unicode_literals
import datetime
import decimal
import warnings
import re
from django.conf import settings
from django.db import utils
from django.db.backends import (utils as backend_utils, BaseDatabaseFeatures,
BaseDatabaseOperations, BaseDatabaseWrapper, BaseDatabaseValidation)
from django.db.backends.sqlite3.client import DatabaseClient
from django.db.backends.sqlite3.creation import DatabaseCreation
from django.db.backends.sqlite3.introspection import DatabaseIntrospection
from django.db.backends.sqlite3.schema import DatabaseSchemaEditor
from django.db.models import fields
from django.db.models.sql import aggregates
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes
from django.utils import six
from django.utils import timezone
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError:
from sqlite3 import dbapi2 as Database
except ImportError as exc:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc)
try:
import pytz
except ImportError:
pytz = None
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def parse_datetime_with_timezone_support(value):
dt = parse_datetime(value)
# Confirm that dt is naive before overwriting its tzinfo.
if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
def adapt_datetime_with_timezone_support(value):
# Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
if settings.USE_TZ:
if timezone.is_naive(value):
warnings.warn("SQLite received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return value.isoformat(str(" "))
def decoder(conv_func):
""" The Python sqlite3 interface returns always byte strings.
This function converts the received value to a regular string before
passing it to the receiver function.
"""
return lambda s: conv_func(s.decode('utf-8'))
Database.register_converter(str("bool"), decoder(lambda s: s == '1'))
Database.register_converter(str("time"), decoder(parse_time))
Database.register_converter(str("date"), decoder(parse_date))
Database.register_converter(str("datetime"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("timestamp"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("decimal"), decoder(backend_utils.typecast_decimal))
Database.register_adapter(datetime.datetime, adapt_datetime_with_timezone_support)
Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal)
if six.PY2:
Database.register_adapter(str, lambda s: s.decode('utf-8'))
Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8'))
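# Illustrative sketch (not part of the original file): how the converters and
# adapters registered above take effect. get_connection_params() below passes
# detect_types=PARSE_DECLTYPES | PARSE_COLNAMES, so a column declared with the
# type "datetime" is decoded on read through parse_datetime_with_timezone_support,
# and datetime.datetime values written to the database go through
# adapt_datetime_with_timezone_support. Roughly:
#
#   conn = Database.connect(":memory:", detect_types=Database.PARSE_DECLTYPES)
#   conn.execute("CREATE TABLE t (created datetime)")
#   conn.execute("INSERT INTO t VALUES (?)", (datetime.datetime(2014, 1, 27),))
#   row = conn.execute("SELECT created FROM t").fetchone()
#   # row[0] comes back as a datetime.datetime instance, not a byte string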
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite cannot handle us only partially reading from a cursor's result set
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
supports_1000_query_parameters = False
supports_mixed_date_datetime_comparisons = False
has_bulk_insert = True
can_combine_inserts_with_and_without_auto_increment_pk = False
supports_foreign_keys = False
supports_column_check_constraints = False
autocommits_when_autocommit_is_off = True
can_introspect_decimal_field = False
can_introspect_positive_integer_field = True
can_introspect_small_integer_field = True
supports_transactions = True
atomic_transactions = False
can_rollback_ddl = True
supports_paramstyle_pyformat = False
supports_sequence_reset = False
@cached_property
def uses_savepoints(self):
return Database.sqlite_version_info >= (3, 6, 8)
@cached_property
def supports_stddev(self):
"""Confirm support for STDDEV and related stats functions
SQLite supports STDDEV as an extension package; so
connection.ops.check_aggregate_support() can't unilaterally
rule out support for STDDEV. We need to manually check
whether the call works.
"""
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
try:
cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
has_support = True
except utils.DatabaseError:
has_support = False
cursor.execute('DROP TABLE STDDEV_TEST')
return has_support
@cached_property
def has_zoneinfo_database(self):
return pytz is not None
class DatabaseOperations(BaseDatabaseOperations):
def bulk_batch_size(self, fields, objs):
"""
SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
999 variables per query.
If there is just single field to insert, then we can hit another
limit, SQLITE_MAX_COMPOUND_SELECT which defaults to 500.
"""
limit = 999 if len(fields) > 1 else 500
return (limit // len(fields)) if len(fields) > 0 else len(objs)
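# Worked example (illustrative, not part of the original file): inserting objects
# with 3 fields each gives a batch size of 999 // 3 = 333 rows per query, while a
# single-field insert is capped by SQLITE_MAX_COMPOUND_SELECT at 500 // 1 = 500 rows.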
def check_aggregate_support(self, aggregate):
bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
bad_aggregates = (aggregates.Sum, aggregates.Avg,
aggregates.Variance, aggregates.StdDev)
if (isinstance(aggregate.source, bad_fields) and
isinstance(aggregate, bad_aggregates)):
raise NotImplementedError(
'You cannot use Sum, Avg, StdDev and Variance aggregations '
'on date/time fields in sqlite3 '
'since date/time is saved as text.')
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_date_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, sql, connector, timedelta):
# It would be more straightforward if we could use the sqlite strftime
# function, but it does not allow for keeping six digits of fractional
# second information, nor does it allow for formatting date and datetime
# values differently. So instead we register our own function that
# formats the datetime combined with the delta in a manner suitable
# for comparisons.
return 'django_format_dtdelta(%s, "%s", "%d", "%d", "%d")' % (sql,
connector, timedelta.days, timedelta.seconds, timedelta.microseconds)
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_extract_sql.
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
return "django_datetime_extract('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_trunc_sql.
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
return "django_datetime_trunc('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return "NULL"
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases
# because constraints don't exist
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
def value_to_db_datetime(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
return six.text_type(value)
def value_to_db_time(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
raise ValueError("SQLite backend does not support timezone-aware times.")
return six.text_type(value)
def convert_values(self, value, field):
"""SQLite returns floats when it should be returning decimals,
and gets dates and datetimes wrong.
For consistency with other backends, coerce when required.
"""
if value is None:
return None
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return backend_utils.typecast_decimal(field.format_number(value))
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type == 'DateField':
return parse_date(value)
elif internal_type == 'DateTimeField':
return parse_datetime_with_timezone_support(value)
elif internal_type == 'TimeField':
return parse_time(value)
# No field, or the field isn't known to be a decimal or integer
return value
def bulk_insert_sql(self, fields, num_values):
res = []
res.append("SELECT %s" % ", ".join(
"%%s AS %s" % self.quote_name(f.column) for f in fields
))
res.extend(["UNION ALL SELECT %s" % ", ".join(["%s"] * len(fields))] * (num_values - 1))
return " ".join(res)
def combine_expression(self, connector, sub_expressions):
# SQLite doesn't have a power function, so we fake it with a
# user-defined function django_power that's registered in connect().
if connector == '^':
return 'django_power(%s)' % ','.join(sub_expressions)
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
def integer_field_range(self, internal_type):
# SQLite doesn't enforce any integer constraints
return (None, None)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
pattern_ops = {
'startswith': "LIKE %s || '%%%%'",
'istartswith': "LIKE UPPER(%s) || '%%%%'",
}
Database = Database
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False})
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_date_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("regexp", 2, _sqlite_regexp)
conn.create_function("django_format_dtdelta", 5, _sqlite_format_dtdelta)
conn.create_function("django_power", 2, _sqlite_power)
return conn
def init_connection_state(self):
pass
def create_cursor(self):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if self.settings_dict['NAME'] != ":memory:":
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# Two conditions are required here:
# - A sufficiently recent version of SQLite to support savepoints,
# - Being in a transaction, which can only happen inside 'atomic'.
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside 'atomic'
# blocks. To work around that bug, on SQLite, 'atomic' starts a
# transaction explicitly rather than simply disable autocommit.
return self.features.uses_savepoints and self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ''
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
with self.wrap_database_errors:
self.connection.isolation_level = level
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign key references. This method is
intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint checks were off.
Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
detailed information about the invalid reference in the error message.
Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0], table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
"""
Start a transaction explicitly in autocommit mode.
Staying in autocommit mode works around a bug of sqlite3 that breaks
savepoints when autocommit is disabled.
"""
self.cursor().execute("BEGIN")
def schema_editor(self, *args, **kwargs):
"Returns a new instance of this backend's SchemaEditor"
return DatabaseSchemaEditor(self, *args, **kwargs)
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=None):
if params is None:
return Database.Cursor.execute(self, query)
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
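# Illustrative examples (not part of the original file) of the placeholder
# conversion performed by SQLiteCursorWrapper.convert_query() above:
#
#   convert_query("SELECT * FROM t WHERE a = %s")        -> "SELECT * FROM t WHERE a = ?"
#   convert_query("SELECT * FROM t WHERE b LIKE '10%%'") -> "SELECT * FROM t WHERE b LIKE '10%'"
#
# The negative look-behind in FORMAT_QMARK_REGEX keeps an escaped "%%s" from being
# turned into a "?" placeholder.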
def _sqlite_date_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_datetime_extract(lookup_type, dt, tzname):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'hour':
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == 'minute':
return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
elif lookup_type == 'second':
return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_format_dtdelta(dt, conn, days, secs, usecs):
try:
dt = backend_utils.typecast_timestamp(dt)
delta = datetime.timedelta(int(days), int(secs), int(usecs))
if conn.strip() == '+':
dt = dt + delta
else:
dt = dt - delta
except (ValueError, TypeError):
return None
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
return str(dt)
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, force_text(re_string))) if re_string is not None else False
def _sqlite_power(x, y):
return x ** y
| apache-2.0 | 4,334,365,291,262,007,000 | 40.355286 | 120 | 0.639972 | false |
PeterMosmans/letsencrypt | letsencrypt/tests/auth_handler_test.py | 4 | 18143 | """Tests for letsencrypt.auth_handler."""
import functools
import logging
import unittest
import mock
from acme import challenges
from acme import client as acme_client
from acme import messages
from letsencrypt import errors
from letsencrypt import le_util
from letsencrypt.tests import acme_util
TRANSLATE = {
"dvsni": "DVSNI",
"simpleHttp": "SimpleHTTP",
"dns": "DNS",
"recoveryToken": "RecoveryToken",
"recoveryContact": "RecoveryContact",
"proofOfPossession": "ProofOfPossession",
}
class ChallengeFactoryTest(unittest.TestCase):
# pylint: disable=protected-access
def setUp(self):
from letsencrypt.auth_handler import AuthHandler
# Account is mocked...
self.handler = AuthHandler(
None, None, None, mock.Mock(key="mock_key"))
self.dom = "test"
self.handler.authzr[self.dom] = acme_util.gen_authzr(
messages.STATUS_PENDING, self.dom, acme_util.CHALLENGES,
[messages.STATUS_PENDING]*6, False)
def test_all(self):
cont_c, dv_c = self.handler._challenge_factory(self.dom, range(0, 6))
self.assertEqual(
[achall.chall for achall in cont_c], acme_util.CONT_CHALLENGES)
self.assertEqual(
[achall.chall for achall in dv_c], acme_util.DV_CHALLENGES)
def test_one_dv_one_cont(self):
cont_c, dv_c = self.handler._challenge_factory(self.dom, [1, 4])
self.assertEqual(
[achall.chall for achall in cont_c], [acme_util.RECOVERY_TOKEN])
self.assertEqual([achall.chall for achall in dv_c], [acme_util.DVSNI])
def test_unrecognized(self):
self.handler.authzr["failure.com"] = acme_util.gen_authzr(
messages.STATUS_PENDING, "failure.com",
[mock.Mock(chall="chall", typ="unrecognized")],
[messages.STATUS_PENDING])
self.assertRaises(
errors.Error, self.handler._challenge_factory, "failure.com", [0])
class GetAuthorizationsTest(unittest.TestCase):
"""get_authorizations test.
This tests everything except the functions under _poll_challenges.
"""
def setUp(self):
from letsencrypt.auth_handler import AuthHandler
self.mock_dv_auth = mock.MagicMock(name="ApacheConfigurator")
self.mock_cont_auth = mock.MagicMock(name="ContinuityAuthenticator")
self.mock_dv_auth.get_chall_pref.return_value = [challenges.DVSNI]
self.mock_cont_auth.get_chall_pref.return_value = [
challenges.RecoveryToken]
self.mock_cont_auth.perform.side_effect = gen_auth_resp
self.mock_dv_auth.perform.side_effect = gen_auth_resp
self.mock_account = mock.Mock(key=le_util.Key("file_path", "PEM"))
self.mock_net = mock.MagicMock(spec=acme_client.Client)
self.handler = AuthHandler(
self.mock_dv_auth, self.mock_cont_auth,
self.mock_net, self.mock_account)
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@mock.patch("letsencrypt.auth_handler.AuthHandler._poll_challenges")
def test_name1_dvsni1(self, mock_poll):
self.mock_net.request_domain_challenges.side_effect = functools.partial(
gen_dom_authzr, challs=acme_util.DV_CHALLENGES)
mock_poll.side_effect = self._validate_all
authzr = self.handler.get_authorizations(["0"])
self.assertEqual(self.mock_net.answer_challenge.call_count, 1)
self.assertEqual(mock_poll.call_count, 1)
chall_update = mock_poll.call_args[0][0]
self.assertEqual(chall_update.keys(), ["0"])
self.assertEqual(len(chall_update.values()), 1)
self.assertEqual(self.mock_dv_auth.cleanup.call_count, 1)
self.assertEqual(self.mock_cont_auth.cleanup.call_count, 0)
# Test if list first element is DVSNI, use typ because it is an achall
self.assertEqual(
self.mock_dv_auth.cleanup.call_args[0][0][0].typ, "dvsni")
self.assertEqual(len(authzr), 1)
@mock.patch("letsencrypt.auth_handler.AuthHandler._poll_challenges")
def test_name3_dvsni3_rectok_3(self, mock_poll):
self.mock_net.request_domain_challenges.side_effect = functools.partial(
gen_dom_authzr, challs=acme_util.CHALLENGES)
mock_poll.side_effect = self._validate_all
authzr = self.handler.get_authorizations(["0", "1", "2"])
self.assertEqual(self.mock_net.answer_challenge.call_count, 6)
# Check poll call
self.assertEqual(mock_poll.call_count, 1)
chall_update = mock_poll.call_args[0][0]
self.assertEqual(len(chall_update.keys()), 3)
self.assertTrue("0" in chall_update.keys())
self.assertEqual(len(chall_update["0"]), 2)
self.assertTrue("1" in chall_update.keys())
self.assertEqual(len(chall_update["1"]), 2)
self.assertTrue("2" in chall_update.keys())
self.assertEqual(len(chall_update["2"]), 2)
self.assertEqual(self.mock_dv_auth.cleanup.call_count, 1)
self.assertEqual(self.mock_cont_auth.cleanup.call_count, 1)
self.assertEqual(len(authzr), 3)
def test_perform_failure(self):
self.mock_net.request_domain_challenges.side_effect = functools.partial(
gen_dom_authzr, challs=acme_util.CHALLENGES)
self.mock_dv_auth.perform.side_effect = errors.AuthorizationError
self.assertRaises(
errors.AuthorizationError, self.handler.get_authorizations, ["0"])
def _validate_all(self, unused_1, unused_2):
for dom in self.handler.authzr.keys():
azr = self.handler.authzr[dom]
self.handler.authzr[dom] = acme_util.gen_authzr(
messages.STATUS_VALID,
dom,
[challb.chall for challb in azr.body.challenges],
[messages.STATUS_VALID]*len(azr.body.challenges),
azr.body.combinations)
class PollChallengesTest(unittest.TestCase):
# pylint: disable=protected-access
"""Test poll challenges."""
def setUp(self):
from letsencrypt.auth_handler import challb_to_achall
from letsencrypt.auth_handler import AuthHandler
# Account and network are mocked...
self.mock_net = mock.MagicMock()
self.handler = AuthHandler(
None, None, self.mock_net, mock.Mock(key="mock_key"))
self.doms = ["0", "1", "2"]
self.handler.authzr[self.doms[0]] = acme_util.gen_authzr(
messages.STATUS_PENDING, self.doms[0],
acme_util.DV_CHALLENGES, [messages.STATUS_PENDING]*3, False)
self.handler.authzr[self.doms[1]] = acme_util.gen_authzr(
messages.STATUS_PENDING, self.doms[1],
acme_util.DV_CHALLENGES, [messages.STATUS_PENDING]*3, False)
self.handler.authzr[self.doms[2]] = acme_util.gen_authzr(
messages.STATUS_PENDING, self.doms[2],
acme_util.DV_CHALLENGES, [messages.STATUS_PENDING]*3, False)
self.chall_update = {}
for dom in self.doms:
self.chall_update[dom] = [
challb_to_achall(challb, "dummy_key", dom)
for challb in self.handler.authzr[dom].body.challenges]
@mock.patch("letsencrypt.auth_handler.time")
def test_poll_challenges(self, unused_mock_time):
self.mock_net.poll.side_effect = self._mock_poll_solve_one_valid
self.handler._poll_challenges(self.chall_update, False)
for authzr in self.handler.authzr.values():
self.assertEqual(authzr.body.status, messages.STATUS_VALID)
@mock.patch("letsencrypt.auth_handler.time")
def test_poll_challenges_failure_best_effort(self, unused_mock_time):
self.mock_net.poll.side_effect = self._mock_poll_solve_one_invalid
self.handler._poll_challenges(self.chall_update, True)
for authzr in self.handler.authzr.values():
self.assertEqual(authzr.body.status, messages.STATUS_PENDING)
@mock.patch("letsencrypt.auth_handler.time")
@mock.patch("letsencrypt.auth_handler.zope.component.getUtility")
def test_poll_challenges_failure(self, unused_mock_time, unused_mock_zope):
self.mock_net.poll.side_effect = self._mock_poll_solve_one_invalid
self.assertRaises(
errors.AuthorizationError, self.handler._poll_challenges,
self.chall_update, False)
@mock.patch("letsencrypt.auth_handler.time")
def test_unable_to_find_challenge_status(self, unused_mock_time):
from letsencrypt.auth_handler import challb_to_achall
self.mock_net.poll.side_effect = self._mock_poll_solve_one_valid
self.chall_update[self.doms[0]].append(
challb_to_achall(acme_util.RECOVERY_CONTACT_P, "key", self.doms[0]))
self.assertRaises(
errors.AuthorizationError, self.handler._poll_challenges,
self.chall_update, False)
def test_verify_authzr_failure(self):
self.assertRaises(
errors.AuthorizationError, self.handler.verify_authzr_complete)
def _mock_poll_solve_one_valid(self, authzr):
# Pending here because this dummy method won't change the full status:
# basically it doesn't raise an error and it stops before making all
# challenges invalid, which is what would make mock_poll_solve_one
# change the authzr to invalid.
return self._mock_poll_solve_one_chall(authzr, messages.STATUS_VALID)
def _mock_poll_solve_one_invalid(self, authzr):
return self._mock_poll_solve_one_chall(authzr, messages.STATUS_INVALID)
def _mock_poll_solve_one_chall(self, authzr, desired_status):
# pylint: disable=no-self-use
"""Dummy method that solves one chall at a time to desired_status.
When all are solved, it changes authzr.status to desired_status.
"""
new_challbs = authzr.body.challenges
for challb in authzr.body.challenges:
if challb.status != desired_status:
new_challbs = tuple(
challb_temp if challb_temp != challb
else acme_util.chall_to_challb(challb.chall, desired_status)
for challb_temp in authzr.body.challenges
)
break
if all(test_challb.status == desired_status
for test_challb in new_challbs):
status_ = desired_status
else:
status_ = authzr.body.status
new_authzr = messages.AuthorizationResource(
uri=authzr.uri,
new_cert_uri=authzr.new_cert_uri,
body=messages.Authorization(
identifier=authzr.body.identifier,
challenges=new_challbs,
combinations=authzr.body.combinations,
status=status_,
),
)
return (new_authzr, "response")
class GenChallengePathTest(unittest.TestCase):
"""Tests for letsencrypt.auth_handler.gen_challenge_path.
.. todo:: Add more tests for dumb_path... depending on what we want to do.
"""
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@classmethod
def _call(cls, challbs, preferences, combinations):
from letsencrypt.auth_handler import gen_challenge_path
return gen_challenge_path(challbs, preferences, combinations)
def test_common_case(self):
"""Given DVSNI and SimpleHTTP with appropriate combos."""
challbs = (acme_util.DVSNI_P, acme_util.SIMPLE_HTTP_P)
prefs = [challenges.DVSNI]
combos = ((0,), (1,))
# Smart then trivial dumb path test
self.assertEqual(self._call(challbs, prefs, combos), (0,))
self.assertTrue(self._call(challbs, prefs, None))
# Rearrange order...
self.assertEqual(self._call(challbs[::-1], prefs, combos), (1,))
self.assertTrue(self._call(challbs[::-1], prefs, None))
def test_common_case_with_continuity(self):
challbs = (acme_util.RECOVERY_TOKEN_P,
acme_util.RECOVERY_CONTACT_P,
acme_util.DVSNI_P,
acme_util.SIMPLE_HTTP_P)
prefs = [challenges.RecoveryToken, challenges.DVSNI]
combos = acme_util.gen_combos(challbs)
self.assertEqual(self._call(challbs, prefs, combos), (0, 2))
# dumb_path() trivial test
self.assertTrue(self._call(challbs, prefs, None))
def test_full_cont_server(self):
challbs = (acme_util.RECOVERY_TOKEN_P,
acme_util.RECOVERY_CONTACT_P,
acme_util.POP_P,
acme_util.DVSNI_P,
acme_util.SIMPLE_HTTP_P,
acme_util.DNS_P)
# Typical webserver client that can do everything except DNS
# Attempted to make the order realistic
prefs = [challenges.RecoveryToken,
challenges.ProofOfPossession,
challenges.SimpleHTTP,
challenges.DVSNI,
challenges.RecoveryContact]
combos = acme_util.gen_combos(challbs)
self.assertEqual(self._call(challbs, prefs, combos), (0, 4))
# Dumb path trivial test
self.assertTrue(self._call(challbs, prefs, None))
def test_not_supported(self):
challbs = (acme_util.POP_P, acme_util.DVSNI_P)
prefs = [challenges.DVSNI]
combos = ((0, 1),)
self.assertRaises(
errors.AuthorizationError, self._call, challbs, prefs, combos)
class MutuallyExclusiveTest(unittest.TestCase):
"""Tests for letsencrypt.auth_handler.mutually_exclusive."""
# pylint: disable=invalid-name,missing-docstring,too-few-public-methods
class A(object):
pass
class B(object):
pass
class C(object):
pass
class D(C):
pass
@classmethod
def _call(cls, chall1, chall2, different=False):
from letsencrypt.auth_handler import mutually_exclusive
return mutually_exclusive(chall1, chall2, groups=frozenset([
frozenset([cls.A, cls.B]), frozenset([cls.A, cls.C]),
]), different=different)
def test_group_members(self):
self.assertFalse(self._call(self.A(), self.B()))
self.assertFalse(self._call(self.A(), self.C()))
def test_cross_group(self):
self.assertTrue(self._call(self.B(), self.C()))
def test_same_type(self):
self.assertFalse(self._call(self.A(), self.A(), different=False))
self.assertTrue(self._call(self.A(), self.A(), different=True))
# in particular...
obj = self.A()
self.assertFalse(self._call(obj, obj, different=False))
self.assertTrue(self._call(obj, obj, different=True))
def test_subclass(self):
self.assertFalse(self._call(self.A(), self.D()))
self.assertFalse(self._call(self.D(), self.A()))
class IsPreferredTest(unittest.TestCase):
"""Tests for letsencrypt.auth_handler.is_preferred."""
@classmethod
def _call(cls, chall, satisfied):
from letsencrypt.auth_handler import is_preferred
return is_preferred(chall, satisfied, exclusive_groups=frozenset([
frozenset([challenges.DVSNI, challenges.SimpleHTTP]),
frozenset([challenges.DNS, challenges.SimpleHTTP]),
]))
def test_empty_satisfied(self):
self.assertTrue(self._call(acme_util.DNS_P, frozenset()))
def test_mutually_exclusive(self):
self.assertFalse(
self._call(
acme_util.DVSNI_P, frozenset([acme_util.SIMPLE_HTTP_P])))
def test_mutually_exclusive_same_type(self):
self.assertTrue(
self._call(acme_util.DVSNI_P, frozenset([acme_util.DVSNI_P])))
class ReportFailedChallsTest(unittest.TestCase):
"""Tests for letsencrypt.auth_handler._report_failed_challs."""
# pylint: disable=protected-access
def setUp(self):
from letsencrypt import achallenges
kwargs = {
"chall" : acme_util.SIMPLE_HTTP,
"uri": "uri",
"status": messages.STATUS_INVALID,
"error": messages.Error(typ="tls", detail="detail"),
}
self.simple_http = achallenges.SimpleHTTP(
challb=messages.ChallengeBody(**kwargs),# pylint: disable=star-args
domain="example.com",
key=acme_util.KEY)
kwargs["chall"] = acme_util.DVSNI
self.dvsni_same = achallenges.DVSNI(
challb=messages.ChallengeBody(**kwargs),# pylint: disable=star-args
domain="example.com",
key=acme_util.KEY)
kwargs["error"] = messages.Error(typ="dnssec", detail="detail")
self.dvsni_diff = achallenges.DVSNI(
challb=messages.ChallengeBody(**kwargs),# pylint: disable=star-args
domain="foo.bar",
key=acme_util.KEY)
@mock.patch("letsencrypt.auth_handler.zope.component.getUtility")
def test_same_error_and_domain(self, mock_zope):
from letsencrypt import auth_handler
auth_handler._report_failed_challs([self.simple_http, self.dvsni_same])
call_list = mock_zope().add_message.call_args_list
self.assertTrue(len(call_list) == 1)
self.assertTrue("Domains: example.com\n" in call_list[0][0][0])
@mock.patch("letsencrypt.auth_handler.zope.component.getUtility")
def test_different_errors_and_domains(self, mock_zope):
from letsencrypt import auth_handler
auth_handler._report_failed_challs([self.simple_http, self.dvsni_diff])
self.assertTrue(mock_zope().add_message.call_count == 2)
def gen_auth_resp(chall_list):
"""Generate a dummy authorization response."""
return ["%s%s" % (chall.__class__.__name__, chall.domain)
for chall in chall_list]
def gen_dom_authzr(domain, unused_new_authzr_uri, challs):
"""Generates new authzr for domains."""
return acme_util.gen_authzr(
messages.STATUS_PENDING, domain, challs,
[messages.STATUS_PENDING]*len(challs))
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 | 3,392,201,171,399,727,600 | 36.331276 | 80 | 0.634735 | false |
aospx-kitkat/platform_external_chromium_org | ui/resources/resource_check/resource_scale_factors.py | 41 | 4151 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl/git cl, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
assert data[:8] == '\x89PNG\r\n\x1A\n' and data[12:16] == 'IHDR'
return struct.unpack('>ii', data[16:24])
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
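# Worked example (illustrative, not part of the original file; the 140% scale
# factor is hypothetical): a 9px-wide base image gives ValidSizes(9, 140) == [12, 13],
# i.e. either the floor or the ceiling of the exact scaled width (12.6px) is
# accepted, while a 25px base at 200% must be exactly ValidSizes(25, 200) == [50].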
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
base_dimensions = ImageSize(base_image)
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
scaled_dimensions = ImageSize(image_path)
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
| bsd-3-clause | 7,125,098,176,620,080,000 | 38.913462 | 80 | 0.648278 | false |
Panos512/invenio | modules/websubmit/lib/websubmitadmin_engine.py | 25 | 240715 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import re
from os.path import split, basename, isfile
from os import access, F_OK, R_OK, getpid, rename, unlink
from time import strftime, localtime
from invenio.websubmitadmin_dblayer import *
from invenio.websubmitadmin_config import *
from invenio.websubmit_config import CFG_RESERVED_SUBMISSION_FILENAMES
from invenio.access_control_admin import acc_get_all_roles, acc_get_role_users, acc_delete_user_role
from invenio.config import CFG_SITE_LANG, CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR
from invenio.access_control_engine import acc_authorize_action
from invenio.errorlib import register_exception
from invenio.websubmitadmin_config import InvenioWebSubmitWarning
from invenio.messages import gettext_set_language
import invenio.template
try:
websubmitadmin_templates = invenio.template.load('websubmitadmin')
except:
pass
## utility functions:
def is_adminuser(req, role):
"""check if user is a registered administrator. """
return acc_authorize_action(req, role)
def check_user(req, role, adminarea=2, authorized=0):
(auth_code, auth_message) = is_adminuser(req, role)
if not authorized and auth_code != 0:
return ("false", auth_message)
return ("", auth_message)
def get_navtrail(ln=CFG_SITE_LANG):
"""gets the navtrail for title...
@param title: title of the page
@param ln: language
@return: HTML output
"""
navtrail = websubmitadmin_templates.tmpl_navtrail(ln)
return navtrail
def stringify_listvars(mylist):
"""Accept a list (or a list of lists) (or tuples).
Convert each item in the list, into a string (replace None with the empty
string "").
@param mylist: A list/tuple of values, or a list/tuple of value list/tuples.
@return: a tuple of string values or a tuple of string value tuples
"""
string_list = []
try:
if type(mylist[0]) in (tuple,list):
for row in mylist:
string_list.append(map(lambda x: x is not None and str(x) or "", row))
else:
string_list = map(lambda x: x is not None and str(x) or "", mylist)
except IndexError:
pass
return string_list
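## Illustrative examples (not part of the original module):
##
##   stringify_listvars([1, None, "x"])         -> ['1', '', 'x']
##   stringify_listvars([(1, None), (2, "a")])  -> [['1', ''], ['2', 'a']]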
def save_update_to_file(filepath, filecontent, notruncate=0, appendmode=0):
"""Save a string value to a file.
Save will create a new file if the file does not exist. Mode can be set to truncate an older file
or to refuse to create the file if it already exists. There is also a mode to "append" the string value
to a file.
@param filepath: (string) the full path to the file
@param filecontent: (string) the content to be written to the file
@param notruncate: (integer) should be 1 or 0, defaults to 0 (ZERO). If 0, existing file will be truncated;
if 1, file will not be written if it already exists
@param appendmode: (integer) should be 1 or 0, defaults to 0 (ZERO). If 1, data will be appended to the file
if it exists; if 0, file will be truncated (or not, depending on the notruncate mode) by new data.
@return: None
@exceptions raised:
- InvenioWebSubmitAdminWarningIOError: when operations involving writing to file failed.
"""
## sanity checking:
if notruncate not in (0, 1):
notruncate = 0
if appendmode not in (0, 1):
appendmode = 0
(fpath, fname) = split(filepath)
if fname == "":
## error opening file
msg = """Unable to open filepath [%s] - couldn't determine a valid filename""" % (filepath,)
raise InvenioWebSubmitAdminWarningIOError(msg)
## if fpath is not empty, append the trailing "/":
if fpath != "":
fpath += "/"
if appendmode == 0:
if notruncate != 0 and access("%s%s" % (fpath, fname), F_OK):
## in no-truncate mode, but file already exists!
msg = """Unable to write to file [%s] in "no-truncate mode" because file already exists"""\
% (fname,)
raise InvenioWebSubmitAdminWarningIOError(msg)
## file already exists, make temporary file first, then move it later
tmpfname = "%s_%s_%s" % (fname, strftime("%Y%m%d%H%M%S", localtime()), getpid())
## open temp file for writing:
try:
fp = open("%s%s" % (fpath, tmpfname), "w")
except IOError, e:
## cannot open file
msg = """Unable to write to file [%s%s] - cannot open file for writing""" % (fpath, fname)
raise InvenioWebSubmitAdminWarningIOError(msg)
## write contents to temp file:
try:
fp.write(filecontent)
fp.flush()
fp.close()
except IOError, e:
## could not write to temp file
msg = """Unable to write to file [%s]""" % (tmpfname,)
## remove the "temp file"
try:
fp.close()
unlink("%s%s" % (fpath, tmpfname))
except IOError:
pass
raise InvenioWebSubmitAdminWarningIOError(msg)
## rename temp file to final filename:
try:
rename("%s%s" % (fpath, tmpfname), "%s%s" % (fpath, fname))
except OSError:
## couldn't rename the tmp file to the final file name
msg = """Unable to write to file [%s] - created temporary file [%s], but could not then rename it to [%s]"""\
% (fname, tmpfname, fname)
raise InvenioWebSubmitAdminWarningIOError(msg)
else:
## append mode:
try:
fp = open("%s%s" % (fpath, fname), "a")
except IOError, e:
## cannot open file
msg = """Unable to write to file [%s] - cannot open file for writing in append mode""" % (fname,)
raise InvenioWebSubmitAdminWarningIOError(msg)
## write contents to temp file:
try:
fp.write(filecontent)
fp.flush()
fp.close()
except IOError, e:
## could not write to the file in append mode
msg = """Unable to write to file [%s] in append mode""" % (fname,)
## close the file
try:
fp.close()
except IOError:
pass
raise InvenioWebSubmitAdminWarningIOError(msg)
return
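## Illustrative usage (not part of the original module; the file path is hypothetical):
##
##   save_update_to_file("/tmp/websubmit_example.txt", "new content\n")               ## create or truncate
##   save_update_to_file("/tmp/websubmit_example.txt", "extra line\n", appendmode=1)  ## append to it
##   save_update_to_file("/tmp/websubmit_example.txt", "content\n", notruncate=1)     ## raises
##   ## InvenioWebSubmitAdminWarningIOError because the file already exists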
def string_is_alphanumeric_including_underscore(txtstring):
p_txtstring = re.compile(r'^\w*$')
m_txtstring = p_txtstring.search(txtstring)
if m_txtstring is not None:
return 1
else:
return 0
def function_name_is_valid(fname):
p_fname = re.compile(r'^(_|[a-zA-Z])\w*$')
m_fname = p_fname.search(fname)
if m_fname is not None:
return 1
else:
return 0
def wash_single_urlarg(urlarg, argreqdtype, argdefault, maxstrlen=None, minstrlen=None, truncatestr=0):
"""Wash a single argument according to some specifications.
@param urlarg: the argument to be tested, as passed from the form/url, etc
@param argreqdtype: (a python type) the type that the argument should conform to (argument required
type)
@param argdefault: the default value that should be returned for the argument in the case that it
doesn't comply with the washing specifications
@param maxstrlen: (integer) the maximum length for a string argument; defaults to None, which means
that no maximum length is forced upon the string
@param minstrlen: (integer) the minimum length for a string argument; defaults to None, which means
that no minimum length is forced upon the string
@param truncatestr: (integer) should be 1 or 0 (ZERO). A flag used to determine whether or not a string
argument that overstretches the maximum length (if one is provided) should be truncated, or reset
to the default for the argument. 0 means don't truncate but instead reset the argument to its default;
1 means truncate the string.
@return: the washed argument
@exceptions raised:
- ValueError: when it is not possible to cast an argument to the type passed as argreqdtype
"""
## sanity checking:
if maxstrlen is not None and type(maxstrlen) is not int:
maxstrlen = None
elif maxstrlen is not None and maxstrlen < 1:
maxstrlen = None
if minstrlen is not None and type(minstrlen) is not int:
minstrlen = None
elif minstrlen is not None and minstrlen < 1:
minstrlen = None
result = ""
arg_dst_type = argreqdtype
## if no urlarg, return the default for that argument:
if urlarg is None:
result = argdefault
return result
## get the type of the argument passed:
arg_src_type = type(urlarg)
value = urlarg
# First, handle the case where we want all the results. In
# this case, we need to ensure all the elements are strings,
# and not Field instances.
if arg_src_type in (list, tuple):
if arg_dst_type is list:
result = [str(x) for x in value]
return result
if arg_dst_type is tuple:
result = tuple([str(x) for x in value])
return result
# in all the other cases, we are only interested in the
# first value.
value = value[0]
# Maybe we already have what is expected? Then don't change
# anything.
if arg_src_type is arg_dst_type:
result = value
if arg_dst_type is str and maxstrlen is not None and len(result) > maxstrlen:
if truncatestr != 0:
result = result[0:maxstrlen]
else:
result = argdefault
elif arg_dst_type is str and minstrlen is not None and len(result) < minstrlen:
result = argdefault
return result
if arg_dst_type in (str, int):
try:
result = arg_dst_type(value)
if arg_dst_type is str and maxstrlen is not None and len(result) > maxstrlen:
if truncatestr != 0:
result = result[0:maxstrlen]
else:
result = argdefault
elif arg_dst_type is str and minstrlen is not None and len(result) < minstrlen:
result = argdefault
except:
result = argdefault
elif arg_dst_type is tuple:
result = (value,)
elif arg_dst_type is list:
result = [value]
elif arg_dst_type is dict:
result = {0: str(value)}
else:
raise ValueError('cannot cast form argument into type %r' % (arg_dst_type,))
return result
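## Illustrative examples (not part of the original module), assuming the arguments
## arrive as strings from the request:
##
##   wash_single_urlarg("7", int, 0)                                   -> 7
##   wash_single_urlarg(None, int, 0)                                  -> 0 (default used)
##   wash_single_urlarg("abcdef", str, "", maxstrlen=3)                -> "" (too long, reset to default)
##   wash_single_urlarg("abcdef", str, "", maxstrlen=3, truncatestr=1) -> "abc" (truncated)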
## Internal Business-Logic functions
## Functions for managing collection order, etc:
def build_submission_collection_tree(collection_id, has_brother_above=0, has_brother_below=0):
## get the name of this collection:
collection_name = get_collection_name(collection_id)
if collection_name is None:
collection_name = "Unknown Collection"
## make a data-structure containing the details of the collection:
collection_node = { 'collection_id' : collection_id, ## collection ID
'collection_name' : collection_name, ## collection Name
'collection_children' : [], ## list of 'collection' children nodes
'doctype_children' : [], ## list of 'doctype' children
'has_brother_above' : has_brother_above, ## has a sibling collection above in score
'has_brother_below' : has_brother_below, ## has a sibling collection below in score
}
## get the IDs and names of all doctypes attached to this collection:
res_doctype_children = get_doctype_children_of_collection(collection_id)
## for each child, add its details to the list of doctype children for this node:
for doctype in res_doctype_children:
doctype_node = { 'doctype_id' : doctype[0],
'doctype_lname' : doctype[1],
'catalogue_order' : doctype[2],
}
collection_node['doctype_children'].append(doctype_node)
## now get details of all collections attached to this one:
res_collection_children = get_collection_children_of_collection(collection_id)
num_collection_children = len(res_collection_children)
for child_num in xrange(0, num_collection_children):
brother_below = brother_above = 0
if child_num > 0:
## this is not the first brother - it has a brother above
brother_above = 1
if child_num < num_collection_children - 1:
## this is not the last brother - it has a brother below
brother_below = 1
collection_node['collection_children'].append(\
build_submission_collection_tree(collection_id=res_collection_children[child_num][0],
has_brother_above=brother_above,
has_brother_below=brother_below))
## return the built collection tree:
return collection_node
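## Illustrative sketch only (added here for clarity): the shape of the node returned
## by build_submission_collection_tree() for a hypothetical tree in which the given
## collection holds one child collection, which in turn holds one document type.
## All IDs and names below are invented for the example.
##   {'collection_id': 1, 'collection_name': 'Articles & Preprints',
##    'has_brother_above': 0, 'has_brother_below': 0,
##    'doctype_children': [],
##    'collection_children': [
##        {'collection_id': 5, 'collection_name': 'Theses',
##         'has_brother_above': 0, 'has_brother_below': 0,
##         'doctype_children': [{'doctype_id': 'DEMOTHE',
##                               'doctype_lname': 'Demo Thesis Submission',
##                               'catalogue_order': 1}],
##         'collection_children': []}]}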
def _organise_submission_page_display_submission_tree(user_msg=""):
title = "Organise WebSubmit Main Page"
body = ""
if user_msg == "" or type(user_msg) not in (list, tuple, str, unicode):
user_msg = []
## Get the submissions tree:
submission_collection_tree = build_submission_collection_tree(0)
## Get all 'submission collections':
submission_collections = get_details_of_all_submission_collections()
sub_col = [('0', 'Top Level')]
for collection in submission_collections:
sub_col.append((str(collection[0]), str(collection[1])))
## Get all document types:
doctypes = get_docid_docname_and_docid_alldoctypes()
## build the page:
body = websubmitadmin_templates.tmpl_display_submission_page_organisation(submission_collection_tree=submission_collection_tree,
submission_collections=sub_col,
doctypes=doctypes,
user_msg=user_msg)
return (title, body)
def _delete_submission_collection(sbmcolid):
"""Recursively calls itself to delete a submission-collection and all of its
attached children (and their children, etc) from the submission-tree.
@param sbmcolid: (integer) - the ID of the submission-collection to be deleted.
@return: None
@Exceptions raised: InvenioWebSubmitAdminWarningDeleteFailed when it was not
possible to delete the submission-collection or some of its children.
"""
## Get the collection-children of this submission-collection:
collection_children = get_collection_children_of_collection(sbmcolid)
## recursively move through each collection-child:
for collection_child in collection_children:
_delete_submission_collection(collection_child[0])
## delete all document-types attached to this submission-collection:
error_code = delete_doctype_children_from_submission_collection(sbmcolid)
if error_code != 0:
## Unable to delete all doctype-children:
err_msg = "Unable to delete doctype children of submission-collection [%s]" % sbmcolid
raise InvenioWebSubmitAdminWarningDeleteFailed(err_msg)
## delete this submission-collection's entry from the sbmCOLLECTION_sbmCOLLECTION table:
error_code = delete_submission_collection_from_submission_tree(sbmcolid)
if error_code != 0:
## Unable to delete submission-collection from the submission-tree:
err_msg = "Unable to delete submission-collection [%s] from submission-tree" % sbmcolid
raise InvenioWebSubmitAdminWarningDeleteFailed(err_msg)
## Now delete this submission-collection's details:
error_code = delete_submission_collection_details(sbmcolid)
if error_code != 0:
## Unable to delete the details of the submission-collection:
err_msg = "Unable to delete details of submission-collection [%s]" % sbmcolid
raise InvenioWebSubmitAdminWarningDeleteFailed(err_msg)
## return
return
def perform_request_organise_submission_page(doctype="",
sbmcolid="",
catscore="",
addsbmcollection="",
deletesbmcollection="",
addtosbmcollection="",
adddoctypes="",
movesbmcollectionup="",
movesbmcollectiondown="",
deletedoctypefromsbmcollection="",
movedoctypeupinsbmcollection="",
movedoctypedowninsbmcollection=""):
user_msg = []
body = ""
if "" not in (deletedoctypefromsbmcollection, sbmcolid, catscore, doctype):
        ## delete a document type from its position in the tree
error_code = delete_doctype_from_position_on_submission_page(doctype, sbmcolid, catscore)
if error_code == 0:
## doctype deleted - now normalize scores of remaining doctypes:
normalize_scores_of_doctype_children_for_submission_collection(sbmcolid)
user_msg.append("Document type successfully deleted from submissions tree")
else:
user_msg.append("Unable to delete document type from submission-collection")
## display submission-collections:
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
elif "" not in (deletesbmcollection, sbmcolid):
## try to delete the submission-collection from the tree:
try:
_delete_submission_collection(sbmcolid)
user_msg.append("Submission-collection successfully deleted from submissions tree")
except InvenioWebSubmitAdminWarningDeleteFailed, excptn:
user_msg.append(str(excptn))
## re-display submission-collections:
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
elif "" not in (movedoctypedowninsbmcollection, sbmcolid, doctype, catscore):
## move a doctype down in order for a submission-collection:
## normalize scores of all doctype-children of the submission-collection:
normalize_scores_of_doctype_children_for_submission_collection(sbmcolid)
## swap this doctype with that below it:
## Get score of doctype to move:
score_doctype_to_move = get_catalogue_score_of_doctype_child_of_submission_collection(sbmcolid, doctype)
## Get score of the doctype brother directly below the doctype to be moved:
score_brother_below = get_score_of_next_doctype_child_below(sbmcolid, score_doctype_to_move)
if None in (score_doctype_to_move, score_brother_below):
user_msg.append("Unable to move document type down")
else:
## update the brother below the doctype to be moved to have a score the same as the doctype to be moved:
update_score_of_doctype_child_of_submission_collection_at_scorex(sbmcolid, score_brother_below, score_doctype_to_move)
## Update the doctype to be moved to have a score of the brother directly below it:
update_score_of_doctype_child_of_submission_collection_with_doctypeid_and_scorex(sbmcolid,
doctype,
score_doctype_to_move,
score_brother_below)
user_msg.append("Document type moved down")
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
elif "" not in (movedoctypeupinsbmcollection, sbmcolid, doctype, catscore):
## move a doctype up in order for a submission-collection:
## normalize scores of all doctype-children of the submission-collection:
normalize_scores_of_doctype_children_for_submission_collection(sbmcolid)
## swap this doctype with that above it:
## Get score of doctype to move:
score_doctype_to_move = get_catalogue_score_of_doctype_child_of_submission_collection(sbmcolid, doctype)
## Get score of the doctype brother directly above the doctype to be moved:
score_brother_above = get_score_of_previous_doctype_child_above(sbmcolid, score_doctype_to_move)
if None in (score_doctype_to_move, score_brother_above):
user_msg.append("Unable to move document type up")
else:
## update the brother above the doctype to be moved to have a score the same as the doctype to be moved:
update_score_of_doctype_child_of_submission_collection_at_scorex(sbmcolid, score_brother_above, score_doctype_to_move)
## Update the doctype to be moved to have a score of the brother directly above it:
update_score_of_doctype_child_of_submission_collection_with_doctypeid_and_scorex(sbmcolid,
doctype,
score_doctype_to_move,
score_brother_above)
user_msg.append("Document type moved up")
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
elif "" not in (movesbmcollectiondown, sbmcolid):
## move a submission-collection down in order:
## Sanity checking:
try:
int(sbmcolid)
except ValueError:
sbmcolid = 0
if int(sbmcolid) != 0:
## Get father ID of submission-collection:
sbmcolidfather = get_id_father_of_collection(sbmcolid)
if sbmcolidfather is None:
user_msg.append("Unable to move submission-collection downwards")
else:
## normalize scores of all collection-children of the father submission-collection:
normalize_scores_of_collection_children_of_collection(sbmcolidfather)
## swap this collection with the one above it:
## get the score of the collection to move:
score_col_to_move = get_score_of_collection_child_of_submission_collection(sbmcolidfather, sbmcolid)
## get the score of the collection brother directly below the collection to be moved:
score_brother_below = get_score_of_next_collection_child_below(sbmcolidfather, score_col_to_move)
if None in (score_col_to_move, score_brother_below):
## Invalid movement
user_msg.append("Unable to move submission collection downwards")
else:
## update the brother below the collection to be moved to have a score the same as the collection to be moved:
update_score_of_collection_child_of_submission_collection_at_scorex(sbmcolidfather,
score_brother_below,
score_col_to_move)
## Update the collection to be moved to have a score of the brother directly below it:
update_score_of_collection_child_of_submission_collection_with_colid_and_scorex(sbmcolidfather,
sbmcolid,
score_col_to_move,
score_brother_below)
user_msg.append("Submission-collection moved downwards")
else:
## cannot move the master (0) collection
user_msg.append("Unable to move submission-collection downwards")
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
elif "" not in (movesbmcollectionup, sbmcolid):
## move a submission-collection up in order:
## Sanity checking:
try:
int(sbmcolid)
except ValueError:
sbmcolid = 0
if int(sbmcolid) != 0:
## Get father ID of submission-collection:
sbmcolidfather = get_id_father_of_collection(sbmcolid)
if sbmcolidfather is None:
user_msg.append("Unable to move submission-collection upwards")
else:
## normalize scores of all collection-children of the father submission-collection:
normalize_scores_of_collection_children_of_collection(sbmcolidfather)
## swap this collection with the one above it:
## get the score of the collection to move:
score_col_to_move = get_score_of_collection_child_of_submission_collection(sbmcolidfather, sbmcolid)
## get the score of the collection brother directly above the collection to be moved:
score_brother_above = get_score_of_previous_collection_child_above(sbmcolidfather, score_col_to_move)
if None in (score_col_to_move, score_brother_above):
## Invalid movement
user_msg.append("Unable to move submission collection upwards")
else:
## update the brother above the collection to be moved to have a score the same as the collection to be moved:
update_score_of_collection_child_of_submission_collection_at_scorex(sbmcolidfather,
score_brother_above,
score_col_to_move)
## Update the collection to be moved to have a score of the brother directly above it:
update_score_of_collection_child_of_submission_collection_with_colid_and_scorex(sbmcolidfather,
sbmcolid,
score_col_to_move,
score_brother_above)
user_msg.append("Submission-collection moved upwards")
else:
## cannot move the master (0) collection
user_msg.append("Unable to move submission-collection upwards")
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
elif "" not in (addsbmcollection, addtosbmcollection):
## Add a submission-collection, attached to a submission-collection:
## check that the collection to attach to exists:
parent_ok = 0
if int(addtosbmcollection) != 0:
parent_name = get_collection_name(addtosbmcollection)
if parent_name is not None:
parent_ok = 1
else:
parent_ok = 1
if parent_ok != 0:
## create the new collection:
id_son = insert_submission_collection(addsbmcollection)
## get the maximum catalogue score of the existing collection children:
max_child_score = \
get_maximum_catalogue_score_of_collection_children_of_submission_collection(addtosbmcollection)
## add it to the collection, at a higher score than the others have:
new_score = max_child_score + 1
insert_collection_child_for_submission_collection(addtosbmcollection, id_son, new_score)
user_msg.append("Submission-collection added to submissions tree")
else:
## Parent submission-collection does not exist:
user_msg.append("Unable to add submission-collection - parent unknown")
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
elif "" not in (adddoctypes, addtosbmcollection):
## Add document type(s) to a submission-collection:
if type(adddoctypes) == str:
adddoctypes = [adddoctypes,]
## Does submission-collection exist?
num_collections_sbmcolid = get_number_of_rows_for_submission_collection(addtosbmcollection)
if num_collections_sbmcolid > 0:
for doctypeid in adddoctypes:
## Check that Doctype exists:
num_doctypes_doctypeid = get_number_doctypes_docid(doctypeid)
if num_doctypes_doctypeid < 1:
## Cannot connect an unknown doctype:
user_msg.append("Unable to connect unknown document-type [%s] to a submission-collection" \
% doctypeid)
continue
else:
## insert the submission-collection/doctype link:
## get the maximum catalogue score of the existing doctype children:
max_child_score = \
get_maximum_catalogue_score_of_doctype_children_of_submission_collection(addtosbmcollection)
## add it to the new doctype, at a higher score than the others have:
new_score = max_child_score + 1
insert_doctype_child_for_submission_collection(addtosbmcollection, doctypeid, new_score)
user_msg.append("Document-type added to submissions tree")
else:
## submission-collection didn't exist
user_msg.append("The selected submission-collection doesn't seem to exist")
## Check that submission-collection exists:
## insert
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
else:
## default action - display submission-collections:
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
return (title, body)
## Functions for adding a new catalogue to the DB:
def _add_new_action(actid,actname,working_dir,status_text):
"""Insert the details of a new action into the websubmit system database.
@param actid: unique action id (sactname)
@param actname: action name (lactname)
@param working_dir: directory action works from (dir)
@param status_text: text string indicating action status (statustext)
"""
(actid,actname,working_dir,status_text) = (str(actid).upper(),str(actname),str(working_dir),str(status_text))
err_code = insert_action_details(actid,actname,working_dir,status_text)
return err_code
def perform_request_add_function(funcname=None, funcdescr=None, funcaddcommit=""):
user_msg = []
body = ""
title = "Create New WebSubmit Function"
commit_error=0
## wash args:
if funcname is not None:
try:
funcname = wash_single_urlarg(urlarg=funcname, argreqdtype=str, argdefault="", maxstrlen=40, minstrlen=1)
if function_name_is_valid(fname=funcname) == 0:
funcname = ""
except ValueError, e:
funcname = ""
else:
funcname = ""
if funcdescr is not None:
try:
funcdescr = wash_single_urlarg(urlarg=funcdescr, argreqdtype=str, argdefault="")
except ValueError, e:
funcdescr = ""
else:
funcdescr = ""
## process request:
if funcaddcommit != "" and funcaddcommit is not None:
if funcname == "":
funcname = ""
user_msg.append("""Function name is mandatory and must be a string with no more than 40 characters""")
user_msg.append("""It must contain only alpha-numeric and underscore characters, beginning with a """\
"""letter or underscore""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
body = websubmitadmin_templates.tmpl_display_addfunctionform(funcdescr=funcdescr, user_msg=user_msg)
return (title, body)
## Add a new function definition - IF it is not already present
err_code = insert_function_details(funcname, funcdescr)
## Handle error code - redisplay form with warning about no DB commit, or display with options
## to edit function:
if err_code == 0:
user_msg.append("""'%s' Function Added to WebSubmit""" % (funcname,))
all_function_parameters = get_distinct_paramname_all_websubmit_function_parameters()
body = websubmitadmin_templates.tmpl_display_addfunctionform(funcname=funcname,
funcdescr=funcdescr,
all_websubmit_func_parameters=all_function_parameters,
perform_act="functionedit",
user_msg=user_msg)
else:
## Could not commit function to WebSubmit DB - redisplay form with function description:
user_msg.append("""Could Not Add '%s' Function to WebSubmit""" % (funcname,))
body = websubmitadmin_templates.tmpl_display_addfunctionform(funcdescr=funcdescr, user_msg=user_msg)
else:
## Display Web form for new function addition:
body = websubmitadmin_templates.tmpl_display_addfunctionform()
return (title, body)
def perform_request_add_action(actid=None, actname=None, working_dir=None, status_text=None, actcommit=""):
"""An interface for the addition of a new WebSubmit action.
If form fields filled, will insert new action into WebSubmit database, else will display
web form prompting for action details.
@param actid: unique id for new action
@param actname: name of new action
@param working_dir: action working directory for WebSubmit core
@param status_text: status text displayed at end of action
@return: tuple containing "title" (title of page), body (page body).
"""
user_msg = []
body = ""
title = "Create New WebSubmit Action"
commit_error=0
## wash args:
if actid is not None:
try:
actid = wash_single_urlarg(urlarg=actid, argreqdtype=str, argdefault="", maxstrlen=3, minstrlen=3)
if string_is_alphanumeric_including_underscore(txtstring=actid) == 0:
actid = ""
except ValueError, e:
actid = ""
else:
actid = ""
if actname is not None:
try:
actname = wash_single_urlarg(urlarg=actname, argreqdtype=str, argdefault="")
except ValueError, e:
actname = ""
else:
actname = ""
if working_dir is not None:
try:
working_dir = wash_single_urlarg(urlarg=working_dir, argreqdtype=str, argdefault="")
except ValueError, e:
working_dir = ""
else:
working_dir = ""
if status_text is not None:
try:
status_text = wash_single_urlarg(urlarg=status_text, argreqdtype=str, argdefault="")
except ValueError, e:
status_text = ""
else:
status_text = ""
## process request:
if actcommit != "" and actcommit is not None:
if actid in ("", None):
actid = ""
user_msg.append("""Action ID is mandatory and must be a 3 letter string""")
commit_error = 1
if actname in ("", None):
actname = ""
user_msg.append("""Action description is mandatory""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
body = websubmitadmin_templates.tmpl_display_addactionform(actid=actid, actname=actname, working_dir=working_dir,\
status_text=status_text, user_msg=user_msg)
return (title, body)
## Commit new action to WebSubmit DB:
err_code = _add_new_action(actid,actname,working_dir,status_text)
## Handle error code - redisplay form with warning about no DB commit, or move to list
## of actions
if err_code == 0:
## Action added: show page listing WebSubmit actions
user_msg = """'%s' Action Added to WebSubmit""" % (actid,)
all_actions = get_actid_actname_allactions()
body = websubmitadmin_templates.tmpl_display_allactions(all_actions,user_msg=user_msg)
title = "Available WebSubmit Actions"
else:
## Could not commit action to WebSubmit DB redisplay form with completed details and error message
## warnings.append(('ERR_WEBSUBMIT_ADMIN_ADDACTIONFAILDUPLICATE',actid) ## TODO
user_msg = """Could Not Add '%s' Action to WebSubmit""" % (actid,)
body = websubmitadmin_templates.tmpl_display_addactionform(actid=actid, actname=actname, working_dir=working_dir, \
status_text=status_text, user_msg=user_msg)
else:
## Display Web form for new action details:
body = websubmitadmin_templates.tmpl_display_addactionform()
return (title, body)
def perform_request_add_jscheck(chname=None, chdesc=None, chcommit=""):
"""An interface for the addition of a new WebSubmit JavaScript Check, as used on form elements.
If form fields filled, will insert new Check into WebSubmit database, else will display
Web form prompting for Check details.
@param chname: unique id/name for new Check
@param chdesc: description (JavaScript code body) of new Check
@return: tuple containing "title" (title of page), body (page body).
"""
user_msg = []
body = ""
title = "Create New WebSubmit Checking Function"
commit_error=0
## wash args:
if chname is not None:
try:
chname = wash_single_urlarg(urlarg=chname, argreqdtype=str, argdefault="", maxstrlen=15, minstrlen=1)
if function_name_is_valid(fname=chname) == 0:
chname = ""
except ValueError, e:
chname = ""
else:
chname = ""
if chdesc is not None:
try:
chdesc = wash_single_urlarg(urlarg=chdesc, argreqdtype=str, argdefault="")
except ValueError, e:
chdesc = ""
else:
chdesc = ""
## process request:
if chcommit != "" and chcommit is not None:
if chname in ("", None):
chname = ""
user_msg.append("""Check name is mandatory and must be a string with no more than 15 characters""")
user_msg.append("""It must contain only alpha-numeric and underscore characters, beginning with a """\
"""letter or underscore""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
body = websubmitadmin_templates.tmpl_display_addjscheckform(chname=chname, chdesc=chdesc, user_msg=user_msg)
return (title, body)
## Commit new check to WebSubmit DB:
err_code = insert_jscheck_details(chname, chdesc)
        ## Handle error code - redisplay form with warning about no DB commit, or move to list
## of checks
if err_code == 0:
## Check added: show page listing WebSubmit JS Checks
user_msg.append("""'%s' Checking Function Added to WebSubmit""" % (chname,))
all_jschecks = get_chname_alljschecks()
body = websubmitadmin_templates.tmpl_display_alljschecks(all_jschecks, user_msg=user_msg)
title = "Available WebSubmit Checking Functions"
else:
## Could not commit Check to WebSubmit DB: redisplay form with completed details and error message
## TODO : Warning Message
user_msg.append("""Could Not Add '%s' Checking Function to WebSubmit""" % (chname,))
body = websubmitadmin_templates.tmpl_display_addjscheckform(chname=chname, chdesc=chdesc, user_msg=user_msg)
else:
## Display Web form for new check details:
body = websubmitadmin_templates.tmpl_display_addjscheckform()
return (title, body)
def perform_request_add_element(elname=None, elmarccode=None, eltype=None, elsize=None, elrows=None, \
elcols=None, elmaxlength=None, elval=None, elfidesc=None, \
elmodifytext=None, elcommit=""):
"""An interface for adding a new ELEMENT to the WebSubmit DB.
@param elname: (string) element name.
@param elmarccode: (string) element's MARC code.
@param eltype: (character) element type.
@param elsize: (integer) element size.
@param elrows: (integer) number of rows in element.
@param elcols: (integer) number of columns in element.
@param elmaxlength: (integer) maximum length of element
@param elval: (string) default value of element
@param elfidesc: (string) description of element
@param elmodifytext: (string) modification text of element
@param elcommit: (string) If this value is not empty, attempt to commit element details to WebSubmit DB
@return: tuple containing "title" (title of page), body (page body).
"""
user_msg = []
body = ""
title = "Create New WebSubmit Element"
commit_error=0
## wash args:
if elname is not None:
try:
elname = wash_single_urlarg(urlarg=elname, argreqdtype=str, argdefault="", maxstrlen=15, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=elname) == 0:
elname = ""
except ValueError, e:
elname = ""
else:
elname = ""
if elmarccode is not None:
try:
elmarccode = wash_single_urlarg(urlarg=elmarccode, argreqdtype=str, argdefault="")
except ValueError, e:
elmarccode = ""
else:
elmarccode = ""
if eltype is not None:
try:
eltype = wash_single_urlarg(urlarg=eltype, argreqdtype=str, argdefault="", maxstrlen=1, minstrlen=1)
except ValueError, e:
eltype = ""
else:
eltype = ""
if elsize is not None:
try:
elsize = wash_single_urlarg(urlarg=elsize, argreqdtype=int, argdefault="")
except ValueError, e:
elsize = ""
else:
elsize = ""
if elrows is not None:
try:
elrows = wash_single_urlarg(urlarg=elrows, argreqdtype=int, argdefault="")
except ValueError, e:
elrows = ""
else:
elrows = ""
if elcols is not None:
try:
elcols = wash_single_urlarg(urlarg=elcols, argreqdtype=int, argdefault="")
except ValueError, e:
elcols = ""
else:
elcols = ""
if elmaxlength is not None:
try:
elmaxlength = wash_single_urlarg(urlarg=elmaxlength, argreqdtype=int, argdefault="")
except ValueError, e:
elmaxlength = ""
else:
elmaxlength = ""
if elval is not None:
try:
elval = wash_single_urlarg(urlarg=elval, argreqdtype=str, argdefault="")
except ValueError, e:
elval = ""
else:
elval = ""
if elfidesc is not None:
try:
elfidesc = wash_single_urlarg(urlarg=elfidesc, argreqdtype=str, argdefault="")
except ValueError, e:
elfidesc = ""
else:
elfidesc = ""
if elmodifytext is not None:
try:
elmodifytext = wash_single_urlarg(urlarg=elmodifytext, argreqdtype=str, argdefault="")
except ValueError, e:
elmodifytext = ""
else:
elmodifytext = ""
## process request:
if elcommit != "" and elcommit is not None:
if elname == "":
elname = ""
user_msg.append("""The element name is mandatory and must be a string with no more than 15 characters""")
user_msg.append("""It must contain only alpha-numeric and underscore characters""")
commit_error = 1
if eltype == "" or eltype not in ("D", "F", "H", "I", "R", "S", "T"):
eltype = ""
user_msg.append("""The element type is mandatory and must be selected from the list""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
body = websubmitadmin_templates.tmpl_display_addelementform(elname=elname,
elmarccode=elmarccode,
eltype=eltype,
elsize=str(elsize),
elrows=str(elrows),
elcols=str(elcols),
elmaxlength=str(elmaxlength),
elval=elval,
elfidesc=elfidesc,
elmodifytext=elmodifytext,
user_msg=user_msg,
)
return (title, body)
## Commit new element description to WebSubmit DB:
err_code = insert_element_details(elname=elname, elmarccode=elmarccode, eltype=eltype, \
elsize=elsize, elrows=elrows, elcols=elcols, \
elmaxlength=elmaxlength, elval=elval, elfidesc=elfidesc, \
elmodifytext=elmodifytext)
if err_code == 0:
## Element added: show page listing WebSubmit elements
user_msg.append("""'%s' Element Added to WebSubmit""" % (elname,))
if elname in CFG_RESERVED_SUBMISSION_FILENAMES:
user_msg.append("""WARNING: '%s' is a reserved name. Check WebSubmit admin guide to be aware of possible side-effects.""" % elname)
title = "Available WebSubmit Elements"
all_elements = get_elename_allelements()
body = websubmitadmin_templates.tmpl_display_allelements(all_elements, user_msg=user_msg)
else:
## Could not commit element to WebSubmit DB: redisplay form with completed details and error message
## TODO : Warning Message
user_msg.append("""Could Not Add '%s' Element to WebSubmit""" % (elname,))
body = websubmitadmin_templates.tmpl_display_addelementform(elname=elname,
elmarccode=elmarccode,
eltype=eltype,
elsize=str(elsize),
elrows=str(elrows),
elcols=str(elcols),
elmaxlength=str(elmaxlength),
elval=elval,
elfidesc=elfidesc,
elmodifytext=elmodifytext,
user_msg=user_msg,
)
else:
## Display Web form for new element details:
body = websubmitadmin_templates.tmpl_display_addelementform()
return (title, body)
def perform_request_edit_element(elname, elmarccode=None, eltype=None, elsize=None, \
elrows=None, elcols=None, elmaxlength=None, elval=None, \
elfidesc=None, elmodifytext=None, elcommit=""):
"""An interface for the editing and updating the details of a WebSubmit ELEMENT.
@param elname: element name.
@param elmarccode: element's MARC code.
@param eltype: element type.
@param elsize: element size.
@param elrows: number of rows in element.
@param elcols: number of columns in element.
@param elmaxlength: maximum length of element
@param elval: default value of element
@param elfidesc: description of element
@param elmodifytext: modification text of element
@param elcommit: If this value is not empty, attempt to commit element details to WebSubmit DB
@return: tuple containing "title" (title of page), body (page body).
"""
user_msg = []
body = ""
title = "Edit WebSubmit Element"
commit_error=0
## wash args:
if elname is not None:
try:
elname = wash_single_urlarg(urlarg=elname, argreqdtype=str, argdefault="", maxstrlen=15, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=elname) == 0:
elname = ""
except ValueError, e:
elname = ""
else:
elname = ""
if elmarccode is not None:
try:
elmarccode = wash_single_urlarg(urlarg=elmarccode, argreqdtype=str, argdefault="")
except ValueError, e:
elmarccode = ""
else:
elmarccode = ""
if eltype is not None:
try:
eltype = wash_single_urlarg(urlarg=eltype, argreqdtype=str, argdefault="", maxstrlen=1, minstrlen=1)
except ValueError, e:
eltype = ""
else:
eltype = ""
if elsize is not None:
try:
elsize = wash_single_urlarg(urlarg=elsize, argreqdtype=int, argdefault="")
except ValueError, e:
elsize = ""
else:
elsize = ""
if elrows is not None:
try:
elrows = wash_single_urlarg(urlarg=elrows, argreqdtype=int, argdefault="")
except ValueError, e:
elrows = ""
else:
elrows = ""
if elcols is not None:
try:
elcols = wash_single_urlarg(urlarg=elcols, argreqdtype=int, argdefault="")
except ValueError, e:
elcols = ""
else:
elcols = ""
if elmaxlength is not None:
try:
elmaxlength = wash_single_urlarg(urlarg=elmaxlength, argreqdtype=int, argdefault="")
except ValueError, e:
elmaxlength = ""
else:
elmaxlength = ""
if elval is not None:
try:
elval = wash_single_urlarg(urlarg=elval, argreqdtype=str, argdefault="")
except ValueError, e:
elval = ""
else:
elval = ""
if elfidesc is not None:
try:
elfidesc = wash_single_urlarg(urlarg=elfidesc, argreqdtype=str, argdefault="")
except ValueError, e:
elfidesc = ""
else:
elfidesc = ""
if elmodifytext is not None:
try:
elmodifytext = wash_single_urlarg(urlarg=elmodifytext, argreqdtype=str, argdefault="")
except ValueError, e:
elmodifytext = ""
else:
elmodifytext = ""
## process request:
if elcommit != "" and elcommit is not None:
if elname == "":
elname = ""
user_msg.append("""Invalid Element Name!""")
commit_error = 1
if eltype == "" or eltype not in ("D", "F", "H", "I", "R", "S", "T"):
eltype = ""
user_msg.append("""Invalid Element Type!""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
all_elements = get_elename_allelements()
user_msg.append("""Could Not Update Element""")
title = "Available WebSubmit Elements"
body = websubmitadmin_templates.tmpl_display_allelements(all_elements, user_msg=user_msg)
return (title, body)
## Commit updated element description to WebSubmit DB:
err_code = update_element_details(elname=elname, elmarccode=elmarccode, eltype=eltype, \
elsize=elsize, elrows=elrows, elcols=elcols, \
elmaxlength=elmaxlength, elval=elval, elfidesc=elfidesc, \
elmodifytext=elmodifytext)
if err_code == 0:
## Element Updated: Show All Element Details Again
user_msg.append("""'%s' Element Updated""" % (elname,))
## Get submission page usage of element:
el_use = get_doctype_action_pagenb_for_submissions_using_element(elname)
element_dets = get_element_details(elname)
element_dets = stringify_listvars(element_dets)
## Take elements from results tuple:
(elmarccode, eltype, elsize, elrows, elcols, elmaxlength, \
elval, elfidesc, elcd, elmd, elmodifytext) = \
(element_dets[0][0], element_dets[0][1], element_dets[0][2], element_dets[0][3], \
element_dets[0][4], element_dets[0][5], element_dets[0][6], element_dets[0][7], \
element_dets[0][8], element_dets[0][9], element_dets[0][10])
## Pass to template:
body = websubmitadmin_templates.tmpl_display_addelementform(elname=elname,
elmarccode=elmarccode,
eltype=eltype,
elsize=elsize,
elrows=elrows,
elcols=elcols,
elmaxlength=elmaxlength,
elval=elval,
elfidesc=elfidesc,
elcd=elcd,
elmd=elmd,
elmodifytext=elmodifytext,
perform_act="elementedit",
user_msg=user_msg,
el_use_tuple=el_use
)
else:
## Could Not Update Element: Maybe Key Violation, or Invalid elname? Redisplay all elements.
## TODO : LOGGING
all_elements = get_elename_allelements()
user_msg.append("""Could Not Update Element '%s'""" % (elname,))
title = "Available WebSubmit Elements"
body = websubmitadmin_templates.tmpl_display_allelements(all_elements, user_msg=user_msg)
else:
## Display Web form containing existing details of element:
element_dets = get_element_details(elname)
## Get submission page usage of element:
el_use = get_doctype_action_pagenb_for_submissions_using_element(elname)
num_rows_ret = len(element_dets)
element_dets = stringify_listvars(element_dets)
if num_rows_ret == 1:
## Display Element details
## Take elements from results tuple:
(elmarccode, eltype, elsize, elrows, elcols, elmaxlength, \
elval, elfidesc, elcd, elmd, elmodifytext) = \
(element_dets[0][0], element_dets[0][1], element_dets[0][2], element_dets[0][3], \
element_dets[0][4], element_dets[0][5], element_dets[0][6], element_dets[0][7], \
element_dets[0][8], element_dets[0][9], element_dets[0][10])
## Pass to template:
body = websubmitadmin_templates.tmpl_display_addelementform(elname=elname,
elmarccode=elmarccode,
eltype=eltype,
elsize=elsize,
elrows=elrows,
elcols=elcols,
elmaxlength=elmaxlength,
elval=elval,
elfidesc=elfidesc,
elcd=elcd,
elmd=elmd,
elmodifytext=elmodifytext,
perform_act="elementedit",
el_use_tuple=el_use
)
else:
## Either no rows, or more than one row for ELEMENT: log error, and display all Elements
## TODO : LOGGING
title = "Available WebSubmit Elements"
all_elements = get_elename_allelements()
if num_rows_ret > 1:
## Key Error - duplicated elname
user_msg.append("""Found Several Rows for Element with Name '%s' - Inform Administrator""" % (elname,))
## LOG MESSAGE
else:
## No rows for ELEMENT
user_msg.append("""Could Not Find Any Rows for Element with Name '%s'""" % (elname,))
## LOG MESSAGE
body = websubmitadmin_templates.tmpl_display_allelements(all_elements, user_msg=user_msg)
return (title, body)
def _display_edit_check_form(chname, user_msg=""):
title = "Edit WebSubmit Checking Function"
if user_msg == "":
user_msg = []
jscheck_dets = get_jscheck_details(chname)
num_rows_ret = len(jscheck_dets)
if num_rows_ret == 1:
## Display Check details
body = websubmitadmin_templates.tmpl_display_addjscheckform(chname=jscheck_dets[0][0],
chdesc=jscheck_dets[0][1],
perform_act="jscheckedit",
cd=jscheck_dets[0][2],
md=jscheck_dets[0][3],
user_msg=user_msg)
else:
## Either no rows, or more than one row for Check: log error, and display all Checks
## TODO : LOGGING
title = "Available WebSubmit Checking Functions"
all_jschecks = get_chname_alljschecks()
if num_rows_ret > 1:
## Key Error - duplicated chname
user_msg.append("""Found Several Rows for Checking Function with Name '%s' - Inform Administrator""" % (chname,))
## LOG MESSAGE
else:
## No rows for action
user_msg.append("""Could Not Find Any Rows for Checking Function with Name '%s'""" % (chname,))
## LOG MESSAGE
body = websubmitadmin_templates.tmpl_display_alljschecks(all_jschecks, user_msg=user_msg)
return (title, body)
def perform_request_edit_jscheck(chname, chdesc=None, chcommit=""):
"""Interface for editing and updating the details of a WebSubmit Check.
If only "chname" provided, will display the details of a Check in a Web form.
If "chdesc" not empty, will assume that this is a call to commit update to Check details.
@param chname: unique id for Check
@param chdesc: modified value for WebSubmit Check description (code body) - (presence invokes update)
@return: tuple containing "title" (title of page), body (page body).
"""
user_msg = []
body = ""
title = "Edit WebSubmit Checking Function"
commit_error=0
## wash args:
if chname is not None:
try:
chname = wash_single_urlarg(urlarg=chname, argreqdtype=str, argdefault="", maxstrlen=15, minstrlen=1)
if function_name_is_valid(fname=chname) == 0:
chname = ""
except ValueError, e:
chname = ""
else:
chname = ""
if chdesc is not None:
try:
chdesc = wash_single_urlarg(urlarg=chdesc, argreqdtype=str, argdefault="")
except ValueError, e:
chdesc = ""
else:
chdesc = ""
(chname, chdesc) = (str(chname), str(chdesc))
if chcommit != "" and chcommit is not None:
if chname in ("", None):
chname = ""
user_msg.append("""Check name is mandatory and must be a string with no more than 15 characters""")
user_msg.append("""It must contain only alpha-numeric and underscore characters, beginning with a """\
"""letter or underscore""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
all_jschecks = get_chname_alljschecks()
user_msg.append("""Could Not Update Checking Function""")
body = websubmitadmin_templates.tmpl_display_alljschecks(all_jschecks, user_msg=user_msg)
title = "Available WebSubmit Checking Functions"
return (title, body)
## Commit updated Check details to WebSubmit DB:
err_code = update_jscheck_details(chname, chdesc)
if err_code == 0:
## Check Updated: Show All Check Details Again
user_msg.append("""'%s' Check Updated""" % (chname,))
jscheck_dets = get_jscheck_details(chname)
body = websubmitadmin_templates.tmpl_display_addjscheckform(chname=jscheck_dets[0][0],
chdesc=jscheck_dets[0][1],
perform_act="jscheckedit",
cd=jscheck_dets[0][2],
md=jscheck_dets[0][3],
user_msg=user_msg
)
else:
## Could Not Update Check: Maybe Key Violation, or Invalid chname? Redisplay all Checks.
## TODO : LOGGING
all_jschecks = get_chname_alljschecks()
user_msg.append("""Could Not Update Checking Function '%s'""" % (chname,))
body = websubmitadmin_templates.tmpl_display_alljschecks(all_jschecks, user_msg=user_msg)
title = "Available WebSubmit Checking Functions"
else:
## Display Web form containing existing details of Check:
(title, body) = _display_edit_check_form(chname=chname)
return (title, body)
def _display_edit_action_form(actid, user_msg=""):
title = "Edit WebSubmit Action"
if user_msg == "":
user_msg = []
action_dets = get_action_details(actid)
num_rows_ret = len(action_dets)
if num_rows_ret == 1:
## Display action details
body = websubmitadmin_templates.tmpl_display_addactionform(actid=action_dets[0][0],
actname=action_dets[0][1],
working_dir=action_dets[0][2],
status_text=action_dets[0][3],
perform_act="actionedit",
cd=action_dets[0][4],
md=action_dets[0][5],
user_msg=user_msg)
else:
## Either no rows, or more than one row for action: log error, and display all actions
## TODO : LOGGING
title = "Available WebSubmit Actions"
all_actions = get_actid_actname_allactions()
if num_rows_ret > 1:
## Key Error - duplicated actid
user_msg.append("""Found Several Rows for Action with ID '%s' - Inform Administrator""" % (actid,))
## LOG MESSAGE
else:
## No rows for action
user_msg.append("""Could Not Find Any Rows for Action with ID '%s'""" % (actid,))
## LOG MESSAGE
body = websubmitadmin_templates.tmpl_display_allactions(all_actions, user_msg=user_msg)
return (title, body)
def perform_request_edit_action(actid, actname=None, working_dir=None, status_text=None, actcommit=""):
"""Interface for editing and updating the details of a WebSubmit action.
If only "actid" provided, will display the details of an action in a Web form.
If "actname" not empty, will assume that this is a call to commit update to action details.
@param actid: unique id for action
@param actname: modified value for WebSubmit action name/description (presence invokes update)
@param working_dir: modified value for WebSubmit action working_dir
@param status_text: modified value for WebSubmit action status text
@return: tuple containing "title" (title of page), body (page body).
"""
user_msg = []
body = ""
title = "Edit WebSubmit Action"
commit_error = 0
## wash args:
if actid is not None:
try:
actid = wash_single_urlarg(urlarg=actid, argreqdtype=str, argdefault="", maxstrlen=3, minstrlen=3)
if string_is_alphanumeric_including_underscore(txtstring=actid) == 0:
actid = ""
except ValueError, e:
actid = ""
actid = actid.upper()
else:
actid = ""
if actname is not None:
try:
actname = wash_single_urlarg(urlarg=actname, argreqdtype=str, argdefault="")
except ValueError, e:
actname = ""
else:
actname = ""
if working_dir is not None:
try:
working_dir = wash_single_urlarg(urlarg=working_dir, argreqdtype=str, argdefault="")
except ValueError, e:
working_dir = ""
else:
working_dir = ""
if status_text is not None:
try:
status_text = wash_single_urlarg(urlarg=status_text, argreqdtype=str, argdefault="")
except ValueError, e:
status_text = ""
else:
status_text = ""
## process request:
if actcommit != "" and actcommit is not None:
if actname in ("", None):
actname = ""
user_msg.append("""Action description is mandatory""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
(title, body) = _display_edit_action_form(actid=actid, user_msg=user_msg)
return (title, body)
## Commit updated action details to WebSubmit DB:
err_code = update_action_details(actid, actname, working_dir, status_text)
if err_code == 0:
## Action Updated: Show Action Details Again
user_msg.append("""'%s' Action Updated""" % (actid,))
action_dets = get_action_details(actid)
body = websubmitadmin_templates.tmpl_display_addactionform(actid=action_dets[0][0],
actname=action_dets[0][1],
working_dir=action_dets[0][2],
status_text=action_dets[0][3],
perform_act="actionedit",
cd=action_dets[0][4],
md=action_dets[0][5],
user_msg=user_msg
)
else:
## Could Not Update Action: Maybe Key Violation, or Invalid actid? Redisplay all actions.
## TODO : LOGGING
all_actions = get_actid_actname_allactions()
user_msg.append("""Could Not Update Action '%s'""" % (actid,))
body = websubmitadmin_templates.tmpl_display_allactions(all_actions, user_msg=user_msg)
title = "Available WebSubmit Actions"
else:
## Display Web form containing existing details of action:
(title, body) = _display_edit_action_form(actid=actid)
return (title, body)
def _functionedit_display_function_details(funcname, user_msg=""):
"""Display the details of a function, along with any message to the user that may have been provided.
@param funcname: unique name of function to be updated
@param user_msg: Any message to the user that is to be displayed on the page.
@return: tuple containing (page title, HTML page body).
"""
if user_msg == "":
user_msg = []
title = "Edit WebSubmit Function"
func_descr_res = get_function_description(function=funcname)
num_rows_ret = len(func_descr_res)
if num_rows_ret == 1:
## Display action details
funcdescr = func_descr_res[0][0]
if funcdescr is None:
funcdescr = ""
## get parameters for this function:
this_function_parameters = get_function_parameters(function=funcname)
## get all function parameters in WebSubmit:
all_function_parameters = get_distinct_paramname_all_websubmit_function_parameters()
## get the docstring of the function. Remove leading empty
## lines and remove unnecessary leading whitespaces
docstring = None
try:
websubmit_function = __import__('invenio.websubmit_functions.%s' % funcname,
globals(), locals(), [funcname])
if hasattr(websubmit_function, funcname) and getattr(websubmit_function, funcname).__doc__:
docstring = getattr(websubmit_function, funcname).__doc__
except Exception, e:
docstring = '''<span style="color:#f00;font-weight:700">Function documentation could
not be loaded</span>.<br/>Please check function definition. Error was:<br/>%s''' % str(e)
if docstring:
docstring = '<pre style="max-height:500px;overflow: auto;">' + _format_function_docstring(docstring) + '</pre>'
body = websubmitadmin_templates.tmpl_display_addfunctionform(funcname=funcname,
funcdescr=funcdescr,
func_parameters=this_function_parameters,
all_websubmit_func_parameters=all_function_parameters,
perform_act="functionedit",
user_msg=user_msg,
func_docstring = docstring
)
else:
## Either no rows, or more than one row for function: log error, and display all functions
## TODO : LOGGING
title = "Available WebSubmit Functions"
all_functions = get_funcname_funcdesc_allfunctions()
if num_rows_ret > 1:
## Key Error - duplicated function name
user_msg.append("""Found Several Rows for Function with Name '%s' - Inform Administrator""" % (funcname,))
## LOG MESSAGE
else:
## No rows for function
user_msg.append("""Could Not Find Any Rows for Function with Name '%s'""" % (funcname,))
## LOG MESSAGE
body = websubmitadmin_templates.tmpl_display_allfunctions(all_functions, user_msg=user_msg)
return (title, body)
def _format_function_docstring(docstring):
"""
Remove unnecessary leading and trailing empty lines, as well as
    meaningless leading and trailing whitespaces on every line
@param docstring: the input docstring to format
@type docstring: string
@return: a formatted docstring
@rtype: string
"""
def count_leading_whitespaces(line):
"Count enumber of leading whitespaces"
line_length = len(line)
pos = 0
while pos < line_length and line[pos] == " ":
pos += 1
return pos
new_docstring_list = []
min_nb_leading_whitespace = len(docstring) # this is really the max possible
# First count min number of leading whitespaces of all lines. Also
# remove leading empty lines.
docstring_has_started_p = False
for line in docstring.splitlines():
if docstring_has_started_p or line.strip():
            # A non-empty line has been found, or an empty line after
# the beginning of some text was found
docstring_has_started_p = True
new_docstring_list.append(line)
if line.strip():
# If line has some meaningful char, count leading whitespaces
line_nb_spaces = count_leading_whitespaces(line)
if line_nb_spaces < min_nb_leading_whitespace:
min_nb_leading_whitespace = line_nb_spaces
return '\n'.join([line[min_nb_leading_whitespace:] for line in new_docstring_list]).rstrip()
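## Illustrative sketch only (added here for clarity; not called anywhere in this
## module): a hypothetical input/output pair for _format_function_docstring().
## Leading empty lines are dropped and the common leading indentation is removed.
def _example_format_function_docstring():
    raw = "\n    Summary line.\n      indented detail\n"
    return _format_function_docstring(raw)  ## would be "Summary line.\n  indented detail"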
def _functionedit_update_description(funcname, funcdescr):
"""Perform an update of the description for a given function.
@param funcname: unique name of function to be updated
@param funcdescr: description to be updated for funcname
@return: a tuple containing (page title, HTML body content)
"""
user_msg = []
err_code = update_function_description(funcname, funcdescr)
if err_code == 0:
## Function updated - redisplay
user_msg.append("""'%s' Function Description Updated""" % (funcname,))
else:
## Could not update function description
## TODO : ERROR LIBS
user_msg.append("""Could Not Update Description for Function '%s'""" % (funcname,))
## Display function details
(title, body) = _functionedit_display_function_details(funcname=funcname, user_msg=user_msg)
return (title, body)
def _functionedit_delete_parameter(funcname, deleteparam):
"""Delete a parameter from a given function.
Important: if any document types have been using the function from which this parameter will be deleted,
and therefore have values for this parameter, these values will not be deleted from the WebSubmit DB.
The deleted parameter therefore may continue to exist in the WebSubmit DB, but will be disassociated
from this function.
@param funcname: unique name of the function from which the parameter is to be deleted.
@param deleteparam: the name of the parameter to be deleted from the function.
@return: tuple containing (title, HTML body content)
"""
user_msg = []
err_code = delete_function_parameter(function=funcname, parameter_name=deleteparam)
if err_code == 0:
## Parameter deleted - redisplay function details
user_msg.append("""'%s' Parameter Deleted from '%s' Function""" % (deleteparam, funcname))
else:
## could not delete param - it does not exist for this function
## TODO : ERROR LIBS
user_msg.append("""'%s' Parameter Does not Seem to Exist for Function '%s' - Could not Delete""" \
% (deleteparam, funcname))
## Display function details
(title, body) = _functionedit_display_function_details(funcname=funcname, user_msg=user_msg)
return (title, body)
def _functionedit_add_parameter(funcname, funceditaddparam="", funceditaddparamfree=""):
"""Add (connect) a parameter to a given WebSubmit function.
@param funcname: unique name of the function to which the parameter is to be added.
@param funceditaddparam: the value of a HTML select list: if present, will contain the name of the
parameter to be added to the function. May also be empty - the user may have used the free-text field
(funceditaddparamfree) to manually enter the name of a parameter. The important thing is that one
        must be present for the parameter to be added successfully.
@param funceditaddparamfree: The name of the parameter to be added to the function, as taken from a free-
text HTML input field. May also be empty - the user may have used the HTML select-list (funceditaddparam)
field to choose the parameter. The important thing is that one must be present for the parameter to be
        added successfully. The "funceditaddparamfree" value will take priority over the "funceditaddparam"
list value.
@return: tuple containing (title, HTML body content)
"""
user_msg = []
if funceditaddparam in ("", None, "NO_VALUE") and funceditaddparamfree in ("", None):
## no parameter chosen
## TODO : ERROR LIBS
user_msg.append("""Unable to Find the Parameter to be Added to Function '%s' - Could not Add""" % (funcname,))
else:
add_parameter = ""
if funceditaddparam not in ("", None) and funceditaddparamfree not in ("", None):
## both select box and free-text values provided for parameter - prefer free-text
add_parameter = funceditaddparamfree
elif funceditaddparam not in ("", None):
            ## take the parameter chosen in the select-box
add_parameter = funceditaddparam
else:
            ## take the parameter entered in the free-text field
add_parameter = funceditaddparamfree
## attempt to commit parameter:
err_code = add_function_parameter(function=funcname, parameter_name=add_parameter)
if err_code == 0:
## Parameter added - redisplay function details
user_msg.append("""'%s' Parameter Added to '%s' Function""" % (add_parameter, funcname))
else:
## could not add param - perhaps it already exists for this function
## TODO : ERROR LIBS
user_msg.append("""Could not Add '%s' Parameter to Function '%s' - It Already Exists for this Function""" \
% (add_parameter, funcname))
## Display function details
(title, body) = _functionedit_display_function_details(funcname=funcname, user_msg=user_msg)
return (title, body)
def perform_request_edit_function(funcname, funcdescr=None, funceditaddparam=None, funceditaddparamfree=None,
funceditdelparam=None, funcdescreditcommit="", funcparamdelcommit="",
funcparamaddcommit=""):
"""Edit a WebSubmit function. 3 possibilities: edit the function description; delete a parameter from the
function; add a new parameter to the function.
@param funcname: the name of the function to be modified
@param funcdescr: the new function description
@param funceditaddparam: the name of the parameter to be added to the function (taken from HTML SELECT-list)
@param funceditaddparamfree: the name of the parameter to be added to the function (taken from free-text input)
@param funceditdelparam: the name of the parameter to be deleted from the function
@param funcdescreditcommit: a flag to indicate that this request is to update the description of a function
@param funcparamdelcommit: a flag to indicate that this request is to delete a parameter from a function
@param funcparamaddcommit: a flag to indicate that this request is to add a new parameter to a function
@return: tuple containing (page title, HTML page body)
"""
body = ""
title = "Edit WebSubmit Function"
commit_error = 0
## wash args:
if funcname is not None:
try:
funcname = wash_single_urlarg(urlarg=funcname, argreqdtype=str, argdefault="")
if string_is_alphanumeric_including_underscore(txtstring=funcname) == 0:
funcname = ""
except ValueError, e:
funcname = ""
else:
funcname = ""
if funcdescr is not None:
try:
funcdescr = wash_single_urlarg(urlarg=funcdescr, argreqdtype=str, argdefault="")
except ValueError, e:
funcdescr = ""
else:
funcdescr = ""
if funceditaddparam is not None:
try:
funceditaddparam = wash_single_urlarg(urlarg=funceditaddparam, argreqdtype=str, argdefault="")
if string_is_alphanumeric_including_underscore(txtstring=funceditaddparam) == 0:
funceditaddparam = ""
except ValueError, e:
funceditaddparam = ""
else:
funceditaddparam = ""
if funceditaddparamfree is not None:
try:
funceditaddparamfree = wash_single_urlarg(urlarg=funceditaddparamfree, argreqdtype=str, argdefault="")
if string_is_alphanumeric_including_underscore(txtstring=funceditaddparamfree) == 0:
funceditaddparamfree = ""
except ValueError, e:
funceditaddparamfree = ""
else:
funceditaddparamfree = ""
if funceditdelparam is not None:
try:
funceditdelparam = wash_single_urlarg(urlarg=funceditdelparam, argreqdtype=str, argdefault="")
except ValueError, e:
funceditdelparam = ""
else:
funceditdelparam = ""
if funcname == "":
(title, body) = _functionedit_display_function_details(funcname=funcname)
return (title, body)
if funcdescreditcommit != "" and funcdescreditcommit is not None:
## Update the definition of a function:
(title, body) = _functionedit_update_description(funcname=funcname, funcdescr=funcdescr)
elif funcparamaddcommit != "" and funcparamaddcommit is not None:
## Request to add a new parameter to a function
(title, body) = _functionedit_add_parameter(funcname=funcname,
funceditaddparam=funceditaddparam, funceditaddparamfree=funceditaddparamfree)
elif funcparamdelcommit != "" and funcparamdelcommit is not None:
## Request to delete a parameter from a function
(title, body) = _functionedit_delete_parameter(funcname=funcname, deleteparam=funceditdelparam)
else:
## Display Web form for new function addition:
(title, body) = _functionedit_display_function_details(funcname=funcname)
return (title, body)
def perform_request_function_usage(funcname):
"""Display a page containing the usage details of a given function.
@param funcname: the function name
@return: page body
"""
func_usage = get_function_usage_details(function=funcname)
func_usage = stringify_listvars(func_usage)
body = websubmitadmin_templates.tmpl_display_function_usage(funcname, func_usage)
return body
def perform_request_list_actions():
"""Display a list of all WebSubmit actions.
@return: body where body is a string of HTML, which is a page body.
"""
body = ""
all_actions = get_actid_actname_allactions()
body = websubmitadmin_templates.tmpl_display_allactions(all_actions)
return body
def perform_request_list_doctypes():
"""Display a list of all WebSubmit document types.
@return: body where body is a string of HTML, which is a page body.
"""
body = ""
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(all_doctypes)
return body
def perform_request_list_jschecks():
"""Display a list of all WebSubmit JavaScript element checking functions.
@return: body, where body is a string of HTML, which is a page body.
"""
body = ""
all_jschecks = get_chname_alljschecks()
body = websubmitadmin_templates.tmpl_display_alljschecks(all_jschecks)
return body
def perform_request_list_functions():
"""Display a list of all WebSubmit FUNCTIONS.
@return: body where body is a string of HTML, which is a page body.
"""
body = ""
all_functions = get_funcname_funcdesc_allfunctions()
body = websubmitadmin_templates.tmpl_display_allfunctions(all_functions)
return body
def perform_request_list_elements():
"""Display a list of all WebSubmit ELEMENTS.
@return: body where body is a string of HTML, which is a page body.
"""
body = ""
all_elements = get_elename_allelements()
body = websubmitadmin_templates.tmpl_display_allelements(all_elements)
return body
def _remove_doctype(doctype):
"""Process removal of a document type.
@param doctype: the document type to be removed.
       @return: a tuple containing (page title, HTML page body)
"""
title = ""
body = ""
user_msg = []
numrows_doctype = get_number_doctypes_docid(docid=doctype)
if numrows_doctype == 1:
## Doctype is unique and can therefore be deleted:
## Delete any function parameters for this document type:
error_code = delete_all_parameters_doctype(doctype=doctype)
if error_code != 0:
## problem deleting some or all parameters - inform user and log error
## TODO : ERROR LOGGING
user_msg.append("""Unable to delete some or all function parameter values for document type "%s".""" % (doctype,))
## delete all functions called by this doctype's actions
error_code = delete_all_functions_doctype(doctype=doctype)
if error_code != 0:
## problem deleting some or all functions - inform user and log error
## TODO : ERROR LOGGING
user_msg.append("""Unable to delete some or all functions for document type "%s".""" % (doctype,))
## delete all categories of this doctype
error_code = delete_all_categories_doctype(doctype=doctype)
if error_code != 0:
## problem deleting some or all categories - inform user and log error
## TODO : ERROR LOGGING
user_msg.append("""Unable to delete some or all parameters for document type "%s".""" % (doctype,))
## delete all submission interface fields for this doctype
error_code = delete_all_submissionfields_doctype(doctype=doctype)
if error_code != 0:
## problem deleting some or all submission fields - inform user and log error
## TODO : ERROR LOGGING
user_msg.append("""Unable to delete some or all submission fields for document type "%s".""" % (doctype,))
## delete all submissions for this doctype
error_code = delete_all_submissions_doctype(doctype)
if error_code != 0:
## problem deleting some or all submissions - inform user and log error
## TODO : ERROR LOGGING
user_msg.append("""Unable to delete some or all submissions for document type "%s".""" % (doctype,))
## delete entry for this doctype in the collection-doctypes table
error_code = delete_collection_doctype_entry_doctype(doctype)
if error_code != 0:
## problem deleting this doctype from the collection-doctypes table
## TODO : ERROR LOGGING
user_msg.append("""Unable to delete document type "%s" from the collection-doctypes table.""" % (doctype,))
## delete the doctype itself
error_code = delete_doctype(doctype)
if error_code != 0:
## problem deleting this doctype from the doctypes table
## TODO : ERROR LOGGING
user_msg.append("""Unable to delete document type "%s" from the document types table.""" % (doctype,))
user_msg.append("""The "%s" document type should now have been deleted, but you should not ignore any warnings.""" % (doctype,))
title = """Available WebSubmit Document Types"""
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
else:
## doctype is not unique and cannot be deleted
if numrows_doctype > 1:
## doctype is duplicated - cannot delete - needs admin intervention
## TODO : LOG ERROR
user_msg.append("""%s WebSubmit document types have been identified for doctype id "%s" - unable to delete.""" \
""" Please inform administrator.""" % (numrows_doctype, doctype))
else:
## no document types found for this doctype id
## TODO : LOG ERROR
user_msg.append("""Unable to find any document types in the WebSubmit database for doctype id "%s" - unable to delete""" \
% (doctype,))
## get a list of all document types, and once more display the delete form, with the message
alldoctypes = get_docid_docname_and_docid_alldoctypes()
title = "Remove WebSubmit Doctument Type"
body = websubmitadmin_templates.tmpl_display_delete_doctype_form(doctype="", alldoctypes=alldoctypes, user_msg=user_msg)
return (title, body)
def perform_request_remove_doctype(doctype="", doctypedelete="", doctypedeleteconfirm=""):
"""Remove a document type from WebSubmit.
@param doctype: the document type to be removed
    @param doctypedelete: flag to signal that a confirmation for deletion should be displayed
    @param doctypedeleteconfirm: flag to signal that confirmation for deletion has been received and
     the doctype should be removed
@return: a tuple (title, body)
"""
body = ""
title = "Remove WebSubmit Document Type"
if doctypedeleteconfirm not in ("", None):
## Delete the document type:
(title, body) = _remove_doctype(doctype=doctype)
else:
## Display "doctype delete form"
if doctypedelete not in ("", None) and doctype not in ("", None):
## don't bother to get list of doctypes - user will be prompted to confirm the deletion of "doctype"
alldoctypes = None
else:
## get list of all doctypes to pass to template so that it can prompt the user to choose a doctype to delete
## alldoctypes = get_docid_docname_alldoctypes()
alldoctypes = get_docid_docname_and_docid_alldoctypes()
body = websubmitadmin_templates.tmpl_display_delete_doctype_form(doctype=doctype, alldoctypes=alldoctypes)
return (title, body)
def _create_add_doctype_form(doctype="", doctypename="", doctypedescr="", clonefrom="", user_msg=""):
"""Perform the steps necessary to create the "add a new doctype" form.
@param doctype: The unique ID that is to be used for the new doctype.
@param doctypename: the name that is to be given to a doctype.
    @param doctypedescr: the description to be allocated to the new doctype.
    @param clonefrom: the ID of an existing document type from which the new document type may be cloned.
    @param user_msg: any message to be displayed to the user.
@return: a tuple containing page title and HTML body of page: (title, body)
"""
title = """Add New WebSubmit Document Type"""
alldoctypes = get_docid_docname_and_docid_alldoctypes()
body = websubmitadmin_templates.tmpl_display_doctypedetails_form(doctype=doctype,
doctypename=doctypename,
doctypedescr=doctypedescr,
clonefrom=clonefrom,
alldoctypes=alldoctypes,
user_msg=user_msg
)
return (title, body)
def _clone_categories_doctype(user_msg, fromdoctype, todoctype):
"""Clone the categories of one document type, to another document type.
@param user_msg: any message to be displayed to the user (this is a list)
@param fromdoctype: the doctype from which categories are to be cloned
@param todoctype: the doctype into which categories are to be cloned
    @return: integer value (0/1/2) - return 1 if the doctype's existing categories couldn't be deleted
     (cloning failed); return 2 if only some categories could be cloned (cloning partially successful);
     return 0 if all categories were cloned (cloning successful).
"""
error_code = clone_categories_fromdoctype_todoctype(fromdoctype=fromdoctype, todoctype=todoctype)
if error_code == 1:
## doctype had existing categories and they could not be deleted
## TODO : LOG ERRORS
user_msg.append("""Categories already existed for the document type "%s" but could not be deleted. Unable to clone""" \
""" categories of doctype "%s".""" % (todoctype, fromdoctype))
return 1 ## cloning failed
elif error_code == 2:
## could not clone all categories for new doctype
## TODO : LOG ERRORS
user_msg.append("""Unable to clone all categories from doctype "%s", for doctype "%s".""" % (fromdoctype, todoctype))
return 2 ## cloning at least partially successful
else:
return 0 ## cloning successful
def _clone_functions_foraction_doctype(user_msg, fromdoctype, todoctype, action):
"""Clone the functions of a given action of one document type, to the same action on another document type.
@param user_msg: any message to be displayed to the user (this is a list)
@param fromdoctype: the doctype from which functions are to be cloned
@param todoctype: the doctype into which functions are to be cloned
@param action: the action for which functions are to be cloned
    @return: an integer value (0/1/2). In the case that todoctype had existing functions for the given action and
     they could not be deleted, return 1, signalling that this is a serious problem; in the case that only some
     functions could be cloned, return 2; in the case that all functions were cloned, return 0.
"""
error_code = clone_functions_foraction_fromdoctype_todoctype(fromdoctype=fromdoctype, todoctype=todoctype, action=action)
if error_code == 1:
## doctype had existing functions for the given action and they could not be deleted
## TODO : LOG ERRORS
user_msg.append("""Functions already existed for the "%s" action of the document type "%s" but they could not be """ \
"""deleted. Unable to clone the functions of Document Type "%s" for action "%s".""" \
% (action, todoctype, fromdoctype, action))
## critical - return 1 to signal this
return 1
elif error_code == 2:
## could not clone all functions for given action for new doctype
## TODO : LOG ERRORS
user_msg.append("""Unable to clone all functions for the "%s" action from doctype "%s", for doctype "%s".""" \
% (action, fromdoctype, todoctype))
return 2 ## not critical
else:
return 0 ## total success
def _clone_functionparameters_foraction_fromdoctype_todoctype(user_msg, fromdoctype, todoctype, action):
"""Clone the parameters/values of a given action of one document type, to the same action on another document type.
@param user_msg: any message to be displayed to the user (this is a list)
@param fromdoctype: the doctype from which parameters are to be cloned
@param todoctype: the doctype into which parameters are to be cloned
@param action: the action for which parameters are to be cloned
    @return: 2 if it was not possible to clone all parameters/values; 0 if all parameters/values were cloned successfully.
"""
error_code = clone_functionparameters_foraction_fromdoctype_todoctype(fromdoctype=fromdoctype, \
todoctype=todoctype, action=action)
if error_code in (1, 2):
## something went wrong and it was not possible to clone all parameters/values of "action"/"fromdoctype" for "action"/"todoctype"
## TODO : LOG ERRORS
user_msg.append("""It was not possible to clone all parameter values from the action "%(act)s" of the document type""" \
""" "%(fromdt)s" for the action "%(act)s" of the document type "%(todt)s".""" \
% { 'act' : action, 'fromdt' : fromdoctype, 'todt' : todoctype }
)
return 2 ## to signal that addition wasn't 100% successful
else:
return 0 ## all parameters were cloned
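## Illustrative sketch (not part of the original module; the doctype IDs "OLDDT"/"NEWDT"
## and the action "SBI" below are hypothetical): the three cloning helpers above share a
## return-code convention - 0 means full success, 2 means partial success (warnings are
## appended to user_msg), and 1 (where used) means a critical failure.  A caller would
## typically combine them roughly as follows:
##
##   user_msg = []
##   rc = _clone_functions_foraction_doctype(user_msg=user_msg, fromdoctype="OLDDT",
##                                           todoctype="NEWDT", action="SBI")
##   if rc in (0, 2):
##       ## no critical error - safe to go on and clone the function parameters too:
##       _clone_functionparameters_foraction_fromdoctype_todoctype(user_msg=user_msg,
##                                                                 fromdoctype="OLDDT",
##                                                                 todoctype="NEWDT",
##                                                                 action="SBI")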
def _add_doctype(doctype, doctypename, doctypedescr, clonefrom):
    """Add a new document type to WebSubmit, optionally cloning its categories and
       submissions from an existing document type.
    @param doctype: the unique ID of the new document type
    @param doctypename: the name to be given to the new document type
    @param doctypedescr: the description to be given to the new document type
    @param clonefrom: the ID of an existing document type from which to clone, or "None"
    @return: a tuple containing 2 strings: (page title, page body)
    """
title = ""
body = ""
user_msg = []
commit_error = 0
if doctype == "":
user_msg.append("""The Document Type ID is mandatory and must be a string with no more than 10 alpha-numeric characters""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
(title, body) = _create_add_doctype_form(doctypename=doctypename, doctypedescr=doctypedescr, clonefrom=clonefrom, user_msg=user_msg)
return (title, body)
numrows_doctype = get_number_doctypes_docid(docid=doctype)
if numrows_doctype > 0:
## this document type already exists - do not add
## TODO : LOG ERROR
user_msg.append("""A document type identified by "%s" already seems to exist and there cannot be added. Choose another ID.""" \
% (doctype,))
(title, body) = _create_add_doctype_form(doctypename=doctypename, doctypedescr=doctypedescr, clonefrom=clonefrom, user_msg=user_msg)
else:
## proceed with addition
## add the document type details:
error_code = insert_doctype_details(doctype=doctype, doctypename=doctypename, doctypedescr=doctypedescr)
if error_code == 0:
## added successfully
if clonefrom not in ("", "None", None):
## document type should be cloned from "clonefrom"
## first, clone the categories from another doctype:
error_code = _clone_categories_doctype(user_msg=user_msg,
fromdoctype=clonefrom,
todoctype=doctype)
## get details of clonefrom's submissions
all_actnames_submissions_clonefrom = get_actname_all_submissions_doctype(doctype=clonefrom)
if len(all_actnames_submissions_clonefrom) > 0:
## begin cloning
for doc_submission_actname in all_actnames_submissions_clonefrom:
## clone submission details:
action_name = doc_submission_actname[0]
_clone_submission_fromdoctype_todoctype(user_msg=user_msg,
todoctype=doctype, action=action_name, clonefrom=clonefrom)
user_msg.append("""The "%s" document type has been added.""" % (doctype,))
title = """Available WebSubmit Document Types"""
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
else:
## could not add document type details - do no more
## TODO : LOG ERROR!
user_msg.append("""Unable to add details for document type "%s".""" % (doctype,))
(title, body) = _create_add_doctype_form(user_msg=user_msg)
return (title, body)
def perform_request_add_doctype(doctype=None, doctypename=None, doctypedescr=None, clonefrom=None, doctypedetailscommit=""):
body = ""
## wash args:
if doctype is not None:
try:
doctype = wash_single_urlarg(urlarg=doctype, argreqdtype=str, argdefault="", maxstrlen=10, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=doctype) == 0:
doctype = ""
except ValueError, e:
doctype = ""
else:
doctype = ""
if doctypename is not None:
try:
doctypename = wash_single_urlarg(urlarg=doctypename, argreqdtype=str, argdefault="")
except ValueError, e:
doctypename = ""
else:
doctypename = ""
if doctypedescr is not None:
try:
doctypedescr = wash_single_urlarg(urlarg=doctypedescr, argreqdtype=str, argdefault="")
except ValueError, e:
doctypedescr = ""
else:
doctypedescr = ""
if clonefrom is not None:
try:
clonefrom = wash_single_urlarg(urlarg=clonefrom, argreqdtype=str, argdefault="None")
except ValueError, e:
clonefrom = "None"
else:
clonefrom = "None"
if doctypedetailscommit not in ("", None):
(title, body) = _add_doctype(doctype=doctype,
doctypename=doctypename, doctypedescr=doctypedescr, clonefrom=clonefrom)
else:
(title, body) = _create_add_doctype_form()
return (title, body)
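## Illustrative sketch (hypothetical argument value): perform_request_add_doctype() above
## "washes" each incoming URL argument before use, falling back to a safe default whenever
## the value cannot be coerced to the required type:
##
##   try:
##       doctype = wash_single_urlarg(urlarg="TestDT01", argreqdtype=str, argdefault="",
##                                    maxstrlen=10, minstrlen=1)
##       if string_is_alphanumeric_including_underscore(txtstring=doctype) == 0:
##           doctype = ""
##   except ValueError, e:
##       doctype = ""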
def _delete_referee_doctype(doctype, categid, refereeid):
"""Delete a referee from a given category of a document type.
@param doctype: the document type from whose category the referee is to be removed
@param categid: the name/ID of the category from which the referee is to be removed
@param refereeid: the id of the referee to be removed from the given category
@return: a tuple containing 2 strings: (page title, page body)
"""
user_msg = []
role_name = """referee_%s_%s""" % (doctype, categid)
error_code = acc_delete_user_role(id_user=refereeid, name_role=role_name)
if error_code > 0:
        ## referee was deleted from category
        user_msg.append("""The referee was removed from category "%s" of the "%s" document type.""" % (categid, doctype))
def _create_list_referees_doctype(doctype):
    """Build a dictionary of the referees of each category of a given document type.
    @param doctype: the unique ID of the document type whose referees are to be listed
    @return: a dictionary keyed by referee role name, in which each value is a tuple of
     the form (category ID, category name, tuple of referee users)
    """
referees = {}
referees_details = {}
## get all Invenio roles:
all_roles = acc_get_all_roles()
for role in all_roles:
(roleid, rolename) = (role[0], role[1])
if re.match("^referee_%s_" % (doctype,), rolename):
## this is a "referee" role - get users of this role:
role_users = acc_get_role_users(roleid)
if role_users is not None and (type(role_users) in (tuple, list) and len(role_users) > 0):
## this role has users, record them in dictionary:
referees[rolename] = role_users
## for each "group" of referees:
for ref_role in referees.keys():
## get category ID for this referee-role:
try:
categid = re.match("^referee_%s_(.*)" % (doctype,), ref_role).group(1)
## from WebSubmit DB, get categ name for "categid":
if categid != "*":
categ_details = get_all_categories_sname_lname_for_doctype_categsname(doctype=doctype, categsname=categid)
if len(categ_details) > 0:
## if possible to receive details of this category, record them in a tuple in the format:
## ("categ name", (tuple of users details)):
referees_details[ref_role] = (categid, categ_details[0][1], referees[ref_role])
else:
## general referee entry:
referees_details[ref_role] = (categid, "General Referee(s)", referees[ref_role])
except AttributeError:
## there is no category for this role - it is broken, so pass it
pass
return referees_details
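## Illustrative sketch (hypothetical doctype "TEST" and category "PHYS"): referee roles
## follow the naming convention "referee_<doctype>_<categid>", and a category ID of "*"
## denotes the doctype's general referees.  The category ID is recovered from a role
## name as in _create_list_referees_doctype() above:
##
##   import re
##   categid = re.match("^referee_%s_(.*)" % ("TEST",), "referee_TEST_PHYS").group(1)
##   ## categid == "PHYS"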
def _create_edit_doctype_details_form(doctype, doctypename="", doctypedescr="", doctypedetailscommit="", user_msg=""):
if user_msg == "" or type(user_msg) not in (list, tuple, str, unicode):
user_msg = []
elif type(user_msg) in (str, unicode):
user_msg = [user_msg]
title = "Edit Document Type Details"
doctype_details = get_doctype_docname_descr_cd_md_fordoctype(doctype)
if len(doctype_details) == 1:
docname = doctype_details[0][1]
docdescr = doctype_details[0][2]
(cd, md) = (doctype_details[0][3], doctype_details[0][4])
if doctypedetailscommit != "":
## could not commit details
docname = doctypename
docdescr = doctypedescr
body = websubmitadmin_templates.tmpl_display_doctypedetails_form(doctype=doctype,
doctypename=docname,
doctypedescr=docdescr,
cd=cd,
md=md,
user_msg=user_msg,
perform_act="doctypeconfigure")
else:
## problem retrieving details of doctype:
user_msg.append("""Unable to retrieve details of doctype '%s' - cannot edit.""" % (doctype,),)
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
def _create_add_submission_choose_clonefrom_form(doctype, action, user_msg=""):
if user_msg == "" or type(user_msg) not in (list, tuple, str, unicode):
user_msg = []
elif type(user_msg) in (str, unicode):
user_msg = [user_msg]
if action in ("", None):
user_msg.append("""Unknown Submission""")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## does this doctype already have this action?
numrows_doctype_action = get_number_submissions_doctype_action(doctype=doctype, action=action)
if numrows_doctype_action < 1:
## action not present for this doctype - can be added
## get list of all doctypes implementing this action (for possible cloning purposes)
doctypes_implementing_action = get_doctypeid_doctypes_implementing_action(action=action)
## create form to display document types to clone from
title = "Add Submission '%s' to Document Type '%s'" % (action, doctype)
body = websubmitadmin_templates.tmpl_display_submission_clone_form(doctype=doctype,
action=action,
clonefrom_list=doctypes_implementing_action,
user_msg=user_msg
)
else:
        ## warn user that action already exists for doctype and cannot be added, then display all
## details of doctype again
user_msg.append("The Document Type '%s' already implements the Submission '%s' - cannot add it again" \
% (doctype, action))
## TODO : LOG WARNING
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _create_add_submission_form(doctype, action, displayed="", buttonorder="", statustext="",
level="", score="", stpage="", endtxt="", user_msg=""):
if user_msg == "" or type(user_msg) not in (list, tuple, str, unicode):
user_msg = []
elif type(user_msg) in (str, unicode):
user_msg = [user_msg]
if action in ("", None):
user_msg.append("""Unknown Submission""")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
title = "Add Submission '%s' to Document Type '%s'" % (action, doctype)
body = websubmitadmin_templates.tmpl_display_submissiondetails_form(doctype=doctype,
action=action,
displayed=displayed,
buttonorder=buttonorder,
statustext=statustext,
level=level,
score=score,
stpage=stpage,
endtxt=endtxt,
user_msg=user_msg,
saveaction="add"
)
return (title, body)
def _create_delete_submission_form(doctype, action):
user_msg = []
title = """Delete Submission "%s" from Document Type "%s" """ % (action, doctype)
numrows_doctypesubmission = get_number_submissions_doctype_action(doctype=doctype, action=action)
if numrows_doctypesubmission > 0:
## submission exists: create form to delete it:
body = websubmitadmin_templates.tmpl_display_delete_doctypesubmission_form(doctype=doctype, action=action)
else:
## submission doesn't seem to exist. Display details of doctype only:
user_msg.append("""The Submission "%s" doesn't seem to exist for the Document Type "%s" - unable to delete it""" % (action, doctype))
## TODO : LOG ERRORS
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _create_edit_submission_form(doctype, action, user_msg=""):
if user_msg == "" or type(user_msg) not in (list, tuple, str, unicode):
user_msg = []
elif type(user_msg) in (str, unicode):
user_msg = [user_msg]
submission_details = get_submissiondetails_doctype_action(doctype=doctype, action=action)
numrows_submission_details = len(submission_details)
if numrows_submission_details == 1:
## correctly retrieved details of submission - display:
submission_details = stringify_listvars(submission_details)
displayed = submission_details[0][3]
buttonorder = submission_details[0][7]
statustext = submission_details[0][8]
level = submission_details[0][9]
score = submission_details[0][10]
stpage = submission_details[0][11]
endtxt = submission_details[0][12]
cd = submission_details[0][5]
md = submission_details[0][6]
title = "Edit Details of '%s' Submission of '%s' Document Type" % (action, doctype)
body = websubmitadmin_templates.tmpl_display_submissiondetails_form(doctype=doctype,
action=action,
displayed=displayed,
buttonorder=buttonorder,
statustext=statustext,
level=level,
score=score,
stpage=stpage,
endtxt=endtxt,
cd=cd,
md=md,
user_msg=user_msg
)
else:
if numrows_submission_details > 1:
## multiple rows for this submission - this is a key violation
user_msg.append("Found multiple rows for the Submission '%s' of the Document Type '%s'" \
% (action, doctype))
else:
## submission does not exist
user_msg.append("The Submission '%s' of the Document Type '%s' doesn't seem to exist." \
% (action, doctype))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _create_edit_category_form(doctype, categid):
title = "Edit Category Description"
categ_details = get_all_categories_sname_lname_for_doctype_categsname(doctype=doctype, categsname=categid)
if len(categ_details) == 1:
        ## display details
        retrieved_categid = categ_details[0][0]
        retrieved_categdescr = categ_details[0][1]
body = websubmitadmin_templates.tmpl_display_edit_category_form(doctype=doctype,
categid=retrieved_categid,
categdescr=retrieved_categdescr
)
else:
## problem retrieving details of categ
user_msg = """Unable to retrieve details of category '%s'""" % (categid,)
## TODO : LOG ERRORS
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _create_configure_doctype_form(doctype, jumpcategout="", user_msg=""):
title = "Configure Document Type"
body = ""
if user_msg == "" or type(user_msg) not in (list, tuple, str, unicode):
user_msg = []
## get details of doctype:
doctype_details = get_doctype_docname_descr_cd_md_fordoctype(doctype)
docname = doctype_details[0][1]
docdescr = doctype_details[0][2]
(cd, md) = (doctype_details[0][3], doctype_details[0][4])
## get categories for doctype:
doctype_categs = get_all_category_details_for_doctype(doctype=doctype)
## get submissions for doctype:
doctype_submissions = get_submissiondetails_all_submissions_doctype(doctype=doctype)
## get list of actions that this doctype doesn't have:
unlinked_actions = get_actions_sname_lname_not_linked_to_doctype(doctype=doctype)
## get referees for doctype:
referees_dets = _create_list_referees_doctype(doctype=doctype)
body = websubmitadmin_templates.tmpl_configure_doctype_overview(doctype=doctype, doctypename=docname,
doctypedescr=docdescr, doctype_cdate=cd,
doctype_mdate=md, doctype_categories=doctype_categs,
jumpcategout=jumpcategout,
doctype_submissions=doctype_submissions,
doctype_referees=referees_dets,
add_actions_list=unlinked_actions,
user_msg=user_msg)
return (title, body)
def _clone_submission_fromdoctype_todoctype(user_msg, todoctype, action, clonefrom):
## first, delete the submission from todoctype (if it exists):
error_code = delete_submissiondetails_doctype(doctype=todoctype, action=action)
if error_code == 0:
## could be deleted - now clone it
error_code = insert_submission_details_clonefrom_submission(addtodoctype=todoctype, action=action, clonefromdoctype=clonefrom)
if error_code == 0:
## submission inserted
## now clone functions:
error_code = _clone_functions_foraction_doctype(user_msg=user_msg, \
fromdoctype=clonefrom, todoctype=todoctype, action=action)
if error_code in (0, 2):
## no serious error - clone parameters:
error_code = _clone_functionparameters_foraction_fromdoctype_todoctype(user_msg=user_msg,
fromdoctype=clonefrom,
todoctype=todoctype,
action=action)
## now clone pages/elements
error_code = clone_submissionfields_from_doctypesubmission_to_doctypesubmission(fromsub="%s%s" % (action, clonefrom),
tosub="%s%s" % (action, todoctype))
if error_code == 1:
                ## could not delete all existing submission fields and therefore could not clone submission fields at all
## TODO : LOG ERROR
user_msg.append("""Unable to delete existing submission fields for Submission "%s" of Document Type "%s" - """ \
"""cannot clone submission fields!""" % (action, todoctype))
elif error_code == 2:
## could not clone all fields
## TODO : LOG ERROR
user_msg.append("""Unable to clone all submission fields for submission "%s" on Document Type "%s" from Document""" \
""" Type "%s" """ % (action, todoctype, clonefrom))
else:
## could not insert submission details!
user_msg.append("""Unable to successfully insert details of submission "%s" into Document Type "%s" - cannot clone from "%s" """ \
% (action, todoctype, clonefrom))
## TODO : LOG ERROR
else:
## could not delete details of existing submission (action) from 'todoctype' - cannot clone it as new
user_msg.append("""Unable to delete details of existing Submission "%s" from Document Type "%s" - cannot clone it from "%s" """ \
% (action, todoctype, clonefrom))
## TODO : LOG ERROR
def _add_submission_to_doctype_clone(doctype, action, clonefrom):
user_msg = []
if action in ("", None) or clonefrom in ("", None):
user_msg.append("Unknown action or document type to clone from - cannot add submission")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## does action exist?
numrows_action = get_number_actions_with_actid(actid=action)
if numrows_action > 0:
## The action exists, but is it already implemented as a submission by doctype?
numrows_submission_doctype = get_number_submissions_doctype_action(doctype=doctype, action=action)
if numrows_submission_doctype > 0:
## this submission already exists for this document type - unable to add it again
user_msg.append("""The Submission "%s" already exists for Document Type "%s" - cannot add it again""" \
%(action, doctype))
## TODO : LOG ERROR
else:
## clone the submission
_clone_submission_fromdoctype_todoctype(user_msg=user_msg,
todoctype=doctype, action=action, clonefrom=clonefrom)
user_msg.append("""Cloning of Submission "%s" from Document Type "%s" has been carried out. You should not""" \
""" ignore any warnings that you may have seen.""" % (action, clonefrom))
## TODO : LOG WARNING OF NEW SUBMISSION CREATION BY CLONING
else:
## this action doesn't exist! cannot add a submission based upon it!
user_msg.append("The Action '%s' does not seem to exist in WebSubmit. Cannot add it as a Submission!" \
% (action))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _add_submission_to_doctype(doctype, action, displayed, buttonorder,
statustext, level, score, stpage, endtxt):
user_msg = []
## does "action" exist?
numrows_action = get_number_actions_with_actid(actid=action)
if numrows_action < 1:
## this action does not exist! Can't add a submission based upon it!
user_msg.append("'%s' does not exist in WebSubmit as an Action! Unable to add this submission."\
% (action,))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## Insert the new submission
error_code = insert_submission_details(doctype=doctype, action=action, displayed=displayed,
nbpg="0", buttonorder=buttonorder, statustext=statustext,
level=level, score=score, stpage=stpage, endtext=endtxt)
if error_code == 0:
## successful insert
user_msg.append("""'%s' Submission Successfully Added to Document Type '%s'""" % (action, doctype))
else:
## could not insert submission into doctype
user_msg.append("""Unable to Add '%s' Submission to '%s' Document Type""" % (action, doctype))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _delete_submission_from_doctype(doctype, action):
"""Delete a submission (action) from the document type identified by "doctype".
@param doctype: the unique ID of the document type from which the submission is to be deleted
    @param action: the action ID of the submission to be deleted from doctype
@return: a tuple containing 2 strings: (page title, page body)
"""
user_msg = []
if action in ("", None):
user_msg.append("Unknown action - cannot delete submission")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## delete fields for this submission:
error_code = delete_all_submissionfields_submission("""%s%s""" % (action, doctype) )
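    ## (Submission fields are keyed on the concatenation of the action ID and the doctype
    ##  ID; with an action "SBI" and a doctype "TEST" - illustrative values only - the
    ##  resulting subname would be "SBITEST".)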
if error_code != 0:
## could not successfully delete all fields - report error
user_msg.append("""When deleting Submission "%s" from Document Type "%s", it wasn't possible to delete all Submission Fields""" \
% (action, doctype))
## TODO : LOG ERROR
## delete parameters for this submission:
error_code = delete_functionparameters_doctype_submission(doctype=doctype, action=action)
if error_code != 0:
## could not successfully delete all functions - report error
user_msg.append("""When deleting Submission "%s" from Document Type "%s", it wasn't possible to delete all Function Parameters""" \
% (action, doctype))
## TODO : LOG ERROR
## delete functions for this submission:
error_code = delete_all_functions_foraction_doctype(doctype=doctype, action=action)
if error_code != 0:
## could not successfully delete all functions - report error
user_msg.append("""When deleting Submission "%s" from Document Type "%s", it wasn't possible to delete all Functions""" \
% (action, doctype))
## TODO : LOG ERROR
## delete this submission itself:
error_code = delete_submissiondetails_doctype(doctype=doctype, action=action)
if error_code == 0:
## successful delete
user_msg.append("""The "%s" Submission has been deleted from the "%s" Document Type""" % (action, doctype))
else:
        ## could not delete submission
user_msg.append("""Unable to successfully delete the "%s" Submission from the "%s" Document Type""" % (action, doctype))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _edit_submission_for_doctype(doctype, action, displayed, buttonorder,
statustext, level, score, stpage, endtxt):
"""Update the details of a given submission belonging to the document type identified by "doctype".
@param doctype: the unique ID of the document type for which the submission is to be updated
@param action: action name of the submission to be updated
@param displayed: displayed on main submission page? (Y/N)
@param buttonorder: button order
@param statustext: statustext
@param level: level
@param score: score
@param stpage: stpage
@param endtxt: endtxt
@return: a tuple of 2 strings: (page title, page body)
"""
user_msg = []
commit_error = 0
if action in ("", None):
user_msg.append("Unknown Action - cannot update submission")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
error_code = update_submissiondetails_doctype_action(doctype=doctype, action=action, displayed=displayed,
buttonorder=buttonorder, statustext=statustext, level=level,
score=score, stpage=stpage, endtxt=endtxt)
if error_code == 0:
## successful update
user_msg.append("'%s' Submission of '%s' Document Type updated." % (action, doctype) )
else:
## could not update
user_msg.append("Unable to update '%s' Submission of '%s' Document Type." % (action, doctype) )
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _edit_doctype_details(doctype, doctypename, doctypedescr):
"""Update the details (name and/or description) of a document type (identified by doctype.)
@param doctype: the unique ID of the document type to be updated
@param doctypename: the new/updated name for the doctype
@param doctypedescr: the new/updated description for the doctype
@return: a tuple containing 2 strings: (page title, page body)
"""
user_msg = []
error_code = update_doctype_details(doctype=doctype, doctypename=doctypename, doctypedescr=doctypedescr)
if error_code == 0:
## successful update
user_msg.append("""'%s' Document Type Updated""" % (doctype,))
else:
## could not update
user_msg.append("""Unable to Update Doctype '%s'""" % (doctype,))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _edit_category_for_doctype(doctype, categid, categdescr):
"""Edit the description of a given category (identified by categid), belonging to
the document type identified by doctype.
@param doctype: the unique ID of the document type for which the category is to be modified
@param categid: the unique category ID of the category to be modified
@param categdescr: the new description for the category
@return: at tuple containing 2 strings: (page title, page body)
"""
user_msg = []
if categid in ("", None) or categdescr in ("", None):
## cannot edit unknown category!
user_msg.append("Category ID and Description are both mandatory")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
error_code = update_category_description_doctype_categ(doctype=doctype, categ=categid, categdescr=categdescr)
if error_code == 0:
## successful update
user_msg.append("""'%s' Category Description Successfully Updated""" % (categid,))
else:
## could not update category description
user_msg.append("""Unable to Description for Category '%s'""" % (categid,))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _add_category_to_doctype(doctype, categid, categdescr):
"""Add a new category to the document type identified by "doctype".
Category ID, and category description are both mandatory.
@param doctype: the unique ID of the document type to which the category is to be added
@param categid: the unique category ID of the category to be added to doctype
@param categdescr: the description of the category to be added
@return: at tuple containing 2 strings: (page title, page body)
"""
user_msg = []
if categid in ("", None) or categdescr in ("", None):
## cannot add unknown category!
user_msg.append("Category ID and Description are both mandatory")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
error_code = insert_category_into_doctype(doctype=doctype, categ=categid, categdescr=categdescr)
if error_code == 0:
## successful insert
user_msg.append("""'%s' Category Successfully Added""" % (categid,))
else:
## could not insert category into doctype
user_msg.append("""Unable to Add '%s' Category""" % (categid,))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _delete_category_from_doctype(doctype, categid):
"""Delete a category (categid) from the document type identified by "doctype".
@param doctype: the unique ID of the document type from which the category is to be deleted
@param categid: the unique category ID of the category to be deleted from doctype
@return: a tuple containing 2 strings: (page title, page body)
"""
user_msg = []
if categid in ("", None):
## cannot delete unknown category!
user_msg.append("Category ID is mandatory")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
error_code = delete_category_doctype(doctype=doctype, categ=categid)
if error_code == 0:
## successful delete
user_msg.append("""'%s' Category Successfully Deleted""" % (categid,))
else:
## could not delete category
user_msg.append("""Unable to Delete '%s' Category""" % (categid,))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _jump_category_to_new_score(doctype, jumpcategout, jumpcategin):
user_msg = []
if jumpcategout in ("", None) or jumpcategin in ("", None):
## need both jumpcategout and jumpcategin to move a category:
user_msg.append("Unable to move category - unknown source and/or destination score(s)")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## FIXME TODO:
error_code = move_category_to_new_score(doctype, jumpcategout, jumpcategin)
if error_code == 0:
## successful jump of category
user_msg.append("""Successfully Moved [%s] Category""" % (jumpcategout,))
else:
        ## could not move category
user_msg.append("""Unable to Move [%s] Category""" % (jumpcategout,))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _move_category(doctype, categid, movecategup=""):
user_msg = []
if categid in ("", None):
## cannot move unknown category!
user_msg.append("Cannot move an unknown category - category ID is mandatory")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
if movecategup not in ("", None):
## move the category up in score:
error_code = move_category_by_one_place_in_score(doctype=doctype,
categsname=categid,
direction="up")
else:
## move the category down in score:
error_code = move_category_by_one_place_in_score(doctype=doctype,
categsname=categid,
direction="down")
if error_code == 0:
## successful move of category
user_msg.append("""[%s] Category Successfully Moved""" % (categid,))
else:
        ## could not move category
user_msg.append("""Unable to Move [%s] Category""" % (categid,))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def perform_request_configure_doctype(doctype,
doctypename=None,
doctypedescr=None,
doctypedetailsedit="",
doctypedetailscommit="",
doctypecategoryadd="",
doctypecategoryedit="",
doctypecategoryeditcommit="",
doctypecategorydelete="",
doctypesubmissionadd="",
doctypesubmissiondelete="",
doctypesubmissiondeleteconfirm="",
doctypesubmissionedit="",
doctypesubmissionaddclonechosen="",
doctypesubmissionadddetailscommit="",
doctypesubmissioneditdetailscommit="",
categid=None,
categdescr=None,
movecategup=None,
movecategdown=None,
jumpcategout=None,
jumpcategin=None,
action=None,
doctype_cloneactionfrom=None,
displayed=None,
buttonorder=None,
statustext=None,
level=None,
score=None,
stpage=None,
endtxt=None
):
user_msg = []
body = ""
if doctype is not None:
try:
doctype = wash_single_urlarg(urlarg=doctype, argreqdtype=str, argdefault="", maxstrlen=10, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=doctype) == 0:
doctype = ""
except ValueError, e:
doctype = ""
else:
doctype = ""
if action is not None:
try:
action = wash_single_urlarg(urlarg=action, argreqdtype=str, argdefault="", maxstrlen=3, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=action) == 0:
action = ""
except ValueError, e:
action = ""
else:
action = ""
if doctypename is not None:
try:
doctypename = wash_single_urlarg(urlarg=doctypename, argreqdtype=str, argdefault="")
except ValueError, e:
doctypename = ""
else:
doctypename = ""
if doctypedescr is not None:
try:
doctypedescr = wash_single_urlarg(urlarg=doctypedescr, argreqdtype=str, argdefault="")
except ValueError, e:
doctypedescr = ""
else:
doctypedescr = ""
if categid is not None:
try:
categid = wash_single_urlarg(urlarg=categid, argreqdtype=str, argdefault="")
except ValueError, e:
categid = ""
else:
categid = ""
if categdescr is not None:
try:
categdescr = wash_single_urlarg(urlarg=categdescr, argreqdtype=str, argdefault="")
except ValueError, e:
categdescr = ""
else:
categdescr = ""
if doctype_cloneactionfrom is not None:
try:
doctype_cloneactionfrom = wash_single_urlarg(urlarg=doctype_cloneactionfrom, argreqdtype=str, argdefault="", maxstrlen=10, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=doctype_cloneactionfrom) == 0:
doctype_cloneactionfrom = ""
except ValueError, e:
doctype_cloneactionfrom = ""
else:
doctype_cloneactionfrom = ""
if displayed is not None:
try:
displayed = wash_single_urlarg(urlarg=displayed, argreqdtype=str, argdefault="Y", maxstrlen=1, minstrlen=1)
except ValueError, e:
displayed = "Y"
else:
displayed = "Y"
if buttonorder is not None:
try:
buttonorder = wash_single_urlarg(urlarg=buttonorder, argreqdtype=int, argdefault="")
except ValueError, e:
buttonorder = ""
else:
buttonorder = ""
if level is not None:
try:
level = wash_single_urlarg(urlarg=level, argreqdtype=str, argdefault="", maxstrlen=1, minstrlen=1)
except ValueError, e:
level = ""
else:
level = ""
if score is not None:
try:
score = wash_single_urlarg(urlarg=score, argreqdtype=int, argdefault="")
except ValueError, e:
score = ""
else:
score = ""
if stpage is not None:
try:
stpage = wash_single_urlarg(urlarg=stpage, argreqdtype=int, argdefault="")
except ValueError, e:
stpage = ""
else:
stpage = ""
if statustext is not None:
try:
statustext = wash_single_urlarg(urlarg=statustext, argreqdtype=str, argdefault="")
except ValueError, e:
statustext = ""
else:
statustext = ""
if endtxt is not None:
try:
endtxt = wash_single_urlarg(urlarg=endtxt, argreqdtype=str, argdefault="")
except ValueError, e:
endtxt = ""
else:
endtxt = ""
## ensure that there is only one doctype for this doctype ID - simply display all doctypes with warning if not
numrows_doctype = get_number_doctypes_docid(docid=doctype)
if numrows_doctype > 1:
## there are multiple doctypes with this doctype ID:
## TODO : LOG ERROR
user_msg.append("""Multiple document types identified by "%s" exist - cannot configure at this time.""" \
% (doctype,))
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
elif numrows_doctype == 0:
## this doctype does not seem to exist:
user_msg.append("""The document type identified by "%s" doesn't exist - cannot configure at this time.""" \
% (doctype,))
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
## since doctype ID is OK, process doctype configuration request:
if doctypedetailsedit not in ("", None):
(title, body) = _create_edit_doctype_details_form(doctype=doctype)
elif doctypedetailscommit not in ("", None):
## commit updated document type details
(title, body) = _edit_doctype_details(doctype=doctype,
doctypename=doctypename, doctypedescr=doctypedescr)
elif doctypecategoryadd not in ("", None):
## add new category:
(title, body) = _add_category_to_doctype(doctype=doctype, categid=categid, categdescr=categdescr)
elif doctypecategoryedit not in ("", None):
## create form to update category description:
(title, body) = _create_edit_category_form(doctype=doctype,
categid=categid)
elif doctypecategoryeditcommit not in ("", None):
## commit updated category description:
(title, body) = _edit_category_for_doctype(doctype=doctype, categid=categid, categdescr=categdescr)
elif doctypecategorydelete not in ("", None):
## delete a category
(title, body) = _delete_category_from_doctype(doctype=doctype, categid=categid)
elif movecategup not in ("", None) or movecategdown not in ("", None):
## move a category up or down in score:
(title, body) = _move_category(doctype=doctype, categid=categid,
movecategup=movecategup)
elif jumpcategout not in ("", None) and jumpcategin not in ("", None):
## jump a category from one score to another:
(title, body) = _jump_category_to_new_score(doctype=doctype, jumpcategout=jumpcategout,
jumpcategin=jumpcategin)
elif doctypesubmissionadd not in ("", None):
## form displaying option of adding doctype:
(title, body) = _create_add_submission_choose_clonefrom_form(doctype=doctype, action=action)
elif doctypesubmissionaddclonechosen not in ("", None):
        ## add a submission. If a document type to clone from was chosen, process the clone;
        ## otherwise, present a form into which the details of the new submission should be entered
if doctype_cloneactionfrom in ("", None, "None"):
## no clone - present form into which details of new submission should be entered
(title, body) = _create_add_submission_form(doctype=doctype, action=action)
else:
## new submission should be cloned from doctype_cloneactionfrom
(title, body) = _add_submission_to_doctype_clone(doctype=doctype, action=action, clonefrom=doctype_cloneactionfrom)
elif doctypesubmissiondelete not in ("", None):
## create form to prompt for confirmation of deletion of a submission:
(title, body) = _create_delete_submission_form(doctype=doctype, action=action)
elif doctypesubmissiondeleteconfirm not in ("", None):
## process the deletion of a submission from the doctype concerned:
(title, body) = _delete_submission_from_doctype(doctype=doctype, action=action)
elif doctypesubmissionedit not in ("", None):
## create form to update details of a submission
(title, body) = _create_edit_submission_form(doctype=doctype, action=action)
elif doctypesubmissioneditdetailscommit not in ("", None):
## commit updated submission details:
(title, body) = _edit_submission_for_doctype(doctype=doctype, action=action,
displayed=displayed, buttonorder=buttonorder, statustext=statustext,
level=level, score=score, stpage=stpage, endtxt=endtxt)
elif doctypesubmissionadddetailscommit not in ("", None):
## commit new submission to doctype (not by cloning)
(title, body) = _add_submission_to_doctype(doctype=doctype, action=action,
displayed=displayed, buttonorder=buttonorder, statustext=statustext,
level=level, score=score, stpage=stpage, endtxt=endtxt)
else:
## default - display root of edit doctype
(title, body) = _create_configure_doctype_form(doctype=doctype, jumpcategout=jumpcategout)
return (title, body)
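## Illustrative sketch (hypothetical values): perform_request_configure_doctype() above is
## a dispatcher driven by whichever of its flag arguments is non-empty.  A request to add
## a new category to a document type, for example, would arrive roughly as:
##
##   (title, body) = perform_request_configure_doctype(doctype="TEST",
##                                                     doctypecategoryadd="1",
##                                                     categid="PHYS",
##                                                     categdescr="Physics Articles")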
def _create_configure_doctype_submission_functions_form(doctype,
action,
movefromfunctionname="",
movefromfunctionstep="",
movefromfunctionscore="",
user_msg=""):
title = """Functions of the "%s" Submission of the "%s" Document Type:""" % (action, doctype)
submission_functions = get_functionname_step_score_allfunctions_doctypesubmission(doctype=doctype, action=action)
body = websubmitadmin_templates.tmpl_configuredoctype_display_submissionfunctions(doctype=doctype,
action=action,
movefromfunctionname=movefromfunctionname,
movefromfunctionstep=movefromfunctionstep,
movefromfunctionscore=movefromfunctionscore,
submissionfunctions=submission_functions,
user_msg=user_msg)
return (title, body)
def _create_configure_doctype_submission_functions_add_function_form(doctype, action, addfunctionname="",
addfunctionstep="", addfunctionscore="", user_msg=""):
"""Create a form that allows a user to add a function a submission.
@param doctype: (string) the unique ID of a document type
@param action: (string) the unique ID of an action
@param addfunctionname: (string) the name of the function to be added to the submission (passed in case of page refresh)
@param addfunctionstep: (integer) the step of the submission into which the function is to be added (passed in case of
page refresh)
@param addfunctionscore: (integer) the score at at which the function is to be added (passed in case of page refresh)
@param user_msg: (string or list of strings) any message(s) to be displayed to the user
@return: (tuple) containing 2 strings - (page-title, HTML page-body)
"""
title = """Add a function to the [%s] submission of the [%s] document type""" % (action, doctype)
submission_functions = get_functionname_step_score_allfunctions_doctypesubmission(doctype=doctype, action=action)
## get names of all WebSubmit functions:
all_websubmit_functions = get_names_of_all_functions()
## put names into a list of single-element tuples, so that template can make HTML select list with them:
all_websubmit_functions = map(lambda x: (str(x),), all_websubmit_functions)
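    ## (Illustrative example with hypothetical function names:
    ##    ["Move_to_Done", "Mail_Submitter"]  ->  [("Move_to_Done",), ("Mail_Submitter",)] )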
## create page body:
body = websubmitadmin_templates.tmpl_configuredoctype_add_submissionfunction(doctype=doctype,
action=action,
cursubmissionfunctions=submission_functions,
allWSfunctions=all_websubmit_functions,
addfunctionname=addfunctionname,
addfunctionstep=addfunctionstep,
addfunctionscore=addfunctionscore,
user_msg=user_msg)
return (title, body)
def _create_configure_doctype_submission_functions_list_parameters_form(doctype,
action,
functionname,
user_msg=""):
title = """Parameters of the %s function, as used in the %s document type"""\
% (functionname, doctype)
funcparams = get_function_parameters(function=functionname)
if len(funcparams) > 0:
## get the values
paramslist = map(lambda x: str(x[0]), funcparams)
params = get_function_parameter_vals_doctype(doctype=doctype, paramlist=paramslist)
else:
params = ()
## params = get_parameters_name_and_value_for_function_of_doctype(doctype=doctype, function=functionname)
body = websubmitadmin_templates.tmpl_configuredoctype_list_functionparameters(doctype=doctype,
action=action,
function=functionname,
params=params,
user_msg=user_msg)
return (title, body)
def _update_submission_function_parameter_file(doctype, action, functionname,
paramname, paramfilecontent):
user_msg = []
## get the filename:
paramval_res = get_value_of_parameter_for_doctype(doctype=doctype, parameter=paramname)
if paramval_res is None:
## this parameter doesn't exist for this doctype!
user_msg.append("The parameter [%s] doesn't exist for the document type [%s]!" % (paramname, doctype))
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
functionname=functionname,
user_msg=user_msg)
return (title, body)
paramval = str(paramval_res)
filename = basename(paramval)
if filename == "":
## invalid filename
user_msg.append("[%s] is an invalid filename - cannot save details" % (paramval,))
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
functionname=functionname,
user_msg=user_msg)
return (title, body)
## save file:
try:
save_update_to_file(filepath="%s/%s" % (CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR, filename), filecontent=paramfilecontent)
except InvenioWebSubmitAdminWarningIOError, e:
## could not correctly update the file!
user_msg.append(str(e))
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
functionname=functionname,
user_msg=user_msg)
return (title, body)
## redisplay form
user_msg.append("""[%s] file updated""" % (filename,))
(title, body) = _create_configure_doctype_submission_functions_edit_parameter_file_form(doctype=doctype,
action=action,
functionname=functionname,
paramname=paramname,
user_msg=user_msg)
return (title, body)
def _create_configure_doctype_submission_functions_edit_parameter_file_form(doctype,
action,
functionname,
paramname,
user_msg=""):
if type(user_msg) is not list:
user_msg = []
paramval_res = get_value_of_parameter_for_doctype(doctype=doctype, parameter=paramname)
if paramval_res is None:
## this parameter doesn't exist for this doctype!
user_msg.append("The parameter [%s] doesn't exist for the document type [%s]!" % (paramname, doctype))
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
functionname=functionname)
return (title, body)
paramval = str(paramval_res)
title = "Edit the [%s] file for the [%s] document type" % (paramval, doctype)
## get basename of file:
filecontent = ""
filename = basename(paramval)
if filename == "":
## invalid filename
user_msg.append("[%s] is an invalid filename" % (paramval,))
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
functionname=functionname,
user_msg=user_msg)
return (title, body)
## try to read file contents:
if access("%s/%s" % (CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR, filename), F_OK):
## file exists
if access("%s/%s" % (CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR, filename), R_OK) and \
isfile("%s/%s" % (CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR, filename)):
## file is a regular file and is readable - get contents
filecontent = open("%s/%s" % (CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR, filename), "r").read()
else:
if not isfile("%s/%s" % (CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR, filename)):
## file is not a regular file
user_msg.append("The parameter file [%s] is not regular file - unable to read" % (filename,))
else:
## file is not readable - error message
user_msg.append("The parameter file [%s] could not be read - check permissions" % (filename,))
## display page listing the parameters of this function:
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
functionname=functionname,
user_msg=user_msg)
return (title, body)
else:
## file does not exist:
user_msg.append("The parameter file [%s] does not exist - it will be created" % (filename,))
## make page body:
body = websubmitadmin_templates.tmpl_configuredoctype_edit_functionparameter_file(doctype=doctype,
action=action,
function=functionname,
paramname=paramname,
paramfilename=filename,
paramfilecontent=filecontent,
user_msg=user_msg)
return (title, body)
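## Illustrative sketch (hypothetical parameter value): the parameter's value is treated as
## a path whose basename names a file under CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR:
##
##   from os.path import basename
##   paramval = "some/path/DEMOART.tpl"                        ## hypothetical value
##   filename = basename(paramval)                             ## -> "DEMOART.tpl"
##   filepath = "%s/%s" % (CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR, filename)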
def _create_configure_doctype_submission_functions_edit_parameter_value_form(doctype,
action,
functionname,
paramname,
paramval="",
user_msg=""):
title = """Edit the value of the [%s] Parameter""" % (paramname,)
## get the parameter's value from the DB:
paramval_res = get_value_of_parameter_for_doctype(doctype=doctype, parameter=paramname)
if paramval_res is None:
## this parameter doesn't exist for this doctype!
        (title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
                                                                                             action=action,
                                                                                             functionname=functionname)
        return (title, body)
if paramval == "":
## use whatever retrieved paramval_res contains:
paramval = str(paramval_res)
body = websubmitadmin_templates.tmpl_configuredoctype_edit_functionparameter_value(doctype=doctype,
action=action,
function=functionname,
paramname=paramname,
paramval=paramval)
return (title, body)
def _update_submissionfunction_parameter_value(doctype, action, functionname, paramname, paramval):
user_msg = []
try:
update_value_of_function_parameter_for_doctype(doctype=doctype, paramname=paramname, paramval=paramval)
user_msg.append("""The value of the parameter [%s] was updated for document type [%s]""" % (paramname, doctype))
except InvenioWebSubmitAdminWarningTooManyRows, e:
## multiple rows found for param - update not carried out
user_msg.append(str(e))
except InvenioWebSubmitAdminWarningNoRowsFound, e:
## no rows found - parameter does not exist for doctype, therefore no update
user_msg.append(str(e))
(title, body) = \
_create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype, action=action,
functionname=functionname, user_msg=user_msg)
return (title, body)
def perform_request_configure_doctype_submissionfunctions_parameters(doctype,
action,
functionname,
functionstep,
functionscore,
paramname="",
paramval="",
editfunctionparametervalue="",
editfunctionparametervaluecommit="",
editfunctionparameterfile="",
editfunctionparameterfilecommit="",
paramfilename="",
paramfilecontent=""):
body = ""
user_msg = []
## ensure that there is only one doctype for this doctype ID - simply display all doctypes with warning if not
if doctype in ("", None):
user_msg.append("""Unknown Document Type""")
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
numrows_doctype = get_number_doctypes_docid(docid=doctype)
if numrows_doctype > 1:
## there are multiple doctypes with this doctype ID:
## TODO : LOG ERROR
user_msg.append("""Multiple document types identified by "%s" exist - cannot configure at this time.""" \
% (doctype,))
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
elif numrows_doctype == 0:
## this doctype does not seem to exist:
user_msg.append("""The document type identified by "%s" doesn't exist - cannot configure at this time.""" \
% (doctype,))
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
## ensure that this submission exists for this doctype:
numrows_submission = get_number_submissions_doctype_action(doctype=doctype, action=action)
if numrows_submission > 1:
## there are multiple submissions for this doctype/action ID:
## TODO : LOG ERROR
user_msg.append("""The Submission "%s" seems to exist multiple times for the Document Type "%s" - cannot configure at this time.""" \
% (action, doctype))
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
elif numrows_submission == 0:
## this submission does not seem to exist for this doctype:
user_msg.append("""The Submission "%s" doesn't exist for the "%s" Document Type - cannot configure at this time.""" \
% (action, doctype))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
if editfunctionparametervaluecommit not in ("", None):
## commit an update to a function parameter:
(title, body) = _update_submissionfunction_parameter_value(doctype=doctype, action=action, functionname=functionname,
paramname=paramname, paramval=paramval)
elif editfunctionparametervalue not in ("", None):
## display a form for editing the value of a parameter:
(title, body) = _create_configure_doctype_submission_functions_edit_parameter_value_form(doctype=doctype,
action=action,
functionname=functionname,
paramname=paramname,
paramval=paramval)
elif editfunctionparameterfile not in ("", None):
## display a form for editing the contents of a file, named by the parameter's value:
(title, body) = _create_configure_doctype_submission_functions_edit_parameter_file_form(doctype=doctype,
action=action,
functionname=functionname,
paramname=paramname)
elif editfunctionparameterfilecommit not in ("", None):
(title, body) = _update_submission_function_parameter_file(doctype=doctype, action=action, functionname=functionname,
paramname=paramname, paramfilecontent=paramfilecontent)
else:
## default - display list of parameters for function:
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
functionname=functionname)
return (title, body)
def perform_request_configure_doctype_submissionfunctions(doctype,
action,
moveupfunctionname="",
moveupfunctionstep="",
moveupfunctionscore="",
movedownfunctionname="",
movedownfunctionstep="",
movedownfunctionscore="",
movefromfunctionname="",
movefromfunctionstep="",
movefromfunctionscore="",
movetofunctionname="",
movetofunctionstep="",
movetofunctionscore="",
deletefunctionname="",
deletefunctionstep="",
deletefunctionscore="",
configuresubmissionaddfunction="",
configuresubmissionaddfunctioncommit="",
addfunctionname="",
addfunctionstep="",
addfunctionscore=""):
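    ## Process a request relating to the functions of a submission: move a function up or
    ## down by one position, move it to an arbitrary step/score, delete it from the
    ## submission, or add a new function to the submission.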
body = ""
user_msg = []
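    ## wash the step/score arguments: each must be an integer, otherwise it is blanked: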
if addfunctionstep != "":
try:
addfunctionstep = str(wash_single_urlarg(urlarg=addfunctionstep, argreqdtype=int, argdefault=""))
except ValueError, e:
addfunctionstep = ""
if addfunctionscore != "":
try:
addfunctionscore = str(wash_single_urlarg(urlarg=addfunctionscore, argreqdtype=int, argdefault=""))
except ValueError, e:
addfunctionscore = ""
if deletefunctionstep != "":
try:
deletefunctionstep = str(wash_single_urlarg(urlarg=deletefunctionstep, argreqdtype=int, argdefault=""))
except ValueError, e:
deletefunctionstep = ""
if deletefunctionscore != "":
try:
deletefunctionscore = str(wash_single_urlarg(urlarg=deletefunctionscore, argreqdtype=int, argdefault=""))
except ValueError, e:
deletefunctionscore = ""
if movetofunctionstep != "":
try:
movetofunctionstep = str(wash_single_urlarg(urlarg=movetofunctionstep, argreqdtype=int, argdefault=""))
except ValueError, e:
movetofunctionstep = ""
if movetofunctionscore != "":
try:
movetofunctionscore = str(wash_single_urlarg(urlarg=movetofunctionscore, argreqdtype=int, argdefault=""))
except ValueError, e:
movetofunctionscore = ""
if moveupfunctionstep != "":
try:
moveupfunctionstep = str(wash_single_urlarg(urlarg=moveupfunctionstep, argreqdtype=int, argdefault=""))
except ValueError, e:
moveupfunctionstep = ""
if moveupfunctionscore != "":
try:
moveupfunctionscore = str(wash_single_urlarg(urlarg=moveupfunctionscore, argreqdtype=int, argdefault=""))
except ValueError, e:
moveupfunctionscore = ""
if movedownfunctionstep != "":
try:
movedownfunctionstep = str(wash_single_urlarg(urlarg=movedownfunctionstep, argreqdtype=int, argdefault=""))
except ValueError, e:
movedownfunctionstep = ""
if movedownfunctionscore != "":
try:
movedownfunctionscore = str(wash_single_urlarg(urlarg=movedownfunctionscore, argreqdtype=int, argdefault=""))
except ValueError, e:
movedownfunctionscore = ""
## ensure that there is only one doctype for this doctype ID - simply display all doctypes with warning if not
if doctype in ("", None):
user_msg.append("""Unknown Document Type""")
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
numrows_doctype = get_number_doctypes_docid(docid=doctype)
if numrows_doctype > 1:
## there are multiple doctypes with this doctype ID:
## TODO : LOG ERROR
user_msg.append("""Multiple document types identified by "%s" exist - cannot configure at this time.""" \
% (doctype,))
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
elif numrows_doctype == 0:
## this doctype does not seem to exist:
user_msg.append("""The document type identified by "%s" doesn't exist - cannot configure at this time.""" \
% (doctype,))
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
## ensure that this submission exists for this doctype:
numrows_submission = get_number_submissions_doctype_action(doctype=doctype, action=action)
if numrows_submission > 1:
## there are multiple submissions for this doctype/action ID:
## TODO : LOG ERROR
user_msg.append("""The Submission "%s" seems to exist multiple times for the Document Type "%s" - cannot configure at this time.""" \
% (action, doctype))
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
elif numrows_submission == 0:
## this submission does not seem to exist for this doctype:
user_msg.append("""The Submission "%s" doesn't exist for the "%s" Document Type - cannot configure at this time.""" \
% (action, doctype))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## submission valid
if movefromfunctionname != "" and movefromfunctionstep != "" and movefromfunctionscore != "" and \
movetofunctionname != "" and movetofunctionstep != "" and movetofunctionscore != "":
## process moving the function by jumping it to another position
try:
move_submission_function_from_one_position_to_another_position(doctype=doctype, action=action,
movefuncname=movefromfunctionname,
movefuncfromstep=movefromfunctionstep,
movefuncfromscore=movefromfunctionscore,
movefunctostep=movetofunctionstep,
movefunctoscore=movetofunctionscore)
user_msg.append("""The function [%s] at step [%s], score [%s] was successfully moved."""\
% (movefromfunctionname, movefromfunctionstep, movefromfunctionscore))
except Exception, e:
## there was a problem
user_msg.append(str(e))
(title, body) = _create_configure_doctype_submission_functions_form(doctype=doctype,
action=action,
user_msg=user_msg)
elif moveupfunctionname != "" and moveupfunctionstep != "" and moveupfunctionscore != "":
## process moving the function up one position
error_code = move_position_submissionfunction_up(doctype=doctype,
action=action,
function=moveupfunctionname,
funccurstep=moveupfunctionstep,
funccurscore=moveupfunctionscore)
if error_code == 0:
## success
user_msg.append("""The Function "%s" that was located at step %s, score %s, has been moved upwards""" \
% (moveupfunctionname, moveupfunctionstep, moveupfunctionscore))
else:
## could not move it
user_msg.append("""Unable to move the Function "%s" that is located at step %s, score %s""" \
% (moveupfunctionname, moveupfunctionstep, moveupfunctionscore))
(title, body) = _create_configure_doctype_submission_functions_form(doctype=doctype,
action=action,
user_msg=user_msg)
elif movedownfunctionname != "" and movedownfunctionstep != "" and movedownfunctionscore != "":
## process moving the function down one position
error_code = move_position_submissionfunction_down(doctype=doctype,
action=action,
function=movedownfunctionname,
funccurstep=movedownfunctionstep,
funccurscore=movedownfunctionscore)
if error_code == 0:
## success
user_msg.append("""The Function "%s" that was located at step %s, score %s, has been moved downwards""" \
% (movedownfunctionname, movedownfunctionstep, movedownfunctionscore))
else:
## could not move it
user_msg.append("""Unable to move the Function "%s" that is located at step %s, score %s""" \
% (movedownfunctionname, movedownfunctionstep, movedownfunctionscore))
(title, body) = _create_configure_doctype_submission_functions_form(doctype=doctype,
action=action,
user_msg=user_msg)
elif deletefunctionname != "" and deletefunctionstep != "" and deletefunctionscore != "":
## process deletion of function from the given position
(title, body) = _delete_submission_function(doctype=doctype, action=action, deletefunctionname=deletefunctionname,
deletefunctionstep=deletefunctionstep, deletefunctionscore=deletefunctionscore)
elif configuresubmissionaddfunction != "":
## display a form that allows the addition of a new WebSubmit function
(title, body) = _create_configure_doctype_submission_functions_add_function_form(doctype=doctype,
action=action)
elif configuresubmissionaddfunctioncommit != "":
## process the addition of the new WebSubmit function to the submission:
(title, body) = _add_function_to_submission(doctype=doctype, action=action, addfunctionname=addfunctionname,
addfunctionstep=addfunctionstep, addfunctionscore=addfunctionscore)
else:
## default - display functions for this submission
(title, body) = _create_configure_doctype_submission_functions_form(doctype=doctype,
action=action,
movefromfunctionname=movefromfunctionname,
movefromfunctionstep=movefromfunctionstep,
movefromfunctionscore=movefromfunctionscore
)
return (title, body)
def _add_function_to_submission(doctype, action, addfunctionname, addfunctionstep, addfunctionscore):
"""Process the addition of a function to a submission.
The user can decide in which step and at which score to insert the function.
@param doctype: (string) the unique ID of a document type
@param action: (string) the unique ID of an action
@param addfunctionname: (string) the name of the function to be added to the submission
@param addfunctionstep: (integer) the step at which the function is to be added
@param addfunctionscore: (integer) the score at which the function is to be added
@return: a tuple containing 2 strings: (page-title, page-body)
"""
user_msg = []
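    ## the function name must be supplied, and the step/score must be positive integers: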
if addfunctionname == "" or addfunctionstep == "" or addfunctionscore == "":
## invalid details!
user_msg.append("""Invalid function coordinates supplied!""")
(title, body) = _create_configure_doctype_submission_functions_add_function_form(doctype=doctype,
action=action,
user_msg=user_msg)
return (title, body)
try:
if int(addfunctionstep) < 1 or int(addfunctionscore) < 1:
## invalid details!
user_msg.append("""Invalid function step and/or score!""")
(title, body) = _create_configure_doctype_submission_functions_add_function_form(doctype=doctype,
action=action,
user_msg=user_msg)
return (title, body)
    except ValueError:
        user_msg.append("""Invalid function step and/or score!""")
        (title, body) = _create_configure_doctype_submission_functions_add_function_form(doctype=doctype,
                                                                                         action=action,
                                                                                         user_msg=user_msg)
        return (title, body)
try:
insert_function_into_submission_at_step_and_score_then_regulate_scores_of_functions_in_step(doctype=doctype,
action=action,
function=addfunctionname,
step=addfunctionstep,
score=addfunctionscore)
except InvenioWebSubmitAdminWarningReferentialIntegrityViolation, e:
## Function didn't exist in WebSubmit! Not added to submission.
user_msg.append(str(e))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_submission_functions_add_function_form(doctype=doctype,
action=action,
addfunctionstep=addfunctionstep,
addfunctionscore=addfunctionscore,
user_msg=user_msg)
return (title, body)
except InvenioWebSubmitAdminWarningInsertFailed, e:
## insert failed - some functions within the step may have been corrupted!
user_msg.append(str(e))
## TODO : LOG ERROR
(title, body) = \
_create_configure_doctype_submission_functions_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
except InvenioWebSubmitAdminWarningDeleteFailed, e:
## when regulating the scores of functions within the step, could not delete some or all of the functions
## within the step that the function was added to. Some functions may have been lost!
user_msg.append(str(e))
## TODO : LOG ERROR
(title, body) = \
_create_configure_doctype_submission_functions_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
## Successfully added
user_msg.append("""The function [%s] has been added to submission [%s] at step [%s], score [%s]."""\
% (addfunctionname, "%s%s" % (action, doctype), addfunctionstep, addfunctionscore))
(title, body) = \
_create_configure_doctype_submission_functions_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
def _delete_submission_function(doctype, action, deletefunctionname, deletefunctionstep, deletefunctionscore):
"""Delete a submission function from a given submission. Re-order all functions below it (within the same step)
to fill the gap left by the deleted function.
@param doctype: (string) the unique ID of a document type
@param action: (string) the unique ID of an action
@param deletefunctionname: (string) the name of the function to be deleted from the submission
@param deletefunctionstep: (string) the step of the function to be deleted from the submission
@param deletefunctionscore: (string) the score of the function to be deleted from the submission
@return: tuple containing 2 strings: (page-title, page-body)
"""
user_msg = []
## first, delete the function:
try:
delete_function_at_step_and_score_from_submission(doctype=doctype, action=action,
function=deletefunctionname, step=deletefunctionstep,
score=deletefunctionscore)
except InvenioWebSubmitAdminWarningDeleteFailed, e:
## unable to delete function - error message and return
user_msg.append("""Unable to delete function [%s] at step [%s], score [%s] from submission [%s]""" \
% (deletefunctionname, deletefunctionstep, deletefunctionscore, "%s%s" % (action, doctype)))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_submission_functions_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
## now, correct the scores of all functions in the step from which a function was just deleted:
try:
regulate_score_of_all_functions_in_step_to_ascending_multiples_of_10_for_submission(doctype=doctype,
action=action,
step=deletefunctionstep)
except InvenioWebSubmitAdminWarningDeleteFailed, e:
        ## couldn't delete the functions before re-ordering them
user_msg.append("""Deleted function [%s] at step [%s], score [%s] from submission [%s], but could not re-order""" \
""" scores of remaining functions within step [%s]""" \
% (deletefunctionname, deletefunctionstep, deletefunctionscore,
"%s%s" % (action, doctype), deletefunctionstep))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_submission_functions_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
## update submission "last-modification" date:
update_modification_date_for_submission(doctype=doctype, action=action)
## success message:
user_msg.append("""Successfully deleted function [%s] at step [%s], score [%s] from submission [%s]""" \
% (deletefunctionname, deletefunctionstep, deletefunctionscore, "%s%s" % (action, doctype)))
## TODO : LOG function Deletion
(title, body) = _create_configure_doctype_submission_functions_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
def perform_request_configure_doctype_submissionpage_preview(doctype, action, pagenum):
"""Display a preview of a Submission Page and its fields.
@param doctype: (string) the unique ID of a document type
@param action: (string) the unique ID of an action
@param pagenum: (integer) the number of the submission page to be previewed
    @return: a tuple of two elements: (page-title, page-body)
"""
body = ""
user_msg = []
try:
pagenum = str(pagenum)
except ValueError:
pagenum = ""
if pagenum != "":
try:
pagenum = str(wash_single_urlarg(urlarg=pagenum, argreqdtype=int, argdefault=""))
except ValueError, e:
pagenum = ""
## ensure that the page number for this submission is valid:
num_pages_submission = get_numbersubmissionpages_doctype_action(doctype=doctype, action=action)
try:
if not (int(pagenum) > 0 and int(pagenum) <= num_pages_submission):
user_msg.append("Invalid page number - out of range")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
except ValueError:
## invalid page number
user_msg.append("Invalid page number - must be an integer value!")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
## get details of all fields on submission page:
fields = get_details_and_description_of_all_fields_on_submissionpage(doctype=doctype, action=action, pagenum=pagenum)
## ensure all values for each field are strings:
string_fields = []
for field in fields:
string_fields.append(stringify_list_elements(field))
title = """A preview of Page %s of the %s Submission""" % (pagenum, "%s%s" % (action, doctype))
body = websubmitadmin_templates.tmpl_configuredoctype_display_submissionpage_preview(doctype=doctype,
action=action,
pagenum=pagenum,
fields=string_fields)
return (title, body)
def perform_request_configure_doctype_submissionpage_elements(doctype, action, pagenum, movefieldfromposn="",
movefieldtoposn="", deletefieldposn="", editfieldposn="",
editfieldposncommit="", fieldname="", fieldtext="", fieldlevel="",
fieldshortdesc="", fieldcheck="", addfield="", addfieldcommit=""):
"""Process requests relating to the elements of a particular submission page"""
body = ""
user_msg = []
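    ## wash the URL arguments: the page number and field positions must be integers; the
    ## field attributes (name, text, level, short description, check) are washed as strings: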
try:
pagenum = str(pagenum)
except ValueError:
pagenum = ""
if pagenum != "":
try:
pagenum = str(wash_single_urlarg(urlarg=pagenum, argreqdtype=int, argdefault=""))
except ValueError, e:
pagenum = ""
if fieldname != "":
try:
fieldname = wash_single_urlarg(urlarg=fieldname, argreqdtype=str, argdefault="", maxstrlen=15, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=fieldname) == 0:
fieldname = ""
except ValueError, e:
fieldname = ""
if fieldtext != "":
try:
fieldtext = wash_single_urlarg(urlarg=fieldtext, argreqdtype=str, argdefault="")
except ValueError, e:
fieldtext = ""
if fieldlevel != "":
try:
fieldlevel = wash_single_urlarg(urlarg=fieldlevel, argreqdtype=str, argdefault="O", maxstrlen=1, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=fieldlevel) == 0:
fieldlevel = "O"
if fieldlevel not in ("m", "M", "o", "O"):
fieldlevel = "O"
fieldlevel = fieldlevel.upper()
except ValueError, e:
fieldlevel = "O"
if fieldshortdesc != "":
try:
fieldshortdesc = wash_single_urlarg(urlarg=fieldshortdesc, argreqdtype=str, argdefault="")
except ValueError, e:
fieldshortdesc = ""
if fieldcheck != "":
try:
fieldcheck = wash_single_urlarg(urlarg=fieldcheck, argreqdtype=str, argdefault="", maxstrlen=15, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=fieldcheck) == 0:
fieldcheck = ""
except ValueError, e:
fieldcheck = ""
if editfieldposn != "":
try:
editfieldposn = str(wash_single_urlarg(urlarg=editfieldposn, argreqdtype=int, argdefault=""))
except ValueError, e:
editfieldposn = ""
if deletefieldposn != "":
try:
deletefieldposn = str(wash_single_urlarg(urlarg=deletefieldposn, argreqdtype=int, argdefault=""))
except ValueError, e:
deletefieldposn = ""
if movefieldfromposn != "":
try:
movefieldfromposn = str(wash_single_urlarg(urlarg=movefieldfromposn, argreqdtype=int, argdefault=""))
except ValueError, e:
movefieldfromposn = ""
if movefieldtoposn != "":
try:
movefieldtoposn = str(wash_single_urlarg(urlarg=movefieldtoposn, argreqdtype=int, argdefault=""))
except ValueError, e:
movefieldtoposn = ""
## ensure that there is only one doctype for this doctype ID - simply display all doctypes with warning if not
if doctype in ("", None):
user_msg.append("""Unknown Document Type""")
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
numrows_doctype = get_number_doctypes_docid(docid=doctype)
if numrows_doctype > 1:
## there are multiple doctypes with this doctype ID:
## TODO : LOG ERROR
user_msg.append("""Multiple document types identified by "%s" exist - cannot configure at this time.""" \
% (doctype,))
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
elif numrows_doctype == 0:
## this doctype does not seem to exist:
user_msg.append("""The document type identified by "%s" doesn't exist - cannot configure at this time.""" \
% (doctype,))
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
## ensure that this submission exists for this doctype:
numrows_submission = get_number_submissions_doctype_action(doctype=doctype, action=action)
if numrows_submission > 1:
## there are multiple submissions for this doctype/action ID:
## TODO : LOG ERROR
user_msg.append("""The Submission "%s" seems to exist multiple times for the Document Type "%s" - cannot configure at this time.""" \
% (action, doctype))
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
elif numrows_submission == 0:
## this submission does not seem to exist for this doctype:
user_msg.append("""The Submission "%s" doesn't exist for the "%s" Document Type - cannot configure at this time.""" \
% (action, doctype))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## ensure that the page number for this submission is valid:
num_pages_submission = get_numbersubmissionpages_doctype_action(doctype=doctype, action=action)
try:
if not (int(pagenum) > 0 and int(pagenum) <= num_pages_submission):
user_msg.append("Invalid page number - out of range")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
except ValueError:
## invalid page number
user_msg.append("Invalid page number - must be an integer value!")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
## submission valid
if editfieldposn != "" and editfieldposncommit == "":
## display form for editing field
(title, body) = _configure_doctype_edit_field_on_submissionpage_display_field_details(doctype=doctype, action=action,
pagenum=pagenum, fieldposn=editfieldposn)
elif editfieldposn != "" and editfieldposncommit != "":
## commit changes to element
(title, body) = _configure_doctype_edit_field_on_submissionpage(doctype=doctype, action=action,
pagenum=pagenum, fieldposn=editfieldposn, fieldtext=fieldtext,
fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc, fieldcheck=fieldcheck)
elif movefieldfromposn != "" and movefieldtoposn != "":
## move a field
(title, body) = _configure_doctype_move_field_on_submissionpage(doctype=doctype,
action=action, pagenum=pagenum, movefieldfromposn=movefieldfromposn,
movefieldtoposn=movefieldtoposn)
elif addfield != "":
## request to add a new field to a page - display form
(title, body) = _configure_doctype_add_field_to_submissionpage_display_form(doctype=doctype, action=action, pagenum=pagenum)
elif addfieldcommit != "":
## commit a new field to the page
(title, body) = _configure_doctype_add_field_to_submissionpage(doctype=doctype, action=action,
pagenum=pagenum, fieldname=fieldname, fieldtext=fieldtext,
fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc, fieldcheck=fieldcheck)
elif deletefieldposn != "":
## user wishes to delete a field from the page:
(title, body) = _configure_doctype_delete_field_from_submissionpage(doctype=doctype,
action=action, pagenum=pagenum, fieldnum=deletefieldposn)
else:
## default visit to page - list its elements:
(title, body) = _create_configure_doctype_submission_page_elements_form(doctype=doctype, action=action,
pagenum=pagenum, movefieldfromposn=movefieldfromposn)
return (title, body)
def stringify_list_elements(elementslist):
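    """Return a copy of elementslist in which each element has been cast to a string."""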
o = []
for el in elementslist:
o.append(str(el))
return o
def _configure_doctype_edit_field_on_submissionpage(doctype, action, pagenum, fieldposn,
fieldtext, fieldlevel, fieldshortdesc, fieldcheck):
"""Perform an update to the details of a field on a submission page.
@param doctype: (string) the unique ID of a document type
@param action: (string) the unique ID of an action
@param pagenum: (integer) the number of the page on which the element to be updated is found
    @param fieldposn: (integer) the numeric position of the field to be edited
@param fieldtext: (string) the text label displayed with a field
@param fieldlevel: (char) M or O (whether the field is mandatory or optional)
@param fieldshortdesc: (string) the short description of a field
@param fieldcheck: (string) the name of a JavaScript check to be applied to a field
@return: a tuple containing 2 strings - (page-title, page-body)
"""
user_msg = []
if fieldcheck not in ("", None):
## ensure check exists:
checkres = get_number_jschecks_with_chname(chname=fieldcheck)
if checkres < 1:
user_msg.append("The Check '%s' does not exist in WebSubmit - changes to field not saved" % (fieldcheck,))
(title, body) = _configure_doctype_edit_field_on_submissionpage_display_field_details(doctype=doctype, action=action,
pagenum=pagenum, fieldposn=fieldposn,
fieldtext=fieldtext, fieldlevel=fieldlevel,
fieldshortdesc=fieldshortdesc, user_msg=user_msg)
return (title, body)
try:
update_details_of_a_field_on_a_submissionpage(doctype=doctype, action=action, pagenum=pagenum, fieldposn=fieldposn,
fieldtext=fieldtext, fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc,
fieldcheck=fieldcheck)
user_msg.append("The details of the field at position %s have been updated successfully" % (fieldposn,))
update_modification_date_for_submission(doctype=doctype, action=action)
except InvenioWebSubmitAdminWarningTooManyRows, e:
## multiple rows found at page position - not safe to edit:
user_msg.append("Unable to update details of field at position %s on submission page %s - multiple fields found at this position" \
% (fieldposn, pagenum))
## TODO : LOG WARNING
except InvenioWebSubmitAdminWarningNoRowsFound, e:
## field not found - cannot edit
user_msg.append("Unable to update details of field at position %s on submission page %s - field doesn't seem to exist there!" \
% (fieldposn, pagenum))
## TODO : LOG WARNING
(title, body) = _create_configure_doctype_submission_page_elements_form(doctype=doctype, action=action, pagenum=pagenum, user_msg=user_msg)
return (title, body)
def _configure_doctype_edit_field_on_submissionpage_display_field_details(doctype, action, pagenum, fieldposn,
fieldtext=None, fieldlevel=None, fieldshortdesc=None,
fieldcheck=None, user_msg=""):
"""Display a form used to edit a field on a submission page.
@param doctype: (string) the unique ID of a document type
@param action: (string) the unique ID of an action
@param pagenum: (integer) the number of the page on which the element to be updated is found
    @param fieldposn: (integer) the numeric position of the field to be edited
@param fieldtext: (string) the text label displayed with a field
@param fieldlevel: (char) M or O (whether the field is mandatory or optional)
@param fieldshortdesc: (string) the short description of a field
@param fieldcheck: (string) the name of a JavaScript check to be applied to a field
@param user_msg: (list of strings, or string) any warning/error message to be displayed to the user
@return: a tuple containing 2 strings (page-title, page-body)
"""
if type(user_msg) not in (list, tuple) or user_msg == "":
user_msg = []
## get a list of all check names:
checks_res = get_all_jscheck_names()
allchecks=[]
for check in checks_res:
allchecks.append((check,))
## get the details for the field to be edited:
fielddets = get_details_of_field_at_positionx_on_submissionpage(doctype=doctype, action=action, pagenum=pagenum, fieldposition=fieldposn)
if len(fielddets) < 1:
(title, body) = _create_configure_doctype_submission_page_elements_form(doctype=doctype, action=action, pagenum=pagenum)
return (title, body)
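    ## prefer any attribute values passed in (e.g. when the form is redisplayed after a
    ## failed update); otherwise fall back to the values stored for the field: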
fieldname = str(fielddets[2])
if fieldtext is not None:
fieldtext = str(fieldtext)
else:
fieldtext = str(fielddets[3])
if fieldlevel is not None:
fieldlevel = str(fieldlevel)
else:
fieldlevel = str(fielddets[4])
if fieldshortdesc is not None:
fieldshortdesc = str(fieldshortdesc)
else:
fieldshortdesc = str(fielddets[5])
if fieldcheck is not None:
fieldcheck = str(fieldcheck)
else:
fieldcheck = str(fielddets[6])
cd = str(fielddets[7])
md = str(fielddets[8])
title = """Edit the %(fieldname)s field as it appears at position %(fieldnum)s on Page %(pagenum)s of the %(submission)s Submission""" \
% { 'fieldname' : fieldname, 'fieldnum' : fieldposn, 'pagenum' : pagenum, 'submission' : "%s%s" % (action, doctype) }
body = websubmitadmin_templates.tmpl_configuredoctype_edit_submissionfield(doctype=doctype,
action=action,
pagenum=pagenum,
fieldnum=fieldposn,
fieldname=fieldname,
fieldtext=fieldtext,
fieldlevel=fieldlevel,
fieldshortdesc=fieldshortdesc,
fieldcheck=fieldcheck,
cd=cd,
md=md,
allchecks=allchecks,
user_msg=user_msg)
return (title, body)
def _configure_doctype_add_field_to_submissionpage(doctype, action, pagenum, fieldname="",
fieldtext="", fieldlevel="", fieldshortdesc="", fieldcheck=""):
"""Add a field to a submission page.
@param doctype: (string) the unique ID of a document type
@param action: (string) the unique ID of an action
@param pagenum: (integer) the number of the page on which the element to be updated is found
@param fieldname: (string) the name of the field to be added to the page
@param fieldtext: (string) the text label displayed with a field
@param fieldlevel: (char) M or O (whether the field is mandatory or optional)
@param fieldshortdesc: (string) the short description of a field
@param fieldcheck: (string) the name of a JavaScript check to be applied to a field
@return: a tuple containing 2 strings - (page-title, page-body)
"""
user_msg = []
## ensure that a field "fieldname" actually exists:
if fieldname == "":
## the field to be added has no element description in the WebSubmit DB - cannot add
user_msg.append("""The field that you have chosen to add does not seem to exist in WebSubmit - cannot add""")
(title, body) = _configure_doctype_add_field_to_submissionpage_display_form(doctype, action, pagenum,
fieldtext=fieldtext,
fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc,
fieldcheck=fieldcheck, user_msg=user_msg)
return (title, body)
numelements_elname = get_number_elements_with_elname(elname=fieldname)
if numelements_elname < 1:
## the field to be added has no element description in the WebSubmit DB - cannot add
user_msg.append("""The field that you have chosen to add (%s) does not seem to exist in WebSubmit - cannot add""" % (fieldname,))
(title, body) = _configure_doctype_add_field_to_submissionpage_display_form(doctype, action, pagenum,
fieldtext=fieldtext,
fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc,
fieldcheck=fieldcheck, user_msg=user_msg)
return (title, body)
## if fieldcheck has been provided, ensure that it is a valid check name:
if fieldcheck not in ("", None):
## ensure check exists:
checkres = get_number_jschecks_with_chname(chname=fieldcheck)
if checkres < 1:
user_msg.append("The Check '%s' does not exist in WebSubmit - new field not saved to page" % (fieldcheck,))
(title, body) = _configure_doctype_add_field_to_submissionpage_display_form(doctype, action, pagenum,
fieldname=fieldname, fieldtext=fieldtext,
fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc,
user_msg=user_msg)
return (title, body)
## now add the new field to the page:
try:
insert_field_onto_submissionpage(doctype=doctype, action=action, pagenum=pagenum, fieldname=fieldname, fieldtext=fieldtext,
fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc, fieldcheck=fieldcheck)
user_msg.append("""Successfully added the field "%s" to the last position on page %s of submission %s""" \
% (fieldname, pagenum, "%s%s" % (action, doctype)))
update_modification_date_for_submission(doctype=doctype, action=action)
(title, body) = _create_configure_doctype_submission_page_elements_form(doctype=doctype, action=action, pagenum=pagenum, user_msg=user_msg)
except InvenioWebSubmitAdminWarningInsertFailed, e:
## the insert of the new field failed for some reason
## TODO : LOG ERROR
user_msg.append("""Couldn't add the field "%s" to page %s of submission %s - please try again""" \
% (fieldname, pagenum, "%s%s" % (action, doctype)))
(title, body) = _configure_doctype_add_field_to_submissionpage_display_form(doctype, action, pagenum,
fieldname=fieldname, fieldtext=fieldtext,
fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc,
fieldcheck=fieldcheck, user_msg=user_msg)
return (title, body)
def _configure_doctype_add_field_to_submissionpage_display_form(doctype, action, pagenum, fieldname="", fieldtext="",
fieldlevel="", fieldshortdesc="", fieldcheck="", user_msg=""):
title = """Add a Field to Page %(pagenum)s of the %(submission)s Submission""" \
% { 'pagenum' : pagenum, 'submission' : "%s%s" % (action, doctype) }
## sanity checking:
if type(user_msg) not in (list, tuple) or user_msg == "":
user_msg = []
## get a list of all check names:
checks_res = get_all_jscheck_names()
allchecks=[]
for check in checks_res:
allchecks.append((check,))
## get a list of all WebSubmit element names:
elements_res = get_all_element_names()
allelements = []
for el in elements_res:
allelements.append((el,))
## get form:
body = websubmitadmin_templates.tmpl_configuredoctype_add_submissionfield(doctype=doctype,
action=action,
pagenum=pagenum,
fieldname=fieldname,
fieldtext=fieldtext,
fieldlevel=fieldlevel,
fieldshortdesc=fieldshortdesc,
fieldcheck=fieldcheck,
allchecks=allchecks,
allelements=allelements,
user_msg=user_msg)
return (title, body)
def _configure_doctype_move_field_on_submissionpage(doctype, action, pagenum, movefieldfromposn, movefieldtoposn):
user_msg = []
_ = gettext_set_language(CFG_SITE_LANG)
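    ## move_field_on_submissionpage_from_positionx_to_positiony returns a numeric code:
    ## 0 means success; 1 means invalid position numbers; 2, 3 and 4 mean that a stage of
    ## the two-field swap failed; 5 and 6 mean that re-numbering the other fields failed.
    ## Each case is reported to the user below: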
movefield_res = move_field_on_submissionpage_from_positionx_to_positiony(doctype=doctype, action=action, pagenum=pagenum,
movefieldfrom=movefieldfromposn, movefieldto=movefieldtoposn)
if movefield_res == 1:
## invalid field numbers
try:
raise InvenioWebSubmitWarning(_('Unable to move field at position %s to position %s on page %s of submission \'%s%s\' - Invalid Field Position Numbers') % (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_INVALID_FIELD_NUMBERS_SUPPLIED_WHEN_TRYING_TO_MOVE_FIELD_ON_SUBMISSION_PAGE', \
#movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype)))
user_msg.append("""Unable to move field from position %s to position %s on page %s of submission %s%s - field position numbers invalid""" \
% (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
elif movefield_res == 2:
## failed to swap 2 fields - couldn't move field1 to temp position
try:
            raise InvenioWebSubmitWarning(_('Unable to swap field at position %s with field at position %s on page %s of submission %s - could not move field at position %s to temporary field location') % (movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype), movefieldfromposn))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_UNABLE_TO_SWAP_TWO_FIELDS_ON_SUBMISSION_PAGE_COULDNT_MOVE_FIELD1_TO_TEMP_POSITION', \
#movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype)))
user_msg.append("""Unable to move field from position %s to position %s on page %s of submission %s%s""" \
% (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
elif movefield_res == 3:
## failed to swap 2 fields on submission page - couldn't move field2 to field1 position
try:
            raise InvenioWebSubmitWarning(_('Unable to swap field at position %s with field at position %s on page %s of submission %s - could not move field at position %s to position %s. Please ask Admin to check that a field was not stranded in a temporary position') % (movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype), movefieldtoposn, movefieldfromposn))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_UNABLE_TO_SWAP_TWO_FIELDS_ON_SUBMISSION_PAGE_COULDNT_MOVE_FIELD2_TO_FIELD1_POSITION', \
#movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype), movefieldtoposn, movefieldfromposn))
user_msg.append("""Unable to move field from position %s to position %s on page %s of submission %s%s - See Admin if field order is broken""" \
% (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
elif movefield_res == 4:
## failed to swap 2 fields in submission page - couldnt swap field at temp position to field2 position
try:
            raise InvenioWebSubmitWarning(_('Unable to swap field at position %s with field at position %s on page %s of submission %s - could not move field that was located at position %s to position %s from temporary position. Field is now stranded in temporary position and must be corrected manually by an Admin') % (movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype), movefieldfromposn, movefieldtoposn))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_UNABLE_TO_SWAP_TWO_FIELDS_ON_SUBMISSION_PAGE_COULDNT_MOVE_FIELD1_TO_POSITION_FIELD2_FROM_TEMPORARY_POSITION', \
#movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype), movefieldfromposn, movefieldtoposn))
user_msg.append("""Unable to move field from position %s to position %s on page %s of submission %s%s - Field-order is now broken and must be corrected by Admin""" \
% (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
elif movefield_res == 5:
## failed to decrement the position of all fields below the field that was moved to a temp position
try:
            raise InvenioWebSubmitWarning(_('Unable to move field at position %s to position %s on page %s of submission %s - could not decrement the position of the fields below position %s. Tried to recover - please check that field ordering is not broken') % (movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype), movefieldfromposn))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_UNABLE_TO_MOVE_FIELD_TO_NEW_POSITION_ON_SUBMISSION_PAGE_COULDNT_DECREMENT_POSITION_OF_FIELDS_BELOW_FIELD1', \
#movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype), movefieldfromposn))
user_msg.append("""Unable to move field from position %s to position %s on page %s of submission %s%s - See Admin if field-order is broken""" \
% (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
elif movefield_res == 6:
## failed to increment position of fields in and below position into which 'movefromfieldposn' is to be inserted
try:
raise InvenioWebSubmitWarning(_('Unable to move field at position %s to position %s on page %s of submission %s%s - could not increment the position of the fields at and below position %s. The field that was at position %s is now stranded in a temporary position.') % (movefieldfromposn, movefieldtoposn, pagenum, action, doctype, movefieldtoposn, movefieldfromposn))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_UNABLE_TO_MOVE_FIELD_TO_NEW_POSITION_ON_SUBMISSION_PAGE_COULDNT_INCREMENT_POSITION_OF_FIELDS_AT_AND_BELOW_FIELD2', \
#movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype), movefieldtoposn, movefieldfromposn))
user_msg.append("""Unable to move field from position %s to position %s on page %s of submission %s%s - Field-order is now broken and must be corrected by Admin""" \
% (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
else:
## successful update:
try:
raise InvenioWebSubmitWarning(_('Moved field from position %s to position %s on page %s of submission \'%s%s\'.') % (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_MOVED_FIELD_ON_SUBMISSION_PAGE', movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype)))
user_msg.append("""Successfully moved field from position %s to position %s on page %s of submission %s%s""" \
% (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
(title, body) = _create_configure_doctype_submission_page_elements_form(doctype=doctype, action=action, pagenum=pagenum, user_msg=user_msg)
return (title, body)
def _configure_doctype_delete_field_from_submissionpage(doctype, action, pagenum, fieldnum):
"""Delete a field from a submission page"""
_ = gettext_set_language(CFG_SITE_LANG)
user_msg = []
del_res = delete_a_field_from_submissionpage_then_reorder_fields_below_to_fill_vacant_position(doctype=doctype,
action=action,
pagenum=pagenum,
fieldposn=fieldnum)
if del_res == 1:
try:
            raise InvenioWebSubmitWarning(_('Unable to delete field at position %s from page %s of submission \'%s%s\'') % (fieldnum, pagenum, action, doctype))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_UNABLE_TO_DELETE_FIELD_FROM_SUBMISSION_PAGE', fieldnum, pagenum, "%s%s" % (action, doctype)))
user_msg.append("Unable to delete field at position %s from page number %s of submission %s%s" % (fieldnum, pagenum, action, doctype))
else:
## deletion was OK
user_msg.append("Field deleted")
try:
            raise InvenioWebSubmitWarning(_('Deleted field at position %s from page %s of submission \'%s%s\'') % (fieldnum, pagenum, action, doctype))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_DELETED_FIELD_FROM_SUBMISSION_PAGE', fieldnum, pagenum, "%s%s" % (action, doctype)))
(title, body) = _create_configure_doctype_submission_page_elements_form(doctype=doctype, action=action, pagenum=pagenum, user_msg=user_msg)
return (title, body)
def _create_configure_doctype_submission_page_elements_form(doctype, action, pagenum, movefieldfromposn="", user_msg=""):
## get list of elements for page:
title = """Submission Elements found on Page %s of the "%s" Submission of the "%s" Document Type:"""\
% (pagenum, action, doctype)
body = ""
raw_page_elements = get_details_allsubmissionfields_on_submission_page(doctype=doctype, action=action, pagenum=pagenum)
## correctly stringify page elements for the template:
page_elements = []
for element in raw_page_elements:
page_elements.append(stringify_list_elements(element))
body = websubmitadmin_templates.tmpl_configuredoctype_list_submissionelements(doctype=doctype,
action=action,
pagenum=pagenum,
page_elements=page_elements,
movefieldfromposn=movefieldfromposn,
user_msg=user_msg)
return (title, body)
def perform_request_configure_doctype_submissionpages(doctype,
action,
pagenum="",
movepage="",
movepagedirection="",
deletepage="",
deletepageconfirm="",
addpage=""):
"""Process requests relating to the submission pages of a doctype/submission"""
body = ""
user_msg = []
try:
pagenum = int(pagenum)
except ValueError:
pagenum = ""
## ensure that there is only one doctype for this doctype ID - simply display all doctypes with warning if not
if doctype in ("", None):
user_msg.append("""Unknown Document Type""")
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
numrows_doctype = get_number_doctypes_docid(docid=doctype)
if numrows_doctype > 1:
## there are multiple doctypes with this doctype ID:
## TODO : LOG ERROR
user_msg.append("""Multiple document types identified by "%s" exist - cannot configure at this time.""" \
% (doctype,))
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
elif numrows_doctype == 0:
## this doctype does not seem to exist:
user_msg.append("""The document type identified by "%s" doesn't exist - cannot configure at this time.""" \
% (doctype,))
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
## ensure that this submission exists for this doctype:
numrows_submission = get_number_submissions_doctype_action(doctype=doctype, action=action)
if numrows_submission > 1:
## there are multiple submissions for this doctype/action ID:
## TODO : LOG ERROR
user_msg.append("""The Submission "%s" seems to exist multiple times for the Document Type "%s" - cannot configure at this time.""" \
% (action, doctype))
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
elif numrows_submission == 0:
## this submission does not seem to exist for this doctype:
user_msg.append("""The Submission "%s" doesn't exist for the "%s" Document Type - cannot configure at this time.""" \
% (action, doctype))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## submission valid
if addpage != "":
## add a new page to a submission:
error_code = add_submission_page_doctype_action(doctype=doctype, action=action)
if error_code == 0:
## success
user_msg.append("""A new Submission Page has been added into the last position""")
else:
## could not move it
user_msg.append("""Unable to add a new Submission Page""")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype,
action=action,
user_msg=user_msg)
elif movepage != "":
## user wants to move a page upwards in the order
(title, body) = _configure_doctype_move_submission_page(doctype=doctype,
action=action, pagenum=pagenum, direction=movepagedirection)
elif deletepage != "":
## user wants to delete a page:
if deletepageconfirm != "":
## confirmation of deletion has been provided - proceed
(title, body) = _configure_doctype_delete_submission_page(doctype=doctype,
action=action, pagenum=pagenum)
else:
## user has not yet confirmed the deletion of a page - prompt for confirmation
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype,
action=action,
deletepagenum=pagenum)
else:
## default - display details of submission pages for this submission:
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype, action=action)
return (title, body)
def _configure_doctype_move_submission_page(doctype, action, pagenum, direction):
user_msg = []
## Sanity checking:
if direction.lower() not in ("up", "down"):
## invalid direction:
user_msg.append("""Invalid Page destination - no action was taken""")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype,
action=action,
user_msg=user_msg)
return (title, body)
## swap the pages:
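    ## ("up" swaps the page's elements with those of the previous page, "down" with those
    ##  of the following page; only adjacent pages can be swapped)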
if direction.lower() == "up":
error_code = swap_elements_adjacent_pages_doctype_action(doctype=doctype, action=action,
page1=pagenum, page2=pagenum-1)
else:
error_code = swap_elements_adjacent_pages_doctype_action(doctype=doctype, action=action,
page1=pagenum, page2=pagenum+1)
if error_code == 0:
## pages swapped successfully:
## TODO : LOG PAGE SWAP
user_msg.append("""Page %s was successfully moved %swards""" % (pagenum, direction.capitalize()))
elif error_code == 1:
## pages are not adjacent:
user_msg.append("""Unable to move page - only adjacent pages can be swapped around""")
elif error_code == 2:
## at least one page out of legal range (e.g. trying to move a page to a position higher or lower
## than the number of pages:
user_msg.append("""Unable to move page to illegal position""")
elif error_code in (3, 4):
## Some sort of problem moving fields around!
## TODO : LOG ERROR
user_msg.append("""Error: there was a problem swapping the submission elements to their new pages.""")
user_msg.append("""An attempt was made to return the elements to their original pages - you """\
"""should verify that this was successful, or ask your administrator"""\
""" to fix the problem manually""")
elif error_code == 5:
## the elements from the first page were left stranded in the temporary page!
## TODO : LOG ERROR
user_msg.append("""Error: there was a problem swapping the submission elements to their new pages.""")
user_msg.append("""Some elements were left stranded on a temporary page. Please ask your administrator to"""\
""" fix this problem manually""")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
def _configure_doctype_delete_submission_page(doctype, action, pagenum):
user_msg = []
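    ## deleting a page means: delete all of its fields, shift the fields of the following
    ## pages down by one page, then decrement the submission's page count: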
num_pages = get_numbersubmissionpages_doctype_action(doctype=doctype, action=action)
if num_pages > 0:
## proceed with deletion
error_code = delete_allfields_submissionpage_doctype_action(doctype=doctype, action=action, pagenum=pagenum)
if error_code == 0:
## everything OK
## move elements from pages above the deleted page down by one page:
decrement_by_one_pagenumber_submissionelements_abovepage(doctype=doctype, action=action, frompage=pagenum)
## now decrement the number of pages associated with the submission:
error_code = decrement_by_one_number_submissionpages_doctype_action(doctype=doctype, action=action)
if error_code == 0:
## successfully deleted submission page
## TODO : LOG DELETION
user_msg.append("""Page number %s of Submission %s was successfully deleted."""\
% (pagenum, action))
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype,
action=action,
user_msg=user_msg)
else:
## error - either submission didn't exist, or multiple instances found
## TODO : LOG ERROR
user_msg.append("""The Submission elements were deleted from Page %s of the Submission "%s"."""\
""" However, it was not possible to delete the page itself."""\
% (pagenum, action))
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype,
action=action,
user_msg=user_msg)
else:
## unable to delete some or all fields from the page
## TODO : LOG ERROR
user_msg.append("""Error: Unable to delete some field elements from Page %s of Submission %s%s - """\
"""Page not deleted!""" % (pagenum, action, doctype))
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype,
action=action,
user_msg=user_msg)
elif num_pages == 0:
## no pages to delete for this submission
user_msg.append("""This Submission has no Pages - Cannot delete a Page!""")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype,
action=action,
user_msg=user_msg)
else:
## error - couldn't determine the number of pages for submission
## TODO : LOG ERROR
user_msg.append("""Unable to determine number of Submission Pages for Submission "%s" - """\
"""Cannot delete page %s"""\
% (action, pagenum))
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _create_configure_doctype_submission_pages_form(doctype,
action,
deletepagenum="",
user_msg=""):
"""Perform the necessary steps in order to display a list of the pages belonging to a given
submission of a given document type.
@param doctype: (string) the unique ID of the document type.
@param action: (string) the unique name/ID of the action.
@param user_msg: (string, or list) any message(s) to be displayed to the user.
@return: a tuple containing 2 strings - the page title and the page body.
"""
title = """Details of the Pages of the "%s" Submission of the "%s" Document Type:""" % (action, doctype)
submission_dets = get_cd_md_numbersubmissionpages_doctype_action(doctype=doctype, action=action)
if len(submission_dets) > 0:
cd = str(submission_dets[0][0])
md = str(submission_dets[0][1])
num_pages = submission_dets[0][2]
else:
(cd, md, num_pages) = ("", "", "0")
body = websubmitadmin_templates.tmpl_configuredoctype_list_submissionpages(doctype=doctype,
action=action,
number_pages=num_pages,
cd=cd,
md=md,
deletepagenum=deletepagenum,
user_msg=user_msg)
return (title, body)
| gpl-2.0 | 1,279,078,170,720,660,200 | 55.692181 | 421 | 0.557892 | false |
darkleons/odoo | addons/mrp_repair/mrp_repair.py | 4 | 36646 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from datetime import datetime
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class mrp_repair(osv.osv):
_name = 'mrp.repair'
_inherit = 'mail.thread'
_description = 'Repair Order'
def _amount_untaxed(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates untaxed amount.
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param ids: List of selected IDs
@param field_name: Name of field.
@param arg: Argument
@param context: A standard dictionary for contextual values
@return: Dictionary of values.
"""
res = {}
cur_obj = self.pool.get('res.currency')
for repair in self.browse(cr, uid, ids, context=context):
res[repair.id] = 0.0
for line in repair.operations:
res[repair.id] += line.price_subtotal
for line in repair.fees_lines:
res[repair.id] += line.price_subtotal
cur = repair.pricelist_id.currency_id
res[repair.id] = cur_obj.round(cr, uid, cur, res[repair.id])
return res
def _amount_tax(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates taxed amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
#return {}.fromkeys(ids, 0)
cur_obj = self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for repair in self.browse(cr, uid, ids, context=context):
val = 0.0
cur = repair.pricelist_id.currency_id
for line in repair.operations:
#manage prices with tax included use compute_all instead of compute
if line.to_invoice:
tax_calculate = tax_obj.compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, line.product_id, repair.partner_id)
for c in tax_calculate['taxes']:
val += c['amount']
for line in repair.fees_lines:
if line.to_invoice:
tax_calculate = tax_obj.compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, line.product_id, repair.partner_id)
for c in tax_calculate['taxes']:
val += c['amount']
res[repair.id] = cur_obj.round(cr, uid, cur, val)
return res
def _amount_total(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates total amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
untax = self._amount_untaxed(cr, uid, ids, field_name, arg, context=context)
tax = self._amount_tax(cr, uid, ids, field_name, arg, context=context)
cur_obj = self.pool.get('res.currency')
for id in ids:
repair = self.browse(cr, uid, id, context=context)
cur = repair.pricelist_id.currency_id
res[id] = cur_obj.round(cr, uid, cur, untax.get(id, 0.0) + tax.get(id, 0.0))
return res
def _get_default_address(self, cr, uid, ids, field_name, arg, context=None):
res = {}
partner_obj = self.pool.get('res.partner')
for data in self.browse(cr, uid, ids, context=context):
adr_id = False
if data.partner_id:
adr_id = partner_obj.address_get(cr, uid, [data.partner_id.id], ['default'])['default']
res[data.id] = adr_id
return res
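    ## the two helpers below return the IDs of the repair orders that reference the given
    ## operation lines / fee lines: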
def _get_lines(self, cr, uid, ids, context=None):
return self.pool['mrp.repair'].search(cr, uid, [('operations', 'in', ids)], context=context)
def _get_fee_lines(self, cr, uid, ids, context=None):
return self.pool['mrp.repair'].search(cr, uid, [('fees_lines', 'in', ids)], context=context)
_columns = {
'name': fields.char('Repair Reference', required=True, states={'confirmed': [('readonly', True)]}, copy=False),
'product_id': fields.many2one('product.product', string='Product to Repair', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'partner_id': fields.many2one('res.partner', 'Partner', select=True, help='Choose partner for whom the order will be invoiced and delivered.', states={'confirmed': [('readonly', True)]}),
'address_id': fields.many2one('res.partner', 'Delivery Address', domain="[('parent_id','=',partner_id)]", states={'confirmed': [('readonly', True)]}),
'default_address_id': fields.function(_get_default_address, type="many2one", relation="res.partner"),
'state': fields.selection([
('draft', 'Quotation'),
('cancel', 'Cancelled'),
('confirmed', 'Confirmed'),
('under_repair', 'Under Repair'),
('ready', 'Ready to Repair'),
('2binvoiced', 'To be Invoiced'),
('invoice_except', 'Invoice Exception'),
('done', 'Repaired')
], 'Status', readonly=True, track_visibility='onchange', copy=False,
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed repair order. \
\n* The \'Confirmed\' status is used when a user confirms the repair order. \
\n* The \'Ready to Repair\' status is used to start to repairing, user can start repairing only after repair order is confirmed. \
\n* The \'To be Invoiced\' status is used to generate the invoice before or after repairing done. \
\n* The \'Done\' status is set when repairing is completed.\
\n* The \'Cancelled\' status is used when user cancel repair order.'),
'location_id': fields.many2one('stock.location', 'Current Location', select=True, required=True, readonly=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]}),
'location_dest_id': fields.many2one('stock.location', 'Delivery Location', readonly=True, required=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]}),
'lot_id': fields.many2one('stock.production.lot', 'Repaired Lot', domain="[('product_id','=', product_id)]", help="Products repaired are all belonging to this lot"),
        'guarantee_limit': fields.date('Warranty Expiration', help="The warranty expiration limit is computed as: last move date + warranty defined on selected product. If the current date is below the warranty expiration limit, each operation and fee you will add will be set as 'not to be invoiced' by default. Note that you can change this manually afterwards.", states={'confirmed': [('readonly', True)]}),
'operations': fields.one2many('mrp.repair.line', 'repair_id', 'Operation Lines', readonly=True, states={'draft': [('readonly', False)]}, copy=True),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', help='Pricelist of the selected partner.'),
'partner_invoice_id': fields.many2one('res.partner', 'Invoicing Address'),
'invoice_method': fields.selection([
("none", "No Invoice"),
("b4repair", "Before Repair"),
("after_repair", "After Repair")
], "Invoice Method",
select=True, required=True, states={'draft': [('readonly', False)]}, readonly=True, help='Selecting \'Before Repair\' or \'After Repair\' will allow you to generate invoice before or after the repair is done respectively. \'No invoice\' means you don\'t want to generate invoice for this repair order.'),
'invoice_id': fields.many2one('account.invoice', 'Invoice', readonly=True, track_visibility="onchange", copy=False),
'move_id': fields.many2one('stock.move', 'Move', readonly=True, help="Move created by the repair order", track_visibility="onchange", copy=False),
'fees_lines': fields.one2many('mrp.repair.fee', 'repair_id', 'Fees', readonly=True, states={'draft': [('readonly', False)]}, copy=True),
'internal_notes': fields.text('Internal Notes'),
'quotation_notes': fields.text('Quotation Notes'),
'company_id': fields.many2one('res.company', 'Company'),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'repaired': fields.boolean('Repaired', readonly=True, copy=False),
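        # The store triggers below recompute these totals whenever price-related
        # fields change on the repair order, its operation lines or its fee lines.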
'amount_untaxed': fields.function(_amount_untaxed, string='Untaxed Amount',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
'amount_tax': fields.function(_amount_tax, string='Taxes',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
'amount_total': fields.function(_amount_total, string='Total',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
}
def _default_stock_location(self, cr, uid, context=None):
try:
warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0')
return warehouse.lot_stock_id.id
except:
return False
_defaults = {
'state': lambda *a: 'draft',
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'mrp.repair'),
'invoice_method': lambda *a: 'none',
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.repair', context=context),
'pricelist_id': lambda self, cr, uid, context: self.pool.get('product.pricelist').search(cr, uid, [('type', '=', 'sale')])[0],
'product_qty': 1.0,
'location_id': _default_stock_location,
}
_sql_constraints = [
('name', 'unique (name)', 'The name of the Repair Order must be unique!'),
]
def onchange_product_id(self, cr, uid, ids, product_id=None):
""" On change of product sets some values.
@param product_id: Changed product
@return: Dictionary of values.
"""
product = False
if product_id:
product = self.pool.get("product.product").browse(cr, uid, product_id)
return {'value': {
'guarantee_limit': False,
'lot_id': False,
'product_uom': product and product.uom_id.id or False,
}
}
def onchange_product_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value': {}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
def onchange_location_id(self, cr, uid, ids, location_id=None):
""" On change of location
"""
return {'value': {'location_dest_id': location_id}}
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_partner_id(self, cr, uid, ids, part, address_id):
""" On change of partner sets the values of partner address,
partner invoice address and pricelist.
@param part: Changed id of partner.
@param address_id: Address id from current record.
@return: Dictionary of values.
"""
part_obj = self.pool.get('res.partner')
pricelist_obj = self.pool.get('product.pricelist')
if not part:
return {'value': {
'address_id': False,
'partner_invoice_id': False,
'pricelist_id': pricelist_obj.search(cr, uid, [('type', '=', 'sale')])[0]
}
}
addr = part_obj.address_get(cr, uid, [part], ['delivery', 'invoice', 'default'])
partner = part_obj.browse(cr, uid, part)
pricelist = partner.property_product_pricelist and partner.property_product_pricelist.id or False
return {'value': {
'address_id': addr['delivery'] or addr['default'],
'partner_invoice_id': addr['invoice'],
'pricelist_id': pricelist
}
}
def action_cancel_draft(self, cr, uid, ids, *args):
""" Cancels repair order when it is in 'Draft' state.
@param *arg: Arguments
@return: True
"""
if not len(ids):
return False
mrp_line_obj = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids):
mrp_line_obj.write(cr, uid, [l.id for l in repair.operations], {'state': 'draft'})
self.write(cr, uid, ids, {'state': 'draft'})
return self.create_workflow(cr, uid, ids)
def action_confirm(self, cr, uid, ids, *args):
""" Repair order state is set to 'To be invoiced' when invoice method
is 'Before repair' else state becomes 'Confirmed'.
@param *arg: Arguments
@return: True
"""
mrp_line_obj = self.pool.get('mrp.repair.line')
for o in self.browse(cr, uid, ids):
if (o.invoice_method == 'b4repair'):
self.write(cr, uid, [o.id], {'state': '2binvoiced'})
else:
self.write(cr, uid, [o.id], {'state': 'confirmed'})
for line in o.operations:
if line.product_id.track_production:
raise osv.except_osv(_('Warning!'), _("Serial number is required for operation line with product '%s'") % (line.product_id.name))
mrp_line_obj.write(cr, uid, [l.id for l in o.operations], {'state': 'confirmed'})
return True
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels repair order.
@return: True
"""
mrp_line_obj = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids, context=context):
if not repair.invoiced:
mrp_line_obj.write(cr, uid, [l.id for l in repair.operations], {'state': 'cancel'}, context=context)
else:
raise osv.except_osv(_('Warning!'), _('Repair order is already invoiced.'))
return self.write(cr, uid, ids, {'state': 'cancel'})
def wkf_invoice_create(self, cr, uid, ids, *args):
self.action_invoice_create(cr, uid, ids)
return True
def action_invoice_create(self, cr, uid, ids, group=False, context=None):
""" Creates invoice(s) for repair order.
@param group: It is set to true when group invoice is to be generated.
@return: Invoice Ids.
"""
res = {}
invoices_group = {}
inv_line_obj = self.pool.get('account.invoice.line')
inv_obj = self.pool.get('account.invoice')
repair_line_obj = self.pool.get('mrp.repair.line')
repair_fee_obj = self.pool.get('mrp.repair.fee')
for repair in self.browse(cr, uid, ids, context=context):
res[repair.id] = False
if repair.state in ('draft', 'cancel') or repair.invoice_id:
continue
if not (repair.partner_id.id and repair.partner_invoice_id.id):
raise osv.except_osv(_('No partner!'), _('You have to select a Partner Invoice Address in the repair form!'))
comment = repair.quotation_notes
if (repair.invoice_method != 'none'):
if group and repair.partner_invoice_id.id in invoices_group:
inv_id = invoices_group[repair.partner_invoice_id.id]
invoice = inv_obj.browse(cr, uid, inv_id)
invoice_vals = {
'name': invoice.name + ', ' + repair.name,
'origin': invoice.origin + ', ' + repair.name,
'comment': (comment and (invoice.comment and invoice.comment + "\n" + comment or comment)) or (invoice.comment and invoice.comment or ''),
}
inv_obj.write(cr, uid, [inv_id], invoice_vals, context=context)
else:
if not repair.partner_id.property_account_receivable:
raise osv.except_osv(_('Error!'), _('No account defined for partner "%s".') % repair.partner_id.name)
account_id = repair.partner_id.property_account_receivable.id
inv = {
'name': repair.name,
'origin': repair.name,
'type': 'out_invoice',
'account_id': account_id,
'partner_id': repair.partner_invoice_id.id or repair.partner_id.id,
'currency_id': repair.pricelist_id.currency_id.id,
'comment': repair.quotation_notes,
'fiscal_position': repair.partner_id.property_account_position.id
}
inv_id = inv_obj.create(cr, uid, inv)
invoices_group[repair.partner_invoice_id.id] = inv_id
self.write(cr, uid, repair.id, {'invoiced': True, 'invoice_id': inv_id})
for operation in repair.operations:
if operation.to_invoice:
if group:
name = repair.name + '-' + operation.name
else:
name = operation.name
if operation.product_id.property_account_income:
account_id = operation.product_id.property_account_income.id
elif operation.product_id.categ_id.property_account_income_categ:
account_id = operation.product_id.categ_id.property_account_income_categ.id
else:
raise osv.except_osv(_('Error!'), _('No account defined for product "%s".') % operation.product_id.name)
invoice_line_id = inv_line_obj.create(cr, uid, {
'invoice_id': inv_id,
'name': name,
'origin': repair.name,
'account_id': account_id,
'quantity': operation.product_uom_qty,
'invoice_line_tax_id': [(6, 0, [x.id for x in operation.tax_id])],
'uos_id': operation.product_uom.id,
'price_unit': operation.price_unit,
'price_subtotal': operation.product_uom_qty * operation.price_unit,
'product_id': operation.product_id and operation.product_id.id or False
})
repair_line_obj.write(cr, uid, [operation.id], {'invoiced': True, 'invoice_line_id': invoice_line_id})
for fee in repair.fees_lines:
if fee.to_invoice:
if group:
name = repair.name + '-' + fee.name
else:
name = fee.name
if not fee.product_id:
raise osv.except_osv(_('Warning!'), _('No product defined on Fees!'))
if fee.product_id.property_account_income:
account_id = fee.product_id.property_account_income.id
elif fee.product_id.categ_id.property_account_income_categ:
account_id = fee.product_id.categ_id.property_account_income_categ.id
else:
raise osv.except_osv(_('Error!'), _('No account defined for product "%s".') % fee.product_id.name)
invoice_fee_id = inv_line_obj.create(cr, uid, {
'invoice_id': inv_id,
'name': name,
'origin': repair.name,
'account_id': account_id,
'quantity': fee.product_uom_qty,
'invoice_line_tax_id': [(6, 0, [x.id for x in fee.tax_id])],
'uos_id': fee.product_uom.id,
'product_id': fee.product_id and fee.product_id.id or False,
'price_unit': fee.price_unit,
'price_subtotal': fee.product_uom_qty * fee.price_unit
})
repair_fee_obj.write(cr, uid, [fee.id], {'invoiced': True, 'invoice_line_id': invoice_fee_id})
res[repair.id] = inv_id
return res
def action_repair_ready(self, cr, uid, ids, context=None):
""" Writes repair order state to 'Ready'
@return: True
"""
for repair in self.browse(cr, uid, ids, context=context):
self.pool.get('mrp.repair.line').write(cr, uid, [l.id for
l in repair.operations], {'state': 'confirmed'}, context=context)
self.write(cr, uid, [repair.id], {'state': 'ready'})
return True
def action_repair_start(self, cr, uid, ids, context=None):
""" Writes repair order state to 'Under Repair'
@return: True
"""
repair_line = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids, context=context):
repair_line.write(cr, uid, [l.id for
l in repair.operations], {'state': 'confirmed'}, context=context)
repair.write({'state': 'under_repair'})
return True
def action_repair_end(self, cr, uid, ids, context=None):
""" Writes repair order state to 'To be invoiced' if invoice method is
After repair else state is set to 'Ready'.
@return: True
"""
for order in self.browse(cr, uid, ids, context=context):
val = {}
val['repaired'] = True
if (not order.invoiced and order.invoice_method == 'after_repair'):
val['state'] = '2binvoiced'
elif (not order.invoiced and order.invoice_method == 'b4repair'):
val['state'] = 'ready'
else:
pass
self.write(cr, uid, [order.id], val)
return True
def wkf_repair_done(self, cr, uid, ids, *args):
self.action_repair_done(cr, uid, ids)
return True
def action_repair_done(self, cr, uid, ids, context=None):
""" Creates stock move for operation and stock move for final product of repair order.
@return: Move ids of final products
"""
res = {}
move_obj = self.pool.get('stock.move')
repair_line_obj = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids, context=context):
move_ids = []
for move in repair.operations:
move_id = move_obj.create(cr, uid, {
'name': move.name,
'product_id': move.product_id.id,
'restrict_lot_id': move.lot_id.id,
'product_uom_qty': move.product_uom_qty,
'product_uom': move.product_uom.id,
'partner_id': repair.address_id and repair.address_id.id or False,
'location_id': move.location_id.id,
'location_dest_id': move.location_dest_id.id,
'state': 'assigned',
})
move_ids.append(move_id)
repair_line_obj.write(cr, uid, [move.id], {'move_id': move_id, 'state': 'done'}, context=context)
move_id = move_obj.create(cr, uid, {
'name': repair.name,
'product_id': repair.product_id.id,
'product_uom': repair.product_uom.id or repair.product_id.uom_id.id,
'product_uom_qty': repair.product_qty,
'partner_id': repair.address_id and repair.address_id.id or False,
'location_id': repair.location_id.id,
'location_dest_id': repair.location_dest_id.id,
'restrict_lot_id': repair.lot_id.id,
})
move_ids.append(move_id)
move_obj.action_done(cr, uid, move_ids, context=context)
self.write(cr, uid, [repair.id], {'state': 'done', 'move_id': move_id}, context=context)
res[repair.id] = move_id
return res
class ProductChangeMixin(object):
def product_id_change(self, cr, uid, ids, pricelist, product, uom=False,
product_uom_qty=0, partner_id=False, guarantee_limit=False):
""" On change of product it sets product quantity, tax account, name,
uom of product, unit price and price subtotal.
@param pricelist: Pricelist of current record.
@param product: Changed id of product.
@param uom: UoM of current record.
@param product_uom_qty: Quantity of current record.
@param partner_id: Partner of current record.
@param guarantee_limit: Guarantee limit of current record.
@return: Dictionary of values and warning message.
"""
result = {}
warning = {}
if not product_uom_qty:
product_uom_qty = 1
result['product_uom_qty'] = product_uom_qty
if product:
product_obj = self.pool.get('product.product').browse(cr, uid, product)
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id)
result['tax_id'] = self.pool.get('account.fiscal.position').map_tax(cr, uid, partner.property_account_position, product_obj.taxes_id)
result['name'] = product_obj.display_name
result['product_uom'] = product_obj.uom_id and product_obj.uom_id.id or False
if not pricelist:
warning = {
'title': _('No Pricelist!'),
'message':
_('You have to select a pricelist in the Repair form !\n'
'Please set one before choosing a product.')
}
else:
price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],
product, product_uom_qty, partner_id, {'uom': uom})[pricelist]
if price is False:
warning = {
'title': _('No valid pricelist line found !'),
'message':
_("Couldn't find a pricelist line matching this product and quantity.\n"
"You have to change either the product, the quantity or the pricelist.")
}
else:
result.update({'price_unit': price, 'price_subtotal': price * product_uom_qty})
return {'value': result, 'warning': warning}
class mrp_repair_line(osv.osv, ProductChangeMixin):
_name = 'mrp.repair.line'
_description = 'Repair Line'
def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
cur_obj = self.pool.get('res.currency')
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.to_invoice and line.price_unit * line.product_uom_qty or 0
cur = line.repair_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, res[line.id])
return res
_columns = {
'name': fields.char('Description', required=True),
'repair_id': fields.many2one('mrp.repair', 'Repair Order Reference', ondelete='cascade', select=True),
'type': fields.selection([('add', 'Add'), ('remove', 'Remove')], 'Type', required=True),
'to_invoice': fields.boolean('To Invoice'),
'product_id': fields.many2one('product.product', 'Product', required=True),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'price_unit': fields.float('Unit Price', required=True, digits_compute=dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute=dp.get_precision('Account')),
'tax_id': fields.many2many('account.tax', 'repair_operation_line_tax', 'repair_operation_line_id', 'tax_id', 'Taxes'),
'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'invoice_line_id': fields.many2one('account.invoice.line', 'Invoice Line', readonly=True, copy=False),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True),
'location_dest_id': fields.many2one('stock.location', 'Dest. Location', required=True, select=True),
'move_id': fields.many2one('stock.move', 'Inventory Move', readonly=True, copy=False),
'lot_id': fields.many2one('stock.production.lot', 'Lot'),
'state': fields.selection([
('draft', 'Draft'),
('confirmed', 'Confirmed'),
('done', 'Done'),
('cancel', 'Cancelled')], 'Status', required=True, readonly=True, copy=False,
help=' * The \'Draft\' status is set automatically as draft when repair order in draft status. \
\n* The \'Confirmed\' status is set automatically as confirm when repair order in confirm status. \
\n* The \'Done\' status is set automatically when repair order is completed.\
\n* The \'Cancelled\' status is set automatically when user cancel repair order.'),
}
_defaults = {
'state': lambda *a: 'draft',
'product_uom_qty': lambda *a: 1,
}
def onchange_operation_type(self, cr, uid, ids, type, guarantee_limit, company_id=False, context=None):
""" On change of operation type it sets source location, destination location
and to invoice field.
        @param type: Changed operation type.
@param guarantee_limit: Guarantee limit of current record.
@return: Dictionary of values.
"""
if not type:
return {'value': {
'location_id': False,
'location_dest_id': False
}}
location_obj = self.pool.get('stock.location')
warehouse_obj = self.pool.get('stock.warehouse')
location_id = location_obj.search(cr, uid, [('usage', '=', 'production')], context=context)
location_id = location_id and location_id[0] or False
if type == 'add':
# TOCHECK: Find stock location for user's company warehouse or
# repair order's company's warehouse (company_id field is added in fix of lp:831583)
args = company_id and [('company_id', '=', company_id)] or []
warehouse_ids = warehouse_obj.search(cr, uid, args, context=context)
stock_id = False
if warehouse_ids:
stock_id = warehouse_obj.browse(cr, uid, warehouse_ids[0], context=context).lot_stock_id.id
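            # Added parts default to being invoiced only when the warranty has already
            # expired (guarantee_limit is in the past); under warranty they default to free.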
to_invoice = (guarantee_limit and datetime.strptime(guarantee_limit, '%Y-%m-%d') < datetime.now())
return {'value': {
'to_invoice': to_invoice,
'location_id': stock_id,
'location_dest_id': location_id
}}
scrap_location_ids = location_obj.search(cr, uid, [('scrap_location', '=', True)], context=context)
return {'value': {
'to_invoice': False,
'location_id': location_id,
'location_dest_id': scrap_location_ids and scrap_location_ids[0] or False,
}}
class mrp_repair_fee(osv.osv, ProductChangeMixin):
_name = 'mrp.repair.fee'
_description = 'Repair Fees Line'
def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
cur_obj = self.pool.get('res.currency')
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.to_invoice and line.price_unit * line.product_uom_qty or 0
cur = line.repair_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, res[line.id])
return res
_columns = {
'repair_id': fields.many2one('mrp.repair', 'Repair Order Reference', required=True, ondelete='cascade', select=True),
'name': fields.char('Description', select=True, required=True),
'product_id': fields.many2one('product.product', 'Product'),
'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'price_unit': fields.float('Unit Price', required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute=dp.get_precision('Account')),
'tax_id': fields.many2many('account.tax', 'repair_fee_line_tax', 'repair_fee_line_id', 'tax_id', 'Taxes'),
'invoice_line_id': fields.many2one('account.invoice.line', 'Invoice Line', readonly=True, copy=False),
'to_invoice': fields.boolean('To Invoice'),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
}
_defaults = {
'to_invoice': lambda *a: True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,575,557,988,210,559,000 | 53.29037 | 402 | 0.563581 | false |
repotvsupertuga/repo | plugin.video.jami/resources/lib/resolvers/fastvideo.py | 23 | 1471 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from resources.lib.libraries import client
from resources.lib.libraries import jsunpack
def resolve(url):
try:
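        # Normalise embed-style URLs to a bare video id, then rebuild the
        # canonical embed URL that serves the player page.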
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://rapidvideo.ws/embed-%s.html' % url
result = client.request(url, mobile=True)
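        # The player page wraps its config in a packed eval(...) block; grab the
        # last one and unpack it to reach the embed/file sources.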
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
result = jsunpack.unpack(result)
url = client.parseDOM(result, 'embed', ret='src')
url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
url = [i for i in url if not i.endswith('.srt')]
url = 'http://' + url[0].split('://', 1)[-1]
return url
except:
return
| gpl-2.0 | 4,690,116,600,312,048,000 | 30.978261 | 73 | 0.628144 | false |
MRigal/django | django/contrib/gis/admin/options.py | 379 | 5649 | from django.contrib.admin import ModelAdmin
from django.contrib.gis.admin.widgets import OpenLayersWidget
from django.contrib.gis.db import models
from django.contrib.gis.gdal import HAS_GDAL, OGRGeomType
from django.core.exceptions import ImproperlyConfigured
spherical_mercator_srid = 3857
class GeoModelAdmin(ModelAdmin):
"""
The administration options class for Geographic models. Map settings
may be overloaded from their defaults to create custom maps.
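
    A minimal usage sketch ('WorldBorder' is a hypothetical model with a
    geometry field; the overridden values are purely illustrative):

        from django.contrib.gis import admin

        class WorldBorderAdmin(admin.GeoModelAdmin):
            default_lon = 5      # degrees, since the default map_srid is 4326
            default_lat = 47
            default_zoom = 6

        admin.site.register(WorldBorder, WorldBorderAdmin)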
"""
# The default map settings that may be overloaded -- still subject
# to API changes.
default_lon = 0
default_lat = 0
default_zoom = 4
display_wkt = False
display_srid = False
extra_js = []
num_zoom = 18
max_zoom = False
min_zoom = False
units = False
max_resolution = False
max_extent = False
modifiable = True
mouse_position = True
scale_text = True
layerswitcher = True
scrollable = True
map_width = 600
map_height = 400
map_srid = 4326
map_template = 'gis/admin/openlayers.html'
openlayers_url = 'http://openlayers.org/api/2.13/OpenLayers.js'
point_zoom = num_zoom - 6
wms_url = 'http://vmap0.tiles.osgeo.org/wms/vmap0'
wms_layer = 'basic'
wms_name = 'OpenLayers WMS'
wms_options = {'format': 'image/jpeg'}
debug = False
widget = OpenLayersWidget
@property
def media(self):
"Injects OpenLayers JavaScript into the admin."
media = super(GeoModelAdmin, self).media
media.add_js([self.openlayers_url])
media.add_js(self.extra_js)
return media
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Overloaded from ModelAdmin so that an OpenLayersWidget is used
for viewing/editing 2D GeometryFields (OpenLayers 2 does not support
3D editing).
"""
if isinstance(db_field, models.GeometryField) and db_field.dim < 3:
kwargs.pop('request', None)
# Setting the widget with the newly defined widget.
kwargs['widget'] = self.get_map_widget(db_field)
return db_field.formfield(**kwargs)
else:
return super(GeoModelAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def get_map_widget(self, db_field):
"""
Returns a subclass of the OpenLayersWidget (or whatever was specified
in the `widget` attribute) using the settings from the attributes set
in this class.
"""
is_collection = db_field.geom_type in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
if is_collection:
if db_field.geom_type == 'GEOMETRYCOLLECTION':
collection_type = 'Any'
else:
collection_type = OGRGeomType(db_field.geom_type.replace('MULTI', ''))
else:
collection_type = 'None'
class OLMap(self.widget):
template = self.map_template
geom_type = db_field.geom_type
wms_options = ''
if self.wms_options:
wms_options = ["%s: '%s'" % pair for pair in self.wms_options.items()]
wms_options = ', %s' % ', '.join(wms_options)
params = {'default_lon': self.default_lon,
'default_lat': self.default_lat,
'default_zoom': self.default_zoom,
'display_wkt': self.debug or self.display_wkt,
'geom_type': OGRGeomType(db_field.geom_type),
'field_name': db_field.name,
'is_collection': is_collection,
'scrollable': self.scrollable,
'layerswitcher': self.layerswitcher,
'collection_type': collection_type,
'is_generic': db_field.geom_type == 'GEOMETRY',
'is_linestring': db_field.geom_type in ('LINESTRING', 'MULTILINESTRING'),
'is_polygon': db_field.geom_type in ('POLYGON', 'MULTIPOLYGON'),
'is_point': db_field.geom_type in ('POINT', 'MULTIPOINT'),
'num_zoom': self.num_zoom,
'max_zoom': self.max_zoom,
'min_zoom': self.min_zoom,
'units': self.units, # likely should get from object
'max_resolution': self.max_resolution,
'max_extent': self.max_extent,
'modifiable': self.modifiable,
'mouse_position': self.mouse_position,
'scale_text': self.scale_text,
'map_width': self.map_width,
'map_height': self.map_height,
'point_zoom': self.point_zoom,
'srid': self.map_srid,
'display_srid': self.display_srid,
'wms_url': self.wms_url,
'wms_layer': self.wms_layer,
'wms_name': self.wms_name,
'wms_options': wms_options,
'debug': self.debug,
}
return OLMap
class OSMGeoAdmin(GeoModelAdmin):
map_template = 'gis/admin/osm.html'
num_zoom = 20
map_srid = spherical_mercator_srid
max_extent = '-20037508,-20037508,20037508,20037508'
max_resolution = '156543.0339'
point_zoom = num_zoom - 6
units = 'm'
def __init__(self, *args):
if not HAS_GDAL:
raise ImproperlyConfigured("OSMGeoAdmin is not usable without GDAL libs installed")
super(OSMGeoAdmin, self).__init__(*args)
| bsd-3-clause | 3,868,140,902,743,238,000 | 38.78169 | 117 | 0.565764 | false |
thaihungle/deepexp | kbtext-mann/get_stats.py | 3 | 2790 | import json
import util
from collections import defaultdict
def get_fb_stats(freebase_data_file):
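    # Each line is assumed to look roughly like:
    #   ("m.0abc", "people.person", "place_of_birth", "m.0xyz")
    # i.e. a parenthesised, comma-separated quadruple of quoted strings, from
    # which the relation name is built as r1_r2.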
with open(freebase_data_file) as fb:
fact_counter = 0
relation_set = set()
entity_set = set()
for line in fb:
line = line.strip()
line = line[1:-1]
e1, r1, r2, e2 = [a.strip('"') for a in [x.strip() for x in line.split(',')]]
r = r1 + '_' + r2
fact_counter += 1
relation_set.add(r)
entity_set.add(e1)
entity_set.add(e2)
print("Total num of facts {}".format(fact_counter))
print("Num unique entities {}".format(len(entity_set)))
print("Num unique relations {}".format(len(relation_set)))
def get_questions_stats(train_data_file, dev_data_file):
print('1. Getting the number of blanks')
blank_str = '_blank_'
num_blanks_map = defaultdict(int)
word_freq_train = defaultdict(int)
with open(train_data_file) as train_file:
for counter, line in enumerate(util.verboserate(train_file)):
line = line.strip()
q_json = json.loads(line)
q = q_json['sentence']
count = q.count(blank_str)
num_blanks_map[count] += 1
words = q.split(' ')
for word in words:
word = word.strip()
word_freq_train[word] += 1
a_list = q_json['answerSubset']
for a in a_list:
                word_freq_train[a] += 1  # count each answer occurrence as well
print(num_blanks_map)
print '2. Number of word types in the train set {}'.format(len(word_freq_train))
print '3. Checking overlap with the dev answers'
dev_answers_present = set()
dev_answers_oov = set()
dev_answers = set()
with open(dev_data_file) as dev_file:
for line in dev_file:
line = line.strip()
dev_json = json.loads(line)
a_list = dev_json['answerSubset']
for a in a_list:
if a in word_freq_train:
dev_answers_present.add(a)
else:
dev_answers_oov.add(a)
dev_answers.add(a)
print 'Number of unique dev answer strings {}'.format(len(dev_answers))
print 'Number of oov answer strings in dev set {}'.format(len(dev_answers_oov))
    print 'Number of dev answer strings which have at least 1 occurrence in the train set {}'.format(
len(dev_answers_present))
freebase_data_file = "/home/rajarshi/research/graph-parser/data/spades/freebase.spades.txt"
train_data_file = "/home/rajarshi/research/graph-parser/data/spades/train.json"
dev_data_file = "/home/rajarshi/research/graph-parser/data/spades/dev.json"
# get_fb_stats()
get_questions_stats(train_data_file, dev_data_file)
| mit | -6,408,277,778,152,423,000 | 34.316456 | 97 | 0.583154 | false |
sunzhongwei/pelican | pelican/readers.py | 4 | 21032 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import logging
import os
import re
import docutils
import docutils.core
import docutils.io
from docutils.writers.html4css1 import HTMLTranslator
import six
# import the directives to have pygments support
from pelican import rstdirectives # NOQA
try:
from markdown import Markdown
except ImportError:
Markdown = False # NOQA
try:
from html import escape
except ImportError:
from cgi import escape
from six.moves.html_parser import HTMLParser
from pelican import signals
from pelican.contents import Page, Category, Tag, Author
from pelican.utils import get_date, pelican_open, FileStampDataCacher, SafeDatetime
METADATA_PROCESSORS = {
'tags': lambda x, y: [Tag(tag, y) for tag in x.split(',')],
'date': lambda x, y: get_date(x),
'modified': lambda x, y: get_date(x),
'status': lambda x, y: x.strip(),
'category': Category,
'author': Author,
'authors': lambda x, y: [Author(author.strip(), y) for author in x.split(',')],
}
logger = logging.getLogger(__name__)
class BaseReader(object):
"""Base class to read files.
This class is used to process static files, and it can be inherited for
other types of file. A Reader class must have the following attributes:
- enabled: (boolean) tell if the Reader class is enabled. It
generally depends on the import of some dependency.
- file_extensions: a list of file extensions that the Reader will process.
- extensions: a list of extensions to use in the reader (typical use is
Markdown).
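
    A minimal sketch of a custom reader (the 'txt' extension and the returned
    metadata are purely illustrative):

        class TxtReader(BaseReader):
            enabled = True
            file_extensions = ['txt']

            def read(self, source_path):
                with open(source_path) as f:
                    content = f.read()
                title = os.path.splitext(os.path.basename(source_path))[0]
                metadata = {'title': self.process_metadata('title', title)}
                return content, metadata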
"""
enabled = True
file_extensions = ['static']
extensions = None
def __init__(self, settings):
self.settings = settings
def process_metadata(self, name, value):
if name in METADATA_PROCESSORS:
return METADATA_PROCESSORS[name](value, self.settings)
return value
def read(self, source_path):
"No-op parser"
content = None
metadata = {}
return content, metadata
class _FieldBodyTranslator(HTMLTranslator):
def __init__(self, document):
HTMLTranslator.__init__(self, document)
self.compact_p = None
def astext(self):
return ''.join(self.body)
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
pass
def render_node_to_html(document, node):
visitor = _FieldBodyTranslator(document)
node.walkabout(visitor)
return visitor.astext()
class PelicanHTMLTranslator(HTMLTranslator):
def visit_abbreviation(self, node):
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_image(self, node):
# set an empty alt if alt is not specified
# avoids that alt is taken from src
node['alt'] = node.get('alt', '')
return HTMLTranslator.visit_image(self, node)
class RstReader(BaseReader):
"""Reader for reStructuredText files"""
enabled = bool(docutils)
file_extensions = ['rst']
class FileInput(docutils.io.FileInput):
"""Patch docutils.io.FileInput to remove "U" mode in py3.
Universal newlines is enabled by default and "U" mode is deprecated
in py3.
"""
def __init__(self, *args, **kwargs):
if six.PY3:
kwargs['mode'] = kwargs.get('mode', 'r').replace('U', '')
docutils.io.FileInput.__init__(self, *args, **kwargs)
def __init__(self, *args, **kwargs):
super(RstReader, self).__init__(*args, **kwargs)
def _parse_metadata(self, document):
"""Return the dict containing document metadata"""
output = {}
for docinfo in document.traverse(docutils.nodes.docinfo):
for element in docinfo.children:
if element.tagname == 'field': # custom fields (e.g. summary)
name_elem, body_elem = element.children
name = name_elem.astext()
if name == 'summary':
value = render_node_to_html(document, body_elem)
else:
value = body_elem.astext()
elif element.tagname == 'authors': # author list
name = element.tagname
value = [element.astext() for element in element.children]
value = ','.join(value) # METADATA_PROCESSORS expects a string
else: # standard fields (e.g. address)
name = element.tagname
value = element.astext()
name = name.lower()
output[name] = self.process_metadata(name, value)
return output
def _get_publisher(self, source_path):
extra_params = {'initial_header_level': '2',
'syntax_highlight': 'short',
'input_encoding': 'utf-8',
'exit_status_level': 2,
'embed_stylesheet': False}
user_params = self.settings.get('DOCUTILS_SETTINGS')
if user_params:
extra_params.update(user_params)
pub = docutils.core.Publisher(
source_class=self.FileInput,
destination_class=docutils.io.StringOutput)
pub.set_components('standalone', 'restructuredtext', 'html')
pub.writer.translator_class = PelicanHTMLTranslator
pub.process_programmatic_settings(None, extra_params, None)
pub.set_source(source_path=source_path)
pub.publish(enable_exit_status=True)
return pub
def read(self, source_path):
"""Parses restructured text"""
pub = self._get_publisher(source_path)
parts = pub.writer.parts
content = parts.get('body')
metadata = self._parse_metadata(pub.document)
metadata.setdefault('title', parts.get('title'))
return content, metadata
class MarkdownReader(BaseReader):
"""Reader for Markdown files"""
enabled = bool(Markdown)
file_extensions = ['md', 'markdown', 'mkd', 'mdown']
def __init__(self, *args, **kwargs):
super(MarkdownReader, self).__init__(*args, **kwargs)
self.extensions = list(self.settings['MD_EXTENSIONS'])
if 'meta' not in self.extensions:
self.extensions.append('meta')
self._source_path = None
def _parse_metadata(self, meta):
"""Return the dict containing document metadata"""
output = {}
for name, value in meta.items():
name = name.lower()
if name == "summary":
# handle summary metadata as markdown
# summary metadata is special case and join all list values
summary_values = "\n".join(value)
# reset the markdown instance to clear any state
self._md.reset()
summary = self._md.convert(summary_values)
output[name] = self.process_metadata(name, summary)
elif name in METADATA_PROCESSORS:
if len(value) > 1:
logger.warning('Duplicate definition of `%s` '
'for %s. Using first one.', name, self._source_path)
output[name] = self.process_metadata(name, value[0])
elif len(value) > 1:
# handle list metadata as list of string
output[name] = self.process_metadata(name, value)
else:
# otherwise, handle metadata as single string
output[name] = self.process_metadata(name, value[0])
return output
def read(self, source_path):
"""Parse content and metadata of markdown files"""
self._source_path = source_path
self._md = Markdown(extensions=self.extensions)
with pelican_open(source_path) as text:
content = self._md.convert(text)
metadata = self._parse_metadata(self._md.Meta)
return content, metadata
class HTMLReader(BaseReader):
"""Parses HTML files as input, looking for meta, title, and body tags"""
file_extensions = ['htm', 'html']
enabled = True
class _HTMLParser(HTMLParser):
def __init__(self, settings, filename):
try:
# Python 3.4+
HTMLParser.__init__(self, convert_charrefs=False)
except TypeError:
HTMLParser.__init__(self)
self.body = ''
self.metadata = {}
self.settings = settings
self._data_buffer = ''
self._filename = filename
self._in_top_level = True
self._in_head = False
self._in_title = False
self._in_body = False
self._in_tags = False
def handle_starttag(self, tag, attrs):
if tag == 'head' and self._in_top_level:
self._in_top_level = False
self._in_head = True
elif tag == 'title' and self._in_head:
self._in_title = True
self._data_buffer = ''
elif tag == 'body' and self._in_top_level:
self._in_top_level = False
self._in_body = True
self._data_buffer = ''
elif tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
elif self._in_body:
self._data_buffer += self.build_tag(tag, attrs, False)
def handle_endtag(self, tag):
if tag == 'head':
if self._in_head:
self._in_head = False
self._in_top_level = True
elif tag == 'title':
self._in_title = False
self.metadata['title'] = self._data_buffer
elif tag == 'body':
self.body = self._data_buffer
self._in_body = False
self._in_top_level = True
elif self._in_body:
self._data_buffer += '</{}>'.format(escape(tag))
def handle_startendtag(self, tag, attrs):
if tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
if self._in_body:
self._data_buffer += self.build_tag(tag, attrs, True)
def handle_comment(self, data):
self._data_buffer += '<!--{}-->'.format(data)
def handle_data(self, data):
self._data_buffer += data
def handle_entityref(self, data):
self._data_buffer += '&{};'.format(data)
def handle_charref(self, data):
self._data_buffer += '&#{};'.format(data)
def build_tag(self, tag, attrs, close_tag):
result = '<{}'.format(escape(tag))
for k, v in attrs:
result += ' ' + escape(k)
if v is not None:
result += '="{}"'.format(escape(v))
if close_tag:
return result + ' />'
return result + '>'
def _handle_meta_tag(self, attrs):
name = self._attr_value(attrs, 'name')
if name is None:
attr_serialized = ', '.join(['{}="{}"'.format(k, v) for k, v in attrs])
logger.warning("Meta tag in file %s does not have a 'name' "
"attribute, skipping. Attributes: %s",
self._filename, attr_serialized)
return
name = name.lower()
contents = self._attr_value(attrs, 'content', '')
if not contents:
contents = self._attr_value(attrs, 'contents', '')
if contents:
logger.warning(
"Meta tag attribute 'contents' used in file %s, should"
" be changed to 'content'",
self._filename,
extra={'limit_msg': ("Other files have meta tag "
"attribute 'contents' that should "
"be changed to 'content'")})
if name == 'keywords':
name = 'tags'
self.metadata[name] = contents
@classmethod
def _attr_value(cls, attrs, name, default=None):
return next((x[1] for x in attrs if x[0] == name), default)
def read(self, filename):
"""Parse content and metadata of HTML files"""
with pelican_open(filename) as content:
parser = self._HTMLParser(self.settings, filename)
parser.feed(content)
parser.close()
metadata = {}
for k in parser.metadata:
metadata[k] = self.process_metadata(k, parser.metadata[k])
return parser.body, metadata
class Readers(FileStampDataCacher):
"""Interface for all readers.
This class contains a mapping of file extensions / Reader classes, to know
which Reader class must be used to read a file (based on its extension).
    This is customizable both with the 'READERS' setting and with the
    'readers_init' signal for plugins.
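
    For illustration, a project or plugin could swap in a custom reader for an
    extension via the settings (CustomHTMLReader is a hypothetical class):

        READERS = {'html': CustomHTMLReader}

    or disable processing of an extension entirely:

        READERS = {'html': None}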
"""
def __init__(self, settings=None, cache_name=''):
self.settings = settings or {}
self.readers = {}
self.reader_classes = {}
for cls in [BaseReader] + BaseReader.__subclasses__():
if not cls.enabled:
logger.debug('Missing dependencies for %s',
', '.join(cls.file_extensions))
continue
for ext in cls.file_extensions:
self.reader_classes[ext] = cls
if self.settings['READERS']:
self.reader_classes.update(self.settings['READERS'])
signals.readers_init.send(self)
for fmt, reader_class in self.reader_classes.items():
if not reader_class:
continue
self.readers[fmt] = reader_class(self.settings)
# set up caching
cache_this_level = (cache_name != '' and
self.settings['CONTENT_CACHING_LAYER'] == 'reader')
caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
super(Readers, self).__init__(settings, cache_name,
caching_policy, load_policy,
)
@property
def extensions(self):
return self.readers.keys()
def read_file(self, base_path, path, content_class=Page, fmt=None,
context=None, preread_signal=None, preread_sender=None,
context_signal=None, context_sender=None):
"""Return a content object parsed with the given format."""
path = os.path.abspath(os.path.join(base_path, path))
source_path = os.path.relpath(path, base_path)
logger.debug('Read file %s -> %s',
source_path, content_class.__name__)
if not fmt:
_, ext = os.path.splitext(os.path.basename(path))
fmt = ext[1:]
if fmt not in self.readers:
raise TypeError(
'Pelican does not know how to parse %s', path)
if preread_signal:
logger.debug('Signal %s.send(%s)',
preread_signal.name, preread_sender)
preread_signal.send(preread_sender)
reader = self.readers[fmt]
metadata = default_metadata(
settings=self.settings, process=reader.process_metadata)
metadata.update(path_metadata(
full_path=path, source_path=source_path,
settings=self.settings))
metadata.update(parse_path_metadata(
source_path=source_path, settings=self.settings,
process=reader.process_metadata))
reader_name = reader.__class__.__name__
metadata['reader'] = reader_name.replace('Reader', '').lower()
content, reader_metadata = self.get_cached_data(path, (None, None))
if content is None:
content, reader_metadata = reader.read(path)
self.cache_data(path, (content, reader_metadata))
metadata.update(reader_metadata)
if content:
# find images with empty alt
find_empty_alt(content, path)
        # optionally run the content through Typogrify if enabled
if self.settings['TYPOGRIFY']:
from typogrify.filters import typogrify
def typogrify_wrapper(text):
"""Ensures ignore_tags feature is backward compatible"""
try:
return typogrify(text, self.settings['TYPOGRIFY_IGNORE_TAGS'])
except TypeError:
return typogrify(text)
if content:
content = typogrify_wrapper(content)
metadata['title'] = typogrify_wrapper(metadata['title'])
if 'summary' in metadata:
metadata['summary'] = typogrify_wrapper(metadata['summary'])
if context_signal:
logger.debug('Signal %s.send(%s, <metadata>)',
context_signal.name, context_sender)
context_signal.send(context_sender, metadata=metadata)
return content_class(content=content, metadata=metadata,
settings=self.settings, source_path=path,
context=context)
def find_empty_alt(content, path):
"""Find images with empty alt
Create warnings for all images with empty alt (up to a certain number),
as they are really likely to be accessibility flaws.
"""
imgs = re.compile(r"""
(?:
# src before alt
<img
[^\>]*
src=(['"])(.*)\1
[^\>]*
alt=(['"])\3
)|(?:
# alt before src
<img
[^\>]*
alt=(['"])\4
[^\>]*
src=(['"])(.*)\5
)
""", re.X)
for match in re.findall(imgs, content):
logger.warning(
'Empty alt attribute for image %s in %s',
os.path.basename(match[1] + match[5]), path,
extra={'limit_msg': 'Other images have empty alt attributes'})
def default_metadata(settings=None, process=None):
metadata = {}
if settings:
if 'DEFAULT_CATEGORY' in settings:
value = settings['DEFAULT_CATEGORY']
if process:
value = process('category', value)
metadata['category'] = value
if settings.get('DEFAULT_DATE', None) and settings['DEFAULT_DATE'] != 'fs':
metadata['date'] = SafeDatetime(*settings['DEFAULT_DATE'])
return metadata
def path_metadata(full_path, source_path, settings=None):
metadata = {}
if settings:
if settings.get('DEFAULT_DATE', None) == 'fs':
metadata['date'] = SafeDatetime.fromtimestamp(
os.stat(full_path).st_ctime)
metadata.update(settings.get('EXTRA_PATH_METADATA', {}).get(
source_path, {}))
return metadata
def parse_path_metadata(source_path, settings=None, process=None):
"""Extract a metadata dictionary from a file's path
>>> import pprint
>>> settings = {
... 'FILENAME_METADATA': '(?P<slug>[^.]*).*',
... 'PATH_METADATA':
... '(?P<category>[^/]*)/(?P<date>\d{4}-\d{2}-\d{2})/.*',
... }
>>> reader = BaseReader(settings=settings)
>>> metadata = parse_path_metadata(
... source_path='my-cat/2013-01-01/my-slug.html',
... settings=settings,
... process=reader.process_metadata)
>>> pprint.pprint(metadata) # doctest: +ELLIPSIS
{'category': <pelican.urlwrappers.Category object at ...>,
'date': SafeDatetime(2013, 1, 1, 0, 0),
'slug': 'my-slug'}
"""
metadata = {}
dirname, basename = os.path.split(source_path)
base, ext = os.path.splitext(basename)
subdir = os.path.basename(dirname)
if settings:
checks = []
for key, data in [('FILENAME_METADATA', base),
('PATH_METADATA', source_path)]:
checks.append((settings.get(key, None), data))
if settings.get('USE_FOLDER_AS_CATEGORY', None):
checks.insert(0, ('(?P<category>.*)', subdir))
for regexp, data in checks:
if regexp and data:
match = re.match(regexp, data)
if match:
# .items() for py3k compat.
for k, v in match.groupdict().items():
if k not in metadata:
k = k.lower() # metadata must be lowercase
if process:
v = process(k, v)
metadata[k] = v
return metadata
| agpl-3.0 | -5,030,568,412,486,187,000 | 34.58714 | 87 | 0.549163 | false |
BillKeenan/lets-encrypt-preview | letsencrypt/tests/proof_of_possession_test.py | 37 | 3484 | """Tests for letsencrypt.proof_of_possession."""
import os
import tempfile
import unittest
import mock
from acme import challenges
from acme import jose
from acme import messages
from letsencrypt import achallenges
from letsencrypt import proof_of_possession
from letsencrypt.display import util as display_util
from letsencrypt.tests import test_util
CERT0_PATH = test_util.vector_path("cert.der")
CERT2_PATH = test_util.vector_path("dsa_cert.pem")
CERT2_KEY_PATH = test_util.vector_path("dsa512_key.pem")
CERT3_PATH = test_util.vector_path("matching_cert.pem")
CERT3_KEY_PATH = test_util.vector_path("rsa512_key_2.pem")
CERT3_KEY = test_util.load_rsa_private_key("rsa512_key_2.pem").public_key()
class ProofOfPossessionTest(unittest.TestCase):
def setUp(self):
self.installer = mock.MagicMock()
self.cert1_path = tempfile.mkstemp()[1]
certs = [CERT0_PATH, self.cert1_path, CERT2_PATH, CERT3_PATH]
keys = [None, None, CERT2_KEY_PATH, CERT3_KEY_PATH]
self.installer.get_all_certs_keys.return_value = zip(
certs, keys, 4 * [None])
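        # CERT3 is the only certificate matching the JWK used in the challenge
        # hints below; the empty temp file and the other certs exercise the
        # non-matching code paths.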
self.proof_of_pos = proof_of_possession.ProofOfPossession(
self.installer)
hints = challenges.ProofOfPossession.Hints(
jwk=jose.JWKRSA(key=CERT3_KEY), cert_fingerprints=(),
certs=(), serial_numbers=(), subject_key_identifiers=(),
issuers=(), authorized_for=())
chall = challenges.ProofOfPossession(
alg=jose.RS256, nonce='zczv4HMLVe_0kimJ25Juig', hints=hints)
challb = messages.ChallengeBody(
chall=chall, uri="http://example", status=messages.STATUS_PENDING)
self.achall = achallenges.ProofOfPossession(
challb=challb, domain="example.com")
def tearDown(self):
os.remove(self.cert1_path)
def test_perform_bad_challenge(self):
hints = challenges.ProofOfPossession.Hints(
jwk=jose.jwk.JWKOct(key="foo"), cert_fingerprints=(),
certs=(), serial_numbers=(), subject_key_identifiers=(),
issuers=(), authorized_for=())
chall = challenges.ProofOfPossession(
alg=jose.HS512, nonce='zczv4HMLVe_0kimJ25Juig', hints=hints)
challb = messages.ChallengeBody(
chall=chall, uri="http://example", status=messages.STATUS_PENDING)
self.achall = achallenges.ProofOfPossession(
challb=challb, domain="example.com")
self.assertEqual(self.proof_of_pos.perform(self.achall), None)
def test_perform_no_input(self):
self.assertTrue(self.proof_of_pos.perform(self.achall).verify())
@mock.patch("letsencrypt.proof_of_possession.zope.component.getUtility")
def test_perform_with_input(self, mock_input):
# Remove the matching certificate
self.installer.get_all_certs_keys.return_value.pop()
mock_input().input.side_effect = [(display_util.CANCEL, ""),
(display_util.OK, CERT0_PATH),
(display_util.OK, "imaginary_file"),
(display_util.OK, CERT3_KEY_PATH)]
self.assertFalse(self.proof_of_pos.perform(self.achall))
self.assertFalse(self.proof_of_pos.perform(self.achall))
self.assertFalse(self.proof_of_pos.perform(self.achall))
self.assertTrue(self.proof_of_pos.perform(self.achall).verify())
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 | 3,307,103,420,549,911,000 | 40.975904 | 78 | 0.651263 | false |
marcore/edx-platform | common/djangoapps/terrain/stubs/lti.py | 44 | 12380 | """
Stub implementation of LTI Provider.
What is supported:
------------------
1.) This LTI Provider can service only one Tool Consumer at a time, so it is
not possible to include this LTI tool multiple times on a single page in the LMS.
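
For illustration, the stub reads the following keys from the server config
(the values shown are the defaults defined on the handler class):

    config = {
        'client_key': 'test_client_key',
        'client_secret': 'test_client_secret',
        'lti_endpoint': 'correct_lti_endpoint',
    }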
"""
from uuid import uuid4
import textwrap
import urllib
from oauthlib.oauth1.rfc5849 import signature, parameters
import oauthlib.oauth1
import hashlib
import base64
import mock
import requests
from http import StubHttpRequestHandler, StubHttpService
class StubLtiHandler(StubHttpRequestHandler):
"""
A handler for LTI POST and GET requests.
"""
DEFAULT_CLIENT_KEY = 'test_client_key'
DEFAULT_CLIENT_SECRET = 'test_client_secret'
DEFAULT_LTI_ENDPOINT = 'correct_lti_endpoint'
DEFAULT_LTI_ADDRESS = 'http://127.0.0.1:{port}/'
def do_GET(self):
"""
        Handle a GET request from the client and send a response back.
        Used to check that the LTI Provider started correctly.
"""
self.send_response(200, 'This is LTI Provider.', {'Content-type': 'text/plain'})
def do_POST(self):
"""
        Handle a POST request from the client and send a response back.
"""
if 'grade' in self.path and self._send_graded_result().status_code == 200:
status_message = 'LTI consumer (edX) responded with XML content:<br>' + self.server.grade_data['TC answer']
content = self._create_content(status_message)
self.send_response(200, content)
elif 'lti2_outcome' in self.path and self._send_lti2_outcome().status_code == 200:
status_message = 'LTI consumer (edX) responded with HTTP {}<br>'.format(
self.server.grade_data['status_code'])
content = self._create_content(status_message)
self.send_response(200, content)
elif 'lti2_delete' in self.path and self._send_lti2_delete().status_code == 200:
status_message = 'LTI consumer (edX) responded with HTTP {}<br>'.format(
self.server.grade_data['status_code'])
content = self._create_content(status_message)
self.send_response(200, content)
# Respond to request with correct lti endpoint
elif self._is_correct_lti_request():
params = {k: v for k, v in self.post_dict.items() if k != 'oauth_signature'}
if self._check_oauth_signature(params, self.post_dict.get('oauth_signature', "")):
status_message = "This is LTI tool. Success."
                # Store the grade callback data that needs to be kept as server state
if 'lis_outcome_service_url' in self.post_dict:
self.server.grade_data = {
'callback_url': self.post_dict.get('lis_outcome_service_url').replace('https', 'http'),
'sourcedId': self.post_dict.get('lis_result_sourcedid')
}
submit_url = '//{}:{}'.format(*self.server.server_address)
content = self._create_content(status_message, submit_url)
self.send_response(200, content)
else:
content = self._create_content("Wrong LTI signature")
self.send_response(200, content)
else:
content = self._create_content("Invalid request URL")
self.send_response(500, content)
def _send_graded_result(self):
"""
Send grade request.
"""
values = {
'textString': 0.5,
'sourcedId': self.server.grade_data['sourcedId'],
'imsx_messageIdentifier': uuid4().hex,
}
payload = textwrap.dedent("""
<?xml version = "1.0" encoding = "UTF-8"?>
<imsx_POXEnvelopeRequest xmlns="http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
<imsx_POXHeader>
<imsx_POXRequestHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>{imsx_messageIdentifier}</imsx_messageIdentifier> /
</imsx_POXRequestHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody>
<replaceResultRequest>
<resultRecord>
<sourcedGUID>
<sourcedId>{sourcedId}</sourcedId>
</sourcedGUID>
<result>
<resultScore>
<language>en-us</language>
<textString>{textString}</textString>
</resultScore>
</result>
</resultRecord>
</replaceResultRequest>
</imsx_POXBody>
</imsx_POXEnvelopeRequest>
""")
data = payload.format(**values)
url = self.server.grade_data['callback_url']
headers = {
'Content-Type': 'application/xml',
'X-Requested-With': 'XMLHttpRequest',
'Authorization': self._oauth_sign(url, data)
}
        # Send request, ignoring verification of the SSL certificate
response = requests.post(url, data=data, headers=headers, verify=False)
self.server.grade_data['TC answer'] = response.content
return response
def _send_lti2_outcome(self):
"""
Send a grade back to consumer
"""
payload = textwrap.dedent("""
{{
"@context" : "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type" : "Result",
"resultScore" : {score},
"comment" : "This is awesome."
}}
""")
data = payload.format(score=0.8)
return self._send_lti2(data)
def _send_lti2_delete(self):
"""
Send a delete back to consumer
"""
payload = textwrap.dedent("""
{
"@context" : "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type" : "Result"
}
""")
return self._send_lti2(payload)
def _send_lti2(self, payload):
"""
Send lti2 json result service request.
"""
### We compute the LTI V2.0 service endpoint from the callback_url (which is set by the launch call)
url = self.server.grade_data['callback_url']
url_parts = url.split('/')
url_parts[-1] = "lti_2_0_result_rest_handler"
anon_id = self.server.grade_data['sourcedId'].split(":")[-1]
url_parts.extend(["user", anon_id])
new_url = '/'.join(url_parts)
content_type = 'application/vnd.ims.lis.v2.result+json'
headers = {
'Content-Type': content_type,
'Authorization': self._oauth_sign(new_url, payload,
method='PUT',
content_type=content_type)
}
        # Send the request, ignoring verification of the SSL certificate
response = requests.put(new_url, data=payload, headers=headers, verify=False)
self.server.grade_data['status_code'] = response.status_code
self.server.grade_data['TC answer'] = response.content
return response
def _create_content(self, response_text, submit_url=None):
"""
Return content (str) either for launch, send grade or get result from TC.
"""
if submit_url:
submit_form = textwrap.dedent("""
<form action="{submit_url}/grade" method="post">
<input type="submit" name="submit-button" value="Submit">
</form>
<form action="{submit_url}/lti2_outcome" method="post">
<input type="submit" name="submit-lti2-button" value="Submit">
</form>
<form action="{submit_url}/lti2_delete" method="post">
<input type="submit" name="submit-lti2-delete-button" value="Submit">
</form>
""").format(submit_url=submit_url)
else:
submit_form = ''
# Show roles only for LTI launch.
if self.post_dict.get('roles'):
role = '<h5>Role: {}</h5>'.format(self.post_dict['roles'])
else:
role = ''
response_str = textwrap.dedent("""
<html>
<head>
<title>TEST TITLE</title>
</head>
<body>
<div>
<h2>IFrame loaded</h2>
<h3>Server response is:</h3>
<h3 class="result">{response}</h3>
{role}
</div>
{submit_form}
</body>
</html>
""").format(response=response_text, role=role, submit_form=submit_form)
        # Currently the LTI module URL-quotes the lis_result_sourcedid parameter twice,
        # so unquote the response two times.
return urllib.unquote(urllib.unquote(response_str))
def _is_correct_lti_request(self):
"""
Return a boolean indicating whether the URL path is a valid LTI end-point.
"""
lti_endpoint = self.server.config.get('lti_endpoint', self.DEFAULT_LTI_ENDPOINT)
return lti_endpoint in self.path
def _oauth_sign(self, url, body, content_type=u'application/x-www-form-urlencoded', method=u'POST'):
"""
Signs request and returns signed Authorization header.
"""
client_key = self.server.config.get('client_key', self.DEFAULT_CLIENT_KEY)
client_secret = self.server.config.get('client_secret', self.DEFAULT_CLIENT_SECRET)
client = oauthlib.oauth1.Client(
client_key=unicode(client_key),
client_secret=unicode(client_secret)
)
headers = {
# This is needed for body encoding:
'Content-Type': content_type,
}
# Calculate and encode body hash. See http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
sha1 = hashlib.sha1()
sha1.update(body)
oauth_body_hash = unicode(base64.b64encode(sha1.digest()))
params = client.get_oauth_params(None)
params.append((u'oauth_body_hash', oauth_body_hash))
mock_request = mock.Mock(
uri=unicode(urllib.unquote(url)),
headers=headers,
body=u"",
decoded_body=u"",
oauth_params=params,
http_method=unicode(method),
)
sig = client.get_oauth_signature(mock_request)
mock_request.oauth_params.append((u'oauth_signature', sig))
new_headers = parameters.prepare_headers(mock_request.oauth_params, headers, realm=None)
return new_headers['Authorization']
def _check_oauth_signature(self, params, client_signature):
"""
Checks oauth signature from client.
`params` are params from post request except signature,
`client_signature` is signature from request.
        Builds a mocked request and verifies the HMAC-SHA1 signature::
            1. builds the string to sign from `params`, `url` and `http_method`.
            2. signs it with `client_secret`, which comes from the server settings.
            3. obtains the resulting signature and compares it with request.signature
               (the request signature comes from the client in the request)
Returns `True` if signatures are correct, otherwise `False`.
"""
client_secret = unicode(self.server.config.get('client_secret', self.DEFAULT_CLIENT_SECRET))
port = self.server.server_address[1]
lti_base = self.DEFAULT_LTI_ADDRESS.format(port=port)
lti_endpoint = self.server.config.get('lti_endpoint', self.DEFAULT_LTI_ENDPOINT)
url = lti_base + lti_endpoint
request = mock.Mock()
request.params = [(unicode(k), unicode(v)) for k, v in params.items()]
request.uri = unicode(url)
request.http_method = u'POST'
request.signature = unicode(client_signature)
return signature.verify_hmac_sha1(request, client_secret)
class StubLtiService(StubHttpService):
"""
A stub LTI provider server that responds
to POST and GET requests to localhost.
"""
HANDLER_CLASS = StubLtiHandler
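# Illustrative usage sketch (not part of the original module; the constructor keyword,
# config keys and shutdown() call are assumptions about the StubHttpService base class):
#     server = StubLtiService(port_num=8765)
#     server.config['client_key'] = 'test_client_key'
#     server.config['client_secret'] = 'test_client_secret'
#     # ... point the LTI consumer (edX) at http://127.0.0.1:8765/<lti_endpoint> ...
#     server.shutdown()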
| agpl-3.0 | 7,304,052,343,522,553,000 | 38.807074 | 120 | 0.561874 | false |
python-tap/tappy | tap/tests/test_result.py | 2 | 3581 | # Copyright (c) 2019, Matt Layman and contributors
import contextlib
import os
import unittest
import unittest.case
from tap.i18n import _
from tap.runner import TAPTestResult
from tap.tests import TestCase
from tap.tracker import Tracker
class FakeTestCase(unittest.TestCase):
def runTest(self):
pass
@contextlib.contextmanager
def subTest(self, *args, **kwargs):
try:
self._subtest = unittest.case._SubTest(self, object(), {})
yield
finally:
self._subtest = None
def __call__(self, result):
pass
class TestTAPTestResult(TestCase):
@classmethod
def _make_one(cls):
# Yep, the stream is not being closed.
stream = open(os.devnull, "w")
result = TAPTestResult(stream, False, 0)
result.tracker = Tracker()
return result
def test_adds_error(self):
result = self._make_one()
# Python 3 does some extra testing in unittest on exceptions so fake
# the cause as if it were raised.
ex = Exception()
ex.__cause__ = None
result.addError(FakeTestCase(), (None, ex, None))
self.assertEqual(len(result.tracker._test_cases["FakeTestCase"]), 1)
def test_adds_failure(self):
result = self._make_one()
# Python 3 does some extra testing in unittest on exceptions so fake
# the cause as if it were raised.
ex = Exception()
ex.__cause__ = None
result.addFailure(FakeTestCase(), (None, ex, None))
self.assertEqual(len(result.tracker._test_cases["FakeTestCase"]), 1)
def test_adds_success(self):
result = self._make_one()
result.addSuccess(FakeTestCase())
self.assertEqual(len(result.tracker._test_cases["FakeTestCase"]), 1)
def test_adds_skip(self):
result = self._make_one()
result.addSkip(FakeTestCase(), "a reason")
self.assertEqual(len(result.tracker._test_cases["FakeTestCase"]), 1)
def test_adds_expected_failure(self):
exc = self.factory.make_exc()
result = self._make_one()
result.addExpectedFailure(FakeTestCase(), exc)
line = result.tracker._test_cases["FakeTestCase"][0]
self.assertFalse(line.ok)
self.assertEqual(line.directive.text, "TODO {}".format(_("(expected failure)")))
def test_adds_unexpected_success(self):
result = self._make_one()
result.addUnexpectedSuccess(FakeTestCase())
line = result.tracker._test_cases["FakeTestCase"][0]
self.assertTrue(line.ok)
self.assertEqual(
line.directive.text, "TODO {}".format(_("(unexpected success)"))
)
def test_adds_subtest_success(self):
"""Test that the runner handles subtest success results."""
result = self._make_one()
test = FakeTestCase()
with test.subTest():
result.addSubTest(test, test._subtest, None)
line = result.tracker._test_cases["FakeTestCase"][0]
self.assertTrue(line.ok)
def test_adds_subtest_failure(self):
"""Test that the runner handles subtest failure results."""
result = self._make_one()
# Python 3 does some extra testing in unittest on exceptions so fake
# the cause as if it were raised.
ex = Exception()
ex.__cause__ = None
test = FakeTestCase()
with test.subTest():
result.addSubTest(test, test._subtest, (ex.__class__, ex, None))
line = result.tracker._test_cases["FakeTestCase"][0]
self.assertFalse(line.ok)
| bsd-2-clause | 5,577,237,272,769,613,000 | 33.432692 | 88 | 0.620776 | false |
pedrobaeza/OpenUpgrade | openerp/cli/server.py | 187 | 5869 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
OpenERP - Server
OpenERP is an ERP+CRM program for small and medium businesses.
The whole source code is distributed under the terms of the
GNU Public Licence.
(c) 2003-TODAY, Fabien Pinckaers - OpenERP SA
"""
import atexit
import csv
import logging
import os
import signal
import sys
import threading
import traceback
import time
import openerp
from . import Command
__author__ = openerp.release.author
__version__ = openerp.release.version
# Also use the `openerp` logger for the main script.
_logger = logging.getLogger('openerp')
def check_root_user():
""" Exit if the process's user is 'root' (on POSIX system)."""
if os.name == 'posix':
import pwd
        if pwd.getpwuid(os.getuid())[0] == 'root':
sys.stderr.write("Running as user 'root' is a security risk, aborting.\n")
sys.exit(1)
def check_postgres_user():
""" Exit if the configured database user is 'postgres'.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
if config['db_user'] == 'postgres':
sys.stderr.write("Using the database user 'postgres' is a security risk, aborting.")
sys.exit(1)
def report_configuration():
""" Log the server version and some configuration values.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
_logger.info("OpenERP version %s", __version__)
for name, value in [('addons paths', openerp.modules.module.ad_paths),
('database hostname', config['db_host'] or 'localhost'),
('database port', config['db_port'] or '5432'),
('database user', config['db_user'])]:
_logger.info("%s: %s", name, value)
def rm_pid_file():
config = openerp.tools.config
if not openerp.evented and config['pidfile']:
try:
os.unlink(config['pidfile'])
except OSError:
pass
def setup_pid_file():
""" Create a file with the process id written in it.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
if not openerp.evented and config['pidfile']:
with open(config['pidfile'], 'w') as fd:
pidtext = "%d" % (os.getpid())
fd.write(pidtext)
atexit.register(rm_pid_file)
def export_translation():
config = openerp.tools.config
dbname = config['db_name']
if config["language"]:
msg = "language %s" % (config["language"],)
else:
msg = "new language"
_logger.info('writing translation file for %s to %s', msg,
config["translate_out"])
fileformat = os.path.splitext(config["translate_out"])[-1][1:].lower()
with open(config["translate_out"], "w") as buf:
registry = openerp.modules.registry.RegistryManager.new(dbname)
with openerp.api.Environment.manage():
with registry.cursor() as cr:
openerp.tools.trans_export(config["language"],
config["translate_modules"] or ["all"], buf, fileformat, cr)
_logger.info('translation file written successfully')
def import_translation():
config = openerp.tools.config
context = {'overwrite': config["overwrite_existing_translations"]}
dbname = config['db_name']
registry = openerp.modules.registry.RegistryManager.new(dbname)
with openerp.api.Environment.manage():
with registry.cursor() as cr:
openerp.tools.trans_load(
cr, config["translate_in"], config["language"], context=context,
)
def main(args):
check_root_user()
openerp.tools.config.parse_config(args)
check_postgres_user()
report_configuration()
config = openerp.tools.config
# the default limit for CSV fields in the module is 128KiB, which is not
# quite sufficient to import images to store in attachment. 500MiB is a
# bit overkill, but better safe than sorry I guess
csv.field_size_limit(500 * 1024 * 1024)
if config["test_file"]:
config["test_enable"] = True
if config["translate_out"]:
export_translation()
sys.exit(0)
if config["translate_in"]:
import_translation()
sys.exit(0)
# This needs to be done now to ensure the use of the multiprocessing
    # signaling mechanism for registries loaded with -d
if config['workers']:
openerp.multi_process = True
preload = []
if config['db_name']:
preload = config['db_name'].split(',')
stop = config["stop_after_init"]
setup_pid_file()
rc = openerp.service.server.start(preload=preload, stop=stop)
sys.exit(rc)
class Server(Command):
"""Start the odoo server (default command)"""
def run(self, args):
main(args)
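# Call-path sketch (descriptive only, no new behaviour): `Server` is registered as the
# default CLI command; main() parses the configuration, refuses to run as root or as the
# 'postgres' database user, optionally performs a translation export/import and exits,
# and otherwise writes the pid file and hands control to openerp.service.server.start().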
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,795,725,336,854,778,400 | 31.247253 | 92 | 0.630601 | false |
h3biomed/luigi | luigi/execution_summary.py | 7 | 17170 | # -*- coding: utf-8 -*-
#
# Copyright 2015-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module provide the function :py:func:`summary` that is used for printing
an `execution summary
<https://github.com/spotify/luigi/blob/master/examples/execution_summary_example.py>`_
at the end of luigi invocations.
"""
import textwrap
import collections
import functools
import luigi
class execution_summary(luigi.Config):
summary_length = luigi.IntParameter(default=5)
def _partition_tasks(worker):
"""
Takes a worker and sorts out tasks based on their status.
Still_pending_not_ext is only used to get upstream_failure, upstream_missing_dependency and run_by_other_worker
"""
task_history = worker._add_task_history
pending_tasks = {task for(task, status, ext) in task_history if status == 'PENDING'}
set_tasks = {}
set_tasks["completed"] = {task for (task, status, ext) in task_history if status == 'DONE' and task in pending_tasks}
set_tasks["already_done"] = {task for (task, status, ext) in task_history
if status == 'DONE' and task not in pending_tasks and task not in set_tasks["completed"]}
set_tasks["ever_failed"] = {task for (task, status, ext) in task_history if status == 'FAILED'}
set_tasks["failed"] = set_tasks["ever_failed"] - set_tasks["completed"]
set_tasks["scheduling_error"] = {task for(task, status, ext) in task_history if status == 'UNKNOWN'}
set_tasks["still_pending_ext"] = {task for (task, status, ext) in task_history
if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and not ext}
set_tasks["still_pending_not_ext"] = {task for (task, status, ext) in task_history
if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and ext}
set_tasks["run_by_other_worker"] = set()
set_tasks["upstream_failure"] = set()
set_tasks["upstream_missing_dependency"] = set()
set_tasks["upstream_run_by_other_worker"] = set()
set_tasks["upstream_scheduling_error"] = set()
set_tasks["not_run"] = set()
return set_tasks
def _root_task(worker):
"""
Return the first task scheduled by the worker, corresponding to the root task
"""
return worker._add_task_history[0][0]
def _populate_unknown_statuses(set_tasks):
"""
    Add the "upstream_*" and "not_run" statuses by mutating set_tasks.
"""
visited = set()
for task in set_tasks["still_pending_not_ext"]:
_depth_first_search(set_tasks, task, visited)
def _depth_first_search(set_tasks, current_task, visited):
"""
This dfs checks why tasks are still pending.
"""
visited.add(current_task)
if current_task in set_tasks["still_pending_not_ext"]:
upstream_failure = False
upstream_missing_dependency = False
upstream_run_by_other_worker = False
upstream_scheduling_error = False
for task in current_task._requires():
if task not in visited:
_depth_first_search(set_tasks, task, visited)
if task in set_tasks["ever_failed"] or task in set_tasks["upstream_failure"]:
set_tasks["upstream_failure"].add(current_task)
upstream_failure = True
if task in set_tasks["still_pending_ext"] or task in set_tasks["upstream_missing_dependency"]:
set_tasks["upstream_missing_dependency"].add(current_task)
upstream_missing_dependency = True
if task in set_tasks["run_by_other_worker"] or task in set_tasks["upstream_run_by_other_worker"]:
set_tasks["upstream_run_by_other_worker"].add(current_task)
upstream_run_by_other_worker = True
if task in set_tasks["scheduling_error"]:
set_tasks["upstream_scheduling_error"].add(current_task)
upstream_scheduling_error = True
if not upstream_failure and not upstream_missing_dependency and \
not upstream_run_by_other_worker and not upstream_scheduling_error and \
current_task not in set_tasks["run_by_other_worker"]:
set_tasks["not_run"].add(current_task)
def _get_str(task_dict, extra_indent):
"""
This returns a string for each status
"""
summary_length = execution_summary().summary_length
lines = []
task_names = sorted(task_dict.keys())
for task_family in task_names:
tasks = task_dict[task_family]
tasks = sorted(tasks, key=lambda x: str(x))
prefix_size = 8 if extra_indent else 4
prefix = ' ' * prefix_size
line = None
if summary_length > 0 and len(lines) >= summary_length:
line = prefix + "..."
lines.append(line)
break
if len(tasks[0].get_params()) == 0:
line = prefix + '- {0} {1}()'.format(len(tasks), str(task_family))
elif _get_len_of_params(tasks[0]) > 60 or len(str(tasks[0])) > 200 or \
(len(tasks) == 2 and len(tasks[0].get_params()) > 1 and (_get_len_of_params(tasks[0]) > 40 or len(str(tasks[0])) > 100)):
"""
This is to make sure that there is no really long task in the output
"""
line = prefix + '- {0} {1}(...)'.format(len(tasks), task_family)
elif len((tasks[0].get_params())) == 1:
attributes = {getattr(task, tasks[0].get_params()[0][0]) for task in tasks}
param_class = tasks[0].get_params()[0][1]
first, last = _ranging_attributes(attributes, param_class)
if first is not None and last is not None and len(attributes) > 3:
param_str = '{0}...{1}'.format(param_class.serialize(first), param_class.serialize(last))
else:
param_str = '{0}'.format(_get_str_one_parameter(tasks))
line = prefix + '- {0} {1}({2}={3})'.format(len(tasks), task_family, tasks[0].get_params()[0][0], param_str)
else:
ranging = False
params = _get_set_of_params(tasks)
unique_param_keys = list(_get_unique_param_keys(params))
if len(unique_param_keys) == 1:
unique_param, = unique_param_keys
attributes = params[unique_param]
param_class = unique_param[1]
first, last = _ranging_attributes(attributes, param_class)
if first is not None and last is not None and len(attributes) > 2:
ranging = True
line = prefix + '- {0} {1}({2}'.format(len(tasks), task_family, _get_str_ranging_multiple_parameters(first, last, tasks, unique_param))
if not ranging:
if len(tasks) == 1:
line = prefix + '- {0} {1}'.format(len(tasks), tasks[0])
if len(tasks) == 2:
line = prefix + '- {0} {1} and {2}'.format(len(tasks), tasks[0], tasks[1])
if len(tasks) > 2:
line = prefix + '- {0} {1} ...'.format(len(tasks), tasks[0])
lines.append(line)
return '\n'.join(lines)
def _get_len_of_params(task):
return sum(len(param[0]) for param in task.get_params())
def _get_str_ranging_multiple_parameters(first, last, tasks, unique_param):
row = ''
str_unique_param = '{0}...{1}'.format(unique_param[1].serialize(first), unique_param[1].serialize(last))
for param in tasks[0].get_params():
row += '{0}='.format(param[0])
if param[0] == unique_param[0]:
row += '{0}'.format(str_unique_param)
else:
row += '{0}'.format(param[1].serialize(getattr(tasks[0], param[0])))
if param != tasks[0].get_params()[-1]:
row += ", "
row += ')'
return row
def _get_set_of_params(tasks):
params = {}
for param in tasks[0].get_params():
params[param] = {getattr(task, param[0]) for task in tasks}
return params
def _get_unique_param_keys(params):
for param_key, param_values in params.items():
if len(param_values) > 1:
yield param_key
def _ranging_attributes(attributes, param_class):
"""
Checks if there is a continuous range
"""
next_attributes = {param_class.next_in_enumeration(attribute) for attribute in attributes}
in_first = attributes.difference(next_attributes)
in_second = next_attributes.difference(attributes)
if len(in_first) == 1 and len(in_second) == 1:
for x in attributes:
if {param_class.next_in_enumeration(x)} == in_second:
return next(iter(in_first)), x
return None, None
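# Illustrative example (assuming a parameter class whose next_in_enumeration(x) is x + 1):
# _ranging_attributes({1, 2, 3, 4}, param_class) returns (1, 4), whereas a gapped set such
# as {1, 2, 4} returns (None, None) because the values do not form a continuous range.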
def _get_str_one_parameter(tasks):
row = ''
count = 0
for task in tasks:
if (len(row) >= 30 and count > 2 and count != len(tasks) - 1) or len(row) > 200:
row += '...'
break
param = task.get_params()[0]
row += '{0}'.format(param[1].serialize(getattr(task, param[0])))
if count < len(tasks) - 1:
row += ','
count += 1
return row
def _serialize_first_param(task):
return task.get_params()[0][1].serialize(getattr(task, task.get_params()[0][0]))
def _get_number_of_tasks_for(status, group_tasks):
if status == "still_pending":
return (_get_number_of_tasks(group_tasks["still_pending_ext"]) +
_get_number_of_tasks(group_tasks["still_pending_not_ext"]))
return _get_number_of_tasks(group_tasks[status])
def _get_number_of_tasks(task_dict):
return sum(len(tasks) for tasks in task_dict.values())
def _get_comments(group_tasks):
"""
Get the human readable comments and quantities for the task types.
"""
comments = {}
for status, human in _COMMENTS:
num_tasks = _get_number_of_tasks_for(status, group_tasks)
if num_tasks:
space = " " if status in _PENDING_SUB_STATUSES else ""
comments[status] = '{space}* {num_tasks} {human}:\n'.format(
space=space,
num_tasks=num_tasks,
human=human)
return comments
# Ordered in the sense that they'll be printed in this order
_ORDERED_STATUSES = (
"already_done",
"completed",
"ever_failed",
"failed",
"scheduling_error",
"still_pending",
"still_pending_ext",
"run_by_other_worker",
"upstream_failure",
"upstream_missing_dependency",
"upstream_run_by_other_worker",
"upstream_scheduling_error",
"not_run",
)
_PENDING_SUB_STATUSES = set(_ORDERED_STATUSES[_ORDERED_STATUSES.index("still_pending_ext"):])
_COMMENTS = set((
("already_done", 'present dependencies were encountered'),
("completed", 'ran successfully'),
("failed", 'failed'),
("scheduling_error", 'failed scheduling'),
("still_pending", 'were left pending, among these'),
("still_pending_ext", 'were missing external dependencies'),
("run_by_other_worker", 'were being run by another worker'),
("upstream_failure", 'had failed dependencies'),
("upstream_missing_dependency", 'had missing external dependencies'),
("upstream_run_by_other_worker", 'had dependencies that were being run by other worker'),
("upstream_scheduling_error", 'had dependencies whose scheduling failed'),
("not_run", 'was not granted run permission by the scheduler'),
))
def _get_run_by_other_worker(worker):
"""
    This returns a set of the tasks that are being run by other workers
"""
task_sets = _get_external_workers(worker).values()
return functools.reduce(lambda a, b: a | b, task_sets, set())
def _get_external_workers(worker):
"""
This returns a dict with a set of tasks for all of the other workers
"""
worker_that_blocked_task = collections.defaultdict(set)
get_work_response_history = worker._get_work_response_history
for get_work_response in get_work_response_history:
if get_work_response['task_id'] is None:
for running_task in get_work_response['running_tasks']:
other_worker_id = running_task['worker']
other_task_id = running_task['task_id']
other_task = worker._scheduled_tasks.get(other_task_id)
if other_worker_id == worker._id or not other_task:
continue
worker_that_blocked_task[other_worker_id].add(other_task)
return worker_that_blocked_task
def _group_tasks_by_name_and_status(task_dict):
"""
    Takes a collection of tasks (e.g. all tasks with a given status) and
    returns a dictionary mapping each task family name to the list of its
    tasks in that collection
"""
group_status = {}
for task in task_dict:
if task.task_family not in group_status:
group_status[task.task_family] = []
group_status[task.task_family].append(task)
return group_status
def _summary_dict(worker):
set_tasks = _partition_tasks(worker)
set_tasks["run_by_other_worker"] = _get_run_by_other_worker(worker)
_populate_unknown_statuses(set_tasks)
return set_tasks
def _summary_format(set_tasks, worker):
group_tasks = {}
for status, task_dict in set_tasks.items():
group_tasks[status] = _group_tasks_by_name_and_status(task_dict)
comments = _get_comments(group_tasks)
num_all_tasks = sum([len(set_tasks["already_done"]),
len(set_tasks["completed"]), len(set_tasks["failed"]),
len(set_tasks["scheduling_error"]),
len(set_tasks["still_pending_ext"]),
len(set_tasks["still_pending_not_ext"])])
str_output = ''
str_output += 'Scheduled {0} tasks of which:\n'.format(num_all_tasks)
for status in _ORDERED_STATUSES:
if status not in comments:
continue
str_output += '{0}'.format(comments[status])
if status != 'still_pending':
str_output += '{0}\n'.format(_get_str(group_tasks[status], status in _PENDING_SUB_STATUSES))
ext_workers = _get_external_workers(worker)
group_tasks_ext_workers = {}
for ext_worker, task_dict in ext_workers.items():
group_tasks_ext_workers[ext_worker] = _group_tasks_by_name_and_status(task_dict)
if len(ext_workers) > 0:
str_output += "\nThe other workers were:\n"
count = 0
for ext_worker, task_dict in ext_workers.items():
if count > 3 and count < len(ext_workers) - 1:
str_output += " and {0} other workers".format(len(ext_workers) - count)
break
str_output += " - {0} ran {1} tasks\n".format(ext_worker, len(task_dict))
count += 1
str_output += '\n'
if num_all_tasks == sum([len(set_tasks["already_done"]),
len(set_tasks["scheduling_error"]),
len(set_tasks["still_pending_ext"]),
len(set_tasks["still_pending_not_ext"])]):
if len(ext_workers) == 0:
str_output += '\n'
str_output += 'Did not run any tasks'
smiley = ""
reason = ""
if set_tasks["ever_failed"]:
if not set_tasks["failed"]:
smiley = ":)"
            reason = "there were failed tasks but they all succeeded in a retry"
else:
smiley = ":("
reason = "there were failed tasks"
if set_tasks["scheduling_error"]:
reason += " and tasks whose scheduling failed"
elif set_tasks["scheduling_error"]:
smiley = ":("
reason = "there were tasks whose scheduling failed"
elif set_tasks["not_run"]:
smiley = ":|"
reason = "there were tasks that were not granted run permission by the scheduler"
elif set_tasks["still_pending_ext"]:
smiley = ":|"
reason = "there were missing external dependencies"
else:
smiley = ":)"
reason = "there were no failed tasks or missing external dependencies"
str_output += "\nThis progress looks {0} because {1}".format(smiley, reason)
if num_all_tasks == 0:
str_output = 'Did not schedule any tasks'
return str_output
def _summary_wrap(str_output):
return textwrap.dedent("""
===== Luigi Execution Summary =====
{str_output}
===== Luigi Execution Summary =====
""").format(str_output=str_output)
def summary(worker):
"""
    Given a worker, return a human readable summary of what the worker has
    done.
"""
return _summary_wrap(_summary_format(_summary_dict(worker), worker))
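# Illustrative use (a minimal sketch; MyTask stands in for any luigi.Task subclass):
#     worker = luigi.worker.Worker()
#     worker.add(MyTask())
#     worker.run()
#     print(summary(worker))
# luigi's command-line interface produces the equivalent output automatically at the end
# of a run.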
| apache-2.0 | -2,679,349,740,150,130,000 | 39.4 | 155 | 0.602679 | false |
mKeRix/home-assistant | homeassistant/components/freebox/config_flow.py | 6 | 3504 | """Config flow to configure the Freebox integration."""
import logging
from aiofreepybox.exceptions import AuthorizationError, HttpRequestError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PORT
from .const import DOMAIN # pylint: disable=unused-import
from .router import get_api
_LOGGER = logging.getLogger(__name__)
class FreeboxFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize Freebox config flow."""
self._host = None
self._port = None
def _show_setup_form(self, user_input=None, errors=None):
"""Show the setup form to the user."""
if user_input is None:
user_input = {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_HOST, default=user_input.get(CONF_HOST, "")): str,
vol.Required(CONF_PORT, default=user_input.get(CONF_PORT, "")): int,
}
),
errors=errors or {},
)
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
errors = {}
if user_input is None:
return self._show_setup_form(user_input, errors)
self._host = user_input[CONF_HOST]
self._port = user_input[CONF_PORT]
# Check if already configured
await self.async_set_unique_id(self._host)
self._abort_if_unique_id_configured()
return await self.async_step_link()
async def async_step_link(self, user_input=None):
"""Attempt to link with the Freebox router.
Given a configured host, will ask the user to press the button
to connect to the router.
"""
if user_input is None:
return self.async_show_form(step_id="link")
errors = {}
fbx = await get_api(self.hass, self._host)
try:
# Open connection and check authentication
await fbx.open(self._host, self._port)
# Check permissions
await fbx.system.get_config()
await fbx.lan.get_hosts_list()
await self.hass.async_block_till_done()
# Close connection
await fbx.close()
return self.async_create_entry(
title=self._host, data={CONF_HOST: self._host, CONF_PORT: self._port},
)
except AuthorizationError as error:
_LOGGER.error(error)
errors["base"] = "register_failed"
except HttpRequestError:
_LOGGER.error("Error connecting to the Freebox router at %s", self._host)
errors["base"] = "connection_failed"
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Unknown error connecting with Freebox router at %s", self._host
)
errors["base"] = "unknown"
return self.async_show_form(step_id="link", errors=errors)
async def async_step_import(self, user_input=None):
"""Import a config entry."""
return await self.async_step_user(user_input)
async def async_step_discovery(self, discovery_info):
"""Initialize step from discovery."""
return await self.async_step_user(discovery_info)
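# Flow summary (descriptive only): the "user" step collects host and port and aborts if
# that host is already configured; the "link" step then opens the connection and requires
# the authorization to be granted on the Freebox itself (an AuthorizationError surfaces as
# the "register_failed" error). Import and discovery both funnel back into the user step.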
| mit | 1,653,596,644,735,233,800 | 30.854545 | 88 | 0.597317 | false |
FireballDWF/cloud-custodian | tools/c7n_gcp/tests/test_notify_gcp.py | 5 | 2961 | # Copyright 2019 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gcp_common import BaseTest
from c7n_gcp.client import Session
import mock
import sys
class NotifyTest(BaseTest):
def test_pubsub_notify(self):
factory = self.replay_flight_data("notify-action")
orig_client = Session.client
stub_client = mock.MagicMock()
calls = []
def client_factory(*args, **kw):
calls.append(args)
if len(calls) == 1:
return orig_client(*args, **kw)
return stub_client
self.patch(Session, 'client', client_factory)
p = self.load_policy({
'name': 'test-notify',
'resource': 'gcp.pubsub-topic',
'filters': [
{
'name': 'projects/cloud-custodian/topics/gcptestnotifytopic'
}
],
'actions': [
{'type': 'notify',
'template': 'default',
'priority_header': '2',
'subject': 'testing notify action',
'to': ['[email protected]'],
'transport':
{'type': 'pubsub',
'topic': 'projects/cloud-custodian/topics/gcptestnotifytopic'}
}
]}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
stub_client.execute_command.assert_called_once()
if sys.version_info.major < 3:
return
stub_client.execute_command.assert_called_with(
'publish', {
'topic': 'projects/cloud-custodian/topics/gcptestnotifytopic',
'body': {
'messages': {
'data': ('eJzdUrtqAzEQ7PUVh+qcjd2EuEqVLl8QgpFXe2cFnVZIq8Bh/O/'
'RA58vkCqkSrHNDDuPZS9C4ic6lofOJWsfhFQAlBwfjc6YhBSZtFGu3'
'+2fdvLO/0wGHA25wilrC+DJGpgzcBHSqQkLxRi5d8RmmNtOpBSgUiP4jU'
'+nmE49kzdQ+MFYxhAz/SZWKj7QBwLHLVhKul+'
'ybOti3GapYtR8mpi4ivfagHPIRZBnXwXviRgnbxVXVOOgkuXaJRgKhuf'
'jGZXGUNh9wXPakuRWzbixa1pdc6qSVO1kihieNU3KuA3QJGsgDspFT4Hb'
'nW6B2iHadon/69K5trguxb+b/OPWq9/6i+/JcvDoDq+'
'K4Yz6ZfWVTbUcucwX+HoY5Q==')
}}})
| apache-2.0 | 5,308,216,574,212,791,000 | 36.481013 | 92 | 0.55792 | false |
joeyli/qemu-acpitad | tests/qemu-iotests/qed.py | 248 | 7194 | #!/usr/bin/env python
#
# Tool to manipulate QED image files
#
# Copyright (C) 2010 IBM, Corp.
#
# Authors:
# Stefan Hajnoczi <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
import sys
import struct
import random
import optparse
# This can be used as a module
__all__ = ['QED_F_NEED_CHECK', 'QED']
QED_F_NEED_CHECK = 0x02
header_fmt = '<IIIIQQQQQII'
header_size = struct.calcsize(header_fmt)
field_names = ['magic', 'cluster_size', 'table_size',
'header_size', 'features', 'compat_features',
'autoclear_features', 'l1_table_offset', 'image_size',
'backing_filename_offset', 'backing_filename_size']
table_elem_fmt = '<Q'
table_elem_size = struct.calcsize(table_elem_fmt)
def err(msg):
sys.stderr.write(msg + '\n')
sys.exit(1)
def unpack_header(s):
fields = struct.unpack(header_fmt, s)
return dict((field_names[idx], val) for idx, val in enumerate(fields))
def pack_header(header):
fields = tuple(header[x] for x in field_names)
return struct.pack(header_fmt, *fields)
def unpack_table_elem(s):
return struct.unpack(table_elem_fmt, s)[0]
def pack_table_elem(elem):
return struct.pack(table_elem_fmt, elem)
class QED(object):
def __init__(self, f):
self.f = f
self.f.seek(0, 2)
self.filesize = f.tell()
self.load_header()
self.load_l1_table()
def raw_pread(self, offset, size):
self.f.seek(offset)
return self.f.read(size)
def raw_pwrite(self, offset, data):
self.f.seek(offset)
return self.f.write(data)
def load_header(self):
self.header = unpack_header(self.raw_pread(0, header_size))
def store_header(self):
self.raw_pwrite(0, pack_header(self.header))
def read_table(self, offset):
size = self.header['table_size'] * self.header['cluster_size']
s = self.raw_pread(offset, size)
table = [unpack_table_elem(s[i:i + table_elem_size]) for i in xrange(0, size, table_elem_size)]
return table
def load_l1_table(self):
self.l1_table = self.read_table(self.header['l1_table_offset'])
self.table_nelems = self.header['table_size'] * self.header['cluster_size'] / table_elem_size
def write_table(self, offset, table):
s = ''.join(pack_table_elem(x) for x in table)
self.raw_pwrite(offset, s)
def random_table_item(table):
vals = [(index, offset) for index, offset in enumerate(table) if offset != 0]
if not vals:
err('cannot pick random item because table is empty')
return random.choice(vals)
def corrupt_table_duplicate(table):
'''Corrupt a table by introducing a duplicate offset'''
victim_idx, victim_val = random_table_item(table)
unique_vals = set(table)
if len(unique_vals) == 1:
err('no duplication corruption possible in table')
dup_val = random.choice(list(unique_vals.difference([victim_val])))
table[victim_idx] = dup_val
def corrupt_table_invalidate(qed, table):
'''Corrupt a table by introducing an invalid offset'''
index, _ = random_table_item(table)
table[index] = qed.filesize + random.randint(0, 100 * 1024 * 1024 * 1024 * 1024)
def cmd_show(qed, *args):
    '''show [header|l1|l2 <offset>] - Show header or l1/l2 tables'''
if not args or args[0] == 'header':
print qed.header
elif args[0] == 'l1':
print qed.l1_table
elif len(args) == 2 and args[0] == 'l2':
offset = int(args[1])
print qed.read_table(offset)
else:
err('unrecognized sub-command')
def cmd_duplicate(qed, table_level):
'''duplicate l1|l2 - Duplicate a random table element'''
if table_level == 'l1':
offset = qed.header['l1_table_offset']
table = qed.l1_table
elif table_level == 'l2':
_, offset = random_table_item(qed.l1_table)
table = qed.read_table(offset)
else:
err('unrecognized sub-command')
corrupt_table_duplicate(table)
qed.write_table(offset, table)
def cmd_invalidate(qed, table_level):
'''invalidate l1|l2 - Plant an invalid table element at random'''
if table_level == 'l1':
offset = qed.header['l1_table_offset']
table = qed.l1_table
elif table_level == 'l2':
_, offset = random_table_item(qed.l1_table)
table = qed.read_table(offset)
else:
err('unrecognized sub-command')
corrupt_table_invalidate(qed, table)
qed.write_table(offset, table)
def cmd_need_check(qed, *args):
'''need-check [on|off] - Test, set, or clear the QED_F_NEED_CHECK header bit'''
if not args:
print bool(qed.header['features'] & QED_F_NEED_CHECK)
return
if args[0] == 'on':
qed.header['features'] |= QED_F_NEED_CHECK
elif args[0] == 'off':
qed.header['features'] &= ~QED_F_NEED_CHECK
else:
err('unrecognized sub-command')
qed.store_header()
def cmd_zero_cluster(qed, pos, *args):
'''zero-cluster <pos> [<n>] - Zero data clusters'''
pos, n = int(pos), 1
if args:
if len(args) != 1:
err('expected one argument')
n = int(args[0])
for i in xrange(n):
l1_index = pos / qed.header['cluster_size'] / len(qed.l1_table)
if qed.l1_table[l1_index] == 0:
err('no l2 table allocated')
l2_offset = qed.l1_table[l1_index]
l2_table = qed.read_table(l2_offset)
l2_index = (pos / qed.header['cluster_size']) % len(qed.l1_table)
l2_table[l2_index] = 1 # zero the data cluster
qed.write_table(l2_offset, l2_table)
pos += qed.header['cluster_size']
def cmd_copy_metadata(qed, outfile):
'''copy-metadata <outfile> - Copy metadata only (for scrubbing corrupted images)'''
out = open(outfile, 'wb')
# Match file size
out.seek(qed.filesize - 1)
out.write('\0')
# Copy header clusters
out.seek(0)
header_size_bytes = qed.header['header_size'] * qed.header['cluster_size']
out.write(qed.raw_pread(0, header_size_bytes))
# Copy L1 table
out.seek(qed.header['l1_table_offset'])
s = ''.join(pack_table_elem(x) for x in qed.l1_table)
out.write(s)
# Copy L2 tables
for l2_offset in qed.l1_table:
if l2_offset == 0:
continue
l2_table = qed.read_table(l2_offset)
out.seek(l2_offset)
s = ''.join(pack_table_elem(x) for x in l2_table)
out.write(s)
out.close()
def usage():
print 'Usage: %s <file> <cmd> [<arg>, ...]' % sys.argv[0]
print
print 'Supported commands:'
for cmd in sorted(x for x in globals() if x.startswith('cmd_')):
print globals()[cmd].__doc__
sys.exit(1)
def main():
if len(sys.argv) < 3:
usage()
filename, cmd = sys.argv[1:3]
cmd = 'cmd_' + cmd.replace('-', '_')
if cmd not in globals():
usage()
qed = QED(open(filename, 'r+b'))
try:
globals()[cmd](qed, *sys.argv[3:])
except TypeError, e:
sys.stderr.write(globals()[cmd].__doc__ + '\n')
sys.exit(1)
if __name__ == '__main__':
main()
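# Illustrative invocations (image and output paths are made up):
#     ./qed.py test.qed show header
#     ./qed.py test.qed invalidate l2
#     ./qed.py test.qed need-check on
#     ./qed.py test.qed copy-metadata scrubbed.qed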
| gpl-2.0 | -498,082,471,374,941,800 | 29.612766 | 103 | 0.60759 | false |
rmboggs/django | django/core/files/utils.py | 395 | 1338 | from django.utils import six
class FileProxyMixin(object):
"""
    A mixin class used to forward file methods to an underlying file
object. The internal file object has to be called "file"::
class FileProxy(FileProxyMixin):
def __init__(self, file):
self.file = file
"""
encoding = property(lambda self: self.file.encoding)
fileno = property(lambda self: self.file.fileno)
flush = property(lambda self: self.file.flush)
isatty = property(lambda self: self.file.isatty)
newlines = property(lambda self: self.file.newlines)
read = property(lambda self: self.file.read)
readinto = property(lambda self: self.file.readinto)
readline = property(lambda self: self.file.readline)
readlines = property(lambda self: self.file.readlines)
seek = property(lambda self: self.file.seek)
softspace = property(lambda self: self.file.softspace)
tell = property(lambda self: self.file.tell)
truncate = property(lambda self: self.file.truncate)
write = property(lambda self: self.file.write)
writelines = property(lambda self: self.file.writelines)
xreadlines = property(lambda self: self.file.xreadlines)
if six.PY3:
seekable = property(lambda self: self.file.seekable)
def __iter__(self):
return iter(self.file)
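# Illustrative usage (mirrors the FileProxy example from the class docstring; the file
# path is made up):
#     proxy = FileProxy(open('/tmp/example.txt', 'rb'))
#     data = proxy.read()   # forwarded to the wrapped file object
#     proxy.seek(0)         # as is every other proxied method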
| bsd-3-clause | -7,402,708,627,289,800,000 | 38.352941 | 69 | 0.688341 | false |
levkar/odoo | addons/hw_scale/controllers/main.py | 20 | 15524 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import os
import re
import time
from collections import namedtuple
from os import listdir
from threading import Thread, Lock
from odoo import http
import odoo.addons.hw_proxy.controllers.main as hw_proxy
_logger = logging.getLogger(__name__)
DRIVER_NAME = 'scale'
try:
import serial
except ImportError:
_logger.error('Odoo module hw_scale depends on the pyserial python module')
serial = None
def _toledo8217StatusParse(status):
""" Parse a scale's status, returning a `(weight, weight_info)` pair. """
weight, weight_info = None, None
stat = ord(status[status.index('?') + 1])
if stat == 0:
weight_info = 'ok'
else:
weight_info = []
if stat & 1 :
weight_info.append('moving')
if stat & 1 << 1:
weight_info.append('over_capacity')
if stat & 1 << 2:
weight_info.append('negative')
weight = 0.0
if stat & 1 << 3:
weight_info.append('outside_zero_capture_range')
if stat & 1 << 4:
weight_info.append('center_of_zero')
if stat & 1 << 5:
weight_info.append('net_weight')
return weight, weight_info
ScaleProtocol = namedtuple(
'ScaleProtocol',
"name baudrate bytesize stopbits parity timeout writeTimeout weightRegexp statusRegexp "
"statusParse commandTerminator commandDelay weightDelay newWeightDelay "
"weightCommand zeroCommand tareCommand clearCommand emptyAnswerValid autoResetWeight")
# 8217 Mettler-Toledo (Weight-only) Protocol, as described in the scale's Service Manual.
# e.g. here: https://www.manualslib.com/manual/861274/Mettler-Toledo-Viva.html?page=51#manual
# Our recommended scale, the Mettler-Toledo "Ariva-S", supports this protocol on
# both the USB and RS232 ports; it can be configured in the setup menu as protocol option 3.
# We use the default serial protocol settings, the scale's settings can be configured in the
# scale's menu anyway.
Toledo8217Protocol = ScaleProtocol(
name='Toledo 8217',
baudrate=9600,
bytesize=serial.SEVENBITS,
stopbits=serial.STOPBITS_ONE,
parity=serial.PARITY_EVEN,
timeout=1,
writeTimeout=1,
weightRegexp="\x02\\s*([0-9.]+)N?\\r",
statusRegexp="\x02\\s*(\\?.)\\r",
statusParse=_toledo8217StatusParse,
commandDelay=0.2,
weightDelay=0.5,
newWeightDelay=0.2,
commandTerminator='',
weightCommand='W',
zeroCommand='Z',
tareCommand='T',
clearCommand='C',
emptyAnswerValid=False,
autoResetWeight=False,
)
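# Illustrative Toledo 8217 exchange (the values are made up): writing 'W' and reading back
# '\x02  1.275\r' matches weightRegexp and parses to a weight of 1.275, while an answer of
# the form '\x02?<status byte>\r' matches statusRegexp and is decoded by
# _toledo8217StatusParse into a (weight, weight_info) pair.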
# The ADAM scales have their own RS232 protocol, usually documented in the scale's manual
# e.g at https://www.adamequipment.com/media/docs/Print%20Publications/Manuals/PDF/AZEXTRA/AZEXTRA-UM.pdf
# https://www.manualslib.com/manual/879782/Adam-Equipment-Cbd-4.html?page=32#manual
# Only the baudrate and label format seem to be configurable in the AZExtra series.
ADAMEquipmentProtocol = ScaleProtocol(
name='Adam Equipment',
baudrate=4800,
bytesize=serial.EIGHTBITS,
stopbits=serial.STOPBITS_ONE,
parity=serial.PARITY_NONE,
timeout=0.2,
writeTimeout=0.2,
weightRegexp=r"\s*([0-9.]+)kg", # LABEL format 3 + KG in the scale settings, but Label 1/2 should work
statusRegexp=None,
statusParse=None,
commandTerminator="\r\n",
commandDelay=0.2,
weightDelay=0.5,
newWeightDelay=5, # AZExtra beeps every time you ask for a weight that was previously returned!
# Adding an extra delay gives the operator a chance to remove the products
# before the scale starts beeping. Could not find a way to disable the beeps.
weightCommand='P',
zeroCommand='Z',
tareCommand='T',
clearCommand=None, # No clear command -> Tare again
emptyAnswerValid=True, # AZExtra does not answer unless a new non-zero weight has been detected
autoResetWeight=True, # AZExtra will not return 0 after removing products
)
SCALE_PROTOCOLS = (
Toledo8217Protocol,
ADAMEquipmentProtocol, # must be listed last, as it supports no probing!
)
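# get_device() probes every serial device against these protocols in the order listed, so
# a new scale can be supported by defining another ScaleProtocol above and adding it here;
# protocols that cannot be probed reliably (such as the AZExtra, which only answers once a
# new weight is detected) have to stay at the end of the tuple.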
class Scale(Thread):
def __init__(self):
Thread.__init__(self)
self.lock = Lock()
self.scalelock = Lock()
self.status = {'status':'connecting', 'messages':[]}
self.input_dir = '/dev/serial/by-path/'
self.weight = 0
self.weight_info = 'ok'
self.device = None
self.path_to_scale = ''
self.protocol = None
def lockedstart(self):
with self.lock:
if not self.isAlive():
self.daemon = True
self.start()
def set_status(self, status, message=None):
if status == self.status['status']:
if message is not None and message != self.status['messages'][-1]:
self.status['messages'].append(message)
if status == 'error' and message:
_logger.error('Scale Error: '+ message)
elif status == 'disconnected' and message:
_logger.warning('Disconnected Scale: '+ message)
else:
self.status['status'] = status
if message:
self.status['messages'] = [message]
else:
self.status['messages'] = []
if status == 'error' and message:
_logger.error('Scale Error: '+ message)
elif status == 'disconnected' and message:
_logger.info('Disconnected Scale: %s', message)
def _get_raw_response(self, connection):
answer = []
while True:
char = connection.read(1) # may return `bytes` or `str`
if not char:
break
else:
answer.append(char)
return ''.join(answer)
def _parse_weight_answer(self, protocol, answer):
""" Parse a scale's answer to a weighing request, returning
a `(weight, weight_info, status)` pair.
"""
weight, weight_info, status = None, None, None
try:
_logger.debug("Parsing weight [%r]", answer)
if not answer and protocol.emptyAnswerValid:
# Some scales do not return the same value again, but we
# should not clear the weight data, POS may still be reading it
return weight, weight_info, status
if protocol.statusRegexp and re.search(protocol.statusRegexp, answer):
# parse status to set weight_info - we'll try weighing again later
weight, weight_info = protocol.statusParse(answer)
else:
match = re.search(protocol.weightRegexp, answer)
if match:
weight_text = match.group(1)
try:
weight = float(weight_text)
_logger.info('Weight: %s', weight)
except ValueError:
_logger.exception("Cannot parse weight [%r]", weight_text)
status = 'Invalid weight, please power-cycle the scale'
else:
_logger.error("Cannot parse scale answer [%r]", answer)
status = 'Invalid scale answer, please power-cycle the scale'
except Exception as e:
_logger.exception("Cannot parse scale answer [%r]", answer)
status = ("Could not weigh on scale %s with protocol %s: %s" %
(self.path_to_scale, protocol.name, e))
return weight, weight_info, status
def get_device(self):
if self.device:
return self.device
with hw_proxy.rs232_lock:
try:
if not os.path.exists(self.input_dir):
self.set_status('disconnected', 'No RS-232 device found')
return None
devices = [device for device in listdir(self.input_dir)]
for device in devices:
driver = hw_proxy.rs232_devices.get(device)
if driver and driver != DRIVER_NAME:
# belongs to another driver
_logger.info('Ignoring %s, belongs to %s', device, driver)
continue
path = self.input_dir + device
for protocol in SCALE_PROTOCOLS:
_logger.info('Probing %s with protocol %s', path, protocol)
connection = serial.Serial(path,
baudrate=protocol.baudrate,
bytesize=protocol.bytesize,
stopbits=protocol.stopbits,
parity=protocol.parity,
timeout=1, # longer timeouts for probing
writeTimeout=1) # longer timeouts for probing
connection.write(protocol.weightCommand + protocol.commandTerminator)
time.sleep(protocol.commandDelay)
answer = self._get_raw_response(connection)
weight, weight_info, status = self._parse_weight_answer(protocol, answer)
if status:
_logger.info('Probing %s: no valid answer to protocol %s', path, protocol.name)
else:
_logger.info('Probing %s: answer looks ok for protocol %s', path, protocol.name)
self.path_to_scale = path
self.protocol = protocol
self.set_status(
'connected',
'Connected to %s with %s protocol' % (device, protocol.name)
)
connection.timeout = protocol.timeout
connection.writeTimeout = protocol.writeTimeout
hw_proxy.rs232_devices[path] = DRIVER_NAME
return connection
self.set_status('disconnected', 'No supported RS-232 scale found')
except Exception as e:
_logger.exception('Failed probing for scales')
self.set_status('error', 'Failed probing for scales: %s' % e)
return None
def get_weight(self):
self.lockedstart()
return self.weight
def get_weight_info(self):
self.lockedstart()
return self.weight_info
def get_status(self):
self.lockedstart()
return self.status
def read_weight(self):
with self.scalelock:
p = self.protocol
try:
self.device.write(p.weightCommand + p.commandTerminator)
time.sleep(p.commandDelay)
answer = self._get_raw_response(self.device)
weight, weight_info, status = self._parse_weight_answer(p, answer)
if status:
self.set_status('error', status)
self.device = None
else:
if weight is not None:
self.weight = weight
if weight_info is not None:
self.weight_info = weight_info
except Exception as e:
self.set_status(
'error',
"Could not weigh on scale %s with protocol %s: %s" %
(self.path_to_scale, p.name, e))
self.device = None
def set_zero(self):
with self.scalelock:
if self.device:
try:
self.device.write(self.protocol.zeroCommand + self.protocol.commandTerminator)
time.sleep(self.protocol.commandDelay)
except Exception as e:
self.set_status(
'error',
"Could not zero scale %s with protocol %s: %s" %
(self.path_to_scale, self.protocol.name, e))
self.device = None
def set_tare(self):
with self.scalelock:
if self.device:
try:
self.device.write(self.protocol.tareCommand + self.protocol.commandTerminator)
time.sleep(self.protocol.commandDelay)
except Exception as e:
self.set_status(
'error',
"Could not tare scale %s with protocol %s: %s" %
(self.path_to_scale, self.protocol.name, e))
self.device = None
def clear_tare(self):
with self.scalelock:
if self.device:
p = self.protocol
try:
# if the protocol has no clear, we can just tare again
clearCommand = p.clearCommand or p.tareCommand
self.device.write(clearCommand + p.commandTerminator)
time.sleep(p.commandDelay)
except Exception as e:
self.set_status(
'error',
"Could not clear tare on scale %s with protocol %s: %s" %
(self.path_to_scale, p.name, e))
self.device = None
def run(self):
self.device = None
while True:
if self.device:
old_weight = self.weight
self.read_weight()
if self.weight != old_weight:
_logger.info('New Weight: %s, sleeping %ss', self.weight, self.protocol.newWeightDelay)
time.sleep(self.protocol.newWeightDelay)
if self.weight and self.protocol.autoResetWeight:
self.weight = 0
else:
_logger.info('Weight: %s, sleeping %ss', self.weight, self.protocol.weightDelay)
time.sleep(self.protocol.weightDelay)
else:
with self.scalelock:
self.device = self.get_device()
if not self.device:
# retry later to support "plug and play"
time.sleep(10)
scale_thread = None
if serial:
scale_thread = Scale()
hw_proxy.drivers[DRIVER_NAME] = scale_thread
class ScaleDriver(hw_proxy.Proxy):
@http.route('/hw_proxy/scale_read/', type='json', auth='none', cors='*')
def scale_read(self):
if scale_thread:
return {'weight': scale_thread.get_weight(),
'unit': 'kg',
'info': scale_thread.get_weight_info()}
return None
@http.route('/hw_proxy/scale_zero/', type='json', auth='none', cors='*')
def scale_zero(self):
if scale_thread:
scale_thread.set_zero()
return True
@http.route('/hw_proxy/scale_tare/', type='json', auth='none', cors='*')
def scale_tare(self):
if scale_thread:
scale_thread.set_tare()
return True
@http.route('/hw_proxy/scale_clear_tare/', type='json', auth='none', cors='*')
def scale_clear_tare(self):
if scale_thread:
scale_thread.clear_tare()
return True
| agpl-3.0 | 1,431,801,333,806,867,200 | 39.113695 | 108 | 0.545156 | false |
pku9104038/edx-platform | lms/djangoapps/courseware/management/commands/export_course.py | 7 | 2593 | """
A Django command that exports a course to a tar.gz file.
If <filename> is '-', it pipes the file to stdout
"""
import os
import shutil
import tarfile
from tempfile import mktemp, mkdtemp
from textwrap import dedent
from path import path
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.xml_exporter import export_to_xml
class Command(BaseCommand):
"""
Export a course to XML. The output is compressed as a tar.gz file
"""
args = "<course_id> <output_filename>"
help = dedent(__doc__).strip()
def handle(self, *args, **options):
course_id, filename, pipe_results = self._parse_arguments(args)
export_course_to_tarfile(course_id, filename)
results = self._get_results(filename) if pipe_results else None
return results
def _parse_arguments(self, args):
"""Parse command line arguments"""
try:
course_id = args[0]
filename = args[1]
except IndexError:
raise CommandError("Insufficient arguments")
# If filename is '-' save to a temp file
pipe_results = False
if filename == '-':
filename = mktemp()
pipe_results = True
return course_id, filename, pipe_results
def _get_results(self, filename):
"""Load results from file"""
results = None
with open(filename) as f:
results = f.read()
os.remove(filename)
return results
def export_course_to_tarfile(course_id, filename):
"""Exports a course into a tar.gz file"""
tmp_dir = mkdtemp()
try:
course_dir = export_course_to_directory(course_id, tmp_dir)
compress_directory(course_dir, filename)
finally:
shutil.rmtree(tmp_dir)
def export_course_to_directory(course_id, root_dir):
"""Export course into a directory"""
store = modulestore()
course = store.get_course(course_id)
if course is None:
raise CommandError("Invalid course_id")
course_name = course.location.course_id.replace('/', '-')
export_to_xml(store, None, course.location, root_dir, course_name)
course_dir = path(root_dir) / course_name
return course_dir
def compress_directory(directory, filename):
    """Compress a directory into a tar.gz file"""
mode = 'w:gz'
name = path(directory).name
with tarfile.open(filename, mode) as tar_file:
tar_file.add(directory, arcname=name)
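# Illustrative invocations (course id, settings module and output path are assumptions):
#     python manage.py lms --settings=devstack export_course MITx/Demo/2014 /tmp/course.tar.gz
#     python manage.py lms --settings=devstack export_course MITx/Demo/2014 -   # tar.gz on stdout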
| agpl-3.0 | -5,036,373,635,954,047,000 | 26.585106 | 71 | 0.648669 | false |
fanne/june | june/forms/node.py | 11 | 1204 | # coding: utf-8
from wtforms import TextField, TextAreaField, SelectField, BooleanField
from wtforms.validators import DataRequired
from flask.ext.babel import lazy_gettext as _
from ._base import BaseForm
from ..models import Node
class NodeForm(BaseForm):
title = TextField(
_('Title'), validators=[DataRequired()],
description=_('The screen title of the node')
)
urlname = TextField(
_('URL'), validators=[DataRequired()],
description=_('The url name of the node')
)
description = TextAreaField(_('Description'))
role = SelectField(
_('Role'),
description=_('Required role'),
choices=[
('user', _('User')),
('staff', _('Staff')),
('admin', _('Admin'))
],
default='user',
)
on_home = BooleanField(_('Show on home page'), default=True)
def validate_urlname(self, field):
if self._obj and self._obj.urlname == field.data:
return
if Node.query.filter_by(urlname=field.data).count():
raise ValueError(_('The node exists'))
def save(self):
node = Node(**self.data)
node.save()
return node
| bsd-3-clause | 5,144,944,532,718,721,000 | 27.666667 | 71 | 0.583887 | false |
justajeffy/arsenalsuite | cpp/apps/bach/data_export/FixCachedKeywords.py | 10 | 7224 | #!/usr/bin/env python2.5
#
# Copyright (c) 2009 Dr. D Studios. (Please refer to license for details)
# SVN_META_HEADURL = "$HeadURL: $"
# SVN_META_ID = "$Id: FixCachedKeywords.py 9408 2010-03-03 22:35:49Z brobison $"
#
import sys
import os
from PyQt4.QtSql import *
#-----------------------------------------------------------------------------
class FixCachedKeywords:
#-----------------------------------------------------------------------------
def __init__( self, parent ):
self.parent = parent
self.pgAssetName2Id = {}
self.pgAssetId2Name = {}
self.pgKeywordName2Id = {}
self.pgKeywordId2Name = {}
self.pgKeywordMapping = {}
self._pgdb = QSqlDatabase.addDatabase( "QPSQL", "pgDB" )
self._pgdb.setDatabaseName( "bach" )
self._pgdb.setHostName( "sql01" )
self._pgdb.setUserName( "bach" )
self._pgdb.setPassword( "escher" )
if not self._pgdb.open():
self.p( "Couldn't open Bach DB" )
return False
self.p( "Opened Bach DB" )
self.dryRun = True
self.dryRun = False
self.fout = file( 'fixKeyword.bach.sql', 'wt' )
self.collectPGData_Keyword()
idx = 0
for k in self.pgKeywordMapping:
keywords = ','.join( self.pgKeywordMapping[ k ] )
s = "UPDATE bachasset SET cachedkeywords='%s' WHERE keybachasset=%d;" % ( esc( keywords ), k )
self._doPGSqlMod( s )
print idx, len( self.pgKeywordMapping), s
#-----------------------------------------------------------------------------
def p( self, p ):
self.parent.printIt( p )
#-----------------------------------------------------------------------------
def pS( self, p ):
self.parent.stat( p )
#-----------------------------------------------------------------------------
def _doPGSql( self, query ):
# self.p( '>>> Executing: [Bach] [%s]' % query )
q = QSqlQuery( query, self._pgdb )
#self.p( '<<< Done' )
return q
#-----------------------------------------------------------------------------
def _doPGSqlMod( self, query ):
self.fout.write( query )
self.fout.write( '\n' )
if self.dryRun:
return
#self.p( '>>> Executing: [Bach] [%s]' % query )
q = QSqlQuery( query, self._pgdb )
#self.p( '<<< Done' )
return q
#-----------------------------------------------------------------------------
def collectPGData_Asset(self):
q = self._doPGSql("""SELECT path, keybachasset FROM bachasset""")
while(q.next()):
name, id = extractPGAsset( q )
self.pgAssetName2Id[ name ] = id
self.pgAssetId2Name[ id ] = name
#-----------------------------------------------------------------------------
def collectPGData_Keyword(self):
q = self._doPGSql("""SELECT keybachasset, name FROM
bachkeywordmap, bachasset, bachkeyword
WHERE
fkeybachasset=keybachasset AND
fkeybachkeyword=keybachkeyword""")
while(q.next()):
d = extractPGKeywordMapping( q )
id = d[ 0 ]
name = d[ 1 ]
if not id in self.pgKeywordMapping:
self.pgKeywordMapping[ id ] = [ name ]
self.pgKeywordMapping[ id ].append( name )
#-----------------------------------------------------------------------------
def collectPGData(self):
self.p( "Preloading Bach data..." )
#----------------
self.collectPGData_Asset()
self.collectPGData_Keyword()
#----------------
self.p( "... finished" )
#-----------------------------------------------------------------------------
def assetExists(self, path):
if not path in self.pgAssetName2Id:
return 0
return self.pgAssetName2Id[ path ]
#-----------------------------------------------------------------------------
def getAssetId( self, path ):
return self.assetExists( path )
#-----------------------------------------------------------------------------
def keywordExists(self, name):
if not name in self.pgKeywordName2Id:
return 0
return self.pgKeywordName2Id[ name ]
#-----------------------------------------------------------------------------
def getKeywordId( self, name ):
return self.keywordExists( name )
#-----------------------------------------------------------------------------
def keywordMapExists(self, imgPath, keywordName):
if not imgPath in self.pgKeywordMapping:
return False
if not keywordName in self.pgKeywordMapping[ imgPath ]:
return False
return True
#-----------------------------------------------------------------------------
def collectionExists(self, name):
if not name in self.pgCollectionName2Id:
return 0
return self.pgCollectionName2Id[ name ]
#-----------------------------------------------------------------------------
def getCollectionId( self, name ):
return self.collectionExists( name )
#-----------------------------------------------------------------------------
def collectionMapExists(self, imgPath, collectionName):
if not imgPath in self.pgCollectionMapping:
return False
if not collectionName in self.pgCollectionMapping[ imgPath ]:
return False
return True
#-----------------------------------------------------------------------------
def esc( s ):
s = s.replace( '\'', '\'\'' )
return s
#-----------------------------------------------------------------------------
def toS( variant ):
v = variant.toString()
return str( v.toAscii() )
#-----------------------------------------------------------------------------
def toI( variant ):
v, ok = variant.toInt()
return int( v )
#-----------------------------------------------------------------------------
def extractPGAsset( query ):
name = toS( query.value( 0 ) )
id = toI( query.value( 1 ) )
return name, id
#-----------------------------------------------------------------------------
def extractPGKeyword( query ):
name = toS( query.value( 0 ) )
id = toI( query.value( 1 ) )
return name, id
#-----------------------------------------------------------------------------
def extractPGCollection( query ):
name = toS( query.value( 0 ) )
id = toI( query.value( 1 ) )
return name, id
#-----------------------------------------------------------------------------
def extractPGCollectionMapping( query ):
d = []
d.append( toI( query.value(0) ) )
d.append( toS( query.value(1) ) )
return d
#-----------------------------------------------------------------------------
def extractPGKeywordMapping( query ):
d = []
d.append( toI( query.value(0) ) )
d.append( toS( query.value(1) ) )
return d
#-----------------------------------------------------------------------------
class Printer():
def printIt(self,p):
print p
if __name__=='__main__':
printer = Printer()
fixit = FixCachedKeywords( printer )
| gpl-2.0 | 271,184,016,735,814,200 | 32.444444 | 106 | 0.406285 | false |
manastech/de-bee | gdata/Crypto/Cipher/__init__.py | 271 | 1145 | """Secret-key encryption algorithms.
Secret-key encryption algorithms transform plaintext in some way that
is dependent on a key, producing ciphertext. This transformation can
easily be reversed, if (and, hopefully, only if) one knows the key.
The encryption modules here all support the interface described in PEP
272, "API for Block Encryption Algorithms".
If you don't know which algorithm to choose, use AES because it's
standard and has undergone a fair bit of examination.
Crypto.Cipher.AES Advanced Encryption Standard
Crypto.Cipher.ARC2 Alleged RC2
Crypto.Cipher.ARC4 Alleged RC4
Crypto.Cipher.Blowfish
Crypto.Cipher.CAST
Crypto.Cipher.DES The Data Encryption Standard. Very commonly used
in the past, but today its 56-bit keys are too small.
Crypto.Cipher.DES3 Triple DES.
Crypto.Cipher.IDEA
Crypto.Cipher.RC5
Crypto.Cipher.XOR The simple XOR cipher.
"""
__all__ = ['AES', 'ARC2', 'ARC4',
'Blowfish', 'CAST', 'DES', 'DES3', 'IDEA', 'RC5',
'XOR'
]
__revision__ = "$Id: __init__.py,v 1.7 2003/02/28 15:28:35 akuchling Exp $"
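# Illustrative sketch of the PEP 272 usage pattern these modules share (not
# part of the original file); the 16-byte key, IV and message below are
# placeholder values:
#
#   from Crypto.Cipher import AES
#   cipher = AES.new('0123456789abcdef', AES.MODE_CBC, 'abcdef9876543210')
#   ciphertext = cipher.encrypt('sixteen byte msg')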
| mit | 5,950,643,396,369,680,000 | 33.69697 | 79 | 0.687336 | false |
jordiclariana/ansible | lib/ansible/modules/cloud/amazon/iam_mfa_device_facts.py | 48 | 3696 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: iam_mfa_device_facts
short_description: List the MFA (Multi-Factor Authentication) devices registered for a user
description:
- List the MFA (Multi-Factor Authentication) devices registered for a user
version_added: "2.2"
author: Victor Costan (@pwnall)
options:
user_name:
description:
- The name of the user whose MFA devices will be listed
required: false
default: null
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
- botocore
'''
RETURN = """
mfa_devices:
description: The MFA devices registered for the given user
returned: always
type: list
sample:
- enable_date: "2016-03-11T23:25:36+00:00"
serial_number: arn:aws:iam::085120003701:mfa/pwnall
user_name: pwnall
- enable_date: "2016-03-11T23:25:37+00:00"
serial_number: arn:aws:iam::085120003702:mfa/pwnall
user_name: pwnall
"""
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# List MFA devices (more details: http://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html)
iam_mfa_device_facts:
register: mfa_devices
# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
sts_assume_role:
mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}"
role_arn: "arn:aws:iam::123456789012:role/someRole"
role_session_name: "someRoleSession"
register: assumed_role
'''
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def list_mfa_devices(connection, module):
user_name = module.params.get('user_name')
changed = False
args = {}
if user_name is not None:
args['UserName'] = user_name
try:
response = connection.list_mfa_devices(**args)
except ClientError as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
user_name=dict(required=False, default=None)
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs)
else:
module.fail_json(msg="region must be specified")
list_mfa_devices(connection, module)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 | 8,583,782,468,030,183,000 | 29.295082 | 130 | 0.689394 | false |
crobinso/pkgdb2 | pkgdb2/ui/collections.py | 4 | 6211 | # -*- coding: utf-8 -*-
#
# Copyright © 2013-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
'''
UI namespace for the Flask application.
'''
import flask
from math import ceil
import pkgdb2.forms
import pkgdb2.lib as pkgdblib
from pkgdb2 import SESSION, APP, is_admin
from pkgdb2.ui import UI
## Some of the objects we use here have inherited methods which apparently
## pylint does not detect.
# pylint: disable=E1101
@UI.route('/collections/')
@UI.route('/collections/page/<int:page>/')
@UI.route('/collections/<motif>/')
@UI.route('/collections/<motif>/page/<int:page>/')
def list_collections(motif=None, page=1):
''' Display the list of collections corresponding to the motif. '''
pattern = flask.request.args.get('motif', motif) or '*'
limit = flask.request.args.get('limit', APP.config['ITEMS_PER_PAGE'])
try:
limit = abs(int(limit))
except ValueError:
limit = APP.config['ITEMS_PER_PAGE']
flask.flash('Incorrect limit provided, using default', 'errors')
collections = pkgdblib.search_collection(
SESSION,
pattern=pattern,
page=page,
limit=limit,
)
collections_count = pkgdblib.search_collection(
SESSION,
pattern=pattern,
page=page,
limit=limit,
count=True
)
total_page = int(ceil(collections_count / float(limit)))
return flask.render_template(
'list_collections.html',
collections=collections,
motif=motif,
total_page=total_page,
page=page
)
@UI.route('/collection/<collection>/')
def collection_info(collection):
''' Display the information about the specified collection. '''
try:
collection = pkgdblib.search_collection(SESSION, collection)[0]
except IndexError:
flask.flash('No collection of this name found.', 'errors')
return flask.render_template('msg.html')
return flask.render_template(
'collection.html',
collection=collection,
)
@UI.route('/collection/<collection>/edit', methods=('GET', 'POST'))
@is_admin
def collection_edit(collection):
    ''' Allows editing the information about the specified collection. '''
try:
collection = pkgdblib.search_collection(SESSION, collection)[0]
except IndexError:
flask.flash('No collection of this name found.', 'errors')
return flask.render_template('msg.html')
clt_status = pkgdblib.get_status(SESSION, 'clt_status')['clt_status']
form = pkgdb2.forms.AddCollectionForm(
clt_status=clt_status
)
if form.validate_on_submit():
clt_name = form.clt_name.data
clt_version = form.version.data
clt_status = form.clt_status.data
clt_branchname = form.branchname.data
clt_disttag = form.dist_tag.data
clt_koji_name = form.kojiname.data
try:
pkgdblib.edit_collection(
SESSION,
collection=collection,
clt_name=clt_name,
clt_version=clt_version,
clt_status=clt_status,
clt_branchname=clt_branchname,
clt_disttag=clt_disttag,
clt_koji_name=clt_koji_name,
user=flask.g.fas_user,
)
SESSION.commit()
flask.flash('Collection "%s" edited' % clt_branchname)
return flask.redirect(flask.url_for(
'.collection_info', collection=collection.branchname))
# In theory we should never hit this
except pkgdblib.PkgdbException, err: # pragma: no cover
SESSION.rollback()
flask.flash(str(err), 'errors')
elif flask.request.method == 'GET':
form = pkgdb2.forms.AddCollectionForm(
clt_status=clt_status,
collection=collection
)
return flask.render_template(
'collection_edit.html',
form=form,
collection=collection,
)
@UI.route('/new/collection/', methods=('GET', 'POST'))
@is_admin
def collection_new():
''' Page to create a new collection. '''
clt_status = pkgdblib.get_status(SESSION, 'clt_status')['clt_status']
form = pkgdb2.forms.AddCollectionForm(clt_status=clt_status)
if form.validate_on_submit():
clt_name = form.clt_name.data
clt_version = form.version.data
clt_status = form.clt_status.data
clt_branchname = form.branchname.data
clt_disttag = form.dist_tag.data
clt_koji_name = form.kojiname.data
try:
message = pkgdblib.add_collection(
SESSION,
clt_name=clt_name,
clt_version=clt_version,
clt_status=clt_status,
clt_branchname=clt_branchname,
clt_disttag=clt_disttag,
clt_koji_name=clt_koji_name,
user=flask.g.fas_user,
)
SESSION.commit()
flask.flash(message)
return flask.redirect(flask.url_for('.list_collections'))
# In theory we should never hit this
except pkgdblib.PkgdbException, err: # pragma: no cover
SESSION.rollback()
flask.flash(str(err), 'errors')
return flask.render_template(
'collection_new.html',
form=form,
)
| gpl-2.0 | 6,682,098,268,803,269,000 | 31.684211 | 74 | 0.63124 | false |
rghe/ansible | test/units/modules/network/nxos/test_nxos_bgp_neighbor_af.py | 18 | 4920 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_bgp_neighbor_af
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosBgpNeighborAfModule(TestNxosModule):
module = nxos_bgp_neighbor_af
def setUp(self):
super(TestNxosBgpNeighborAfModule, self).setUp()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp_neighbor_af.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp_neighbor_af.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
super(TestNxosBgpNeighborAfModule, self).tearDown()
self.mock_load_config.stop()
self.mock_get_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('nxos_bgp', 'config.cfg')
self.load_config.return_value = []
def test_nxos_bgp_neighbor_af(self):
set_module_args(dict(asn=65535, neighbor='192.0.2.3', afi='ipv4',
safi='unicast', route_reflector_client=True))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], [
'router bgp 65535', 'neighbor 192.0.2.3', 'address-family ipv4 unicast',
'route-reflector-client'
])
def test_nxos_bgp_neighbor_af_exists(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4', safi='unicast'))
self.execute_module(changed=False, commands=[])
def test_nxos_bgp_neighbor_af_absent(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4', safi='unicast', state='absent'))
self.execute_module(
changed=True, sort=False,
commands=['router bgp 65535', 'neighbor 3.3.3.5', 'no address-family ipv4 unicast']
)
def test_nxos_bgp_neighbor_af_advertise_map(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4', safi='unicast',
advertise_map_exist=['my_advertise_map', 'my_exist_map']))
self.execute_module(
changed=True, sort=False,
commands=['router bgp 65535', 'neighbor 3.3.3.5', 'address-family ipv4 unicast', 'advertise-map my_advertise_map exist-map my_exist_map']
)
def test_nxos_bgp_neighbor_af_advertise_map_non_exist(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4', safi='unicast',
advertise_map_non_exist=['my_advertise_map', 'my_non_exist_map']))
self.execute_module(
changed=True, sort=False,
commands=['router bgp 65535', 'neighbor 3.3.3.5', 'address-family ipv4 unicast', 'advertise-map my_advertise_map non-exist-map my_non_exist_map']
)
def test_nxos_bgp_neighbor_af_max_prefix_limit_default(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4',
safi='unicast', max_prefix_limit='default'))
self.execute_module(
changed=True, sort=False,
commands=['router bgp 65535', 'neighbor 3.3.3.5', 'address-family ipv4 unicast', 'no maximum-prefix']
)
def test_nxos_bgp_neighbor_af_max_prefix(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4',
safi='unicast', max_prefix_threshold=20,
max_prefix_limit=20))
self.execute_module(
changed=True, sort=False,
commands=['router bgp 65535', 'neighbor 3.3.3.5', 'address-family ipv4 unicast', 'maximum-prefix 20 20']
)
def test_nxos_bgp_neighbor_af_disable_peer_as_check(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4',
safi='unicast', disable_peer_as_check=True))
self.execute_module(
changed=True,
commands=['router bgp 65535', 'neighbor 3.3.3.5', 'address-family ipv4 unicast', 'disable-peer-as-check']
)
| gpl-3.0 | 5,345,591,356,409,268,000 | 44.555556 | 157 | 0.638618 | false |
cernops/nova | nova/tests/unit/console/test_websocketproxy.py | 10 | 13858 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for nova websocketproxy."""
import mock
from nova.console import websocketproxy
from nova import exception
from nova import test
class NovaProxyRequestHandlerBaseTestCase(test.NoDBTestCase):
def setUp(self):
super(NovaProxyRequestHandlerBaseTestCase, self).setUp()
self.flags(console_allowed_origins = ['allowed-origin-example-1.net',
'allowed-origin-example-2.net'])
self.wh = websocketproxy.NovaProxyRequestHandlerBase()
self.wh.socket = mock.MagicMock()
self.wh.msg = mock.MagicMock()
self.wh.do_proxy = mock.MagicMock()
self.wh.headers = mock.MagicMock()
def _fake_getheader(self, header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return 'https://example.net:6080'
elif header == 'Host':
return 'example.net:6080'
else:
return
def _fake_getheader_ipv6(self, header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return 'https://[2001:db8::1]:6080'
elif header == 'Host':
return '[2001:db8::1]:6080'
else:
return
def _fake_getheader_bad_token(self, header):
if header == 'cookie':
return 'token="XXX"'
elif header == 'Origin':
return 'https://example.net:6080'
elif header == 'Host':
return 'example.net:6080'
else:
return
def _fake_getheader_bad_origin(self, header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return 'https://bad-origin-example.net:6080'
elif header == 'Host':
return 'example.net:6080'
else:
return
def _fake_getheader_allowed_origin(self, header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return 'https://allowed-origin-example-2.net:6080'
elif header == 'Host':
return 'example.net:6080'
else:
return
def _fake_getheader_blank_origin(self, header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return ''
elif header == 'Host':
return 'example.net:6080'
else:
return
def _fake_getheader_no_origin(self, header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return None
elif header == 'Host':
return 'any-example.net:6080'
else:
return
def _fake_getheader_http(self, header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return 'http://example.net:6080'
elif header == 'Host':
return 'example.net:6080'
else:
return
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url': 'https://example.net:6080'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers.getheader = self._fake_getheader
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_ipv6_url(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url': 'https://[2001:db8::1]:6080'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://[2001:db8::1]/?token=123-456-789"
self.wh.headers.getheader = self._fake_getheader_ipv6
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_token_invalid(self, check_token):
check_token.return_value = False
self.wh.path = "http://127.0.0.1/?token=XXX"
self.wh.headers.getheader = self._fake_getheader_bad_token
self.assertRaises(exception.InvalidToken,
self.wh.new_websocket_client)
check_token.assert_called_with(mock.ANY, token="XXX")
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_internal_access_path(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'internal_access_path': 'vmid',
'console_type': 'novnc',
'access_url': 'https://example.net:6080'
}
tsock = mock.MagicMock()
tsock.recv.return_value = "HTTP/1.1 200 OK\r\n\r\n"
self.wh.socket.return_value = tsock
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers.getheader = self._fake_getheader
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with(tsock)
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_internal_access_path_err(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'internal_access_path': 'xxx',
'console_type': 'novnc',
'access_url': 'https://example.net:6080'
}
tsock = mock.MagicMock()
tsock.recv.return_value = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
self.wh.socket.return_value = tsock
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers.getheader = self._fake_getheader
self.assertRaises(exception.InvalidConnectionInfo,
self.wh.new_websocket_client)
check_token.assert_called_with(mock.ANY, token="123-456-789")
@mock.patch('sys.version_info')
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_py273_good_scheme(
self, check_token, version_info):
version_info.return_value = (2, 7, 3)
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url': 'https://example.net:6080'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers.getheader = self._fake_getheader
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('sys.version_info')
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_py273_special_scheme(
self, check_token, version_info):
version_info.return_value = (2, 7, 3)
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "ws://127.0.0.1/?token=123-456-789"
self.wh.headers.getheader = self._fake_getheader
self.assertRaises(exception.NovaException,
self.wh.new_websocket_client)
@mock.patch('socket.getfqdn')
def test_address_string_doesnt_do_reverse_dns_lookup(self, getfqdn):
request_mock = mock.MagicMock()
request_mock.makefile().readline.side_effect = [
'GET /vnc.html?token=123-456-789 HTTP/1.1\r\n',
''
]
server_mock = mock.MagicMock()
client_address = ('8.8.8.8', 54321)
handler = websocketproxy.NovaProxyRequestHandler(
request_mock, client_address, server_mock)
handler.log_message('log message using client address context info')
self.assertFalse(getfqdn.called) # no reverse dns look up
self.assertEqual(handler.address_string(), '8.8.8.8') # plain address
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_bad_origin_header(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc'
}
self.wh.path = "http://127.0.0.1/"
self.wh.headers.getheader = self._fake_getheader_bad_origin
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_allowed_origin_header(self,
check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url': 'https://example.net:6080'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/"
self.wh.headers.getheader = self._fake_getheader_allowed_origin
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_blank_origin_header(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc'
}
self.wh.path = "http://127.0.0.1/"
self.wh.headers.getheader = self._fake_getheader_blank_origin
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_no_origin_header(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/"
self.wh.headers.getheader = self._fake_getheader_no_origin
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_https_origin_proto_http(self,
check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url': 'http://example.net:6080'
}
self.wh.path = "https://127.0.0.1/"
self.wh.headers.getheader = self._fake_getheader
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_https_origin_proto_ws(self,
check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'serial',
'access_url': 'ws://example.net:6080'
}
self.wh.path = "https://127.0.0.1/"
self.wh.headers.getheader = self._fake_getheader
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_bad_console_type(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'bad-console-type'
}
self.wh.path = "http://127.0.0.1/"
self.wh.headers.getheader = self._fake_getheader
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
| apache-2.0 | -5,190,832,898,095,035,000 | 36.252688 | 79 | 0.581686 | false |
spacy-io/spaCy | spacy/displacy/render.py | 2 | 13219 | from typing import Dict, Any, List, Optional, Union
import uuid
from .templates import TPL_DEP_SVG, TPL_DEP_WORDS, TPL_DEP_WORDS_LEMMA, TPL_DEP_ARCS
from .templates import TPL_ENT, TPL_ENT_RTL, TPL_FIGURE, TPL_TITLE, TPL_PAGE
from .templates import TPL_ENTS
from ..util import minify_html, escape_html, registry
from ..errors import Errors
DEFAULT_LANG = "en"
DEFAULT_DIR = "ltr"
DEFAULT_ENTITY_COLOR = "#ddd"
DEFAULT_LABEL_COLORS = {
"ORG": "#7aecec",
"PRODUCT": "#bfeeb7",
"GPE": "#feca74",
"LOC": "#ff9561",
"PERSON": "#aa9cfc",
"NORP": "#c887fb",
"FACILITY": "#9cc9cc",
"EVENT": "#ffeb80",
"LAW": "#ff8197",
"LANGUAGE": "#ff8197",
"WORK_OF_ART": "#f0d0ff",
"DATE": "#bfe1d9",
"TIME": "#bfe1d9",
"MONEY": "#e4e7d2",
"QUANTITY": "#e4e7d2",
"ORDINAL": "#e4e7d2",
"CARDINAL": "#e4e7d2",
"PERCENT": "#e4e7d2",
}
class DependencyRenderer:
"""Render dependency parses as SVGs."""
style = "dep"
def __init__(self, options: Dict[str, Any] = {}) -> None:
"""Initialise dependency renderer.
options (dict): Visualiser-specific options (compact, word_spacing,
arrow_spacing, arrow_width, arrow_stroke, distance, offset_x,
color, bg, font)
"""
self.compact = options.get("compact", False)
self.word_spacing = options.get("word_spacing", 45)
self.arrow_spacing = options.get("arrow_spacing", 12 if self.compact else 20)
self.arrow_width = options.get("arrow_width", 6 if self.compact else 10)
self.arrow_stroke = options.get("arrow_stroke", 2)
self.distance = options.get("distance", 150 if self.compact else 175)
self.offset_x = options.get("offset_x", 50)
self.color = options.get("color", "#000000")
self.bg = options.get("bg", "#ffffff")
self.font = options.get("font", "Arial")
self.direction = DEFAULT_DIR
self.lang = DEFAULT_LANG
def render(
self, parsed: List[Dict[str, Any]], page: bool = False, minify: bool = False
) -> str:
"""Render complete markup.
parsed (list): Dependency parses to render.
page (bool): Render parses wrapped as full HTML page.
minify (bool): Minify HTML markup.
RETURNS (str): Rendered SVG or HTML markup.
"""
# Create a random ID prefix to make sure parses don't receive the
# same ID, even if they're identical
id_prefix = uuid.uuid4().hex
rendered = []
for i, p in enumerate(parsed):
if i == 0:
settings = p.get("settings", {})
self.direction = settings.get("direction", DEFAULT_DIR)
self.lang = settings.get("lang", DEFAULT_LANG)
render_id = f"{id_prefix}-{i}"
svg = self.render_svg(render_id, p["words"], p["arcs"])
rendered.append(svg)
if page:
content = "".join([TPL_FIGURE.format(content=svg) for svg in rendered])
markup = TPL_PAGE.format(
content=content, lang=self.lang, dir=self.direction
)
else:
markup = "".join(rendered)
if minify:
return minify_html(markup)
return markup
def render_svg(
self,
render_id: Union[int, str],
words: List[Dict[str, Any]],
arcs: List[Dict[str, Any]],
) -> str:
"""Render SVG.
render_id (Union[int, str]): Unique ID, typically index of document.
words (list): Individual words and their tags.
arcs (list): Individual arcs and their start, end, direction and label.
RETURNS (str): Rendered SVG markup.
"""
self.levels = self.get_levels(arcs)
self.highest_level = len(self.levels)
self.offset_y = self.distance / 2 * self.highest_level + self.arrow_stroke
self.width = self.offset_x + len(words) * self.distance
self.height = self.offset_y + 3 * self.word_spacing
self.id = render_id
words_svg = [
self.render_word(w["text"], w["tag"], w.get("lemma", None), i)
for i, w in enumerate(words)
]
arcs_svg = [
self.render_arrow(a["label"], a["start"], a["end"], a["dir"], i)
for i, a in enumerate(arcs)
]
content = "".join(words_svg) + "".join(arcs_svg)
return TPL_DEP_SVG.format(
id=self.id,
width=self.width,
height=self.height,
color=self.color,
bg=self.bg,
font=self.font,
content=content,
dir=self.direction,
lang=self.lang,
)
def render_word(self, text: str, tag: str, lemma: str, i: int) -> str:
"""Render individual word.
text (str): Word text.
tag (str): Part-of-speech tag.
        lemma (str / None): Word lemma, included in the rendering when set.
        i (int): Unique ID, typically word index.
RETURNS (str): Rendered SVG markup.
"""
y = self.offset_y + self.word_spacing
x = self.offset_x + i * self.distance
if self.direction == "rtl":
x = self.width - x
html_text = escape_html(text)
if lemma is not None:
return TPL_DEP_WORDS_LEMMA.format(
text=html_text, tag=tag, lemma=lemma, x=x, y=y
)
return TPL_DEP_WORDS.format(text=html_text, tag=tag, x=x, y=y)
def render_arrow(
self, label: str, start: int, end: int, direction: str, i: int
) -> str:
"""Render individual arrow.
label (str): Dependency label.
start (int): Index of start word.
end (int): Index of end word.
direction (str): Arrow direction, 'left' or 'right'.
i (int): Unique ID, typically arrow index.
RETURNS (str): Rendered SVG markup.
"""
if start < 0 or end < 0:
error_args = dict(start=start, end=end, label=label, dir=direction)
raise ValueError(Errors.E157.format(**error_args))
level = self.levels.index(end - start) + 1
x_start = self.offset_x + start * self.distance + self.arrow_spacing
if self.direction == "rtl":
x_start = self.width - x_start
y = self.offset_y
x_end = (
self.offset_x
+ (end - start) * self.distance
+ start * self.distance
- self.arrow_spacing * (self.highest_level - level) / 4
)
if self.direction == "rtl":
x_end = self.width - x_end
y_curve = self.offset_y - level * self.distance / 2
if self.compact:
y_curve = self.offset_y - level * self.distance / 6
if y_curve == 0 and len(self.levels) > 5:
y_curve = -self.distance
arrowhead = self.get_arrowhead(direction, x_start, y, x_end)
arc = self.get_arc(x_start, y, y_curve, x_end)
label_side = "right" if self.direction == "rtl" else "left"
return TPL_DEP_ARCS.format(
id=self.id,
i=i,
stroke=self.arrow_stroke,
head=arrowhead,
label=label,
label_side=label_side,
arc=arc,
)
def get_arc(self, x_start: int, y: int, y_curve: int, x_end: int) -> str:
"""Render individual arc.
x_start (int): X-coordinate of arrow start point.
y (int): Y-coordinate of arrow start and end point.
        y_curve (int): Y-coordinate of Cubic Bézier y_curve point.
x_end (int): X-coordinate of arrow end point.
RETURNS (str): Definition of the arc path ('d' attribute).
"""
template = "M{x},{y} C{x},{c} {e},{c} {e},{y}"
if self.compact:
template = "M{x},{y} {x},{c} {e},{c} {e},{y}"
return template.format(x=x_start, y=y, c=y_curve, e=x_end)
def get_arrowhead(self, direction: str, x: int, y: int, end: int) -> str:
"""Render individual arrow head.
direction (str): Arrow direction, 'left' or 'right'.
x (int): X-coordinate of arrow start point.
y (int): Y-coordinate of arrow start and end point.
end (int): X-coordinate of arrow end point.
RETURNS (str): Definition of the arrow head path ('d' attribute).
"""
if direction == "left":
p1, p2, p3 = (x, x - self.arrow_width + 2, x + self.arrow_width - 2)
else:
p1, p2, p3 = (end, end + self.arrow_width - 2, end - self.arrow_width + 2)
return f"M{p1},{y + 2} L{p2},{y - self.arrow_width} {p3},{y - self.arrow_width}"
def get_levels(self, arcs: List[Dict[str, Any]]) -> List[int]:
"""Calculate available arc height "levels".
Used to calculate arrow heights dynamically and without wasting space.
        arcs (list): Individual arcs and their start, end, direction and label.
RETURNS (list): Arc levels sorted from lowest to highest.
"""
levels = set(map(lambda arc: arc["end"] - arc["start"], arcs))
return sorted(list(levels))
class EntityRenderer:
"""Render named entities as HTML."""
style = "ent"
def __init__(self, options: Dict[str, Any] = {}) -> None:
"""Initialise dependency renderer.
options (dict): Visualiser-specific options (colors, ents)
"""
colors = dict(DEFAULT_LABEL_COLORS)
user_colors = registry.displacy_colors.get_all()
for user_color in user_colors.values():
if callable(user_color):
# Since this comes from the function registry, we want to make
# sure we support functions that *return* a dict of colors
user_color = user_color()
if not isinstance(user_color, dict):
raise ValueError(Errors.E925.format(obj=type(user_color)))
colors.update(user_color)
colors.update(options.get("colors", {}))
self.default_color = DEFAULT_ENTITY_COLOR
self.colors = {label.upper(): color for label, color in colors.items()}
self.ents = options.get("ents", None)
if self.ents is not None:
self.ents = [ent.upper() for ent in self.ents]
self.direction = DEFAULT_DIR
self.lang = DEFAULT_LANG
template = options.get("template")
if template:
self.ent_template = template
else:
if self.direction == "rtl":
self.ent_template = TPL_ENT_RTL
else:
self.ent_template = TPL_ENT
def render(
self, parsed: List[Dict[str, Any]], page: bool = False, minify: bool = False
) -> str:
"""Render complete markup.
parsed (list): Dependency parses to render.
page (bool): Render parses wrapped as full HTML page.
minify (bool): Minify HTML markup.
RETURNS (str): Rendered HTML markup.
"""
rendered = []
for i, p in enumerate(parsed):
if i == 0:
settings = p.get("settings", {})
self.direction = settings.get("direction", DEFAULT_DIR)
self.lang = settings.get("lang", DEFAULT_LANG)
rendered.append(self.render_ents(p["text"], p["ents"], p.get("title")))
if page:
docs = "".join([TPL_FIGURE.format(content=doc) for doc in rendered])
markup = TPL_PAGE.format(content=docs, lang=self.lang, dir=self.direction)
else:
markup = "".join(rendered)
if minify:
return minify_html(markup)
return markup
def render_ents(
self, text: str, spans: List[Dict[str, Any]], title: Optional[str]
) -> str:
"""Render entities in text.
text (str): Original text.
spans (list): Individual entity spans and their start, end and label.
title (str / None): Document title set in Doc.user_data['title'].
"""
markup = ""
offset = 0
for span in spans:
label = span["label"]
start = span["start"]
end = span["end"]
additional_params = span.get("params", {})
entity = escape_html(text[start:end])
fragments = text[offset:start].split("\n")
for i, fragment in enumerate(fragments):
markup += escape_html(fragment)
if len(fragments) > 1 and i != len(fragments) - 1:
markup += "</br>"
if self.ents is None or label.upper() in self.ents:
color = self.colors.get(label.upper(), self.default_color)
ent_settings = {"label": label, "text": entity, "bg": color}
ent_settings.update(additional_params)
markup += self.ent_template.format(**ent_settings)
else:
markup += entity
offset = end
fragments = text[offset:].split("\n")
for i, fragment in enumerate(fragments):
markup += escape_html(fragment)
if len(fragments) > 1 and i != len(fragments) - 1:
markup += "</br>"
markup = TPL_ENTS.format(content=markup, dir=self.direction)
if title:
markup = TPL_TITLE.format(title=title) + markup
return markup
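# Illustrative usage sketch (not part of the original module); the input dicts
# follow the shapes documented in render() and render_ents() above:
#
#   dep = DependencyRenderer(options={"compact": True})
#   svg = dep.render([{"words": [{"text": "Hello", "tag": "INTJ"}], "arcs": []}])
#
#   ents = EntityRenderer()
#   html = ents.render(
#       [{"text": "London", "ents": [{"start": 0, "end": 6, "label": "GPE"}]}]
#   )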
| mit | 5,007,843,174,777,000,000 | 37.876471 | 88 | 0.554093 | false |
daonb/obudget | src/server/budget_lines/management/commands/budget_lines_csv_to_db.py | 1 | 1996 | import sys
import csv
import re
from obudget.budget_lines.models import BudgetLine
from django.core.management.base import BaseCommand
class Command(BaseCommand):
args = '<csv-file>'
help = 'Parses csv''d budget data into the DB'
def handle(self, *args, **options):
reader = csv.DictReader(file(args[0]), ['year','title','budget_id','allocated','revised','used'])
print 'Deleting current rows'
BudgetLine.objects.all().delete()
print 'Loading raw rows'
x = set()
k = 0
for d in reader:
key = d['budget_id'], d['year']
if key in x:
continue
x.add(key)
BudgetLine( title = d['title'].decode('utf8'),
budget_id = d['budget_id'],
amount_allocated = int(d['allocated']),
amount_revised = int(d['revised']),
amount_used = int(d['used']),
year = int(d['year']),
budget_id_len = len(d['budget_id']),
).save()
k+=1
if k % 1000 == 0:
print k
# Update internal relationships in the DB
print 'Internal relationships'
k = 0
for line in BudgetLine.objects.all():
k+=1
if k % 1000 == 0:
print k
if line.budget_id == None or len(line.budget_id) == 2:
continue
for i in range(2,len(line.budget_id),2):
parents = BudgetLine.objects.filter( year = line.year, budget_id = line.budget_id[:-i] ).count()
if parents > 0:
parent = BudgetLine.objects.get( year = line.year, budget_id = line.budget_id[:-i] )
line.containing_line = parent
line.save()
break
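# Worked example of the parent lookup above (illustrative, not in the original
# file): for a line with budget_id '00112233' the loop strips two characters at
# a time and looks for an existing line '001122', then '0011', then '00' in the
# same year, attaching the first match found as containing_line.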
| bsd-3-clause | -5,401,843,691,152,568,000 | 32.283333 | 112 | 0.462926 | false |
CloudServer/nova | nova/api/openstack/compute/contrib/baremetal_nodes.py | 60 | 6552 | # Copyright (c) 2013 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The bare-metal admin extension with Ironic Proxy."""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.i18n import _
ironic_client = importutils.try_import('ironicclient.client')
ironic_exc = importutils.try_import('ironicclient.exc')
authorize = extensions.extension_authorizer('compute', 'baremetal_nodes')
node_fields = ['id', 'cpus', 'local_gb', 'memory_mb', 'pm_address',
'pm_user', 'service_host', 'terminal_port', 'instance_uuid']
node_ext_fields = ['uuid', 'task_state', 'updated_at', 'pxe_config_path']
interface_fields = ['id', 'address', 'datapath_id', 'port_no']
CONF = cfg.CONF
CONF.import_opt('api_version',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('api_endpoint',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('admin_username',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('admin_password',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('admin_tenant_name',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('compute_driver', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
def _check_ironic_client_enabled():
"""Check whether Ironic is installed or not."""
if ironic_client is None:
msg = _("Ironic client unavailable, cannot access Ironic.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
def _get_ironic_client():
"""return an Ironic client."""
# TODO(NobodyCam): Fix insecure setting
kwargs = {'os_username': CONF.ironic.admin_username,
'os_password': CONF.ironic.admin_password,
'os_auth_url': CONF.ironic.admin_url,
'os_tenant_name': CONF.ironic.admin_tenant_name,
'os_service_type': 'baremetal',
'os_endpoint_type': 'public',
'insecure': 'true',
'ironic_url': CONF.ironic.api_endpoint}
ironicclient = ironic_client.get_client(CONF.ironic.api_version, **kwargs)
return ironicclient
def _no_ironic_proxy(cmd):
raise webob.exc.HTTPBadRequest(
explanation=_("Command Not supported. Please use Ironic "
"command %(cmd)s to perform this "
"action.") % {'cmd': cmd})
class BareMetalNodeController(wsgi.Controller):
"""The Bare-Metal Node API controller for the OpenStack API.
Ironic is used for the following commands:
'baremetal-node-list'
'baremetal-node-show'
"""
def __init__(self, ext_mgr=None, *args, **kwargs):
super(BareMetalNodeController, self).__init__(*args, **kwargs)
self.ext_mgr = ext_mgr
def _node_dict(self, node_ref):
d = {}
for f in node_fields:
d[f] = node_ref.get(f)
if self.ext_mgr.is_loaded('os-baremetal-ext-status'):
for f in node_ext_fields:
d[f] = node_ref.get(f)
return d
def index(self, req):
context = req.environ['nova.context']
authorize(context)
nodes = []
# proxy command to Ironic
_check_ironic_client_enabled()
ironicclient = _get_ironic_client()
ironic_nodes = ironicclient.node.list(detail=True)
for inode in ironic_nodes:
node = {'id': inode.uuid,
'interfaces': [],
'host': 'IRONIC MANAGED',
'task_state': inode.provision_state,
'cpus': inode.properties.get('cpus', 0),
'memory_mb': inode.properties.get('memory_mb', 0),
'disk_gb': inode.properties.get('local_gb', 0)}
nodes.append(node)
return {'nodes': nodes}
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
# proxy command to Ironic
_check_ironic_client_enabled()
icli = _get_ironic_client()
try:
inode = icli.node.get(id)
except ironic_exc.NotFound:
msg = _("Node %s could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
iports = icli.node.list_ports(id)
node = {'id': inode.uuid,
'interfaces': [],
'host': 'IRONIC MANAGED',
'task_state': inode.provision_state,
'cpus': inode.properties.get('cpus', 0),
'memory_mb': inode.properties.get('memory_mb', 0),
'disk_gb': inode.properties.get('local_gb', 0),
'instance_uuid': inode.instance_uuid}
for port in iports:
node['interfaces'].append({'address': port.address})
return {'node': node}
def create(self, req, body):
_no_ironic_proxy("port-create")
def delete(self, req, id):
_no_ironic_proxy("port-create")
@wsgi.action('add_interface')
def _add_interface(self, req, id, body):
_no_ironic_proxy("port-create")
@wsgi.action('remove_interface')
def _remove_interface(self, req, id, body):
_no_ironic_proxy("port-delete")
class Baremetal_nodes(extensions.ExtensionDescriptor):
"""Admin-only bare-metal node administration."""
name = "BareMetalNodes"
alias = "os-baremetal-nodes"
namespace = "http://docs.openstack.org/compute/ext/baremetal_nodes/api/v2"
updated = "2013-01-04T00:00:00Z"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-baremetal-nodes',
BareMetalNodeController(self.ext_mgr),
member_actions={"action": "POST", })
resources.append(res)
return resources
| apache-2.0 | 9,073,414,560,237,424,000 | 34.803279 | 78 | 0.596917 | false |
HewlettPackard/oneview-ansible | build-doc/module_docs_fragments/oneview.py | 1 | 2367 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
class ModuleDocFragment(object):
# OneView doc fragment
DOCUMENTATION = '''
options:
config:
description:
- Path to a .json configuration file containing the OneView client configuration.
The configuration file is optional. If the file path is not provided, the configuration will be loaded from
environment variables.
required: false
notes:
- "A sample configuration file for the config parameter can be found at:
U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json)"
- "Check how to use environment variables for configuration at:
U(https://github.com/HewlettPackard/oneview-ansible#environment-variables)"
- "Additional Playbooks for the HPE OneView Ansible modules can be found at:
U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples)"
'''
VALIDATEETAG = '''
options:
validate_etag:
description:
- When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag
for the resource matches the ETag provided in the data.
default: true
choices: ['true', 'false']
'''
FACTSPARAMS = '''
options:
params:
description:
- List of params to delimit, filter and sort the list of resources.
- "params allowed:
C(start): The first item to return, using 0-based indexing.
C(count): The number of resources to return.
C(filter): A general filter/query string to narrow the list of items returned.
C(sort): The sort order of the returned data set."
required: false
'''
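# Usage note (illustrative, not part of the original file): modules typically
# pull these fragments into their own docs via the DOCUMENTATION block, e.g.
#
#   extends_documentation_fragment:
#     - oneview
#     - oneview.validateetag
#     - oneview.factsparams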
| apache-2.0 | -8,440,713,204,019,359,000 | 38.45 | 119 | 0.692015 | false |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/unittest/main.py | 84 | 9759 | """Unittest main program"""
import sys
import argparse
import os
from . import loader, runner
from .signals import installHandler
__unittest = True
MAIN_EXAMPLES = """\
Examples:
%(prog)s test_module - run tests from test_module
%(prog)s module.TestClass - run tests from module.TestClass
%(prog)s module.Class.test_method - run specified test method
"""
MODULE_EXAMPLES = """\
Examples:
%(prog)s - run default set of tests
%(prog)s MyTestSuite - run suite 'MyTestSuite'
%(prog)s MyTestCase.testSomething - run MyTestCase.testSomething
%(prog)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
def _convert_name(name):
# on Linux / Mac OS X 'foo.PY' is not importable, but on
# Windows it is. Simpler to do a case insensitive match
# a better check would be to check that the name is a
# valid Python module name.
if os.path.isfile(name) and name.lower().endswith('.py'):
if os.path.isabs(name):
rel_path = os.path.relpath(name, os.getcwd())
if os.path.isabs(rel_path) or rel_path.startswith(os.pardir):
return name
name = rel_path
# on Windows both '\' and '/' are used as path
# separators. Better to replace both than rely on os.path.sep
return name[:-3].replace('\\', '.').replace('/', '.')
return name
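# Example of the conversion performed by _convert_name (illustrative, not part
# of the original file): assuming tests/test_spam.py exists relative to the
# current directory,
#   _convert_name('tests/test_spam.py')  ->  'tests.test_spam'
# while names that are not existing .py files are returned unchanged.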
def _convert_names(names):
return [_convert_name(name) for name in names]
class TestProgram(object):
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
# defaults for testing
module=None
verbosity = 1
failfast = catchbreak = buffer = progName = warnings = None
_discovery_parser = None
def __init__(self, module='__main__', defaultTest=None, argv=None,
testRunner=None, testLoader=loader.defaultTestLoader,
exit=True, verbosity=1, failfast=None, catchbreak=None,
buffer=None, warnings=None):
if isinstance(module, str):
self.module = __import__(module)
for part in module.split('.')[1:]:
self.module = getattr(self.module, part)
else:
self.module = module
if argv is None:
argv = sys.argv
self.exit = exit
self.failfast = failfast
self.catchbreak = catchbreak
self.verbosity = verbosity
self.buffer = buffer
if warnings is None and not sys.warnoptions:
            # even if DeprecationWarnings are ignored by default
# print them anyway unless other warnings settings are
# specified by the warnings arg or the -W python flag
self.warnings = 'default'
else:
# here self.warnings is set either to the value passed
# to the warnings args or to None.
# If the user didn't pass a value self.warnings will
# be None. This means that the behavior is unchanged
# and depends on the values passed to -W.
self.warnings = warnings
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.parseArgs(argv)
self.runTests()
def usageExit(self, msg=None):
if msg:
print(msg)
if self._discovery_parser is None:
self._initArgParsers()
self._print_help()
sys.exit(2)
def _print_help(self, *args, **kwargs):
if self.module is None:
print(self._main_parser.format_help())
print(MAIN_EXAMPLES % {'prog': self.progName})
self._discovery_parser.print_help()
else:
print(self._main_parser.format_help())
print(MODULE_EXAMPLES % {'prog': self.progName})
def parseArgs(self, argv):
self._initArgParsers()
if self.module is None:
if len(argv) > 1 and argv[1].lower() == 'discover':
self._do_discovery(argv[2:])
return
self._main_parser.parse_args(argv[1:], self)
if not self.tests:
# this allows "python -m unittest -v" to still work for
# test discovery.
self._do_discovery([])
return
else:
self._main_parser.parse_args(argv[1:], self)
if self.tests:
self.testNames = _convert_names(self.tests)
if __name__ == '__main__':
# to support python -m unittest ...
self.module = None
elif self.defaultTest is None:
# createTests will load tests from self.module
self.testNames = None
elif isinstance(self.defaultTest, str):
self.testNames = (self.defaultTest,)
else:
self.testNames = list(self.defaultTest)
self.createTests()
def createTests(self):
if self.testNames is None:
self.test = self.testLoader.loadTestsFromModule(self.module)
else:
self.test = self.testLoader.loadTestsFromNames(self.testNames,
self.module)
def _initArgParsers(self):
parent_parser = self._getParentArgParser()
self._main_parser = self._getMainArgParser(parent_parser)
self._discovery_parser = self._getDiscoveryArgParser(parent_parser)
def _getParentArgParser(self):
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-v', '--verbose', dest='verbosity',
action='store_const', const=2,
help='Verbose output')
parser.add_argument('-q', '--quiet', dest='verbosity',
action='store_const', const=0,
help='Quiet output')
if self.failfast is None:
parser.add_argument('-f', '--failfast', dest='failfast',
action='store_true',
help='Stop on first fail or error')
self.failfast = False
if self.catchbreak is None:
parser.add_argument('-c', '--catch', dest='catchbreak',
action='store_true',
help='Catch ctrl-C and display results so far')
self.catchbreak = False
if self.buffer is None:
parser.add_argument('-b', '--buffer', dest='buffer',
action='store_true',
help='Buffer stdout and stderr during tests')
self.buffer = False
return parser
def _getMainArgParser(self, parent):
parser = argparse.ArgumentParser(parents=[parent])
parser.prog = self.progName
parser.print_help = self._print_help
parser.add_argument('tests', nargs='*',
help='a list of any number of test modules, '
'classes and test methods.')
return parser
def _getDiscoveryArgParser(self, parent):
parser = argparse.ArgumentParser(parents=[parent])
parser.prog = '%s discover' % self.progName
parser.epilog = ('For test discovery all test modules must be '
'importable from the top level directory of the '
'project.')
parser.add_argument('-s', '--start-directory', dest='start',
help="Directory to start discovery ('.' default)")
parser.add_argument('-p', '--pattern', dest='pattern',
help="Pattern to match tests ('test*.py' default)")
parser.add_argument('-t', '--top-level-directory', dest='top',
help='Top level directory of project (defaults to '
'start directory)')
for arg in ('start', 'pattern', 'top'):
parser.add_argument(arg, nargs='?',
default=argparse.SUPPRESS,
help=argparse.SUPPRESS)
return parser
def _do_discovery(self, argv, Loader=None):
self.start = '.'
self.pattern = 'test*.py'
self.top = None
if argv is not None:
# handle command line args for test discovery
if self._discovery_parser is None:
# for testing
self._initArgParsers()
self._discovery_parser.parse_args(argv, self)
loader = self.testLoader if Loader is None else Loader()
self.test = loader.discover(self.start, self.pattern, self.top)
def runTests(self):
if self.catchbreak:
installHandler()
if self.testRunner is None:
self.testRunner = runner.TextTestRunner
if isinstance(self.testRunner, type):
try:
testRunner = self.testRunner(verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer,
warnings=self.warnings)
except TypeError:
# didn't accept the verbosity, buffer or failfast arguments
testRunner = self.testRunner()
else:
# it is assumed to be a TestRunner instance
testRunner = self.testRunner
self.result = testRunner.run(self.test)
if self.exit:
sys.exit(not self.result.wasSuccessful())
main = TestProgram
| gpl-2.0 | -1,070,835,917,345,665,800 | 38.350806 | 79 | 0.549851 | false |
farodin91/servo | components/script/dom/bindings/codegen/parser/tests/test_replaceable.py | 138 | 1833 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
def should_throw(parser, harness, message, code):
    parser = parser.reset()
threw = False
try:
parser.parse(code)
parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown: %s" % message)
def WebIDLTest(parser, harness):
# The [Replaceable] extended attribute MUST take no arguments.
should_throw(parser, harness, "no arguments", """
interface I {
[Replaceable=X] readonly attribute long A;
};
""")
# An attribute with the [Replaceable] extended attribute MUST NOT also be
# declared with the [PutForwards] extended attribute.
should_throw(parser, harness, "PutForwards", """
interface I {
[PutForwards=B, Replaceable] readonly attribute J A;
};
interface J {
attribute long B;
};
""")
# The [Replaceable] extended attribute MUST NOT be used on an attribute
# that is not read only.
should_throw(parser, harness, "writable attribute", """
interface I {
[Replaceable] attribute long A;
};
""")
# The [Replaceable] extended attribute MUST NOT be used on a static
# attribute.
should_throw(parser, harness, "static attribute", """
interface I {
[Replaceable] static readonly attribute long A;
};
""")
# The [Replaceable] extended attribute MUST NOT be used on an attribute
# declared on a callback interface.
should_throw(parser, harness, "callback interface", """
callback interface I {
[Replaceable] readonly attribute long A;
};
""")
| mpl-2.0 | -8,153,213,442,284,906,000 | 30.603448 | 77 | 0.62084 | false |
noroot/zulip | api/integrations/codebase/zulip_codebase_config.py | 124 | 2537 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Change these values to configure authentication for your codebase account
# Note that this is the Codebase API Username, found in the Settings page
# for your account
CODEBASE_API_USERNAME = "[email protected]"
CODEBASE_API_KEY = "1234561234567abcdef"
# The URL of your codebase setup
CODEBASE_ROOT_URL = "https://YOUR_COMPANY.codebasehq.com"
# When initially started, how many hours of messages to include.
# Note that the Codebase API only returns the 20 latest events,
# if you have more than 20 events that fit within this window,
# earlier ones may be lost
CODEBASE_INITIAL_HISTORY_HOURS = 12
# Change these values to configure Zulip authentication for the plugin
ZULIP_USER = "[email protected]"
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"
# The streams to send commit information and ticket information to
ZULIP_COMMITS_STREAM_NAME = "codebase"
ZULIP_TICKETS_STREAM_NAME = "tickets"
# If properly installed, the Zulip API should be in your import
# path, but if not, set a custom path below
ZULIP_API_PATH = None
# Set this to your Zulip API server URI
ZULIP_SITE = "https://api.zulip.com"
# If you wish to log to a file rather than stdout/stderr,
# please fill this out your desired path
LOG_FILE = None
# This file is used to resume this mirror in case the script shuts down.
# It is required and needs to be writeable.
RESUME_FILE = "/var/tmp/zulip_codebase.state"
| apache-2.0 | -213,760,720,724,946,080 | 39.903226 | 79 | 0.767744 | false |
evfredericksen/pynacea | pynhost/pynhost/platforms/winconstants.py | 1 | 3710 | import ctypes as ct
class POINT(ct.Structure):
_fields_ = [("x", ct.c_ulong), ("y", ct.c_ulong)]
PUL = ct.POINTER(ct.c_ulong)
GMEM_DDESHARE = 0x2000
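# The structures below mirror the Win32 KEYBDINPUT, HARDWAREINPUT, MOUSEINPUT
# and INPUT structures (the layout used by Win32 input-injection APIs such as
# user32.SendInput for synthesizing keyboard and mouse events).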
class KEYBOARD_INPUT(ct.Structure):
_fields_ = [("wVk", ct.c_ushort),
("wScan", ct.c_ushort),
("dwFlags", ct.c_ulong),
("time", ct.c_ulong),
("dwExtraInfo", PUL)]
class HARDWARE_INPUT(ct.Structure):
_fields_ = [("uMsg", ct.c_ulong),
("wParamL", ct.c_short),
("wParamH", ct.c_ushort)]
class MOUSE_INPUT(ct.Structure):
_fields_ = [("dx", ct.c_long),
("dy", ct.c_long),
("mouseData", ct.c_ulong),
("dwFlags", ct.c_ulong),
("time",ct.c_ulong),
("dwExtraInfo", PUL)]
class INPUT_I(ct.Union):
_fields_ = [("ki", KEYBOARD_INPUT),
("mi", MOUSE_INPUT),
("hi", HARDWARE_INPUT)]
class INPUT(ct.Structure):
_fields_ = [("type", ct.c_ulong),
("ii", INPUT_I)]
WINDOWS_KEYCODES = {
'lmouse': 0x01,
'rmouse': 0x02,
'cancel': 0x03,
'mmouse': 0x04,
'x1mouse': 0x05,
'x2mouse': 0x06,
'back': 0x08,
'backspace': 0x08,
'tab': 0x09,
'clear': 0x0C,
'enter': 0x0D,
'return': 0x0D,
'\n': 0x0D,
'\r\n': 0x0D,
'shift': 0x10,
'ctrl': 0x11,
'control': 0x11,
'alt': 0x12,
'caps': 0x14,
'esc': 0x1B,
'escape': 0x1B,
' ': 0x20,
'pageup': 0x21,
'page_up': 0x21,
'pagedown': 0x22,
'page_down': 0x22,
'end': 0x23,
'home': 0x24,
'left': 0x25,
'up': 0x26,
'right': 0x27,
'down': 0x28,
'select': 0x29,
'print': 0x2A,
'execute': 0x2B,
'print_screen': 0x2C,
'insert': 0x2D,
'del': 0x2E,
'delete': 0x2E,
'help': 0X2F,
'0': 0x30,
'1': 0x31,
'2': 0x32,
'3': 0x33,
'4': 0x34,
'5': 0x35,
'6': 0x36,
'7': 0x37,
'8': 0x38,
'9': 0x39,
'a': 0x41,
'b': 0x42,
'c': 0x43,
'd': 0x44,
'e': 0x45,
'f': 0x46,
'g': 0x47,
'h': 0x48,
'i': 0x49,
'j': 0x4A,
'k': 0x4B,
'l': 0x4C,
'm': 0x4D,
'n': 0x4E,
'o': 0x4F,
'p': 0x50,
'q': 0x51,
'r': 0x52,
's': 0x53,
't': 0x54,
'u': 0x55,
'v': 0x56,
'w': 0x57,
'x': 0x58,
'y': 0x59,
'z': 0x5A,
'lwindows': 0x5B,
'rwindows': 0x5C,
'apps': 0x5D,
'sleep': 0x5F,
'numpad0': 0x60,
'numpad1': 0x61,
'numpad2': 0x62,
'numpad3': 0x63,
'numpad4': 0x64,
'numpad5': 0x65,
'numpad6': 0x66,
'numpad7': 0x67,
'numpad8': 0x68,
'numpad9': 0x69,
'f1': 0x70,
'f2': 0x71,
'f3': 0x72,
'f4': 0x73,
'f5': 0x74,
'f6': 0x75,
'f7': 0x76,
'f8': 0x77,
'f9': 0x78,
'f10': 0x79,
'f11': 0x7A,
'f12': 0x7B,
'f13': 0x7C,
'f14': 0x7D,
'f15': 0x7E,
'f16': 0x7F,
'f17': 0x80,
'f18': 0x81,
'f19': 0x82,
'f20': 0x83,
'f21': 0x84,
'f22': 0x85,
'f23': 0x86,
'f24': 0x87,
'numlock': 0x90,
'scroll': 0x91,
'lshift': 0xA0,
'rshift': 0xA1,
'mute': 0xAD,
'volume_up': 0xAE,
'volume_down': 0xAF,
'.': 0xBE,
',': 0xBC,
';': 0xBA,
"'": 0xDE,
'/': 0xBF,
'`': 0xC0,
'-': 0xBD,
'=': 0xBB,
'[': 0xDB,
'\\': 0xDC,
']': 0xDD,
}
WINDOWS_SHIFT_MAP = {
')': '0',
'!': '1',
'@': '2',
'#': '3',
'$': '4',
'%': '5',
'^': '6',
'&': '7',
'*': '8',
'(': '9',
'<': ',',
'>': '.',
'?': '/',
'"': "'",
':': ';',
'{': '[',
'}': ']',
'|': '\\',
'~': '`',
'_': '-',
'+': '=',
}
| mit | 3,160,820,212,732,816,400 | 17.928571 | 53 | 0.407547 | false |
navycrow/Sick-Beard | lib/hachoir_parser/game/blp.py | 90 | 11108 | """
Blizzard BLP Image File Parser
Author: Robert Xiao
Creation date: July 10 2007
- BLP1 File Format
http://magos.thejefffiles.com/War3ModelEditor/MagosBlpFormat.txt
- BLP2 File Format (Wikipedia)
http://en.wikipedia.org/wiki/.BLP
- S3TC (DXT1, 3, 5) Formats
http://en.wikipedia.org/wiki/S3_Texture_Compression
"""
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.field import String, UInt32, UInt8, Enum, FieldSet, RawBytes, GenericVector, Bit, Bits
from lib.hachoir_parser.parser import Parser
from lib.hachoir_parser.image.common import PaletteRGBA
from lib.hachoir_core.tools import alignValue
class PaletteIndex(UInt8):
def createDescription(self):
return "Palette index %i (%s)" % (self.value, self["/palette/color[%i]" % self.value].description)
class Generic2DArray(FieldSet):
def __init__(self, parent, name, width, height, item_class, row_name="row", item_name="item", *args, **kwargs):
FieldSet.__init__(self, parent, name, *args, **kwargs)
self.width = width
self.height = height
self.item_class = item_class
self.row_name = row_name
self.item_name = item_name
def createFields(self):
for i in xrange(self.height):
yield GenericVector(self, self.row_name+"[]", self.width, self.item_class, self.item_name)
class BLP1File(Parser):
MAGIC = "BLP1"
PARSER_TAGS = {
"id": "blp1",
"category": "game",
"file_ext": ("blp",),
"mime": (u"application/x-blp",), # TODO: real mime type???
"magic": ((MAGIC, 0),),
"min_size": 7*32, # 7 DWORDs start, incl. magic
"description": "Blizzard Image Format, version 1",
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != "BLP1":
return "Invalid magic"
return True
def createFields(self):
yield String(self, "magic", 4, "Signature (BLP1)")
yield Enum(UInt32(self, "compression"), {
0:"JPEG Compression",
1:"Uncompressed"})
yield UInt32(self, "flags")
yield UInt32(self, "width")
yield UInt32(self, "height")
yield Enum(UInt32(self, "type"), {
3:"Uncompressed Index List + Alpha List",
4:"Uncompressed Index List + Alpha List",
5:"Uncompressed Index List"})
yield UInt32(self, "subtype")
for i in xrange(16):
yield UInt32(self, "mipmap_offset[]")
for i in xrange(16):
yield UInt32(self, "mipmap_size[]")
compression = self["compression"].value
image_type = self["type"].value
width = self["width"].value
height = self["height"].value
if compression == 0: # JPEG Compression
yield UInt32(self, "jpeg_header_len")
yield RawBytes(self, "jpeg_header", self["jpeg_header_len"].value, "Shared JPEG Header")
else:
yield PaletteRGBA(self, "palette", 256)
offsets = self.array("mipmap_offset")
sizes = self.array("mipmap_size")
for i in xrange(16):
if not offsets[i].value or not sizes[i].value:
continue
padding = self.seekByte(offsets[i].value)
if padding:
yield padding
if compression == 0:
yield RawBytes(self, "mipmap[%i]" % i, sizes[i].value, "JPEG data, append to header to recover complete image")
elif compression == 1:
yield Generic2DArray(self, "mipmap_indexes[%i]" % i, width, height, PaletteIndex, "row", "index", "Indexes into the palette")
if image_type in (3, 4):
yield Generic2DArray(self, "mipmap_alphas[%i]" % i, width, height, UInt8, "row", "alpha", "Alpha values")
width /= 2
height /= 2
def interp_avg(data_low, data_high, n):
"""Interpolated averages. For example,
>>> list(interp_avg(1, 10, 3))
[4, 7]
"""
if isinstance(data_low, (int, long)):
for i in range(1, n):
yield (data_low * (n-i) + data_high * i) / n
else: # iterable
pairs = zip(data_low, data_high)
pair_iters = [interp_avg(x, y, n) for x, y in pairs]
for i in range(1, n):
yield [iter.next() for iter in pair_iters]
def color_name(data, bits):
"""Color names in #RRGGBB format, given the number of bits for each component."""
ret = ["#"]
for i in range(3):
ret.append("%02X" % (data[i] << (8-bits[i])))
return ''.join(ret)
class DXT1(FieldSet):
static_size = 64
def __init__(self, parent, name, dxt2_mode=False, *args, **kwargs):
"""with dxt2_mode on, this field will always use the four color model"""
FieldSet.__init__(self, parent, name, *args, **kwargs)
self.dxt2_mode = dxt2_mode
def createFields(self):
values = [[], []]
for i in (0, 1):
yield Bits(self, "blue[]", 5)
yield Bits(self, "green[]", 6)
yield Bits(self, "red[]", 5)
values[i] = [self["red[%i]" % i].value,
self["green[%i]" % i].value,
self["blue[%i]" % i].value]
if values[0] > values[1] or self.dxt2_mode:
values += interp_avg(values[0], values[1], 3)
else:
values += interp_avg(values[0], values[1], 2)
values.append(None) # transparent
for i in xrange(16):
pixel = Bits(self, "pixel[%i][%i]" % divmod(i, 4), 2)
color = values[pixel.value]
if color is None:
pixel._description = "Transparent"
else:
pixel._description = "RGB color: %s" % color_name(color, [5, 6, 5])
yield pixel
class DXT3Alpha(FieldSet):
static_size = 64
def createFields(self):
for i in xrange(16):
yield Bits(self, "alpha[%i][%i]" % divmod(i, 4), 4)
class DXT3(FieldSet):
static_size = 128
def createFields(self):
yield DXT3Alpha(self, "alpha", "Alpha Channel Data")
yield DXT1(self, "color", True, "Color Channel Data")
class DXT5Alpha(FieldSet):
static_size = 64
def createFields(self):
values = []
yield UInt8(self, "alpha_val[0]", "First alpha value")
values.append(self["alpha_val[0]"].value)
yield UInt8(self, "alpha_val[1]", "Second alpha value")
values.append(self["alpha_val[1]"].value)
if values[0] > values[1]:
values += interp_avg(values[0], values[1], 7)
else:
values += interp_avg(values[0], values[1], 5)
values += [0, 255]
for i in xrange(16):
pixel = Bits(self, "alpha[%i][%i]" % divmod(i, 4), 3)
alpha = values[pixel.value]
pixel._description = "Alpha value: %i" % alpha
yield pixel
class DXT5(FieldSet):
static_size = 128
def createFields(self):
yield DXT5Alpha(self, "alpha", "Alpha Channel Data")
yield DXT1(self, "color", True, "Color Channel Data")
class BLP2File(Parser):
MAGIC = "BLP2"
PARSER_TAGS = {
"id": "blp2",
"category": "game",
"file_ext": ("blp",),
"mime": (u"application/x-blp",),
"magic": ((MAGIC, 0),),
"min_size": 5*32, # 5 DWORDs start, incl. magic
"description": "Blizzard Image Format, version 2",
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != "BLP2":
return "Invalid magic"
return True
def createFields(self):
yield String(self, "magic", 4, "Signature (BLP2)")
yield Enum(UInt32(self, "compression", "Compression type"), {
0:"JPEG Compressed",
1:"Uncompressed or DXT/S3TC compressed"})
yield Enum(UInt8(self, "encoding", "Encoding type"), {
1:"Raw",
2:"DXT/S3TC Texture Compression (a.k.a. DirectX)"})
yield UInt8(self, "alpha_depth", "Alpha channel depth, in bits (0 = no alpha)")
yield Enum(UInt8(self, "alpha_encoding", "Encoding used for alpha channel"), {
0:"DXT1 alpha (0 or 1 bit alpha)",
1:"DXT3 alpha (4 bit alpha)",
7:"DXT5 alpha (8 bit interpolated alpha)"})
yield Enum(UInt8(self, "has_mips", "Are mip levels present?"), {
0:"No mip levels",
1:"Mip levels present; number of levels determined by image size"})
yield UInt32(self, "width", "Base image width")
yield UInt32(self, "height", "Base image height")
for i in xrange(16):
yield UInt32(self, "mipmap_offset[]")
for i in xrange(16):
yield UInt32(self, "mipmap_size[]")
yield PaletteRGBA(self, "palette", 256)
compression = self["compression"].value
encoding = self["encoding"].value
alpha_depth = self["alpha_depth"].value
alpha_encoding = self["alpha_encoding"].value
width = self["width"].value
height = self["height"].value
if compression == 0: # JPEG Compression
yield UInt32(self, "jpeg_header_len")
yield RawBytes(self, "jpeg_header", self["jpeg_header_len"].value, "Shared JPEG Header")
offsets = self.array("mipmap_offset")
sizes = self.array("mipmap_size")
for i in xrange(16):
if not offsets[i].value or not sizes[i].value:
continue
padding = self.seekByte(offsets[i].value)
if padding:
yield padding
if compression == 0:
yield RawBytes(self, "mipmap[%i]" % i, sizes[i].value, "JPEG data, append to header to recover complete image")
elif compression == 1 and encoding == 1:
yield Generic2DArray(self, "mipmap_indexes[%i]" % i, height, width, PaletteIndex, "row", "index", "Indexes into the palette")
if alpha_depth == 1:
yield GenericVector(self, "mipmap_alphas[%i]" % i, height, width, Bit, "row", "is_opaque", "Alpha values")
elif alpha_depth == 8:
yield GenericVector(self, "mipmap_alphas[%i]" % i, height, width, UInt8, "row", "alpha", "Alpha values")
elif compression == 1 and encoding == 2:
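                # S3TC/DXT data is organized in 4x4 pixel blocks, so each
                # mipmap dimension is padded up to a multiple of 4 before
                # computing the number of blocks per row/column.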
block_height = alignValue(height, 4) // 4
block_width = alignValue(width, 4) // 4
if alpha_depth in [0, 1] and alpha_encoding == 0:
yield Generic2DArray(self, "mipmap[%i]" % i, block_height, block_width, DXT1, "row", "block", "DXT1-compressed image blocks")
elif alpha_depth == 8 and alpha_encoding == 1:
yield Generic2DArray(self, "mipmap[%i]" % i, block_height, block_width, DXT3, "row", "block", "DXT3-compressed image blocks")
elif alpha_depth == 8 and alpha_encoding == 7:
yield Generic2DArray(self, "mipmap[%i]" % i, block_height, block_width, DXT5, "row", "block", "DXT5-compressed image blocks")
width /= 2
height /= 2
| gpl-3.0 | 6,848,537,926,750,104,000 | 40.29368 | 145 | 0.563918 | false |
zhoulingjun/django | django/views/decorators/csrf.py | 586 | 2202 | from functools import wraps
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.utils.decorators import available_attrs, decorator_from_middleware
csrf_protect = decorator_from_middleware(CsrfViewMiddleware)
csrf_protect.__name__ = "csrf_protect"
csrf_protect.__doc__ = """
This decorator adds CSRF protection in exactly the same way as
CsrfViewMiddleware, but it can be used on a per view basis. Using both, or
using the decorator multiple times, is harmless and efficient.
"""
class _EnsureCsrfToken(CsrfViewMiddleware):
# We need this to behave just like the CsrfViewMiddleware, but not reject
# requests or log warnings.
def _reject(self, request, reason):
return None
requires_csrf_token = decorator_from_middleware(_EnsureCsrfToken)
requires_csrf_token.__name__ = 'requires_csrf_token'
requires_csrf_token.__doc__ = """
Use this decorator on views that need a correct csrf_token available to
RequestContext, but without the CSRF protection that csrf_protect
enforces.
"""
class _EnsureCsrfCookie(CsrfViewMiddleware):
def _reject(self, request, reason):
return None
def process_view(self, request, callback, callback_args, callback_kwargs):
retval = super(_EnsureCsrfCookie, self).process_view(request, callback, callback_args, callback_kwargs)
# Forces process_response to send the cookie
get_token(request)
return retval
ensure_csrf_cookie = decorator_from_middleware(_EnsureCsrfCookie)
ensure_csrf_cookie.__name__ = 'ensure_csrf_cookie'
ensure_csrf_cookie.__doc__ = """
Use this decorator to ensure that a view sets a CSRF cookie, whether or not it
uses the csrf_token template tag, or the CsrfViewMiddleware is used.
"""
def csrf_exempt(view_func):
"""
Marks a view function as being exempt from the CSRF view protection.
"""
# We could just do view_func.csrf_exempt = True, but decorators
# are nicer if they don't have side-effects, so we return a new
# function.
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.csrf_exempt = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
| bsd-3-clause | 4,937,777,100,078,680,000 | 35.7 | 111 | 0.732516 | false |
vityurkiv/Ox | python/MooseDocs/database/items.py | 2 | 3034 | """
The following objects are designed to work with the Database class,
see Database.py for usage.
"""
import os
import re
import subprocess
import MooseDocs
from markdown.util import etree
import logging
log = logging.getLogger(__name__)
class DatabaseItem(object):
"""
Base class for database items.
Args:
filename[str]: The complete filename (supplied by Database object).
"""
output = os.path.dirname(subprocess.check_output(['git', 'rev-parse', '--git-dir'], stderr=subprocess.STDOUT))
def __init__(self, filename, **kwargs):
self._filename = os.path.abspath(filename)
self._rel_path = os.path.relpath(filename, self.output)
self._config = kwargs
def keys(self):
pass
def markdown(self):
pass
def html(self):
pass
def filename(self):
return self._filename
def content(self):
fid = open(self._filename, 'r')
content = fid.read()
fid.close()
return content
class MarkdownIncludeItem(DatabaseItem):
"""
An item that returns a markdown include string for use with the markdown_include extension.
"""
def keys(self):
yield self._filename
def markdown(self):
return '{{!{}!}}'.format(self._filename)
class RegexItem(DatabaseItem):
"""
    An item that creates keys based on a regex match.
"""
def __init__(self, filename, regex, **kwargs):
DatabaseItem.__init__(self, filename, **kwargs)
self._regex = re.compile(regex)
self._repo = os.path.join(kwargs.get('repo'), self._rel_path)
def keys(self):
"""
Return the keys for which this item will be stored in the database.
"""
keys = []
for match in re.finditer(self._regex, self.content()):
k = match.group('key')
if k not in keys:
keys.append(k)
return keys
class InputFileItem(RegexItem):
"""
    Returns a list item for an input file that matches (type = ...).
"""
def __init__(self, filename, **kwargs):
RegexItem.__init__(self, filename, r'type\s*=\s*(?P<key>\w+)\b', **kwargs)
def markdown(self):
return '* [{}]({})'.format(self._rel_path, self._repo)
def html(self):
el = etree.Element('li')
a = etree.SubElement(el, 'a')
a.set('href', self._repo)
a.text = self._rel_path
return el
class ChildClassItem(RegexItem):
"""
    Returns a list item for a header (.h) file that contains a base class.
"""
def __init__(self, filename, **kwargs):
super(ChildClassItem, self).__init__(filename, r'public\s*(?P<key>\w+)\b', **kwargs)
def html(self, element='li'):
c_filename = self._filename.replace('/include/', '/src/').replace('.h', '.C')
el = etree.Element(element)
a = etree.SubElement(el, 'a')
a.set('href', self._repo)
a.text = self._rel_path
if os.path.exists(c_filename):
etree.SubElement(el, 'br')
c_rel_path = self._rel_path.replace('/include/', '/src/').replace('.h', '.C')
c_repo = self._repo.replace('/include/', '/src/').replace('.h', '.C')
a = etree.SubElement(el, 'a')
a.set('href', c_repo)
a.text = c_rel_path
return el
| lgpl-2.1 | 198,607,282,374,954,050 | 24.495798 | 112 | 0.626566 | false |
mihailignatenko/erp | openerp/addons/base/tests/test_ir_sequence.py | 39 | 9375 | # -*- coding: utf-8 -*-
# Run with one of these commands:
# > OPENERP_ADDONS_PATH='../../addons/trunk' OPENERP_PORT=8069 \
# OPENERP_DATABASE=yy PYTHONPATH=. python tests/test_ir_sequence.py
# > OPENERP_ADDONS_PATH='../../addons/trunk' OPENERP_PORT=8069 \
# OPENERP_DATABASE=yy nosetests tests/test_ir_sequence.py
# > OPENERP_ADDONS_PATH='../../../addons/trunk' OPENERP_PORT=8069 \
# OPENERP_DATABASE=yy PYTHONPATH=../:. unit2 test_ir_sequence
# This assume an existing database.
import psycopg2
import psycopg2.errorcodes
import unittest2
import openerp
from openerp.tests import common
DB = common.DB
ADMIN_USER_ID = common.ADMIN_USER_ID
def registry(model):
return openerp.modules.registry.RegistryManager.get(DB)[model]
def cursor():
return openerp.modules.registry.RegistryManager.get(DB).cursor()
def drop_sequence(code):
cr = cursor()
for model in ['ir.sequence', 'ir.sequence.type']:
s = registry(model)
ids = s.search(cr, ADMIN_USER_ID, [('code', '=', code)])
s.unlink(cr, ADMIN_USER_ID, ids)
cr.commit()
cr.close()
class test_ir_sequence_standard(unittest2.TestCase):
""" A few tests for a 'Standard' (i.e. PostgreSQL) sequence. """
def test_ir_sequence_create(self):
""" Try to create a sequence object. """
cr = cursor()
d = dict(code='test_sequence_type', name='Test sequence type')
c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
assert c
d = dict(code='test_sequence_type', name='Test sequence')
c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
assert c
cr.commit()
cr.close()
def test_ir_sequence_search(self):
""" Try a search. """
cr = cursor()
ids = registry('ir.sequence').search(cr, ADMIN_USER_ID, [], {})
assert ids
cr.commit()
cr.close()
def test_ir_sequence_draw(self):
""" Try to draw a number. """
cr = cursor()
n = registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type', {})
assert n
cr.commit()
cr.close()
def test_ir_sequence_draw_twice(self):
""" Try to draw a number from two transactions. """
cr0 = cursor()
cr1 = cursor()
n0 = registry('ir.sequence').next_by_code(cr0, ADMIN_USER_ID, 'test_sequence_type', {})
assert n0
n1 = registry('ir.sequence').next_by_code(cr1, ADMIN_USER_ID, 'test_sequence_type', {})
assert n1
cr0.commit()
cr1.commit()
cr0.close()
cr1.close()
@classmethod
def tearDownClass(cls):
drop_sequence('test_sequence_type')
class test_ir_sequence_no_gap(unittest2.TestCase):
""" Copy of the previous tests for a 'No gap' sequence. """
def test_ir_sequence_create_no_gap(self):
""" Try to create a sequence object. """
cr = cursor()
d = dict(code='test_sequence_type_2', name='Test sequence type')
c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
assert c
d = dict(code='test_sequence_type_2', name='Test sequence',
implementation='no_gap')
c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
assert c
cr.commit()
cr.close()
def test_ir_sequence_draw_no_gap(self):
""" Try to draw a number. """
cr = cursor()
n = registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type_2', {})
assert n
cr.commit()
cr.close()
def test_ir_sequence_draw_twice_no_gap(self):
""" Try to draw a number from two transactions.
This is expected to not work.
"""
cr0 = cursor()
cr1 = cursor()
cr1._default_log_exceptions = False # Prevent logging a traceback
with self.assertRaises(psycopg2.OperationalError) as e:
n0 = registry('ir.sequence').next_by_code(cr0, ADMIN_USER_ID, 'test_sequence_type_2', {})
assert n0
n1 = registry('ir.sequence').next_by_code(cr1, ADMIN_USER_ID, 'test_sequence_type_2', {})
self.assertEqual(e.exception.pgcode, psycopg2.errorcodes.LOCK_NOT_AVAILABLE, msg="postgresql returned an incorrect errcode")
cr0.close()
cr1.close()
@classmethod
def tearDownClass(cls):
drop_sequence('test_sequence_type_2')
class test_ir_sequence_change_implementation(unittest2.TestCase):
""" Create sequence objects and change their ``implementation`` field. """
def test_ir_sequence_1_create(self):
""" Try to create a sequence object. """
cr = cursor()
d = dict(code='test_sequence_type_3', name='Test sequence type')
c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
assert c
d = dict(code='test_sequence_type_3', name='Test sequence')
c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
assert c
d = dict(code='test_sequence_type_4', name='Test sequence type')
c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
assert c
d = dict(code='test_sequence_type_4', name='Test sequence',
implementation='no_gap')
c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
assert c
cr.commit()
cr.close()
def test_ir_sequence_2_write(self):
cr = cursor()
ids = registry('ir.sequence').search(cr, ADMIN_USER_ID,
[('code', 'in', ['test_sequence_type_3', 'test_sequence_type_4'])], {})
registry('ir.sequence').write(cr, ADMIN_USER_ID, ids,
{'implementation': 'standard'}, {})
registry('ir.sequence').write(cr, ADMIN_USER_ID, ids,
{'implementation': 'no_gap'}, {})
cr.commit()
cr.close()
def test_ir_sequence_3_unlink(self):
cr = cursor()
ids = registry('ir.sequence').search(cr, ADMIN_USER_ID,
[('code', 'in', ['test_sequence_type_3', 'test_sequence_type_4'])], {})
registry('ir.sequence').unlink(cr, ADMIN_USER_ID, ids, {})
cr.commit()
cr.close()
@classmethod
def tearDownClass(cls):
drop_sequence('test_sequence_type_3')
drop_sequence('test_sequence_type_4')
class test_ir_sequence_generate(unittest2.TestCase):
""" Create sequence objects and generate some values. """
def test_ir_sequence_create(self):
""" Try to create a sequence object. """
cr = cursor()
d = dict(code='test_sequence_type_5', name='Test sequence type')
c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
assert c
d = dict(code='test_sequence_type_5', name='Test sequence')
c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
assert c
cr.commit()
cr.close()
cr = cursor()
f = lambda *a: registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type_5', {})
assert all(str(x) == f() for x in xrange(1,10))
cr.commit()
cr.close()
def test_ir_sequence_create_no_gap(self):
""" Try to create a sequence object. """
cr = cursor()
d = dict(code='test_sequence_type_6', name='Test sequence type')
c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
assert c
d = dict(code='test_sequence_type_6', name='Test sequence')
c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
assert c
cr.commit()
cr.close()
cr = cursor()
f = lambda *a: registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type_6', {})
assert all(str(x) == f() for x in xrange(1,10))
cr.commit()
cr.close()
@classmethod
def tearDownClass(cls):
drop_sequence('test_sequence_type_5')
drop_sequence('test_sequence_type_6')
class Test_ir_sequence_init(common.TransactionCase):
def test_00(self):
registry, cr, uid = self.registry, self.cr, self.uid
        # test that reading returns the correct number_next value (from the PostgreSQL sequence, not the ir_sequence table value)
sequence = registry('ir.sequence')
# first creation of sequence (normal)
values = {'number_next': 1,
'company_id': 1,
'padding': 4,
'number_increment': 1,
'implementation': 'standard',
'name': 'test-sequence-00'}
seq_id = sequence.create(cr, uid, values)
# Call get next 4 times
sequence.next_by_id(cr, uid, seq_id)
sequence.next_by_id(cr, uid, seq_id)
sequence.next_by_id(cr, uid, seq_id)
read_sequence = sequence.next_by_id(cr, uid, seq_id)
# Read the value of the current sequence
assert read_sequence == "0004", 'The actual sequence value must be 4. reading : %s' % read_sequence
# reset sequence to 1 by write method calling
sequence.write(cr, uid, [seq_id], {'number_next': 1})
# Read the value of the current sequence
read_sequence = sequence.next_by_id(cr, uid, seq_id)
assert read_sequence == "0001", 'The actual sequence value must be 1. reading : %s' % read_sequence
if __name__ == "__main__":
unittest2.main()
| agpl-3.0 | -7,385,507,765,631,423,000 | 36.955466 | 132 | 0.5872 | false |
PatrickOReilly/scikit-learn | examples/linear_model/plot_ridge_path.py | 55 | 2138 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations. In practice it is necessary to tune alpha
in such a way that a balance is maintained between both.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause | 4,764,854,656,955,683,000 | 30.910448 | 79 | 0.655753 | false |
gorjuce/odoo | addons/website_sale_options/models/sale_order.py | 237 | 2831 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.osv import osv, orm, fields
from openerp.tools.translate import _
class sale_order_line(osv.Model):
_inherit = "sale.order.line"
_columns = {
'linked_line_id': fields.many2one('sale.order.line', 'Linked Order Line', domain="[('order_id','!=',order_id)]", ondelete='cascade'),
'option_line_ids': fields.one2many('sale.order.line', 'linked_line_id', string='Options Linked'),
}
class sale_order(osv.Model):
_inherit = "sale.order"
def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs):
line_ids = super(sale_order, self)._cart_find_product_line(cr, uid, ids, product_id, line_id, context=context)
if line_id:
return line_ids
linked_line_id = kwargs.get('linked_line_id')
optional_product_ids = kwargs.get('optional_product_ids')
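        # Only merge quantities into an existing line when it has the same
        # parent (linked) line and the same set of selected optional products;
        # a different configuration must get its own order line.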
for so in self.browse(cr, uid, ids, context=context):
domain = [('id', 'in', line_ids)]
domain += linked_line_id and [('linked_line_id', '=', linked_line_id)] or [('linked_line_id', '=', False)]
if optional_product_ids:
domain += [('option_line_ids.product_id', '=', pid) for pid in optional_product_ids]
else:
domain += [('option_line_ids', '=', False)]
return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context)
def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs):
""" Add or set product quantity, add_qty can be negative """
value = super(sale_order, self)._cart_update(cr, uid, ids, product_id, line_id, add_qty, set_qty, context=context, **kwargs)
linked_line_id = kwargs.get('linked_line_id')
sol = self.pool.get('sale.order.line')
line = sol.browse(cr, SUPERUSER_ID, value.get('line_id'), context=context)
for so in self.browse(cr, uid, ids, context=context):
if linked_line_id and linked_line_id in map(int,so.order_line):
linked = sol.browse(cr, SUPERUSER_ID, linked_line_id, context=context)
line.write({
"name": _("%s\nOption for: %s") % (line.name, linked.product_id.name_get()[0][1]),
"linked_line_id": linked_line_id
})
# select linked product
option_ids = [l for l in so.order_line if l.linked_line_id.id == line.id]
# update line
for l in option_ids:
super(sale_order, self)._cart_update(cr, uid, ids, l.product_id.id, l.id, add_qty, set_qty, context=context, **kwargs)
value['option_ids'] = [l.id for l in option_ids]
return value
| agpl-3.0 | 3,673,970,409,678,653,400 | 47.810345 | 141 | 0.588131 | false |
chrissimpkins/hsh | setup.py | 1 | 2256 | import os
import re
from setuptools import setup, find_packages
def docs_read(fname):
return open(os.path.join(os.path.dirname(__file__), 'docs', fname)).read()
def version_read():
settings_file = open(os.path.join(os.path.dirname(__file__), 'lib', 'hsh', 'settings.py')).read()
major_regex = """major_version\s*?=\s*?["']{1}(\d+)["']{1}"""
minor_regex = """minor_version\s*?=\s*?["']{1}(\d+)["']{1}"""
patch_regex = """patch_version\s*?=\s*?["']{1}(\d+)["']{1}"""
major_match = re.search(major_regex, settings_file)
minor_match = re.search(minor_regex, settings_file)
patch_match = re.search(patch_regex, settings_file)
major_version = major_match.group(1)
minor_version = minor_match.group(1)
patch_version = patch_match.group(1)
if len(major_version) == 0:
major_version = 0
if len(minor_version) == 0:
minor_version = 0
if len(patch_version) == 0:
patch_version = 0
return major_version + "." + minor_version + "." + patch_version
setup(
name='hsh',
version=version_read(),
description='Simple file hash digests and file integrity checks',
long_description=(docs_read('README.rst')),
url='https://github.com/chrissimpkins/hsh',
license='MIT license',
author='Christopher Simpkins',
author_email='',
platforms=['any'],
entry_points = {
'console_scripts': [
'hsh = hsh.app:main'
],
},
packages=find_packages("lib"),
package_dir={'': 'lib'},
install_requires=['commandlines'],
keywords='file,hash,hash digest,checksum,file comparison,file integrity,file checksum,file check,SHA,MD5,SHA1,SHA224,SHA256,SHA384,SHA512',
include_package_data=True,
classifiers=[
'Intended Audience :: End Users/Desktop',
'Development Status :: 5 - Production/Stable',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: Microsoft :: Windows'
],
)
| mit | -2,023,645,155,544,529,400 | 34.809524 | 143 | 0.606383 | false |
vicky2135/lucious | oscar/lib/python2.7/site-packages/django/db/backends/postgresql/introspection.py | 51 | 10204 | from __future__ import unicode_literals
from collections import namedtuple
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.encoding import force_text
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('default',))
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type codes to Django Field types.
data_types_reverse = {
16: 'BooleanField',
17: 'BinaryField',
20: 'BigIntegerField',
21: 'SmallIntegerField',
23: 'IntegerField',
25: 'TextField',
700: 'FloatField',
701: 'FloatField',
869: 'GenericIPAddressField',
1042: 'CharField', # blank-padded
1043: 'CharField',
1082: 'DateField',
1083: 'TimeField',
1114: 'DateTimeField',
1184: 'DateTimeField',
1266: 'TimeField',
1700: 'DecimalField',
}
ignored_tables = []
_get_indexes_query = """
SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND attr.attrelid = c.oid
AND attr.attnum = idx.indkey[0]
AND c.relname = %s"""
def get_field_type(self, data_type, description):
field_type = super(DatabaseIntrospection, self).get_field_type(data_type, description)
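        # Integer columns whose default is a nextval(...) sequence call are
        # serial/bigserial columns, so expose them as auto-incrementing fields.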
if description.default and 'nextval' in description.default:
if field_type == 'IntegerField':
return 'AutoField'
elif field_type == 'BigIntegerField':
return 'BigAutoField'
return field_type
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("""
SELECT c.relname, c.relkind
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1]))
for row in cursor.fetchall()
if row[0] not in self.ignored_tables]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
# As cursor.description does not return reliably the nullable property,
# we have to query the information_schema (#7783)
cursor.execute("""
SELECT column_name, is_nullable, column_default
FROM information_schema.columns
WHERE table_name = %s""", [table_name])
field_map = {line[0]: line[1:] for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [
FieldInfo(*(
(force_text(line[0]),) +
line[1:6] +
(field_map[force_text(line[0])][0] == 'YES', field_map[force_text(line[0])][1])
)) for line in cursor.description
]
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
cursor.execute("""
SELECT c2.relname, a1.attname, a2.attname
FROM pg_constraint con
LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
WHERE c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
relations[row[1]] = (row[2], row[0])
return relations
def get_key_columns(self, cursor, table_name):
key_columns = []
cursor.execute("""
SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
FROM information_schema.constraint_column_usage ccu
LEFT JOIN information_schema.key_column_usage kcu
ON ccu.constraint_catalog = kcu.constraint_catalog
AND ccu.constraint_schema = kcu.constraint_schema
AND ccu.constraint_name = kcu.constraint_name
LEFT JOIN information_schema.table_constraints tc
ON ccu.constraint_catalog = tc.constraint_catalog
AND ccu.constraint_schema = tc.constraint_schema
AND ccu.constraint_name = tc.constraint_name
WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
# This query retrieves each index on the given table, including the
# first associated field name
cursor.execute(self._get_indexes_query, [table_name])
indexes = {}
for row in cursor.fetchall():
# row[1] (idx.indkey) is stored in the DB as an array. It comes out as
# a string of space-separated integers. This designates the field
# indexes (1-based) of the fields that have indexes on the table.
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
if row[0] not in indexes:
indexes[row[0]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[3]:
indexes[row[0]]['primary_key'] = True
if row[2]:
indexes[row[0]]['unique'] = True
return indexes
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Loop over the key table, collecting things as constraints
# This will get PKs, FKs, and uniques, but not CHECK
cursor.execute("""
SELECT
kc.constraint_name,
kc.column_name,
c.constraint_type,
array(SELECT table_name::text || '.' || column_name::text
FROM information_schema.constraint_column_usage
WHERE constraint_name = kc.constraint_name)
FROM information_schema.key_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
kc.table_schema = %s AND
kc.table_name = %s
ORDER BY kc.ordinal_position ASC
""", ["public", table_name])
for constraint, column, kind, used_cols in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": kind.lower() == "primary key",
"unique": kind.lower() in ["primary key", "unique"],
"foreign_key": tuple(used_cols[0].split(".", 1)) if kind.lower() == "foreign key" else None,
"check": False,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get CHECK constraint columns
cursor.execute("""
SELECT kc.constraint_name, kc.column_name
FROM information_schema.constraint_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
c.constraint_type = 'CHECK' AND
kc.table_schema = %s AND
kc.table_name = %s
""", ["public", table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": True,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get indexes
cursor.execute("""
SELECT
c2.relname,
ARRAY(
SELECT (SELECT attname FROM pg_catalog.pg_attribute WHERE attnum = i AND attrelid = c.oid)
FROM unnest(idx.indkey) i
),
idx.indisunique,
idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND c.relname = %s
""", [table_name])
for index, columns, unique, primary in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": list(columns),
"primary_key": primary,
"unique": unique,
"foreign_key": None,
"check": False,
"index": True,
}
return constraints
| bsd-3-clause | -6,323,520,865,439,469,000 | 42.237288 | 112 | 0.551646 | false |
yyoshinori/CastlePortal | public/assets/adminlte/bower_components/jvectormap/converter/converter.py | 129 | 10451 | #
# jVectorMap version 2.0.4
#
# Copyright 2011-2013, Kirill Lebedev
#
import sys
import shapely.geometry
import shapely.wkb
import shapely.affinity
from osgeo import ogr
from osgeo import osr
import json
import codecs
import copy
class Map:
def __init__(self, name, language):
self.paths = {}
self.name = name
self.language = language
self.width = 0
self.height = 0
self.bbox = []
def addPath(self, path, code, name):
self.paths[code] = {"path": path, "name": name}
def getJSCode(self):
map = {"paths": self.paths, "width": self.width, "height": self.height, "insets": self.insets, "projection": self.projection}
return "jQuery.fn.vectorMap('addMap', '"+self.name+"_"+self.projection['type']+"_"+self.language+"',"+json.dumps(map)+');'
class Converter:
def __init__(self, config):
args = {
'buffer_distance': -0.4,
'simplify_tolerance': 0.2,
'longitude0': 0,
'projection': 'mill',
'name': 'world',
'width': 900,
'language': 'en',
'precision': 2,
'insets': []
}
args.update(config)
self.map = Map(args['name'], args.get('language'))
if args.get('sources'):
self.sources = args['sources']
else:
self.sources = [{
'input_file': args.get('input_file'),
'where': args.get('where'),
'name_field': args.get('name_field'),
'code_field': args.get('code_field'),
'input_file_encoding': args.get('input_file_encoding')
}]
default_source = {
'where': '',
'name_field': 0,
'code_field': 1,
'input_file_encoding': 'iso-8859-1'
}
for index in range(len(self.sources)):
for key in default_source:
if self.sources[index].get(key) is None:
self.sources[index][key] = default_source[key]
self.features = {}
self.width = args.get('width')
self.minimal_area = args.get('minimal_area')
self.longitude0 = float(args.get('longitude0'))
self.projection = args.get('projection')
self.precision = args.get('precision')
self.buffer_distance = args.get('buffer_distance')
self.simplify_tolerance = args.get('simplify_tolerance')
self.for_each = args.get('for_each')
self.emulate_longitude0 = args.get('emulate_longitude0')
if args.get('emulate_longitude0') is None and (self.projection == 'merc' or self.projection =='mill') and self.longitude0 != 0:
self.emulate_longitude0 = True
if args.get('viewport'):
self.viewport = map(lambda s: float(s), args.get('viewport').split(' '))
else:
self.viewport = False
# spatial reference to convert to
self.spatialRef = osr.SpatialReference()
projString = '+proj='+str(self.projection)+' +a=6381372 +b=6381372 +lat_0=0'
if not self.emulate_longitude0:
projString += ' +lon_0='+str(self.longitude0)
self.spatialRef.ImportFromProj4(projString)
# handle map insets
if args.get('insets'):
self.insets = args.get('insets')
else:
self.insets = []
def loadData(self):
for sourceConfig in self.sources:
self.loadDataSource( sourceConfig )
def loadDataSource(self, sourceConfig):
source = ogr.Open( sourceConfig['input_file'] )
layer = source.GetLayer(0)
layer.SetAttributeFilter( sourceConfig['where'].encode('ascii') )
self.viewportRect = False
transformation = osr.CoordinateTransformation( layer.GetSpatialRef(), self.spatialRef )
if self.viewport:
layer.SetSpatialFilterRect( *self.viewport )
point1 = transformation.TransformPoint(self.viewport[0], self.viewport[1])
point2 = transformation.TransformPoint(self.viewport[2], self.viewport[3])
self.viewportRect = shapely.geometry.box(point1[0], point1[1], point2[0], point2[1])
layer.ResetReading()
codes = {}
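    # When a non-zero central meridian is emulated, the world is split at the
    # shifted anti-meridian into two bounding boxes; each geometry is
    # intersected with both parts, which are then translated past each other
    # below so the map ends up centred on longitude0.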
if self.emulate_longitude0:
meridian = -180 + self.longitude0
p1 = transformation.TransformPoint(-180, 89)
p2 = transformation.TransformPoint(meridian, -89)
left = shapely.geometry.box(p1[0], p1[1], p2[0], p2[1])
p3 = transformation.TransformPoint(meridian, 89)
p4 = transformation.TransformPoint(180, -89)
right = shapely.geometry.box(p3[0], p3[1], p4[0], p4[1])
# load features
nextCode = 0
for feature in layer:
geometry = feature.GetGeometryRef()
geometryType = geometry.GetGeometryType()
if geometryType == ogr.wkbPolygon or geometryType == ogr.wkbMultiPolygon:
geometry.TransformTo( self.spatialRef )
shapelyGeometry = shapely.wkb.loads( geometry.ExportToWkb() )
if not shapelyGeometry.is_valid:
shapelyGeometry = shapelyGeometry.buffer(0, 1)
if self.emulate_longitude0:
leftPart = shapely.affinity.translate(shapelyGeometry.intersection(left), p4[0] - p3[0])
rightPart = shapely.affinity.translate(shapelyGeometry.intersection(right), p1[0] - p2[0])
shapelyGeometry = leftPart.buffer(0.1, 1).union(rightPart.buffer(0.1, 1)).buffer(-0.1, 1)
if not shapelyGeometry.is_valid:
shapelyGeometry = shapelyGeometry.buffer(0, 1)
shapelyGeometry = self.applyFilters(shapelyGeometry)
if shapelyGeometry:
name = feature.GetFieldAsString(str(sourceConfig.get('name_field'))).decode(sourceConfig.get('input_file_encoding'))
code = feature.GetFieldAsString(str(sourceConfig.get('code_field'))).decode(sourceConfig.get('input_file_encoding'))
if code in codes:
code = '_' + str(nextCode)
nextCode += 1
codes[code] = name
self.features[code] = {"geometry": shapelyGeometry, "name": name, "code": code}
else:
        raise Exception, "Wrong geometry type: " + str(geometryType)
def convert(self, outputFile):
print 'Generating '+outputFile
self.loadData()
codes = self.features.keys()
main_codes = copy.copy(codes)
self.map.insets = []
envelope = []
for inset in self.insets:
insetBbox = self.renderMapInset(inset['codes'], inset['left'], inset['top'], inset['width'])
insetHeight = (insetBbox[3] - insetBbox[1]) * (inset['width'] / (insetBbox[2] - insetBbox[0]))
self.map.insets.append({
"bbox": [{"x": insetBbox[0], "y": -insetBbox[3]}, {"x": insetBbox[2], "y": -insetBbox[1]}],
"left": inset['left'],
"top": inset['top'],
"width": inset['width'],
"height": insetHeight
})
envelope.append(
shapely.geometry.box(
inset['left'], inset['top'], inset['left'] + inset['width'], inset['top'] + insetHeight
)
)
for code in inset['codes']:
main_codes.remove(code)
insetBbox = self.renderMapInset(main_codes, 0, 0, self.width)
insetHeight = (insetBbox[3] - insetBbox[1]) * (self.width / (insetBbox[2] - insetBbox[0]))
envelope.append( shapely.geometry.box( 0, 0, self.width, insetHeight ) )
mapBbox = shapely.geometry.MultiPolygon( envelope ).bounds
self.map.width = mapBbox[2] - mapBbox[0]
self.map.height = mapBbox[3] - mapBbox[1]
self.map.insets.append({
"bbox": [{"x": insetBbox[0], "y": -insetBbox[3]}, {"x": insetBbox[2], "y": -insetBbox[1]}],
"left": 0,
"top": 0,
"width": self.width,
"height": insetHeight
})
self.map.projection = {"type": self.projection, "centralMeridian": float(self.longitude0)}
open(outputFile, 'w').write( self.map.getJSCode() )
if self.for_each is not None:
for code in codes:
childConfig = copy.deepcopy(self.for_each)
for param in ('input_file', 'output_file', 'where', 'name'):
childConfig[param] = childConfig[param].replace('{{code}}', code.lower())
converter = Converter(childConfig)
converter.convert(childConfig['output_file'])
def renderMapInset(self, codes, left, top, width):
envelope = []
for code in codes:
envelope.append( self.features[code]['geometry'].envelope )
bbox = shapely.geometry.MultiPolygon( envelope ).bounds
scale = (bbox[2]-bbox[0]) / width
# generate SVG paths
for code in codes:
feature = self.features[code]
geometry = feature['geometry']
if self.buffer_distance:
geometry = geometry.buffer(self.buffer_distance*scale, 1)
if geometry.is_empty:
continue
if self.simplify_tolerance:
geometry = geometry.simplify(self.simplify_tolerance*scale, preserve_topology=True)
if isinstance(geometry, shapely.geometry.multipolygon.MultiPolygon):
polygons = geometry.geoms
else:
polygons = [geometry]
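      # Emit an SVG path: 'M' jumps to the first point of every ring in
      # absolute inset coordinates, 'l' adds relative segments and 'Z' closes
      # the ring. The y coordinate is flipped because SVG's y axis points down.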
path = ''
for polygon in polygons:
rings = []
rings.append(polygon.exterior)
rings.extend(polygon.interiors)
for ring in rings:
for pointIndex in range( len(ring.coords) ):
point = ring.coords[pointIndex]
if pointIndex == 0:
path += 'M'+str( round( (point[0]-bbox[0]) / scale + left, self.precision) )
path += ','+str( round( (bbox[3] - point[1]) / scale + top, self.precision) )
else:
path += 'l' + str( round(point[0]/scale - ring.coords[pointIndex-1][0]/scale, self.precision) )
path += ',' + str( round(ring.coords[pointIndex-1][1]/scale - point[1]/scale, self.precision) )
path += 'Z'
self.map.addPath(path, feature['code'], feature['name'])
return bbox
def applyFilters(self, geometry):
if self.viewportRect:
geometry = self.filterByViewport(geometry)
if not geometry:
return False
if self.minimal_area:
geometry = self.filterByMinimalArea(geometry)
if not geometry:
return False
return geometry
def filterByViewport(self, geometry):
try:
return geometry.intersection(self.viewportRect)
except shapely.geos.TopologicalError:
return False
def filterByMinimalArea(self, geometry):
if isinstance(geometry, shapely.geometry.multipolygon.MultiPolygon):
polygons = geometry.geoms
else:
polygons = [geometry]
polygons = filter(lambda p: p.area > self.minimal_area, polygons)
return shapely.geometry.multipolygon.MultiPolygon(polygons)
args = {}
if len(sys.argv) > 1:
paramsJson = open(sys.argv[1], 'r').read()
else:
paramsJson = sys.stdin.read()
paramsJson = json.loads(paramsJson)
converter = Converter(paramsJson)
converter.convert(paramsJson['output_file'])
| mit | -5,114,847,656,760,458,000 | 33.953177 | 131 | 0.632188 | false |
winndows/cinder | cinder/openstack/common/scheduler/filters/json_filter.py | 22 | 4914 | # Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from oslo_serialization import jsonutils
import six
from cinder.openstack.common.scheduler import filters
class JsonFilter(filters.BaseHostFilter):
"""Host Filter to allow simple JSON-based grammar for
selecting hosts.
"""
def _op_compare(self, args, op):
"""Returns True if the specified operator can successfully
compare the first item in the args with all the rest. Will
return False if only one item is in the list.
"""
if len(args) < 2:
return False
if op is operator.contains:
bad = args[0] not in args[1:]
else:
bad = [arg for arg in args[1:]
if not op(args[0], arg)]
return not bool(bad)
def _equals(self, args):
"""First term is == all the other terms."""
return self._op_compare(args, operator.eq)
def _less_than(self, args):
"""First term is < all the other terms."""
return self._op_compare(args, operator.lt)
def _greater_than(self, args):
"""First term is > all the other terms."""
return self._op_compare(args, operator.gt)
def _in(self, args):
"""First term is in set of remaining terms."""
return self._op_compare(args, operator.contains)
def _less_than_equal(self, args):
"""First term is <= all the other terms."""
return self._op_compare(args, operator.le)
def _greater_than_equal(self, args):
"""First term is >= all the other terms."""
return self._op_compare(args, operator.ge)
def _not(self, args):
"""Flip each of the arguments."""
return [not arg for arg in args]
def _or(self, args):
"""True if any arg is True."""
return any(args)
def _and(self, args):
"""True if all args are True."""
return all(args)
commands = {
'=': _equals,
'<': _less_than,
'>': _greater_than,
'in': _in,
'<=': _less_than_equal,
'>=': _greater_than_equal,
'not': _not,
'or': _or,
'and': _and,
}
def _parse_string(self, string, host_state):
"""Strings prefixed with $ are capability lookups in the
form '$variable' where 'variable' is an attribute in the
HostState class. If $variable is a dictionary, you may
use: $variable.dictkey
"""
if not string:
return None
if not string.startswith("$"):
return string
path = string[1:].split(".")
obj = getattr(host_state, path[0], None)
if obj is None:
return None
for item in path[1:]:
obj = obj.get(item)
if obj is None:
return None
return obj
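    # Illustrative resolution (attribute names are assumptions): "$free_ram_mb"
    # maps to host_state.free_ram_mb, while "$capabilities.hypervisor_type" maps
    # to host_state.capabilities['hypervisor_type'] via the dotted lookup above.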
def _process_filter(self, query, host_state):
"""Recursively parse the query structure."""
if not query:
return True
cmd = query[0]
method = self.commands[cmd]
cooked_args = []
for arg in query[1:]:
if isinstance(arg, list):
arg = self._process_filter(arg, host_state)
elif isinstance(arg, six.string_types):
arg = self._parse_string(arg, host_state)
if arg is not None:
cooked_args.append(arg)
result = method(self, cooked_args)
return result
def host_passes(self, host_state, filter_properties):
"""Return a list of hosts that can fulfill the requirements
specified in the query.
"""
# TODO(zhiteng) Add description for filter_properties structure
# and scheduler_hints.
try:
query = filter_properties['scheduler_hints']['query']
except KeyError:
query = None
if not query:
return True
# NOTE(comstud): Not checking capabilities or service for
# enabled/disabled so that a provided json filter can decide
result = self._process_filter(jsonutils.loads(query), host_state)
if isinstance(result, list):
# If any succeeded, include the host
result = any(result)
        if result:
            # The query matched; this host passes the filter.
            return True
return False
| apache-2.0 | 4,159,557,113,865,763,300 | 31.543046 | 78 | 0.580383 | false |
cneill/barbican | barbican/queue/keystone_listener.py | 2 | 6732 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Server-side (i.e. worker side) Keystone notification related classes and logic.
"""
import oslo_messaging
from oslo_service import service
from barbican.common import utils
from barbican import queue
from barbican.tasks import keystone_consumer
LOG = utils.getLogger(__name__)
class NotificationTask(object):
"""Task which exposes the API for consuming priority based notifications.
The Oslo notification framework delivers notifications based on priority to
matching callback APIs as defined in its notification listener endpoint
list.
Currently from Keystone perspective, `info` API is sufficient as Keystone
send notifications at `info` priority ONLY. Other priority level APIs
(warn, error, critical, audit, debug) are not needed here.
"""
def __init__(self, conf):
self.conf = conf
def info(self, ctxt, publisher_id, event_type, payload, metadata):
"""Receives notification at info level."""
return self.process_event(ctxt, publisher_id, event_type, payload,
metadata)
def process_event(self, ctxt, publisher_id, event_type, payload, metadata):
"""Process Keystone Event based on event_type and payload data.
        Parses notification data to identify whether the event is related to a
        project deletion or not. In the case of a project delete event, it passes
        the project_id to the KeystoneEventConsumer logic for further processing.
        The Barbican service is not interested in other events, so in that case
        it just returns None as acknowledgment.
        The messaging server considers a message acknowledged when the return
        value is either `oslo_messaging.NotificationResult.HANDLED` or None.
        In case of successful processing of the notification, the returned value
        is `oslo_messaging.NotificationResult.HANDLED`.
        In case of a notification processing error, the returned value is
        `oslo_messaging.NotificationResult.REQUEUE` when the transport supports
        this feature, otherwise `oslo_messaging.NotificationResult.HANDLED` is
        returned.
"""
LOG.debug("Input keystone event publisher_id = %s", publisher_id)
LOG.debug("Input keystone event payload = %s", payload)
LOG.debug("Input keystone event type = %s", event_type)
LOG.debug("Input keystone event metadata = %s", metadata)
project_id = self._parse_payload_for_project_id(payload)
resource_type, operation_type = self._parse_event_type(event_type)
LOG.debug('Keystone Event: resource type={0}, operation type={1}, '
'keystone id={2}'.format(resource_type, operation_type,
project_id))
if (project_id and resource_type == 'project' and
operation_type == 'deleted'):
task = keystone_consumer.KeystoneEventConsumer()
try:
task.process(project_id=project_id,
resource_type=resource_type,
operation_type=operation_type)
return oslo_messaging.NotificationResult.HANDLED
except Exception:
# No need to log message here as task process method has
# already logged it
# TODO(john-wood-w) This really should be retried on a
# schedule and really only if the database is down, not
# for any exception otherwise tasks will be re-queued
# repeatedly. Revisit as part of the retry task work later.
if self.conf.keystone_notifications.allow_requeue:
return oslo_messaging.NotificationResult.REQUEUE
else:
return oslo_messaging.NotificationResult.HANDLED
return None # in case event is not project delete
def _parse_event_type(self, event_type):
"""Parses event type provided as part of notification.
Parses to identify what operation is performed and on which Keystone
resource.
A few event type sample values are provided below::
identity.project.deleted
identity.role.created
identity.domain.updated
identity.authenticate
"""
resource_type = None
operation_type = None
if event_type:
type_list = event_type.split('.')
# 2 is min. number of dot delimiters expected in event_type value.
if len(type_list) > 2:
resource_type = type_list[-2].lower()
operation_type = type_list[-1].lower()
return resource_type, operation_type
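    # For example, 'identity.project.deleted' parses to ('project', 'deleted'),
    # while 'identity.authenticate' has too few components and yields (None, None).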
def _parse_payload_for_project_id(self, payload_s):
"""Gets project resource identifier from payload
Sample payload is provided below::
{'resource_info': u'2b99a94ad02741978e613fb52dd1f4cd'}
"""
if payload_s:
return payload_s.get('resource_info')
class MessageServer(NotificationTask, service.Service):
"""Server to retrieve messages from queue used by Keystone.
This is used to send public notifications for openstack service
consumption.
This server is an Oslo notification server that exposes set of standard
APIs for events consumption based on event priority.
    Some of the messaging server configuration needs to match the Keystone
    deployment's notification configuration, e.g. exchange name and topic name.
"""
def __init__(self, conf):
pool_size = conf.keystone_notifications.thread_pool_size
NotificationTask.__init__(self, conf)
service.Service.__init__(self, threads=pool_size)
self.target = queue.get_notification_target()
self._msg_server = queue.get_notification_server(targets=[self.target],
endpoints=[self])
def start(self):
self._msg_server.start()
super(MessageServer, self).start()
def stop(self):
super(MessageServer, self).stop()
self._msg_server.stop()
queue.cleanup()
| apache-2.0 | -2,886,367,449,153,176,000 | 39.8 | 79 | 0.653446 | false |
emergebtc/muddery | evennia/evennia/players/players.py | 1 | 26439 | """
Typeclass for Player objects
Note that this object is primarily intended to
store OOC information, not game info! This
object represents the actual user (not their
character) and has NO actual presence in the
game world (this is handled by the associated
character object, so you should customize that
instead for most things).
"""
from django.conf import settings
from django.utils import timezone
from evennia.typeclasses.models import TypeclassBase
from evennia.players.manager import PlayerManager
from evennia.players.models import PlayerDB
from evennia.comms.models import ChannelDB
from evennia.commands import cmdhandler
from evennia.utils import logger
from evennia.utils.utils import (lazy_property, to_str,
make_iter, to_unicode,
variable_from_module)
from evennia.typeclasses.attributes import NickHandler
from evennia.scripts.scripthandler import ScriptHandler
from evennia.commands.cmdsethandler import CmdSetHandler
from django.utils.translation import ugettext as _
__all__ = ("DefaultPlayer",)
_SESSIONS = None
_AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1))
_MULTISESSION_MODE = settings.MULTISESSION_MODE
_CMDSET_PLAYER = settings.CMDSET_PLAYER
_CONNECT_CHANNEL = None
class DefaultPlayer(PlayerDB):
"""
This is the base Typeclass for all Players. Players represent
the person playing the game and tracks account info, password
etc. They are OOC entities without presence in-game. A Player
can connect to a Character Object in order to "enter" the
game.
Player Typeclass API:
* Available properties (only available on initiated typeclass objects)
key (string) - name of player
name (string)- wrapper for user.username
aliases (list of strings) - aliases to the object. Will be saved to
database as AliasDB entries but returned as strings.
dbref (int, read-only) - unique #id-number. Also "id" can be used.
date_created (string) - time stamp of object creation
permissions (list of strings) - list of permission strings
user (User, read-only) - django User authorization object
obj (Object) - game object controlled by player. 'character' can also
be used.
sessions (list of Sessions) - sessions connected to this player
is_superuser (bool, read-only) - if the connected user is a superuser
* Handlers
locks - lock-handler: use locks.add() to add new lock strings
db - attribute-handler: store/retrieve database attributes on this
self.db.myattr=val, val=self.db.myattr
ndb - non-persistent attribute handler: same as db but does not
create a database entry when storing data
scripts - script-handler. Add new scripts to object with scripts.add()
cmdset - cmdset-handler. Use cmdset.add() to add new cmdsets to object
nicks - nick-handler. New nicks with nicks.add().
* Helper methods
msg(outgoing_string, from_obj=None, **kwargs)
#swap_character(new_character, delete_old_character=False)
execute_cmd(raw_string)
search(ostring, global_search=False, attribute_name=None,
use_nicks=False, location=None,
ignore_errors=False, player=False)
is_typeclass(typeclass, exact=False)
swap_typeclass(new_typeclass, clean_attributes=False, no_default=True)
access(accessing_obj, access_type='read', default=False, no_superuser_bypass=False)
check_permstring(permstring)
* Hook methods
basetype_setup()
at_player_creation()
- note that the following hooks are also found on Objects and are
usually handled on the character level:
at_init()
at_access()
at_cmdset_get(**kwargs)
at_first_login()
at_post_login(sessid=None)
at_disconnect()
at_message_receive()
at_message_send()
at_server_reload()
at_server_shutdown()
"""
__metaclass__ = TypeclassBase
objects = PlayerManager()
# properties
@lazy_property
def cmdset(self):
return CmdSetHandler(self, True)
@lazy_property
def scripts(self):
return ScriptHandler(self)
@lazy_property
def nicks(self):
return NickHandler(self)
# session-related methods
def get_session(self, sessid):
"""
Return session with given sessid connected to this player.
note that the sessionhandler also accepts sessid as an iterable.
"""
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
return _SESSIONS.session_from_player(self, sessid)
def get_all_sessions(self):
"Return all sessions connected to this player"
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
return _SESSIONS.sessions_from_player(self)
sessions = property(get_all_sessions) # alias shortcut
def disconnect_session_from_player(self, sessid):
"""
Access method for disconnecting a given session from the player
(connection happens automatically in the sessionhandler)
"""
# this should only be one value, loop just to make sure to
# clean everything
sessions = (session for session in self.get_all_sessions()
if session.sessid == sessid)
for session in sessions:
# this will also trigger unpuppeting
session.sessionhandler.disconnect(session)
# puppeting operations
def puppet_object(self, sessid, obj):
"""
Use the given session to control (puppet) the given object (usually
a Character type).
Args:
sessid (int): session id of session to connect
obj (Object): the object to start puppeting
Raises:
RuntimeError with message if puppeting is not possible
        Returns None on success; failure is signalled by the RuntimeError above.
"""
# safety checks
if not obj:
raise RuntimeError("Object not found")
session = self.get_session(sessid)
if not session:
raise RuntimeError("Session not found")
if self.get_puppet(sessid) == obj:
# already puppeting this object
raise RuntimeError("You are already puppeting this object.")
if not obj.access(self, 'puppet'):
# no access
raise RuntimeError("You don't have permission to puppet '%s'." % obj.key)
if obj.player:
# object already puppeted
if obj.player == self:
if obj.sessid.count():
# we may take over another of our sessions
# output messages to the affected sessions
if _MULTISESSION_MODE in (1, 3):
txt1 = "{c%s{n{G is now shared from another of your sessions.{n"
txt2 = "Sharing {c%s{n with another of your sessions."
else:
txt1 = "{c%s{n{R is now acted from another of your sessions.{n"
txt2 = "Taking over {c%s{n from another of your sessions."
self.unpuppet_object(obj.sessid.get())
self.msg(txt1 % obj.name, sessid=obj.sessid.get(), _forced_nomulti=True)
self.msg(txt2 % obj.name, sessid=sessid, _forced_nomulti=True)
elif obj.player.is_connected:
# controlled by another player
raise RuntimeError("{R{c%s{R is already puppeted by another Player.")
# do the puppeting
if session.puppet:
# cleanly unpuppet eventual previous object puppeted by this session
self.unpuppet_object(sessid)
# if we get to this point the character is ready to puppet or it
# was left with a lingering player/sessid reference from an unclean
# server kill or similar
obj.at_pre_puppet(self, sessid=sessid)
# do the connection
obj.sessid.add(sessid)
obj.player = self
session.puid = obj.id
session.puppet = obj
# validate/start persistent scripts on object
obj.scripts.validate()
obj.at_post_puppet()
# re-cache locks to make sure superuser bypass is updated
obj.locks.cache_lock_bypass(obj)
def unpuppet_object(self, sessid):
"""
Disengage control over an object
Args:
sessid(int): the session id to disengage
Raises:
RuntimeError with message about error.
"""
if _MULTISESSION_MODE == 1:
sessions = self.get_all_sessions()
else:
sessions = self.get_session(sessid)
if not sessions:
raise RuntimeError("No session was found.")
for session in make_iter(sessions):
obj = session.puppet or None
if not obj:
raise RuntimeError("No puppet was found to disconnect from.")
elif obj:
# do the disconnect, but only if we are the last session to puppet
obj.at_pre_unpuppet()
obj.sessid.remove(session.sessid)
if not obj.sessid.count():
del obj.player
obj.at_post_unpuppet(self, sessid=sessid)
# Just to be sure we're always clear.
session.puppet = None
session.puid = None
def unpuppet_all(self):
"""
Disconnect all puppets. This is called by server
before a reset/shutdown.
"""
for session in (sess for sess in self.get_all_sessions() if sess.puppet):
self.unpuppet_object(session.sessid)
def get_puppet(self, sessid, return_dbobj=False):
"""
Get an object puppeted by this session through this player. This is
the main method for retrieving the puppeted object from the
player's end.
sessid - return character connected to this sessid,
"""
session = self.get_session(sessid)
if not session:
return None
if return_dbobj:
return session.puppet
return session.puppet and session.puppet or None
def get_all_puppets(self):
"""
Get all currently puppeted objects as a list.
"""
return list(set(session.puppet for session in self.get_all_sessions()
if session.puppet))
def __get_single_puppet(self):
"""
This is a legacy convenience link for users of
MULTISESSION_MODE 0 or 1. It will return
only the first puppet. For mode 2, this returns
a list of all characters.
"""
puppets = self.get_all_puppets()
if _MULTISESSION_MODE in (0, 1):
return puppets and puppets[0] or None
return puppets
character = property(__get_single_puppet)
puppet = property(__get_single_puppet)
# utility methods
def delete(self, *args, **kwargs):
"""
Deletes the player permanently.
"""
for session in self.get_all_sessions():
# unpuppeting all objects and disconnecting the user, if any
# sessions remain (should usually be handled from the
# deleting command)
try:
self.unpuppet_object(session.sessid)
except RuntimeError:
# no puppet to disconnect from
pass
session.sessionhandler.disconnect(session, reason=_("Player being deleted."))
self.scripts.stop()
self.attributes.clear()
self.nicks.clear()
self.aliases.clear()
super(PlayerDB, self).delete(*args, **kwargs)
## methods inherited from database model
def msg(self, text=None, from_obj=None, sessid=None, **kwargs):
"""
Evennia -> User
This is the main route for sending data back to the user from the
server.
Args:
text (str, optional): text data to send
from_obj (Object or Player, optional): object sending. If given,
its at_msg_send() hook will be called.
sessid (int or list, optional): session id or ids to receive this
send. If given, overrules MULTISESSION_MODE.
Notes:
All other keywords are passed on to the protocol.
"""
text = to_str(text, force_string=True) if text else ""
if from_obj:
# call hook
try:
from_obj.at_msg_send(text=text, to_obj=self, **kwargs)
except Exception:
pass
# session relay
if sessid:
# this could still be an iterable if sessid is an iterable
sessions = self.get_session(sessid)
if sessions:
# this is a special instruction to ignore MULTISESSION_MODE
# and only relay to this given session.
kwargs["_nomulti"] = True
for session in make_iter(sessions):
session.msg(text=text, **kwargs)
return
# we only send to the first of any connected sessions - the sessionhandler
# will disperse this to the other sessions based on MULTISESSION_MODE.
sessions = self.get_all_sessions()
if sessions:
sessions[0].msg(text=text, **kwargs)
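    # Illustrative calls (caller-side code is an assumption):
    #   player.msg("Hello!", sessid=some_sessid)  # relay only to that session
    #   player.msg("Hello!")                      # let MULTISESSION_MODE decide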
def execute_cmd(self, raw_string, sessid=None, **kwargs):
"""
Do something as this player. This method is never called normally,
but only when the player object itself is supposed to execute the
command. It takes player nicks into account, but not nicks of
eventual puppets.
raw_string - raw command input coming from the command line.
sessid - the optional session id to be responsible for the command-send
**kwargs - other keyword arguments will be added to the found command
                   object instance as variables before it executes. This is
unused by default Evennia but may be used to set flags and
                   change operating parameters for commands at run-time.
"""
raw_string = to_unicode(raw_string)
raw_string = self.nicks.nickreplace(raw_string,
categories=("inputline", "channel"), include_player=False)
if not sessid and _MULTISESSION_MODE in (0, 1):
# in this case, we should either have only one sessid, or the sessid
# should not matter (since the return goes to all of them we can
# just use the first one as the source)
try:
sessid = self.get_all_sessions()[0].sessid
except IndexError:
# this can happen for bots
sessid = None
return cmdhandler.cmdhandler(self, raw_string,
callertype="player", sessid=sessid, **kwargs)
def search(self, searchdata, return_puppet=False,
nofound_string=None, multimatch_string=None, **kwargs):
"""
This is similar to the ObjectDB search method but will search for
Players only. Errors will be echoed, and None returned if no Player
is found.
searchdata - search criterion, the Player's key or dbref to search for
return_puppet - will try to return the object the player controls
instead of the Player object itself. If no
puppeted object exists (since Player is OOC), None will
be returned.
nofound_string - optional custom string for not-found error message.
multimatch_string - optional custom string for multimatch error header.
Extra keywords are ignored, but are allowed in call in order to make
API more consistent with objects.models.TypedObject.search.
"""
# handle me, self and *me, *self
if isinstance(searchdata, basestring):
# handle wrapping of common terms
if searchdata.lower() in ("me", "*me", "self", "*self",):
return self
matches = self.__class__.objects.player_search(searchdata)
matches = _AT_SEARCH_RESULT(self, searchdata, matches, global_search=True,
nofound_string=nofound_string,
multimatch_string=multimatch_string)
if matches and return_puppet:
try:
return matches.puppet
except AttributeError:
return None
return matches
def access(self, accessing_obj, access_type='read', default=False, no_superuser_bypass=False, **kwargs):
"""
Determines if another object has permission to access this object
in whatever way.
Args:
accessing_obj (Object): Object trying to access this one
access_type (str, optional): Type of access sought
default (bool, optional): What to return if no lock of access_type was found
no_superuser_bypass (bool, optional): Turn off superuser
lock bypassing. Be careful with this one.
Kwargs:
Passed to the at_access hook along with the result.
Returns:
result (bool): Result of access check.
"""
result = super(DefaultPlayer, self).access(accessing_obj, access_type=access_type,
default=default, no_superuser_bypass=no_superuser_bypass)
self.at_access(result, accessing_obj, access_type, **kwargs)
return result
## player hooks
def basetype_setup(self):
"""
This sets up the basic properties for a player.
Overload this with at_player_creation rather than
changing this method.
"""
# A basic security setup
lockstring = "examine:perm(Wizards);edit:perm(Wizards);delete:perm(Wizards);boot:perm(Wizards);msg:all()"
self.locks.add(lockstring)
# The ooc player cmdset
self.cmdset.add_default(_CMDSET_PLAYER, permanent=True)
def at_player_creation(self):
"""
This is called once, the very first time
the player is created (i.e. first time they
register with the game). It's a good place
to store attributes all players should have,
like configuration values etc.
"""
# set an (empty) attribute holding the characters this player has
lockstring = "attrread:perm(Admins);attredit:perm(Admins);attrcreate:perm(Admins)"
self.attributes.add("_playable_characters", [], lockstring=lockstring)
def at_init(self):
"""
This is always called whenever this object is initiated --
        that is, whenever its typeclass is cached from memory. This
happens on-demand first time the object is used or activated
in some way after being created but also after each server
restart or reload. In the case of player objects, this usually
happens the moment the player logs in or reconnects after a
reload.
"""
pass
# Note that the hooks below also exist in the character object's
# typeclass. You can often ignore these and rely on the character
# ones instead, unless you are implementing a multi-character game
# and have some things that should be done regardless of which
# character is currently connected to this player.
def at_first_save(self):
"""
This is a generic hook called by Evennia when this object is
saved to the database the very first time. You generally
don't override this method but the hooks called by it.
"""
self.basetype_setup()
self.at_player_creation()
permissions = settings.PERMISSION_PLAYER_DEFAULT
if hasattr(self, "_createdict"):
# this will only be set if the utils.create_player
# function was used to create the object.
cdict = self._createdict
if cdict.get("locks"):
self.locks.add(cdict["locks"])
if cdict.get("permissions"):
permissions = cdict["permissions"]
del self._createdict
self.permissions.add(permissions)
def at_access(self, result, accessing_obj, access_type, **kwargs):
"""
This is called with the result of an access call, along with
any kwargs used for that call. The return of this method does
not affect the result of the lock check. It can be used e.g. to
customize error messages in a central location or other effects
based on the access result.
"""
pass
def at_cmdset_get(self, **kwargs):
"""
Called just before cmdsets on this player are requested by the
command handler. If changes need to be done on the fly to the
cmdset before passing them on to the cmdhandler, this is the
place to do it. This is called also if the player currently
have no cmdsets. kwargs are usually not used unless the
cmdset is generated dynamically.
"""
pass
def at_first_login(self):
"""
Called the very first time this player logs into the game.
"""
pass
def at_pre_login(self):
"""
Called every time the user logs in, just before the actual
login-state is set.
"""
pass
def _send_to_connect_channel(self, message):
"Helper method for loading the default comm channel"
global _CONNECT_CHANNEL
if not _CONNECT_CHANNEL:
try:
_CONNECT_CHANNEL = ChannelDB.objects.filter(db_key=settings.DEFAULT_CHANNELS[1]["key"])[0]
except Exception:
logger.log_trace()
now = timezone.now()
now = "%02i-%02i-%02i(%02i:%02i)" % (now.year, now.month,
now.day, now.hour, now.minute)
if _CONNECT_CHANNEL:
_CONNECT_CHANNEL.tempmsg("[%s, %s]: %s" % (_CONNECT_CHANNEL.key, now, message))
else:
logger.log_infomsg("[%s]: %s" % (now, message))
def at_post_login(self, sessid=None):
"""
Called at the end of the login process, just before letting
the player loose. This is called before an eventual Character's
at_post_login hook.
"""
self._send_to_connect_channel("{G%s connected{n" % self.key)
if _MULTISESSION_MODE == 0:
# in this mode we should have only one character available. We
            # try to auto-connect to our last connected object, if any
self.puppet_object(sessid, self.db._last_puppet)
elif _MULTISESSION_MODE == 1:
# in this mode all sessions connect to the same puppet.
self.puppet_object(sessid, self.db._last_puppet)
elif _MULTISESSION_MODE in (2, 3):
# In this mode we by default end up at a character selection
# screen. We execute look on the player.
self.execute_cmd("look", sessid=sessid)
def at_disconnect(self, reason=None):
"""
Called just before user is disconnected.
"""
reason = reason and "(%s)" % reason or ""
self._send_to_connect_channel("{R%s disconnected %s{n" % (self.key, reason))
def at_post_disconnect(self):
"""
This is called after disconnection is complete. No messages
can be relayed to the player from here. After this call, the
player should not be accessed any more, making this a good
spot for deleting it (in the case of a guest player account,
for example).
"""
pass
def at_message_receive(self, message, from_obj=None):
"""
Called when any text is emitted to this
object. If it returns False, no text
will be sent automatically.
"""
return True
def at_message_send(self, message, to_object):
"""
Called whenever this object tries to send text
to another object. Only called if the object supplied
itself as a sender in the msg() call.
"""
pass
def at_server_reload(self):
"""
This hook is called whenever the server is shutting down for
restart/reboot. If you want to, for example, save non-persistent
properties across a restart, this is the place to do it.
"""
pass
def at_server_shutdown(self):
"""
This hook is called whenever the server is shutting down fully
(i.e. not for a restart).
"""
pass
class DefaultGuest(DefaultPlayer):
"""
This class is used for guest logins. Unlike Players, Guests and their
characters are deleted after disconnection.
"""
def at_post_login(self, sessid=None):
"""
In theory, guests only have one character regardless of which
MULTISESSION_MODE we're in. They don't get a choice.
"""
self._send_to_connect_channel("{G%s connected{n" % self.key)
self.puppet_object(sessid, self.db._last_puppet)
def at_disconnect(self):
"""
A Guest's characters aren't meant to linger on the server. When a
Guest disconnects, we remove its character.
"""
super(DefaultGuest, self).at_disconnect()
characters = self.db._playable_characters
for character in filter(None, characters):
character.delete()
def at_server_shutdown(self):
"""
We repeat at_disconnect() here just to be on the safe side.
"""
super(DefaultGuest, self).at_server_shutdown()
characters = self.db._playable_characters
for character in filter(None, characters):
character.delete()
def at_post_disconnect(self):
"""
Guests aren't meant to linger on the server, either. We need to wait
until after the Guest disconnects to delete it, though.
"""
super(DefaultGuest, self).at_post_disconnect()
self.delete()
| bsd-3-clause | 4,594,062,004,599,702,000 | 37.823789 | 113 | 0.610046 | false |
bhamza/ntu-dsi-dcn | src/core/bindings/modulegen_customizations.py | 19 | 20774 | import re
import os
import sys
from pybindgen.typehandlers import base as typehandlers
from pybindgen import ReturnValue, Parameter
from pybindgen.cppmethod import CustomCppMethodWrapper, CustomCppConstructorWrapper
from pybindgen.typehandlers.codesink import MemoryCodeSink
from pybindgen.typehandlers import ctypeparser
from pybindgen import cppclass, param, retval
import warnings
from pybindgen.typehandlers.base import CodeGenerationError
# class SmartPointerTransformation(typehandlers.TypeTransformation):
# """
# This class provides a "type transformation" that tends to support
# NS-3 smart pointers. Parameters such as "Ptr<Foo> foo" are
# transformed into something like Parameter.new("Foo*", "foo",
# transfer_ownership=False). Return values such as Ptr<Foo> are
# transformed into ReturnValue.new("Foo*",
# caller_owns_return=False). Since the underlying objects have
# reference counting, PyBindGen does the right thing.
# """
# def __init__(self):
# super(SmartPointerTransformation, self).__init__()
# self.rx = re.compile(r'(ns3::|::ns3::|)Ptr<([^>]+)>\s*$')
# def _get_untransformed_type_traits(self, name):
# m = self.rx.match(name)
# is_const = False
# if m is None:
# return None, False
# else:
# name1 = m.group(2).strip()
# if name1.startswith('const '):
# name1 = name1[len('const '):]
# is_const = True
# if name1.endswith(' const'):
# name1 = name1[:-len(' const')]
# is_const = True
# new_name = name1+' *'
# if new_name.startswith('::'):
# new_name = new_name[2:]
# return new_name, is_const
# def get_untransformed_name(self, name):
# new_name, dummy_is_const = self._get_untransformed_type_traits(name)
# return new_name
# def create_type_handler(self, type_handler, *args, **kwargs):
# if issubclass(type_handler, Parameter):
# kwargs['transfer_ownership'] = False
# elif issubclass(type_handler, ReturnValue):
# kwargs['caller_owns_return'] = False
# else:
# raise AssertionError
# ## fix the ctype, add ns3:: namespace
# orig_ctype, is_const = self._get_untransformed_type_traits(args[0])
# if is_const:
# correct_ctype = 'ns3::Ptr< %s const >' % orig_ctype[:-2]
# else:
# correct_ctype = 'ns3::Ptr< %s >' % orig_ctype[:-2]
# args = tuple([correct_ctype] + list(args[1:]))
# handler = type_handler(*args, **kwargs)
# handler.set_tranformation(self, orig_ctype)
# return handler
# def untransform(self, type_handler, declarations, code_block, expression):
# return 'const_cast<%s> (ns3::PeekPointer (%s))' % (type_handler.untransformed_ctype, expression)
# def transform(self, type_handler, declarations, code_block, expression):
# assert type_handler.untransformed_ctype[-1] == '*'
# return 'ns3::Ptr< %s > (%s)' % (type_handler.untransformed_ctype[:-1], expression)
# ## register the type transformation
# transf = SmartPointerTransformation()
# typehandlers.return_type_matcher.register_transformation(transf)
# typehandlers.param_type_matcher.register_transformation(transf)
# del transf
class ArgvParam(Parameter):
"""
Converts a python list-of-strings argument to a pair of 'int argc,
char *argv[]' arguments to pass into C.
One Python argument becomes two C function arguments -> it's a miracle!
Note: this parameter type handler is not registered by any name;
must be used explicitly.
"""
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = []
def convert_c_to_python(self, wrapper):
raise NotImplementedError
def convert_python_to_c(self, wrapper):
py_name = wrapper.declarations.declare_variable('PyObject*', 'py_' + self.name)
argc_var = wrapper.declarations.declare_variable('int', 'argc')
name = wrapper.declarations.declare_variable('char**', self.name)
idx = wrapper.declarations.declare_variable('Py_ssize_t', 'idx')
wrapper.parse_params.add_parameter('O!', ['&PyList_Type', '&'+py_name], self.name)
#wrapper.before_call.write_error_check('!PyList_Check(%s)' % py_name) # XXX
wrapper.before_call.write_code("%s = (char **) malloc(sizeof(char*)*PyList_Size(%s));"
% (name, py_name))
wrapper.before_call.add_cleanup_code('free(%s);' % name)
wrapper.before_call.write_code('''
for (%(idx)s = 0; %(idx)s < PyList_Size(%(py_name)s); %(idx)s++)
{
''' % vars())
wrapper.before_call.sink.indent()
wrapper.before_call.write_code('''
PyObject *item = PyList_GET_ITEM(%(py_name)s, %(idx)s);
''' % vars())
#wrapper.before_call.write_error_check('item == NULL')
wrapper.before_call.write_error_check(
'!PyString_Check(item)',
failure_cleanup=('PyErr_SetString(PyExc_TypeError, '
'"argument %s must be a list of strings");') % self.name)
wrapper.before_call.write_code(
'%s[%s] = PyString_AsString(item);' % (name, idx))
wrapper.before_call.sink.unindent()
wrapper.before_call.write_code('}')
wrapper.before_call.write_code('%s = PyList_Size(%s);' % (argc_var, py_name))
wrapper.call_params.append(argc_var)
wrapper.call_params.append(name)
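# Note: ArgvParam is not registered with any type matcher; it must be used
# explicitly when adding a method, as CommandLine_customizations() does below:
#   CommandLine.add_method('Parse', None, [ArgvParam(None, 'argv')], is_static=False)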
# class CallbackImplProxyMethod(typehandlers.ReverseWrapperBase):
# """
# Class that generates a proxy virtual method that calls a similarly named python method.
# """
# def __init__(self, return_value, parameters):
# super(CallbackImplProxyMethod, self).__init__(return_value, parameters)
# def generate_python_call(self):
# """code to call the python method"""
# build_params = self.build_params.get_parameters(force_tuple_creation=True)
# if build_params[0][0] == '"':
# build_params[0] = '(char *) ' + build_params[0]
# args = self.before_call.declare_variable('PyObject*', 'args')
# self.before_call.write_code('%s = Py_BuildValue(%s);'
# % (args, ', '.join(build_params)))
# self.before_call.add_cleanup_code('Py_DECREF(%s);' % args)
# self.before_call.write_code('py_retval = PyObject_CallObject(m_callback, %s);' % args)
# self.before_call.write_error_check('py_retval == NULL')
# self.before_call.add_cleanup_code('Py_DECREF(py_retval);')
# def generate_callback_classes(out, callbacks):
# for callback_impl_num, template_parameters in enumerate(callbacks):
# sink = MemoryCodeSink()
# cls_name = "ns3::Callback< %s >" % ', '.join(template_parameters)
# #print >> sys.stderr, "***** trying to register callback: %r" % cls_name
# class_name = "PythonCallbackImpl%i" % callback_impl_num
# sink.writeln('''
# class %s : public ns3::CallbackImpl<%s>
# {
# public:
# PyObject *m_callback;
# %s(PyObject *callback)
# {
# Py_INCREF(callback);
# m_callback = callback;
# }
# virtual ~%s()
# {
# Py_DECREF(m_callback);
# m_callback = NULL;
# }
# virtual bool IsEqual(ns3::Ptr<const ns3::CallbackImplBase> other_base) const
# {
# const %s *other = dynamic_cast<const %s*> (ns3::PeekPointer (other_base));
# if (other != NULL)
# return (other->m_callback == m_callback);
# else
# return false;
# }
# ''' % (class_name, ', '.join(template_parameters), class_name, class_name, class_name, class_name))
# sink.indent()
# callback_return = template_parameters[0]
# return_ctype = ctypeparser.parse_type(callback_return)
# if ('const' in return_ctype.remove_modifiers()):
# kwargs = {'is_const': True}
# else:
# kwargs = {}
# try:
# return_type = ReturnValue.new(str(return_ctype), **kwargs)
# except (typehandlers.TypeLookupError, typehandlers.TypeConfigurationError), ex:
# warnings.warn("***** Unable to register callback; Return value '%s' error (used in %s): %r"
# % (callback_return, cls_name, ex),
# Warning)
# continue
# arguments = []
# ok = True
# callback_parameters = [arg for arg in template_parameters[1:] if arg != 'ns3::empty']
# for arg_num, arg_type in enumerate(callback_parameters):
# arg_name = 'arg%i' % (arg_num+1)
# param_ctype = ctypeparser.parse_type(arg_type)
# if ('const' in param_ctype.remove_modifiers()):
# kwargs = {'is_const': True}
# else:
# kwargs = {}
# try:
# arguments.append(Parameter.new(str(param_ctype), arg_name, **kwargs))
# except (typehandlers.TypeLookupError, typehandlers.TypeConfigurationError), ex:
# warnings.warn("***** Unable to register callback; parameter '%s %s' error (used in %s): %r"
# % (arg_type, arg_name, cls_name, ex),
# Warning)
# ok = False
# if not ok:
# continue
# wrapper = CallbackImplProxyMethod(return_type, arguments)
# wrapper.generate(sink, 'operator()', decl_modifiers=[])
# sink.unindent()
# sink.writeln('};\n')
# sink.flush_to(out)
# class PythonCallbackParameter(Parameter):
# "Class handlers"
# CTYPES = [cls_name]
# #print >> sys.stderr, "***** registering callback handler: %r" % ctypeparser.normalize_type_string(cls_name)
# DIRECTIONS = [Parameter.DIRECTION_IN]
# PYTHON_CALLBACK_IMPL_NAME = class_name
# TEMPLATE_ARGS = template_parameters
# def convert_python_to_c(self, wrapper):
# "parses python args to get C++ value"
# assert isinstance(wrapper, typehandlers.ForwardWrapperBase)
# if self.default_value is None:
# py_callback = wrapper.declarations.declare_variable('PyObject*', self.name)
# wrapper.parse_params.add_parameter('O', ['&'+py_callback], self.name)
# wrapper.before_call.write_error_check(
# '!PyCallable_Check(%s)' % py_callback,
# 'PyErr_SetString(PyExc_TypeError, "parameter \'%s\' must be callbale");' % self.name)
# callback_impl = wrapper.declarations.declare_variable(
# 'ns3::Ptr<%s>' % self.PYTHON_CALLBACK_IMPL_NAME,
# '%s_cb_impl' % self.name)
# wrapper.before_call.write_code("%s = ns3::Create<%s> (%s);"
# % (callback_impl, self.PYTHON_CALLBACK_IMPL_NAME, py_callback))
# wrapper.call_params.append(
# 'ns3::Callback<%s> (%s)' % (', '.join(self.TEMPLATE_ARGS), callback_impl))
# else:
# py_callback = wrapper.declarations.declare_variable('PyObject*', self.name, 'NULL')
# wrapper.parse_params.add_parameter('O', ['&'+py_callback], self.name, optional=True)
# value = wrapper.declarations.declare_variable(
# 'ns3::Callback<%s>' % ', '.join(self.TEMPLATE_ARGS),
# self.name+'_value',
# self.default_value)
# wrapper.before_call.write_code("if (%s) {" % (py_callback,))
# wrapper.before_call.indent()
# wrapper.before_call.write_error_check(
# '!PyCallable_Check(%s)' % py_callback,
# 'PyErr_SetString(PyExc_TypeError, "parameter \'%s\' must be callbale");' % self.name)
# wrapper.before_call.write_code("%s = ns3::Callback<%s> (ns3::Create<%s> (%s));"
# % (value, ', '.join(self.TEMPLATE_ARGS),
# self.PYTHON_CALLBACK_IMPL_NAME, py_callback))
# wrapper.before_call.unindent()
# wrapper.before_call.write_code("}") # closes: if (py_callback) {
# wrapper.call_params.append(value)
# def convert_c_to_python(self, wrapper):
# raise typehandlers.NotSupportedError("Reverse wrappers for ns3::Callback<...> types "
# "(python using callbacks defined in C++) not implemented.")
# def write_preamble(out):
# pybindgen.write_preamble(out)
# out.writeln("#include \"ns3/everything.h\"")
def Simulator_customizations(module):
Simulator = module['ns3::Simulator']
## Simulator::Schedule(delay, callback, ...user..args...)
Simulator.add_custom_method_wrapper("Schedule", "_wrap_Simulator_Schedule",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleNow(callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleNow", "_wrap_Simulator_ScheduleNow",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleDestroy(callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleDestroy", "_wrap_Simulator_ScheduleDestroy",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
Simulator.add_custom_method_wrapper("Run", "_wrap_Simulator_Run",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def CommandLine_customizations(module):
CommandLine = module['ns3::CommandLine']
CommandLine.add_method('Parse', None, [ArgvParam(None, 'argv')],
is_static=False)
CommandLine.add_custom_method_wrapper("AddValue", "_wrap_CommandLine_AddValue",
flags=["METH_VARARGS", "METH_KEYWORDS"])
# def Object_customizations(module):
# ## ---------------------------------------------------------------------
# ## Here we generate custom constructor code for all classes that
# ## derive from ns3::Object. The custom constructors are needed in
# ## order to support kwargs only and to translate kwargs into ns3
# ## attributes, etc.
# ## ---------------------------------------------------------------------
# Object = module['ns3::Object']
# ## add a GetTypeId method to all generatd helper classes
# def helper_class_hook(helper_class):
# decl = """
# static ns3::TypeId GetTypeId (void)
# {
# static ns3::TypeId tid = ns3::TypeId ("%s")
# .SetParent< %s > ()
# ;
# return tid;
# }""" % (helper_class.name, helper_class.class_.full_name)
# helper_class.add_custom_method(decl)
# helper_class.add_post_generation_code(
# "NS_OBJECT_ENSURE_REGISTERED (%s);" % helper_class.name)
# Object.add_helper_class_hook(helper_class_hook)
# def ns3_object_instance_creation_function(cpp_class, code_block, lvalue,
# parameters, construct_type_name):
# assert lvalue
# assert not lvalue.startswith('None')
# if cpp_class.cannot_be_constructed:
# raise CodeGenerationError("%s cannot be constructed (%s)"
# % cpp_class.full_name)
# if cpp_class.incomplete_type:
# raise CodeGenerationError("%s cannot be constructed (incomplete type)"
# % cpp_class.full_name)
# code_block.write_code("%s = new %s(%s);" % (lvalue, construct_type_name, parameters))
# code_block.write_code("%s->Ref ();" % (lvalue))
# def ns3_object_post_instance_creation_function(cpp_class, code_block, lvalue,
# parameters, construct_type_name):
# code_block.write_code("ns3::CompleteConstruct(%s);" % (lvalue, ))
# Object.set_instance_creation_function(ns3_object_instance_creation_function)
# Object.set_post_instance_creation_function(ns3_object_post_instance_creation_function)
# def Attribute_customizations(module):
# # Fix up for the "const AttributeValue &v = EmptyAttribute()"
# # case, as used extensively by helper classes.
# # Here's why we need to do this: pybindgen.gccxmlscanner, when
# # scanning parameter default values, is only provided with the
# # value as a simple C expression string. (py)gccxml does not
# # report the type of the default value.
# # As a workaround, here we iterate over all parameters of all
# # methods of all classes and tell pybindgen what is the type of
# # the default value for attributes.
# for cls in module.classes:
# for meth in cls.get_all_methods():
# for param in meth.parameters:
# if isinstance(param, cppclass.CppClassRefParameter):
# if param.cpp_class.name == 'AttributeValue' \
# and param.default_value is not None \
# and param.default_value_type is None:
# param.default_value_type = 'ns3::EmptyAttributeValue'
def TypeId_customizations(module):
TypeId = module['ns3::TypeId']
TypeId.add_custom_method_wrapper("LookupByNameFailSafe", "_wrap_TypeId_LookupByNameFailSafe",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def add_std_ofstream(module):
module.add_include('<fstream>')
ostream = module.add_class('ostream', foreign_cpp_namespace='::std')
ostream.set_cannot_be_constructed("abstract base class")
ofstream = module.add_class('ofstream', foreign_cpp_namespace='::std', parent=ostream)
ofstream.add_enum('openmode', [
('app', 'std::ios_base::app'),
('ate', 'std::ios_base::ate'),
('binary', 'std::ios_base::binary'),
('in', 'std::ios_base::in'),
('out', 'std::ios_base::out'),
('trunc', 'std::ios_base::trunc'),
])
ofstream.add_constructor([Parameter.new("const char *", 'filename'),
Parameter.new("::std::ofstream::openmode", 'mode', default_value="std::ios_base::out")])
ofstream.add_method('close', None, [])
import pybindgen.typehandlers.base
for alias in "std::_Ios_Openmode", "std::ios::openmode":
pybindgen.typehandlers.base.param_type_matcher.add_type_alias(alias, "int")
for flag in 'in', 'out', 'ate', 'app', 'trunc', 'binary':
module.after_init.write_code('PyModule_AddIntConstant(m, (char *) "STD_IOS_%s", std::ios::%s);'
% (flag.upper(), flag))
def add_ipv4_address_tp_hash(module):
module.body.writeln('''
long
_ns3_Ipv4Address_tp_hash (PyObject *obj)
{
PyNs3Ipv4Address *addr = reinterpret_cast<PyNs3Ipv4Address *> (obj);
return static_cast<long> (ns3::Ipv4AddressHash () (*addr->obj));
}
''')
module.header.writeln('long _ns3_Ipv4Address_tp_hash (PyObject *obj);')
module['Ipv4Address'].pytype.slots['tp_hash'] = "_ns3_Ipv4Address_tp_hash"
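# With tp_hash delegating to ns3::Ipv4AddressHash, Ipv4Address instances hash
# consistently with their value and can be used as dict keys or set members from
# Python (illustrative; assumes the usual ns.network binding module):
#   seen = set()
#   seen.add(ns.network.Ipv4Address("10.1.1.1"))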
def post_register_types(root_module):
Simulator_customizations(root_module)
CommandLine_customizations(root_module)
TypeId_customizations(root_module)
add_std_ofstream(root_module)
enabled_features = os.environ['NS3_ENABLED_FEATURES'].split(',')
if 'Threading' not in enabled_features:
for clsname in ['SystemThread', 'SystemMutex', 'SystemCondition', 'CriticalSection',
'SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >']:
root_module.classes.remove(root_module['ns3::%s' % clsname])
if 'RealTime' not in enabled_features:
for clsname in ['WallClockSynchronizer', 'RealtimeSimulatorImpl']:
root_module.classes.remove(root_module['ns3::%s' % clsname])
root_module.enums.remove(root_module['ns3::RealtimeSimulatorImpl::SynchronizationMode'])
# these are already in the main script, so commented out here
# Object_customizations(root_module)
# Attribute_customizations(root_module)
#def post_register_functions(root_module):
# pass
| gpl-2.0 | -9,196,821,017,809,083,000 | 43.294243 | 122 | 0.576634 | false |
sqlfocus/linux | tools/perf/python/twatch.py | 625 | 2726 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main(context_switch = 0, thread = -1):
cpus = perf.cpu_map()
threads = perf.thread_map(thread)
evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
config = perf.COUNT_SW_DUMMY,
task = 1, comm = 1, mmap = 0, freq = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1, context_switch = context_switch,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
"""What we want are just the PERF_RECORD_ lifetime events for threads,
using the default, PERF_TYPE_HARDWARE + PERF_COUNT_HW_CYCLES & freq=1
(the default), makes perf reenable irq_vectors:local_timer_entry, when
disabling nohz, not good for some use cases where all we want is to get
    threads come and go... So use (perf.TYPE_SOFTWARE, perf.COUNT_SW_DUMMY,
freq=0) instead."""
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
"""
To test the PERF_RECORD_SWITCH record, pick a pid and replace
in the following line.
Example output:
cpu: 3, pid: 31463, tid: 31593 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31593, switch_out: 1 }
cpu: 1, pid: 31463, tid: 31489 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31489, switch_out: 1 }
cpu: 2, pid: 31463, tid: 31496 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31496, switch_out: 1 }
cpu: 3, pid: 31463, tid: 31491 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31491, switch_out: 0 }
It is possible as well to use event.misc & perf.PERF_RECORD_MISC_SWITCH_OUT
to figure out if this is a context switch in or out of the monitored threads.
If bored, please add command line option parsing support for these options :-)
"""
# main(context_switch = 1, thread = 31463)
main()
| gpl-2.0 | 3,934,246,376,490,157,600 | 39.088235 | 114 | 0.687454 | false |
inares/edx-platform | openedx/core/djangoapps/user_api/preferences/api.py | 48 | 17601 | """
API for managing user preferences.
"""
import logging
import analytics
from eventtracking import tracker
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from student.models import User, UserProfile
from request_cache import get_request_or_stub
from ..errors import (
UserAPIInternalError, UserAPIRequestError, UserNotFound, UserNotAuthorized,
PreferenceValidationError, PreferenceUpdateError
)
from ..helpers import intercept_errors
from ..models import UserOrgTag, UserPreference
from ..serializers import UserSerializer, RawUserPreferenceSerializer
log = logging.getLogger(__name__)
@intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError])
def get_user_preference(requesting_user, preference_key, username=None):
"""Returns the value of the user preference with the specified key.
Args:
requesting_user (User): The user requesting the user preferences. Only the user with username
`username` or users with "is_staff" privileges can access the preferences.
preference_key (str): The key for the user preference.
username (str): Optional username for which to look up the preferences. If not specified,
`requesting_user.username` is assumed.
Returns:
The value for the user preference which is always a string, or None if a preference
has not been specified.
Raises:
UserNotFound: no user with username `username` exists (or `requesting_user.username` if
`username` is not specified)
UserNotAuthorized: the requesting_user does not have access to the user preference.
UserAPIInternalError: the operation failed due to an unexpected error.
"""
existing_user = _get_authorized_user(requesting_user, username, allow_staff=True)
return UserPreference.get_value(existing_user, preference_key)
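# Illustrative usage (the preference key and calling context are assumptions):
#   time_zone = get_user_preference(request.user, 'time_zone')
#   all_prefs = get_user_preferences(request.user, username='some_learner')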
@intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError])
def get_user_preferences(requesting_user, username=None):
"""Returns all user preferences as a JSON response.
Args:
requesting_user (User): The user requesting the user preferences. Only the user with username
`username` or users with "is_staff" privileges can access the preferences.
username (str): Optional username for which to look up the preferences. If not specified,
`requesting_user.username` is assumed.
Returns:
        A dict containing the user's preferences.
Raises:
UserNotFound: no user with username `username` exists (or `requesting_user.username` if
`username` is not specified)
UserNotAuthorized: the requesting_user does not have access to the user preference.
UserAPIInternalError: the operation failed due to an unexpected error.
"""
existing_user = _get_authorized_user(requesting_user, username, allow_staff=True)
# Django Rest Framework V3 uses the current request to version
# hyperlinked URLS, so we need to retrieve the request and pass
# it in the serializer's context (otherwise we get an AssertionError).
# We're retrieving the request from the cache rather than passing it in
# as an argument because this is an implementation detail of how we're
# serializing data, which we want to encapsulate in the API call.
context = {
"request": get_request_or_stub()
}
user_serializer = UserSerializer(existing_user, context=context)
return user_serializer.data["preferences"]
@intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError])
def update_user_preferences(requesting_user, update, user=None):
"""Update the user preferences for the given user.
Note:
It is up to the caller of this method to enforce the contract that this method is only called
with the user who made the request.
Arguments:
requesting_user (User): The user requesting to modify account information. Only the user with username
'username' has permissions to modify account information.
update (dict): The updated account field values.
Some notes:
Values are expected to be strings. Non-string values will be converted to strings.
Null values for a preference will be treated as a request to delete the key in question.
user (str/User): Optional, either username string or user object specifying which account should be updated.
If not specified, `requesting_user.username` is assumed.
Raises:
UserNotFound: no user with username `username` exists (or `requesting_user.username` if
`username` is not specified)
UserNotAuthorized: the requesting_user does not have access to change the account
associated with `username`
PreferenceValidationError: the update was not attempted because validation errors were found
PreferenceUpdateError: the operation failed when performing the update.
UserAPIInternalError: the operation failed due to an unexpected error.
"""
if not user or isinstance(user, basestring):
user = _get_authorized_user(requesting_user, user)
else:
_check_authorized(requesting_user, user.username)
# First validate each preference setting
errors = {}
serializers = {}
for preference_key in update.keys():
preference_value = update[preference_key]
if preference_value is not None:
try:
serializer = create_user_preference_serializer(user, preference_key, preference_value)
validate_user_preference_serializer(serializer, preference_key, preference_value)
serializers[preference_key] = serializer
except PreferenceValidationError as error:
preference_error = error.preference_errors[preference_key]
errors[preference_key] = {
"developer_message": preference_error["developer_message"],
"user_message": preference_error["user_message"],
}
if errors:
raise PreferenceValidationError(errors)
# Then perform the patch
for preference_key in update.keys():
preference_value = update[preference_key]
if preference_value is not None:
try:
serializer = serializers[preference_key]
serializer.save()
except Exception as error:
raise _create_preference_update_error(preference_key, preference_value, error)
else:
delete_user_preference(requesting_user, preference_key)
@intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError])
def set_user_preference(requesting_user, preference_key, preference_value, username=None):
"""Update a user preference for the given username.
Note:
It is up to the caller of this method to enforce the contract that this method is only called
with the user who made the request.
Arguments:
requesting_user (User): The user requesting to modify account information. Only the user with username
'username' has permissions to modify account information.
preference_key (str): The key for the user preference.
preference_value (str): The value to be stored. Non-string values will be converted to strings.
username (str): Optional username specifying which account should be updated. If not specified,
`requesting_user.username` is assumed.
Raises:
UserNotFound: no user with username `username` exists (or `requesting_user.username` if
`username` is not specified)
UserNotAuthorized: the requesting_user does not have access to change the account
associated with `username`
PreferenceValidationError: the update was not attempted because validation errors were found
PreferenceUpdateError: the operation failed when performing the update.
UserAPIInternalError: the operation failed due to an unexpected error.
"""
existing_user = _get_authorized_user(requesting_user, username)
serializer = create_user_preference_serializer(existing_user, preference_key, preference_value)
validate_user_preference_serializer(serializer, preference_key, preference_value)
try:
serializer.save()
except Exception as error:
raise _create_preference_update_error(preference_key, preference_value, error)
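# Illustrative usage (the key/value shown are assumptions):
#   set_user_preference(request.user, 'pref-lang', 'en')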
@intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError])
def delete_user_preference(requesting_user, preference_key, username=None):
"""Deletes a user preference on behalf of a requesting user.
Note:
It is up to the caller of this method to enforce the contract that this method is only called
with the user who made the request.
Arguments:
requesting_user (User): The user requesting to delete the preference. Only the user with username
'username' has permissions to delete their own preference.
preference_key (str): The key for the user preference.
username (str): Optional username specifying which account should be updated. If not specified,
`requesting_user.username` is assumed.
Returns:
True if the preference was deleted, False if the user did not have a preference with the supplied key.
Raises:
UserNotFound: no user with username `username` exists (or `requesting_user.username` if
`username` is not specified)
UserNotAuthorized: the requesting_user does not have access to change the account
associated with `username`
PreferenceUpdateError: the operation failed when performing the update.
UserAPIInternalError: the operation failed due to an unexpected error.
"""
existing_user = _get_authorized_user(requesting_user, username)
try:
user_preference = UserPreference.objects.get(user=existing_user, key=preference_key)
except ObjectDoesNotExist:
return False
try:
user_preference.delete()
except Exception as error:
raise PreferenceUpdateError(
developer_message=u"Delete failed for user preference '{preference_key}': {error}".format(
preference_key=preference_key, error=error
),
user_message=_(u"Delete failed for user preference '{preference_key}'.").format(
preference_key=preference_key
),
)
return True
@intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError])
def update_email_opt_in(user, org, opt_in):
"""Updates a user's preference for receiving org-wide emails.
Sets a User Org Tag defining the choice to opt in or opt out of organization-wide
emails.
Arguments:
user (User): The user to set a preference for.
org (str): The org is used to determine the organization this setting is related to.
opt_in (bool): True if the user is choosing to receive emails for this organization.
If the user requires parental consent then email-optin is set to False regardless.
Returns:
None
Raises:
UserNotFound: no user profile exists for the specified user.
"""
preference, _ = UserOrgTag.objects.get_or_create(
user=user, org=org, key='email-optin'
)
# If the user requires parental consent, then don't allow opt-in
try:
user_profile = UserProfile.objects.get(user=user)
except ObjectDoesNotExist:
raise UserNotFound()
if user_profile.requires_parental_consent(
age_limit=getattr(settings, 'EMAIL_OPTIN_MINIMUM_AGE', 13),
default_requires_consent=False,
):
opt_in = False
# Update the preference and save it
preference.value = str(opt_in)
try:
preference.save()
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
_track_update_email_opt_in(user.id, org, opt_in)
except IntegrityError as err:
log.warn(u"Could not update organization wide preference due to IntegrityError: {}".format(err.message))
def _track_update_email_opt_in(user_id, organization, opt_in):
"""Track an email opt-in preference change.
Arguments:
user_id (str): The ID of the user making the preference change.
organization (str): The organization whose emails are being opted into or out of by the user.
opt_in (bool): Whether the user has chosen to opt-in to emails from the organization.
Returns:
None
"""
event_name = 'edx.bi.user.org_email.opted_in' if opt_in else 'edx.bi.user.org_email.opted_out'
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(
user_id,
event_name,
{
'category': 'communication',
'label': organization
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
def _get_authorized_user(requesting_user, username=None, allow_staff=False):
"""
Helper method to return the authorized user for a given username.
If username is not provided, requesting_user.username is assumed.
"""
if username is None:
username = requesting_user.username
try:
existing_user = User.objects.get(username=username)
except ObjectDoesNotExist:
raise UserNotFound()
_check_authorized(requesting_user, username, allow_staff)
return existing_user
def _check_authorized(requesting_user, username, allow_staff=False):
"""
    Helper method that raises UserNotAuthorized unless the requesting user
    is the user identified by `username`, or is a staff user when staff
    access is allowed (i.e. 'allow_staff' is True).
"""
if requesting_user.username != username:
if not requesting_user.is_staff or not allow_staff:
raise UserNotAuthorized()
def create_user_preference_serializer(user, preference_key, preference_value):
"""Creates a serializer for the specified user preference.
Arguments:
user (User): The user whose preference is being serialized.
preference_key (str): The key for the user preference.
preference_value (str): The value to be stored. Non-string values will be converted to strings.
Returns:
A serializer that can be used to save the user preference.
"""
try:
existing_user_preference = UserPreference.objects.get(user=user, key=preference_key)
except ObjectDoesNotExist:
existing_user_preference = None
new_data = {
"user": user.id,
"key": preference_key,
"value": preference_value,
}
if existing_user_preference:
serializer = RawUserPreferenceSerializer(existing_user_preference, data=new_data)
else:
serializer = RawUserPreferenceSerializer(data=new_data)
return serializer
def validate_user_preference_serializer(serializer, preference_key, preference_value):
"""Validates a user preference serializer.
Arguments:
serializer (UserPreferenceSerializer): The serializer to be validated.
preference_key (str): The key for the user preference.
preference_value (str): The value to be stored. Non-string values will be converted to strings.
Raises:
PreferenceValidationError: the supplied key and/or value for a user preference are invalid.
"""
if preference_value is None or unicode(preference_value).strip() == '':
format_string = ugettext_noop(u"Preference '{preference_key}' cannot be set to an empty value.")
raise PreferenceValidationError({
preference_key: {
"developer_message": format_string.format(preference_key=preference_key),
"user_message": _(format_string).format(preference_key=preference_key)
}
})
if not serializer.is_valid():
developer_message = u"Value '{preference_value}' not valid for preference '{preference_key}': {error}".format(
preference_key=preference_key, preference_value=preference_value, error=serializer.errors
)
if "key" in serializer.errors:
user_message = _(u"Invalid user preference key '{preference_key}'.").format(
preference_key=preference_key
)
else:
user_message = _(u"Value '{preference_value}' is not valid for user preference '{preference_key}'.").format(
preference_key=preference_key, preference_value=preference_value
)
raise PreferenceValidationError({
preference_key: {
"developer_message": developer_message,
"user_message": user_message,
}
})
def _create_preference_update_error(preference_key, preference_value, error):
""" Creates a PreferenceUpdateError with developer_message and user_message. """
return PreferenceUpdateError(
developer_message=u"Save failed for user preference '{key}' with value '{value}': {error}".format(
key=preference_key, value=preference_value, error=error
),
user_message=_(u"Save failed for user preference '{key}' with value '{value}'.").format(
key=preference_key, value=preference_value
),
)
| agpl-3.0 | -3,942,451,042,873,405,000 | 42.352217 | 120 | 0.684904 | false |
franchenstein/master_project | main.py | 1 | 18773 | #!/usr/bin/env python
import probabilisticgraph as pg
import graphgenerator as gg
import dmarkov as dm
import sequenceanalyzer as sa
import yaml
import matplotlib.pyplot as plt
import synchwordfinder as swf
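# Entry point: loads the YAML configs under 'configs/<graph_path>/' and runs
# whichever pipeline stages are enabled by the boolean flags (synch-word
# search, tree termination, D-Markov generation, graph/sequence generation,
# sequence analysis and plotting).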
def main(config_file, fsw=False, terminate=False, dmark=False, generate=False, gen_seq=False, an_seq=False, plot=False,
seq_len=10000000, tag='default'):
with open(config_file, 'r') as f:
configs = yaml.load(f)
graph_path = configs['graph_path']
terminations = configs['terminations']
lmax = configs['lmax']
algorithms = configs['algorithms']
lrange = configs['lrange']
alpharange = configs['alpharange']
drange = configs['drange']
test = configs['test']
synch_words = configs['synch_words']
l2range = configs['l2range']
if fsw:
p = 'configs/' + graph_path + '/fsw_params.yaml'
with open(p, 'r') as f:
fsw_params = yaml.load(f)
find_synch_words(graph_path, fsw_params['w'], lmax, fsw_params['alpha'], fsw_params['test'], l2range)
if terminate:
terminate_graphs(graph_path, terminations, lrange, lmax, alpharange, test)
if dmark:
generate_dmarkov(graph_path, drange, lmax)
if generate:
seq_path = 'sequences/' + graph_path + '/original_length_' + str(seq_len) + '.yaml'
generate_graphs(algorithms, terminations, lmax, lrange, l2range, alpharange, graph_path, synch_words, test,
seq_path)
if gen_seq:
generate_sequences(graph_path, algorithms, drange, terminations, lrange, l2range, alpharange, seq_len)
if an_seq:
p = 'configs/' + graph_path + '/params.yaml'
with open(p, 'r') as f:
params = yaml.load(f)
analyze_sequences(graph_path, algorithms, drange, terminations, lrange, l2range, alpharange, seq_len,
params['to_analyze'], params['other_params'])
if plot:
p = 'configs/' + graph_path + '/plotconfigs.yaml'
with open(p, 'r') as f:
params = yaml.load(f)
if params['cond_entropy']:
plot_entropies(graph_path, algorithms, terminations, drange, lrange, alpharange, params['eval_l'], tag)
if params['autocorrelation']:
plot_autocorr(graph_path, algorithms, terminations, drange, lrange, alpharange, params['upto'], tag)
if params['kld']:
plot_others('kld', graph_path, algorithms, terminations, drange, lrange, alpharange, tag)
if params['l1metric']:
plot_others('l1metric', graph_path, algorithms, terminations, drange, lrange, alpharange, tag)
def find_synch_words(graph_path, w, l, alpha, test, l2range=[1]):
s = swf.SynchWordFinder(graph_path, w, l, alpha, test, l2range)
sw = s.find_synch_words()
path = "synch_words/" + graph_path + "/sw.yaml"
with open(path, "w") as f:
yaml.dump(sw, f)
def terminate_graphs(graph_path, terminations, lrange, lmax, alpharange, test):
g = pg.ProbabilisticGraph([], [])
if 'omega_inverted' in terminations:
synch_path = 'synch_words/' + graph_path + '/sw.yaml'
with open(synch_path, 'r') as f:
synch_words = yaml.load(f)
else:
synch_words = []
for t in terminations:
for l in lrange:
for alpha in alpharange:
p = 'graphs/' + graph_path + '/rtp_L' + str(lmax) + '.yaml'
g.open_graph_file(p)
h = g.expand_last_level(l, t, alpha, test, synch_words)
path = 'graphs/' + graph_path + '/rtp_L' + str(l) + '_alpha' + str(alpha) + '_' + t + '.yaml'
h.save_graph_file(path)
def generate_graphs(algorithms, terminations, maxl, lrange, l2range, alpharange, save_path, synch_words, test,
seq_path):
for t in terminations:
for l in lrange:
for alpha in alpharange:
p1 = 'graphs/' + save_path + '/rtp_L' + str(l) + '_alpha' + str(alpha) + '_' + t + '.yaml'
p2 = 'graphs/' + save_path + '/L' + str(l) + '_alpha' + str(alpha) + '_' + t
g = gg.GraphGenerator(p1, synch_words, p2, seq_path)
for algo in algorithms:
if algo == 'mk1':
g.mk1(test, alpha, l2range[-1])
elif algo == 'mk2':
g.mk2()
elif algo == 'mk2_moore':
g.mk2_moore(test, alpha, l2range[-1])
elif algo == 'mk3':
g.mk3(test, alpha)
if 'crissis' in algorithms:
p1 = 'graphs/' + save_path + '/rtp_L' + str(maxl) + '.yaml'
for l2 in l2range:
for alpha in alpharange:
p2 = 'graphs/' + save_path + '/L_2_' + str(l2) + '_alpha' + str(alpha)
g = gg.GraphGenerator(p1, synch_words, p2, seq_path)
g.crissis(test, alpha, l2)
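# Builds a D-Markov machine for each order in drange from the depth-lmax
# probabilistic tree and saves it as dmarkov_d<D>.yaml.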
def generate_dmarkov(graph_path, drange, lmax):
g = pg.ProbabilisticGraph([], [])
for d in drange:
p = 'graphs/' + graph_path + '/rtp_L' + str(lmax) + '.yaml'
g.open_graph_file(p)
h = dm.DMarkov(g, d)
path = 'graphs/' + graph_path + '/dmarkov_d' + str(d) + '.yaml'
h.save_graph_file(path)
def generate_sequences(graph_path, algorithms, drange, terminations, lrange, l2range, alpharange, seq_len):
g = pg.ProbabilisticGraph([], [])
for algo in algorithms:
if algo == 'dmark':
for d in drange:
p = 'dmarkov_d' + str(d) + '.yaml'
path = 'graphs/' + graph_path + '/' + p
generate_sequences_core(g, graph_path, path, p, seq_len)
elif algo == 'crissis':
for l2 in l2range:
for alpha in alpharange:
p = 'L_2_' + str(l2) + '_alpha' + str(alpha) + '_crissis.yaml'
path = 'graphs/' + graph_path + '/' + p
generate_sequences_core(g, graph_path, path, p, seq_len)
else:
for t in terminations:
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
path = 'graphs/' + graph_path + '/' + p
generate_sequences_core(g, graph_path, path, p, seq_len)
def generate_sequences_core(g, graph_path, path, p, seq_len):
g.open_graph_file(path)
seq, v = g.generate_sequence(seq_len, g.states[0])
p = 'sequences/' + graph_path + '/len_' + str(seq_len) + '_' + p
with open(p, 'w') as f:
yaml.dump(seq, f)
def analyze_sequences(graph_path, algorithms, drange, terminations,
lrange, l2range, alpharange, seq_len, to_analyze, params):
for algo in algorithms:
if algo == 'dmark':
kld = []
l1 = []
for d in drange:
p = 'dmarkov_d' + str(d) + '.yaml'
path = 'sequences/' + graph_path + '/len_' +str(seq_len) + '_' + p
seq_an = sa.SequenceAnalyzer(path)
kld_step, l1_step = analyze_sequences_core_1(graph_path, p, to_analyze, params, seq_an)
kld.append(kld_step)
l1.append(l1_step)
if to_analyze['kld']:
k_path = 'results/' + graph_path + '/kld/dmarkov.yaml'
with open(k_path, 'w') as f:
yaml.dump(kld, f)
if to_analyze['l1metric']:
l_path = 'results/' + graph_path + '/l1metric/dmarkov.yaml'
with open(l_path, 'w') as f:
yaml.dump(l1, f)
elif algo == 'crissis':
kld = []
l1 = []
for l2 in l2range:
for alpha in alpharange:
p = 'L_2_' + str(l2) + '_alpha' + str(alpha) + '_crissis.yaml'
path = 'sequences/' + graph_path + '/len_' +str(seq_len) + '_' + p
seq_an = sa.SequenceAnalyzer(path)
kld_step, l1_step = analyze_sequences_core_1(graph_path, p, to_analyze, params, seq_an)
kld.append(kld_step)
l1.append(l1_step)
if to_analyze['kld']:
k_path = 'results/' + graph_path + '/kld/crissis.yaml'
with open(k_path, 'w') as f:
yaml.dump(kld, f)
if to_analyze['l1metric']:
l_path = 'results/' + graph_path + '/l1metric/crissis.yaml'
with open(l_path, 'w') as f:
yaml.dump(l1, f)
else:
for t in terminations:
kld = []
l1 = []
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
path = 'sequences/' + graph_path + '/len_' +str(seq_len) + '_' + p
seq_an = sa.SequenceAnalyzer(path)
kld_step, l1_step = analyze_sequences_core_1(graph_path, p, to_analyze, params, seq_an)
kld.append(kld_step)
l1.append(l1_step)
if to_analyze['kld']:
k_path = 'results/' + graph_path + '/kld/' + t + '_' + algo + '.yaml'
with open(k_path, 'w') as f:
yaml.dump(kld, f)
if to_analyze['l1metric']:
l_path = 'results/' + graph_path + '/l1metric/' + t + '_' + algo + '.yaml'
with open(l_path, 'w') as f:
yaml.dump(l1, f)
def analyze_sequences_core_1(graph_path, path, to_analyze, params, seq_an):
kld = 0
l1 = 0
if to_analyze['probabilities']:
p, alph = seq_an.calc_probs(params['L'])
p_path = 'results/'+ graph_path + '/probabilities/' + path
with open(p_path, 'w') as f:
yaml.dump([p, alph], f)
if to_analyze['cond_probabilities']:
check_probs(seq_an, graph_path, path)
p_cond = seq_an.calc_cond_probs(params['L']-1)
p_cond_path = 'results/'+ graph_path + '/probabilities/cond_' + path
with open(p_cond_path, 'w') as f:
yaml.dump(p_cond, f)
if to_analyze['cond_entropy']:
check_probs(seq_an, graph_path, path)
check_cond_probs(seq_an, graph_path, path)
h = seq_an.calc_cond_entropy(params['L']-1)
h_path = 'results/'+ graph_path + '/cond_entropies/' + path
with open(h_path, 'w') as f:
yaml.dump(h, f)
if to_analyze['autocorrelation']:
a = seq_an.calc_autocorrelation(params['upto'])
a_path = 'results/' + graph_path + '/autocorrelations/' + path
with open(a_path, 'w') as f:
yaml.dump(a, f)
if to_analyze['kld']:
check_probs(seq_an, graph_path, path)
p = load_reference_probs(graph_path)
kld = seq_an.calc_kldivergence(p, params['K'])
if to_analyze['l1metric']:
check_probs(seq_an, graph_path, path)
p = load_reference_probs(graph_path)
l1 = seq_an.calc_l1metric(p, params['l1'])
return [kld, l1]
def check_probs(seq_an, graph_path, path):
if not seq_an.probabilities:
p_path = 'results/'+ graph_path + '/probabilities/' + path
with open(p_path, 'r') as f:
p, alph = yaml.load(f)
seq_an.probabilities = p
seq_an.alphabet = alph
def check_cond_probs(seq_an, graph_path, path):
if not seq_an.conditional_probabilities:
p_path = 'results/'+ graph_path + '/probabilities/cond_' + path
with open(p_path, 'r') as f:
pcond = yaml.load(f)
seq_an.conditional_probabilities = pcond
def load_reference_probs(graph_path):
path = 'results/' + graph_path + '/probabilities/original.yaml'
with open(path, 'r') as f:
p = yaml.load(f)
return p[0]
def plot_entropies(graph_path, algorithms, terminations, drange, lrange, alpharange, eval_l, tag):
path_original = 'results/' + graph_path + '/cond_entropies/original.yaml'
with open(path_original, 'r') as f:
h_original = yaml.load(f)
h_base = h_original[eval_l]
h = []
states = []
labels = []
g = pg.ProbabilisticGraph([], [])
for algo in algorithms:
if algo == 'dmark':
h_dmark = []
states_dmark = []
for d in drange:
h_path = 'results/' + graph_path + '/cond_entropies/dmarkov_d' + str(d) + '.yaml'
with open(h_path, 'r') as f:
h_eval = yaml.load(f)
h_dmark.append(h_eval[eval_l])
g_path = 'graphs/' + graph_path + '/dmarkov_d' + str(d) + '.yaml'
g.open_graph_file(g_path)
states_dmark.append(len(g.states))
h.append(h_dmark)
states.append(states_dmark)
lbl = 'D-Markov, D from ' + str(drange[0]) + ' to ' + str(drange[-1])
labels.append(lbl)
else:
for t in terminations:
h_term = []
states_term = []
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
h_path = 'results/' + graph_path + '/cond_entropies/' + p
with open(h_path, 'r') as f:
h_eval = yaml.load(f)
h_term.append(h_eval[eval_l])
g_path = 'graphs/' + graph_path + '/' + p
g.open_graph_file(g_path)
states_term.append(len(g.states))
lbl = algo + ', ' + t
labels.append(lbl)
h.append(h_term)
states.append(states_term)
i = 0
for entropy in h:
plt.semilogx(states[i], entropy, marker='o', label = labels[i])
i += 1
plt.axhline(y=h_base, color='k', linewidth = 3, label='Original sequence baseline')
plt.legend(loc='upper right', shadow=False, fontsize='medium')
plt.xlabel('Number of states')
plt.ylabel('Conditional Entropy')
#fig_mngr = plt.get_current_fig_manager()
#fig_mngr.window.showMaximized()
save_path = 'plots/' + graph_path + '/cond_entropies_' + tag + '.png'
plt.savefig(save_path, bbox_inches='tight')
plt.show()
def plot_others(kind, graph_path, algorithms, terminations, drange, lrange, alpharange, tag):
h = []
states = []
labels = []
g = pg.ProbabilisticGraph([], [])
for algo in algorithms:
if algo == 'dmark':
states_dmark = []
h_path = 'results/' + graph_path + '/' + kind + '/dmarkov.yaml'
with open(h_path, 'r') as f:
h.append(yaml.load(f))
for d in drange:
g_path = 'graphs/' + graph_path + '/dmarkov_d' + str(d) + '.yaml'
g.open_graph_file(g_path)
states_dmark.append(len(g.states))
states.append(states_dmark)
lbl = 'D-Markov, D from ' + str(drange[0]) + ' to ' + str(drange[-1])
labels.append(lbl)
else:
for t in terminations:
states_term = []
h_path = 'results/' + graph_path + '/' + kind + '/' + t + '_' + algo + '.yaml'
with open(h_path, 'r') as f:
h.append(yaml.load(f))
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
g_path = 'graphs/' + graph_path + '/' + p
g.open_graph_file(g_path)
states_term.append(len(g.states))
lbl = algo + ', ' + t
labels.append(lbl)
states.append(states_term)
i = 0
for value in h:
print len(value)
print len(states[i])
plt.semilogx(states[i], value, marker='o', label=labels[i])
i += 1
plt.legend(loc='upper right', shadow=False, fontsize='medium')
plt.xlabel('Number of states')
if kind == 'l1metric':
plt.ylabel('L1-Metric')
elif kind == 'kld':
plt.ylabel('Kullback-Leibler Divergence')
save_path = 'plots/' + graph_path + '/' + kind + '_' + tag + '.png'
#fig_mngr = plt.get_current_fig_manager()
#fig_mngr.window.showMaximized()
plt.savefig(save_path, bbox_inches='tight')
plt.show()
def plot_autocorr(graph_path, algorithms, terminations, drange, lrange, alpharange, up_to, tag):
path_original = 'results/' + graph_path + '/autocorrelations/original.yaml'
with open(path_original, 'r') as f:
h_base = yaml.load(f)
h = []
labels = []
g = pg.ProbabilisticGraph([], [])
for algo in algorithms:
if algo == 'dmark':
for d in drange:
h_path = 'results/' + graph_path + '/autocorrelations/dmarkov_d' + str(d) + '.yaml'
with open(h_path, 'r') as f:
h_eval = yaml.load(f)
h.append(h_eval)
g_path = 'graphs/' + graph_path + '/dmarkov_d' + str(d) + '.yaml'
g.open_graph_file(g_path)
lbl = 'D-Markov, D = ' + str(d) + ', ' + str(len(g.states)) + ' states'
labels.append(lbl)
else:
for t in terminations:
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
h_path = 'results/' + graph_path + '/autocorrelations/' + p
with open(h_path, 'r') as f:
h_eval = yaml.load(f)
h.append(h_eval)
g_path = 'graphs/' + graph_path + '/' + p
g.open_graph_file(g_path)
lbl = algo + ', ' + t + ', L = ' +str(l) + '. ' + str(len(g.states)) + ' states'
labels.append(lbl)
i = 0
x = range(1, up_to)
for autocorr in h:
plt.plot(x, autocorr[1:up_to], marker='o', label=labels[i])
i += 1
plt.plot(x, h_base[1:up_to], color='k', linewidth=3, label='Original sequence')
plt.legend(loc='upper right', shadow=False, fontsize='medium')
plt.xlabel('Lag')
plt.ylabel('Autocorrelation')
#fig_mngr = plt.get_current_fig_manager()
#fig_mngr.window.showMaximized()
save_path = 'plots/' + graph_path + '/autocorrelations_' + tag + '.png'
plt.savefig(save_path, bbox_inches='tight')
plt.show()
| mit | -6,483,332,397,972,804,000 | 42.456019 | 119 | 0.506898 | false |
zaxliu/deepnap | experiments/kdd-exps/experiment_message_2016-6-11_BUF2_G5_FR100_legacy.py | 1 | 4371 | # System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_file_name = 'message_2016-6-11_BUF2_G5_FR100.log'
# Composite classes
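# Phi_QAgentNN stacks the PhiMixin state-history (phi) window on top of the
# Theano-based neural-network Q-agent QAgentNN.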
class Phi_QAgentNN(PhiMixin, QAgentNN):
def __init__(self, **kwargs):
super(Phi_QAgentNN, self).__init__(**kwargs)
# Parameters
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.5, 0.9
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
phi_length = 5
dim_state = (1, phi_length, 3+2)
range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
range_state = [[range_state_slice]*phi_length]
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size = 2, 200
reward_scaling, reward_scaling_update = 1, 'adaptive'
batch_size, update_period, freeze_period, rs_period = 100, 4, 16, 32
# |- Env model
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, 0.0
beta = 0.5
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- Env
# |- Time
start_time = pd.to_datetime('2014-11-05 09:20:00')
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df =pd.read_csv(
filepath_or_buffer='../data/trace_dh3.dat',
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
agent = Phi_QAgentNN(
phi_length=phi_length,
dim_state=dim_state, range_state=range_state,
f_build_net = None,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
| bsd-3-clause | 1,144,453,517,155,813,800 | 32.366412 | 103 | 0.668039 | false |
jemsbhai/otp | lib/asn1/test/asn1_SUITE_data/SwCDR.py | 97 | 5470 | SwCDR DEFINITIONS
IMPLICIT TAGS ::=
BEGIN
EXPORTS
SwCDR;
SwCDR ::= CHOICE
{
origSvcCallRecord [0] OrigSvcCallRecord,
termSvcCallRecord [1] TermSvcCallRecord
}
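-- A charging record is either an originating-side or a terminating-side
-- record; the two record types below carry the same set of fields.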
--OrigSvcCallRecord ::= SET
OrigSvcCallRecord ::= SEQUENCE
{
callCorrelationId [0] INTEGER ,
chargingIndicator [1] ChargingIndicator,
sequenceNo [2] INTEGER ,
callingParty [3] CallingNumber,
calledParty [4] CalledNumber,
connectedNumber [5] ConnectedNumber,
startDate [6] StartDate,
startTime [7] StartTime,
duration [8] CallDuration ,
-- bearerClass [] BearerClass ,
trafficType [9] TrafficType ,
qosFwd [10] QoSClass ,
qosBkwd [11] QoSClass ,
forwardPcrClp0 [12] CellRate ,
forwardPcrClp01 [13] CellRate ,
backwardPcrClp0 [14] CellRate ,
backwardPcrClp01 [15] CellRate ,
forwardScrClp0 [16] CellRate ,
forwardScrClp01 [17] CellRate ,
backwardScrClp0 [18] CellRate ,
backwardScrClp01 [19] CellRate ,
forwardMcrClp0 [20] CellRate ,
forwardMcrClp01 [21] CellRate ,
backwardMcrClp0 [22] CellRate ,
backwardMcrClp01 [23] CellRate ,
forwardMbsClp0 [24] CellRate ,
forwardMbsClp01 [25] CellRate ,
forwardBEI [26] INTEGER ,
backwardBEI [27] INTEGER ,
forwardTagging [28] INTEGER ,
backwardTagging [29] INTEGER ,
-- egressCellrate0 [] INTEGER,
-- egressCellrate01 [] INTEGER,
ingressCellrate0 [30] INTEGER ,
-- ingressCellrate01 [] INTEGER ,
ingressCellrate1 [31] INTEGER ,
connectionConfig [32] UserPlaneConnection OPTIONAL
-- causeForTerm [33] CauseForTerm OPTIONAL
}
--TermSvcCallRecord ::= SET
TermSvcCallRecord ::= SEQUENCE
{
callCorrelationId [0] INTEGER ,
chargingIndicator [1] ChargingIndicator,
sequenceNo [2] INTEGER ,
callingParty [3] CallingNumber,
calledParty [4] CalledNumber,
connectedNumber [5] ConnectedNumber,
startDate [6] StartDate,
startTime [7] StartTime,
duration [8] CallDuration ,
-- bearerClass [] BearerClass ,
trafficType [9] TrafficType ,
qosFwd [10] QoSClass ,
qosBkwd [11] QoSClass ,
forwardPcrClp0 [12] CellRate ,
forwardPcrClp01 [13] CellRate ,
backwardPcrClp0 [14] CellRate ,
backwardPcrClp01 [15] CellRate ,
forwardScrClp0 [16] CellRate ,
forwardScrClp01 [17] CellRate ,
backwardScrClp0 [18] CellRate ,
backwardScrClp01 [19] CellRate ,
forwardMcrClp0 [20] CellRate ,
forwardMcrClp01 [21] CellRate ,
backwardMcrClp0 [22] CellRate ,
backwardMcrClp01 [23] CellRate ,
forwardMbsClp0 [24] CellRate ,
forwardMbsClp01 [25] CellRate ,
forwardBEI [26] INTEGER ,
backwardBEI [27] INTEGER ,
forwardTagging [28] INTEGER ,
backwardTagging [29] INTEGER ,
-- egressCellrate0 [] INTEGER ,
-- egressCellrate01 [] INTEGER ,
ingressCellrate0 [30] INTEGER ,
-- ingressCellrate01 [] INTEGER ,
ingressCellrate1 [31] INTEGER ,
connectionConfig [32] UserPlaneConnection OPTIONAL
-- causeForTerm [33] CauseForTerm OPTIONAL
}
ChargingIndicator ::= INTEGER
{
origCallRecord (0),
termCallRecord (1)
}
CallingNumber ::= OCTET STRING (SIZE (12))
-- BCD encoded representation of the number.
-- Contains: TypeOfNumber, NumberingPlanInformation
-- and either an E.164 number or a NSAP style of number,
-- including a possible subaddress.
CalledNumber ::= OCTET STRING (SIZE (20))
-- BCD encoded representation of the number.
-- Contains: TypeOfNumber, NumberingPlanInformation,
-- PresentationIndicator, ScreeningIndicator
-- and either an E.164 number or a NSAP style of number,
-- including a possible subaddress.
ConnectedNumber ::= OCTET STRING (SIZE (12))
-- BCD encoded representation of the number.
-- Contains: TypeOfNumber, NumberingPlanInformation,
-- PresentationIndicator, ScreeningIndicator
-- and either an E.164 number or a NSAP style of number,
-- including a possible subaddress.
QoSClass ::= INTEGER
-- Explicit values ToBeDefined,
-- until then: value received in SETUP-msg
--BearerClass ::= INTEGER
--{
-- bcobA (0),
-- bcobC (1),
-- bcobX (2)
--}
TrafficType ::= INTEGER
{
noIndication (0),
abr (1),
cbr (2),
vbr (3),
vbrrt (4),
vbrnrt (5),
ubr (6)
}
--TimingRequirements ::= INTEGER
--{
-- noIndication (0),
-- endToEndRequired (1),
-- endToEndNotRequired (2)
--}
--ClippingSusceptibility ::= INTEGER
--{
-- notSusceptible (0),
-- susceptible (1)
--}
UserPlaneConnection ::= INTEGER
{
pointToPoint (0),
pointToMultipoint (1)
}
--AALParameters ::= INTEGER AAL Type only
--{
-- userDefined (0),
-- aal1 (1),
-- aal2 (2),
-- aal34 (3),
-- aal5 (5)
--}
CellRate ::= INTEGER
-- Value range not less than 2^24.
-- BurstSize ::= ToBeDefined
-- TaggingRequest ::= ToBeDefined
--Timestamp ::= OCTET STRING (SIZE (11))
-- The contents of this field is a compact form of
-- the UTCTime format, containing local time plus
-- an offset to universal time.
-- The compact format is YYMMDDhhmmssdddShhmm, where:
-- YY = year, 00-99, BCD encoded
-- MM = month, 01-12, BCD encoded
-- DD = day, 01-31, BCD encoded
-- hh = hour, 00-23, BCD encoded
-- mm = minute, 00-59, BCD encoded
-- ss = second, 00-59, BCD encoded
-- ddd = millisecond, 000-999, BCD encoded
-- and rightjustified as "0ddd"
-- S = sign, "+"/"-", ASCII encoded
StartDate ::= OCTET STRING (SIZE (8))
StartTime ::= OCTET STRING (SIZE (6))
CallDuration ::= INTEGER
-- Expressed as number of millseconds
Cellrate ::= INTEGER
-- Value range 0-2^64
CauseForTerm ::= INTEGER
{
unsuccessfulCallAttempt (0),
abnormalTermination (1)
}
END
| apache-2.0 | 5,048,592,684,623,571,000 | 24.680751 | 57 | 0.695064 | false |
xiejianying/pjsip_trunk | pjsip-apps/src/pygui/call.py | 26 | 3368 | # $Id$
#
# pjsua Python GUI Demo
#
# Copyright (C)2013 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
if sys.version_info[0] >= 3: # Python 3
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox as msgbox
else:
import Tkinter as tk
import tkMessageBox as msgbox
import ttk
import random
import pjsua2 as pj
import application
import endpoint as ep
# Call class
class Call(pj.Call):
"""
High level Python Call object, derived from pjsua2's Call object.
"""
def __init__(self, acc, peer_uri='', chat=None, call_id = pj.PJSUA_INVALID_ID):
pj.Call.__init__(self, acc, call_id)
self.acc = acc
self.peerUri = peer_uri
self.chat = chat
self.connected = False
self.onhold = False
def onCallState(self, prm):
ci = self.getInfo()
self.connected = ci.state == pj.PJSIP_INV_STATE_CONFIRMED
if self.chat:
self.chat.updateCallState(self, ci)
def onCallMediaState(self, prm):
ci = self.getInfo()
for mi in ci.media:
if mi.type == pj.PJMEDIA_TYPE_AUDIO and \
(mi.status == pj.PJSUA_CALL_MEDIA_ACTIVE or \
mi.status == pj.PJSUA_CALL_MEDIA_REMOTE_HOLD):
m = self.getMedia(mi.index)
am = pj.AudioMedia.typecastFromMedia(m)
# connect ports
ep.Endpoint.instance.audDevManager().getCaptureDevMedia().startTransmit(am)
am.startTransmit(ep.Endpoint.instance.audDevManager().getPlaybackDevMedia())
if mi.status == pj.PJSUA_CALL_MEDIA_REMOTE_HOLD and not self.onhold:
self.chat.addMessage(None, "'%s' sets call onhold" % (self.peerUri))
self.onhold = True
elif mi.status == pj.PJSUA_CALL_MEDIA_ACTIVE and self.onhold:
self.chat.addMessage(None, "'%s' sets call active" % (self.peerUri))
self.onhold = False
if self.chat:
self.chat.updateCallMediaState(self, ci)
def onInstantMessage(self, prm):
        # chat instance should have been initialized
if not self.chat: return
self.chat.addMessage(self.peerUri, prm.msgBody)
self.chat.showWindow()
def onInstantMessageStatus(self, prm):
if prm.code/100 == 2: return
        # chat instance should have been initialized
if not self.chat: return
self.chat.addMessage(None, "Failed sending message to '%s' (%d): %s" % (self.peerUri, prm.code, prm.reason))
def onTypingIndication(self, prm):
        # chat instance should have been initialized
if not self.chat: return
self.chat.setTypingIndication(self.peerUri, prm.isTyping)
def onDtmfDigit(self, prm):
#msgbox.showinfo("pygui", 'Got DTMF:' + prm.digit)
pass
def onCallMediaTransportState(self, prm):
#msgbox.showinfo("pygui", "Media transport state")
pass
if __name__ == '__main__':
application.main()
| gpl-2.0 | 1,756,865,188,619,859,500 | 30.773585 | 110 | 0.706948 | false |
huntxu/fuel-web | nailgun/nailgun/test/unit/test_db_migrations.py | 4 | 1070 | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import alembic
from nailgun.db import dropdb
from nailgun.db.migration import ALEMBIC_CONFIG
from nailgun.test import base
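# Smoke test: the full alembic migration chain must apply cleanly to an empty
# database and roll back to base again.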
class TestDbMigrations(base.BaseTestCase):
def test_clean_downgrade(self):
# We don't have data migration for clusters with vip_type 'ovs'
# so checking migration only for clean DB
dropdb()
alembic.command.upgrade(ALEMBIC_CONFIG, 'head')
alembic.command.downgrade(ALEMBIC_CONFIG, 'base')
| apache-2.0 | -3,443,971,066,139,028,000 | 34.666667 | 78 | 0.720561 | false |
JRock007/boxxy | dist/Boxxy.app/Contents/Resources/lib/python2.7/numpy/lib/stride_tricks.py | 35 | 4228 | """
Utilities that manipulate strides to achieve desirable effects.
An explanation of strides can be found in the "ndarray.rst" file in the
NumPy reference guide.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
__all__ = ['broadcast_arrays']
class DummyArray(object):
"""Dummy object that just exists to hang __array_interface__ dictionaries
and possibly keep alive a reference to a base array.
"""
def __init__(self, interface, base=None):
self.__array_interface__ = interface
self.base = base
def as_strided(x, shape=None, strides=None):
""" Make an ndarray from the given array with the given shape and strides.
"""
interface = dict(x.__array_interface__)
if shape is not None:
interface['shape'] = tuple(shape)
if strides is not None:
interface['strides'] = tuple(strides)
array = np.asarray(DummyArray(interface, base=x))
# Make sure dtype is correct in case of custom dtype
if array.dtype.kind == 'V':
array.dtype = x.dtype
return array
def broadcast_arrays(*args):
"""
Broadcast any number of arrays against each other.
Parameters
----------
`*args` : array_likes
The arrays to broadcast.
Returns
-------
broadcasted : list of arrays
These arrays are views on the original arrays. They are typically
not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location. If you
need to write to the arrays, make copies first.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> y = np.array([[1],[2],[3]])
>>> np.broadcast_arrays(x, y)
[array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]), array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])]
Here is a useful idiom for getting contiguous copies instead of
non-contiguous views.
>>> [np.array(a) for a in np.broadcast_arrays(x, y)]
[array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]), array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])]
"""
args = [np.asarray(_m) for _m in args]
shapes = [x.shape for x in args]
if len(set(shapes)) == 1:
# Common case where nothing needs to be broadcasted.
return args
shapes = [list(s) for s in shapes]
strides = [list(x.strides) for x in args]
nds = [len(s) for s in shapes]
biggest = max(nds)
# Go through each array and prepend dimensions of length 1 to each of
# the shapes in order to make the number of dimensions equal.
for i in range(len(args)):
diff = biggest - nds[i]
if diff > 0:
shapes[i] = [1] * diff + shapes[i]
strides[i] = [0] * diff + strides[i]
    # Check each dimension for compatibility. A dimension length of 1 is
# accepted as compatible with any other length.
common_shape = []
for axis in range(biggest):
lengths = [s[axis] for s in shapes]
unique = set(lengths + [1])
if len(unique) > 2:
# There must be at least two non-1 lengths for this axis.
raise ValueError("shape mismatch: two or more arrays have "
"incompatible dimensions on axis %r." % (axis,))
elif len(unique) == 2:
# There is exactly one non-1 length. The common shape will take
# this value.
unique.remove(1)
new_length = unique.pop()
common_shape.append(new_length)
# For each array, if this axis is being broadcasted from a
# length of 1, then set its stride to 0 so that it repeats its
# data.
for i in range(len(args)):
if shapes[i][axis] == 1:
shapes[i][axis] = new_length
strides[i][axis] = 0
else:
# Every array has a length of 1 on this axis. Strides can be
# left alone as nothing is broadcasted.
common_shape.append(1)
# Construct the new arrays.
broadcasted = [as_strided(x, shape=sh, strides=st) for (x, sh, st) in
zip(args, shapes, strides)]
return broadcasted
| mit | -8,233,565,416,835,632,000 | 33.373984 | 78 | 0.576159 | false |
DirtyUnicorns/android_external_chromium_org | tools/telemetry/telemetry/core/possible_browser.py | 25 | 2112 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class PossibleBrowser(object):
"""A browser that can be controlled.
  Call Create() to launch the browser and begin manipulating it.
"""
def __init__(self, browser_type, target_os, finder_options,
supports_tab_control):
self._browser_type = browser_type
self._target_os = target_os
self._finder_options = finder_options
self._supports_tab_control = supports_tab_control
self._platform = None
self._platform_backend = None
self._archive_path = None
self._append_to_existing_wpr = False
self._make_javascript_deterministic = True
self._credentials_path = None
def __repr__(self):
return 'PossibleBrowser(browser_type=%s)' % self.browser_type
@property
def browser_type(self):
return self._browser_type
@property
def target_os(self):
"""Target OS, the browser will run on."""
return self._target_os
@property
def finder_options(self):
return self._finder_options
@property
def supports_tab_control(self):
return self._supports_tab_control
@property
def platform(self):
self._InitPlatformIfNeeded()
return self._platform
def _InitPlatformIfNeeded(self):
raise NotImplementedError()
def Create(self):
raise NotImplementedError()
def SupportsOptions(self, finder_options):
"""Tests for extension support."""
raise NotImplementedError()
def IsRemote(self):
return False
def RunRemote(self):
pass
def UpdateExecutableIfNeeded(self):
pass
def last_modification_time(self):
return -1
def SetReplayArchivePath(self, archive_path, append_to_existing_wpr,
make_javascript_deterministic):
self._archive_path = archive_path
self._append_to_existing_wpr = append_to_existing_wpr
self._make_javascript_deterministic = make_javascript_deterministic
def SetCredentialsPath(self, credentials_path):
self._credentials_path = credentials_path
| bsd-3-clause | 7,329,895,406,938,758,000 | 25.4 | 72 | 0.696023 | false |
aospx-kitkat/platform_external_chromium_org | chrome/test/pyautolib/bookmark_model.py | 80 | 3206 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""BookmarkModel: python representation of the bookmark model.
Obtain one of these from PyUITestSuite::GetBookmarkModel() call.
"""
import os
import simplejson as json
import sys
class BookmarkModel(object):
def __init__(self, json_string):
"""Initialize a BookmarkModel from a string of json.
The JSON representation is the same as used by the bookmark model
to save to disk.
Args:
json_string: a string of JSON.
"""
self.bookdict = json.loads(json_string)
def BookmarkBar(self):
"""Return the bookmark bar node as a dict."""
return self.bookdict['roots']['bookmark_bar']
def Other(self):
"""Return the 'other' node (e.g. parent of "Other Bookmarks")"""
return self.bookdict['roots']['other']
def NodeCount(self, node=None):
"""Return a count of bookmark nodes, including folders.
The root node itself is included in the count.
Args:
node: the root to start with. If not specified, count all."""
if node == None:
return reduce(lambda x, y: x + y,
[self.NodeCount(x)
for x in self.bookdict['roots'].values()])
total = 1
children = node.get('children', None)
if children:
total = total + reduce(lambda x,y: x + y,
[self.NodeCount(x) for x in children])
return total
def FindByID(self, id, nodes=None):
"""Find the bookmark by id. Return the dict or None.
Args:
id: the id to look for.
nodes: an iterable of nodes to start with. If not specified, search all.
'Not specified' means None, not [].
"""
# Careful; we may get an empty list which is different than not
# having specified a list.
if nodes == None:
nodes = self.bookdict['roots'].values()
# Check each item. If it matches, return. If not, check each of
# their kids.
for node in nodes:
if node['id'] == id:
return node
for child in node.get('children', []):
found_node = self.FindByID(id, [child])
if found_node:
return found_node
# Not found at all.
return None
def FindByTitle(self, title, nodes=None):
"""Return a tuple of all nodes which have |title| in their title.
Args:
title: the title to look for.
node: an iterable of nodes to start with. If not specified, search all.
'Not specified' means None, not [].
"""
# Careful; we may get an empty list which is different than not
# having specified a list.
if nodes == None:
nodes = self.bookdict['roots'].values()
# Check each item. If it matches, return. If not, check each of
# their kids.
results = []
for node in nodes:
node_title = node.get('title', None) or node.get('name', None)
if title == node_title:
results.append(node)
# Note we check everything; unlike the FindByID, we do not stop early.
for child in node.get('children', []):
results += self.FindByTitle(title, [child])
return results
| bsd-3-clause | 8,028,592,411,174,994,000 | 31.383838 | 79 | 0.626949 | false |
icebreaker/dotfiles | gnome/gnome2/gedit/plugins.symlink/snapopen/__init__.py | 1 | 10248 | # VERSION 1.1.5
# Updated by Alexandre da Silva for GMate Project (http://blog.siverti.com.br/gmate)
import gedit, gtk, gtk.glade
import gconf
import gnomevfs
import pygtk
pygtk.require('2.0')
import os, os.path, gobject
# set this to true for gedit versions before 2.16
pre216_version = False
max_result = 50
ui_str="""<ui>
<menubar name="MenuBar">
<menu name="SearchMenu" action="Search">
<placeholder name="SearchOps_7">
<menuitem name="GoToFile" action="GoToFileAction"/>
</placeholder>
</menu>
</menubar>
</ui>
"""
# essential interface
class SnapOpenPluginInstance:
def __init__(self, plugin, window):
self._window = window
self._plugin = plugin
if pre216_version:
self._encoding = gedit.gedit_encoding_get_current()
else:
self._encoding = gedit.encoding_get_current()
self._rootdir = "file://" + os.getcwd()
self._show_hidden = False
        self._liststore = None
self._init_glade()
self._insert_menu()
def deactivate(self):
self._remove_menu()
self._action_group = None
self._window = None
self._plugin = None
        self._liststore = None
def update_ui(self):
return
# MENU STUFF
def _insert_menu(self):
manager = self._window.get_ui_manager()
actions = [
('GoToFileAction', gtk.STOCK_JUMP_TO, _('Go to File...'), '<Ctrl><Alt>O', _("Go to a file with regex search"), self.on_snapopen_action)
]
self._action_group = gtk.ActionGroup("SnapOpenPluginActions")
self._action_group.add_actions(actions, self._window)
manager.insert_action_group(self._action_group, -1)
manager.add_ui_from_string(ui_str)
self._ui_id = manager.new_merge_id()
def _remove_menu(self):
manager = self._window.get_ui_manager()
manager.remove_ui(self._ui_id)
manager.remove_action_group(self._action_group)
# UI DIALOGUES
def _init_glade(self):
self._snapopen_glade = gtk.glade.XML(os.path.dirname(__file__) + "/snapopen.glade")
#setup window
self._snapopen_window = self._snapopen_glade.get_widget("SnapOpenWindow")
self._snapopen_window.connect("key-release-event", self.on_window_key)
self._snapopen_window.set_transient_for(self._window)
#setup buttons
self._snapopen_glade.get_widget("ok_button").connect("clicked", self.open_selected_item)
self._snapopen_glade.get_widget("cancel_button").connect("clicked", lambda a: self._snapopen_window.hide())
#setup entry field
self._glade_entry_name = self._snapopen_glade.get_widget("entry_name")
self._glade_entry_name.connect("key-release-event", self.on_pattern_entry)
#setup list field
self._hit_list = self._snapopen_glade.get_widget("hit_list")
self._hit_list.connect("select-cursor-row", self.on_select_from_list)
self._hit_list.connect("button_press_event", self.on_list_mouse)
self._liststore = gtk.ListStore(str, str, str)
self._hit_list.set_model(self._liststore)
column = gtk.TreeViewColumn("Name" , gtk.CellRendererText(), markup=0)
column.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
column2 = gtk.TreeViewColumn("File", gtk.CellRendererText(), markup=1)
column2.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
self._hit_list.append_column(column)
self._hit_list.append_column(column2)
self._hit_list.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
#mouse event on list
def on_list_mouse(self, widget, event):
if event.type == gtk.gdk._2BUTTON_PRESS:
self.open_selected_item(event)
#key selects from list (passthrough 3 args)
def on_select_from_list(self, widget, event):
self.open_selected_item(event)
#keyboard event on entry field
def on_pattern_entry(self, widget, event):
oldtitle = self._snapopen_window.get_title().replace(" * too many hits", "")
if event.keyval == gtk.keysyms.Return:
self.open_selected_item(event)
return
pattern = self._glade_entry_name.get_text()
pattern = pattern.replace(" ","*")
#modify lines below as needed, these defaults work pretty well
rawpath = self._rootdir.replace("file://", "")
filefilter = " | grep -s -v \"/\.\""
cmd = ""
if self._show_hidden:
filefilter = ""
if len(pattern) > 0:
cmd = "cd " + rawpath + "; find . -maxdepth 10 -depth -type f -iwholename \"*" + pattern + "*\" " + filefilter + " | grep -v \"~$\" | head -n " + repr(max_result + 1) + " | sort"
self._snapopen_window.set_title("Searching ... ")
else:
self._snapopen_window.set_title("Enter pattern ... ")
#print cmd
self._liststore.clear()
maxcount = 0
hits = os.popen(cmd).readlines()
for file in hits:
file = file.rstrip().replace("./", "") #remove cwd prefix
name = os.path.basename(file)
self._liststore.append([self.highlight_pattern(name, pattern), self.highlight_pattern(file, pattern), file])
if maxcount > max_result:
break
maxcount = maxcount + 1
if maxcount > max_result:
oldtitle = oldtitle + " * too many hits"
self._snapopen_window.set_title(oldtitle)
selected = []
self._hit_list.get_selection().selected_foreach(self.foreach, selected)
if len(selected) == 0:
iter = self._liststore.get_iter_first()
if iter != None:
self._hit_list.get_selection().select_iter(iter)
def highlight_pattern(self, path, pattern):
query_list = pattern.lower().split("*")
last_postion = 0
for word in query_list:
location = path.lower().find(word, last_postion)
if location > -1:
last_postion = (location + len(word) + 3)
a_path = list(path)
a_path.insert(location, "<b>")
a_path.insert(location + len(word) + 1, "</b>")
path = "".join(a_path)
return path
#on menuitem activation (incl. shortcut)
def on_snapopen_action(self, *args):
fbroot = self.get_filebrowser_root()
if fbroot != "" and fbroot is not None:
self._rootdir = fbroot
self._snapopen_window.set_title("Snap open (Filebrowser integration)")
else:
eddtroot = self.get_eddt_root()
if eddtroot != "" and eddtroot is not None:
self._rootdir = eddtroot
self._snapopen_window.set_title("Snap open (EDDT integration)")
else:
self._snapopen_window.set_title("Snap open (cwd): " + self._rootdir)
self._snapopen_window.show()
self._glade_entry_name.select_region(0,-1)
self._glade_entry_name.grab_focus()
#on any keyboard event in main window
def on_window_key(self, widget, event):
if event.keyval == gtk.keysyms.Escape:
self._snapopen_window.hide()
def foreach(self, model, path, iter, selected):
selected.append(model.get_value(iter, 2))
#open file in selection and hide window
def open_selected_item(self, event):
selected = []
self._hit_list.get_selection().selected_foreach(self.foreach, selected)
for selected_file in selected:
self._open_file (selected_file)
self._snapopen_window.hide()
#gedit < 2.16 version (get_tab_from_uri)
def old_get_tab_from_uri(self, window, uri):
docs = window.get_documents()
for doc in docs:
if doc.get_uri() == uri:
return gedit.tab_get_from_document(doc)
return None
#opens (or switches to) the given file
def _open_file(self, filename):
uri = self._rootdir + "/" + filename
if pre216_version:
tab = self.old_get_tab_from_uri(self._window, uri)
else:
tab = self._window.get_tab_from_uri(uri)
if tab == None:
tab = self._window.create_tab_from_uri(uri, self._encoding, 0, False, False)
self._window.set_active_tab(tab)
# EDDT integration
def get_eddt_root(self):
base = u'/apps/gedit-2/plugins/eddt'
client = gconf.client_get_default()
client.add_dir(base, gconf.CLIENT_PRELOAD_NONE)
path = os.path.join(base, u'repository')
val = client.get(path)
if val is not None:
return val.get_string()
# FILEBROWSER integration
def get_filebrowser_root(self):
base = u'/apps/gedit-2/plugins/filebrowser/on_load'
client = gconf.client_get_default()
client.add_dir(base, gconf.CLIENT_PRELOAD_NONE)
path = os.path.join(base, u'virtual_root')
val = client.get(path)
if val is not None:
#also read hidden files setting
base = u'/apps/gedit-2/plugins/filebrowser'
client = gconf.client_get_default()
client.add_dir(base, gconf.CLIENT_PRELOAD_NONE)
path = os.path.join(base, u'filter_mode')
try:
fbfilter = client.get(path).get_string()
except AttributeError:
fbfilter = "hidden"
if fbfilter.find("hidden") == -1:
self._show_hidden = True
else:
self._show_hidden = False
return val.get_string()
# STANDARD PLUMMING
class SnapOpenPlugin(gedit.Plugin):
DATA_TAG = "SnapOpenPluginInstance"
def __init__(self):
gedit.Plugin.__init__(self)
def _get_instance(self, window):
return window.get_data(self.DATA_TAG)
def _set_instance(self, window, instance):
window.set_data(self.DATA_TAG, instance)
def activate(self, window):
self._set_instance(window, SnapOpenPluginInstance(self, window))
def deactivate(self, window):
self._get_instance(window).deactivate()
self._set_instance(window, None)
def update_ui(self, window):
self._get_instance(window).update_ui()
| mit | 2,291,364,714,291,323,600 | 36.676471 | 190 | 0.594165 | false |
keerts/home-assistant | homeassistant/components/switch/dlink.py | 11 | 4665 | """
Support for D-link W215 smart switch.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.dlink/
"""
import logging
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_USERNAME)
import homeassistant.helpers.config_validation as cv
from homeassistant.const import TEMP_CELSIUS, STATE_UNKNOWN
REQUIREMENTS = ['https://github.com/LinuxChristian/pyW215/archive/'
'v0.4.zip#pyW215==0.4']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'D-link Smart Plug W215'
DEFAULT_PASSWORD = ''
DEFAULT_USERNAME = 'admin'
CONF_USE_LEGACY_PROTOCOL = 'use_legacy_protocol'
ATTR_CURRENT_CONSUMPTION = 'Current Consumption'
ATTR_TOTAL_CONSUMPTION = 'Total Consumption'
ATTR_TEMPERATURE = 'Temperature'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_USE_LEGACY_PROTOCOL, default=False): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
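# Illustrative configuration.yaml entry (all values are placeholders):
#   switch:
#     - platform: dlink
#       host: 192.168.1.32
#       username: admin
#       password: PASSWORD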
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup a D-Link Smart Plug."""
from pyW215.pyW215 import SmartPlug
host = config.get(CONF_HOST)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
use_legacy_protocol = config.get(CONF_USE_LEGACY_PROTOCOL)
name = config.get(CONF_NAME)
data = SmartPlugData(SmartPlug(host,
password,
username,
use_legacy_protocol))
add_devices([SmartPlugSwitch(hass, data, name)], True)
class SmartPlugSwitch(SwitchDevice):
"""Representation of a D-link Smart Plug switch."""
def __init__(self, hass, data, name):
"""Initialize the switch."""
self.units = hass.config.units
self.data = data
self._name = name
@property
def name(self):
"""Return the name of the Smart Plug, if any."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
try:
ui_temp = self.units.temperature(int(self.data.temperature),
TEMP_CELSIUS)
temperature = "%i %s" % \
(ui_temp, self.units.temperature_unit)
except (ValueError, TypeError):
temperature = STATE_UNKNOWN
try:
current_consumption = "%.2f W" % \
float(self.data.current_consumption)
except ValueError:
current_consumption = STATE_UNKNOWN
try:
total_consumption = "%.1f kWh" % \
float(self.data.total_consumption)
except ValueError:
total_consumption = STATE_UNKNOWN
attrs = {
ATTR_CURRENT_CONSUMPTION: current_consumption,
ATTR_TOTAL_CONSUMPTION: total_consumption,
ATTR_TEMPERATURE: temperature
}
return attrs
@property
def current_power_watt(self):
"""Return the current power usage in Watt."""
try:
return float(self.data.current_consumption)
except ValueError:
return None
@property
def is_on(self):
"""Return true if switch is on."""
return self.data.state == 'ON'
def turn_on(self, **kwargs):
"""Turn the switch on."""
self.data.smartplug.state = 'ON'
def turn_off(self):
"""Turn the switch off."""
self.data.smartplug.state = 'OFF'
def update(self):
"""Get the latest data from the smart plug and updates the states."""
self.data.update()
class SmartPlugData(object):
"""Get the latest data from smart plug."""
def __init__(self, smartplug):
"""Initialize the data object."""
self.smartplug = smartplug
self.state = None
self.temperature = None
self.current_consumption = None
self.total_consumption = None
def update(self):
"""Get the latest data from the smart plug."""
self.state = self.smartplug.state
self.temperature = self.smartplug.temperature
self.current_consumption = self.smartplug.current_consumption
self.total_consumption = self.smartplug.total_consumption
| apache-2.0 | 8,022,456,309,852,401,000 | 30.952055 | 77 | 0.621651 | false |
jbowes/yselect | test/mainmenutests.py | 1 | 2556 | # yselect - An RPM/Yum package handling frontend.
# Copyright (C) 2006 James Bowes <[email protected]>
# Copyright (C) 2006 Devan Goodwin <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import unittest
import settestpath
import mainmenu
class MainMenuModelTests(unittest.TestCase):
def setUp(self):
self.model = mainmenu.MainMenuModel("program name")
def testSelectBorderValues(self):
try:
self.model.select(-1)
self.fail()
except:
# Expected, do nothing.
pass
try:
self.model.select(len(self.model.entries))
self.fail()
except:
pass
try:
self.model.select(0)
except:
self.fail()
try:
self.model.select(len(self.model.entries) - 1)
except:
self.fail()
def testSignals(self):
observer = TestObserver()
self.model.add_observer("quit", observer)
self.model.add_observer("select", observer)
self.model.emit_signal("quit")
self.assertTrue(observer.been_notified)
self.assertEquals("quit", observer.notified_signal)
observer.reset()
self.model.emit_signal("select")
self.assertTrue(observer.been_notified)
self.assertEquals("select", observer.notified_signal)
class TestObserver:
def __init__(self):
self.been_notified = False
self.notified_signal = None
def notify(self, observable, signal_name):
self.been_notified = True
self.notified_signal = signal_name
def reset(self):
self.been_notified = False
self.notified_signal = None
def suite():
return unittest.makeSuite(MainMenuModelTests)
if __name__ == "__main__":
unittest.main(defaultTest="suite")
| gpl-2.0 | -521,749,370,643,694,900 | 28.72093 | 72 | 0.638106 | false |
bealdav/OCB | addons/account/__openerp__.py | 41 | 7694 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'eInvoicing',
'version' : '1.1',
'author' : 'OpenERP SA',
'category' : 'Accounting & Finance',
'description' : """
Accounting and Financial Management.
====================================
Financial and accounting module that covers:
--------------------------------------------
* General Accounting
* Cost/Analytic accounting
* Third party accounting
* Taxes management
* Budgets
* Customer and Supplier Invoices
* Bank statements
* Reconciliation process by partner
Creates a dashboard for accountants that includes:
--------------------------------------------------
* List of Customer Invoices to Approve
* Company Analysis
* Graph of Treasury
General ledger processes are handled through the defined Financial Journals (entry move lines or grouping are maintained through a journal)
for a particular financial year; for the preparation of vouchers there is a module named account_voucher.
""",
'website': 'https://www.odoo.com/page/billing',
'images' : ['images/accounts.jpeg','images/bank_statement.jpeg','images/cash_register.jpeg','images/chart_of_accounts.jpeg','images/customer_invoice.jpeg','images/journal_entries.jpeg'],
'depends' : ['base_setup', 'product', 'analytic', 'board', 'edi', 'report'],
'data': [
'security/account_security.xml',
'security/ir.model.access.csv',
'account_menuitem.xml',
'report/account_invoice_report_view.xml',
'report/account_entries_report_view.xml',
'report/account_treasury_report_view.xml',
'report/account_report_view.xml',
'report/account_analytic_entries_report_view.xml',
'wizard/account_move_bank_reconcile_view.xml',
'wizard/account_use_model_view.xml',
'account_installer.xml',
'wizard/account_period_close_view.xml',
'wizard/account_reconcile_view.xml',
'wizard/account_unreconcile_view.xml',
'wizard/account_statement_from_invoice_view.xml',
'account_view.xml',
'account_report.xml',
'account_financial_report_data.xml',
'wizard/account_report_common_view.xml',
'wizard/account_invoice_refund_view.xml',
'wizard/account_fiscalyear_close_state.xml',
'wizard/account_chart_view.xml',
'wizard/account_tax_chart_view.xml',
'wizard/account_move_line_reconcile_select_view.xml',
'wizard/account_open_closed_fiscalyear_view.xml',
'wizard/account_move_line_unreconcile_select_view.xml',
'wizard/account_vat_view.xml',
'wizard/account_report_print_journal_view.xml',
'wizard/account_report_general_journal_view.xml',
'wizard/account_report_central_journal_view.xml',
'wizard/account_subscription_generate_view.xml',
'wizard/account_fiscalyear_close_view.xml',
'wizard/account_state_open_view.xml',
'wizard/account_journal_select_view.xml',
'wizard/account_change_currency_view.xml',
'wizard/account_validate_move_view.xml',
'wizard/account_report_general_ledger_view.xml',
'wizard/account_invoice_state_view.xml',
'wizard/account_report_partner_balance_view.xml',
'wizard/account_report_account_balance_view.xml',
'wizard/account_report_aged_partner_balance_view.xml',
'wizard/account_report_partner_ledger_view.xml',
'wizard/account_reconcile_partner_process_view.xml',
'wizard/account_automatic_reconcile_view.xml',
'wizard/account_financial_report_view.xml',
'wizard/pos_box.xml',
'project/wizard/project_account_analytic_line_view.xml',
'account_end_fy.xml',
'account_invoice_view.xml',
'data/account_data.xml',
'data/data_account_type.xml',
'data/configurable_account_chart.xml',
'account_invoice_workflow.xml',
'project/project_view.xml',
'project/project_report.xml',
'project/wizard/account_analytic_balance_report_view.xml',
'project/wizard/account_analytic_cost_ledger_view.xml',
'project/wizard/account_analytic_inverted_balance_report.xml',
'project/wizard/account_analytic_journal_report_view.xml',
'project/wizard/account_analytic_cost_ledger_for_journal_report_view.xml',
'project/wizard/account_analytic_chart_view.xml',
'partner_view.xml',
'product_view.xml',
'account_assert_test.xml',
'ir_sequence_view.xml',
'company_view.xml',
'edi/invoice_action_data.xml',
'account_bank_view.xml',
'res_config_view.xml',
'account_pre_install.yml',
'views/report_vat.xml',
'views/report_invoice.xml',
'views/report_trialbalance.xml',
'views/report_centraljournal.xml',
'views/report_overdue.xml',
'views/report_generaljournal.xml',
'views/report_journal.xml',
'views/report_salepurchasejournal.xml',
'views/report_partnerbalance.xml',
'views/report_agedpartnerbalance.xml',
'views/report_partnerledger.xml',
'views/report_partnerledgerother.xml',
'views/report_financial.xml',
'views/report_generalledger.xml',
'project/views/report_analyticbalance.xml',
'project/views/report_analyticjournal.xml',
'project/views/report_analyticcostledgerquantity.xml',
'project/views/report_analyticcostledger.xml',
'project/views/report_invertedanalyticbalance.xml',
'views/account.xml',
],
'qweb' : [
"static/src/xml/account_move_reconciliation.xml",
"static/src/xml/account_move_line_quickadd.xml",
"static/src/xml/account_bank_statement_reconciliation.xml",
],
'demo': [
'demo/account_demo.xml',
'project/project_demo.xml',
'project/analytic_account_demo.xml',
'demo/account_minimal.xml',
'demo/account_invoice_demo.xml',
'demo/account_bank_statement.xml',
'account_unit_test.xml',
],
'test': [
'test/account_test_users.yml',
'test/account_customer_invoice.yml',
'test/account_supplier_invoice.yml',
'test/account_change_currency.yml',
'test/chart_of_account.yml',
'test/account_period_close.yml',
'test/account_use_model.yml',
'test/account_validate_account_move.yml',
'test/test_edi_invoice.yml',
'test/account_report.yml',
'test/account_fiscalyear_close.yml', #last test, as it will definitively close the demo fiscalyear
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,867,018,676,972,128,000 | 42.715909 | 190 | 0.63582 | false |
dllsf/odootest | openerp/addons/test_new_api/tests/test_new_fields.py | 51 | 15560 | #
# test cases for new-style fields
#
from datetime import date, datetime
from collections import defaultdict
from openerp.tests import common
from openerp.exceptions import except_orm
class TestNewFields(common.TransactionCase):
def test_00_basics(self):
""" test accessing new fields """
# find a discussion
discussion = self.env.ref('test_new_api.discussion_0')
# read field as a record attribute or as a record item
self.assertIsInstance(discussion.name, basestring)
self.assertIsInstance(discussion['name'], basestring)
self.assertEqual(discussion['name'], discussion.name)
# read it with method read()
values = discussion.read(['name'])[0]
self.assertEqual(values['name'], discussion.name)
def test_01_basic_get_assertion(self):
""" test item getter """
# field access works on single record
record = self.env.ref('test_new_api.message_0_0')
self.assertEqual(len(record), 1)
ok = record.body
# field access fails on multiple records
records = self.env['test_new_api.message'].search([])
assert len(records) > 1
with self.assertRaises(except_orm):
faulty = records.body
def test_01_basic_set_assertion(self):
""" test item setter """
# field assignment works on single record
record = self.env.ref('test_new_api.message_0_0')
self.assertEqual(len(record), 1)
record.body = 'OK'
# field assignment fails on multiple records
records = self.env['test_new_api.message'].search([])
assert len(records) > 1
with self.assertRaises(except_orm):
records.body = 'Faulty'
def test_10_computed(self):
""" check definition of computed fields """
# by default function fields are not stored and readonly
field = self.env['test_new_api.message']._fields['size']
self.assertFalse(field.store)
self.assertTrue(field.readonly)
field = self.env['test_new_api.message']._fields['name']
self.assertTrue(field.store)
self.assertTrue(field.readonly)
def test_10_non_stored(self):
""" test non-stored fields """
# find messages
for message in self.env['test_new_api.message'].search([]):
# check definition of field
self.assertEqual(message.size, len(message.body or ''))
# check recomputation after record is modified
size = message.size
message.write({'body': (message.body or '') + "!!!"})
self.assertEqual(message.size, size + 3)
# special case: computed field without dependency must be computed
record = self.env['test_new_api.mixed'].create({})
self.assertTrue(record.now)
def test_11_stored(self):
""" test stored fields """
# find the demo discussion
discussion = self.env.ref('test_new_api.discussion_0')
self.assertTrue(len(discussion.messages) > 0)
# check messages
name0 = discussion.name or ""
for message in discussion.messages:
self.assertEqual(message.name, "[%s] %s" % (name0, message.author.name))
# modify discussion name, and check again messages
discussion.name = name1 = 'Talking about stuff...'
for message in discussion.messages:
self.assertEqual(message.name, "[%s] %s" % (name1, message.author.name))
# switch message from discussion, and check again
name2 = 'Another discussion'
discussion2 = discussion.copy({'name': name2})
message2 = discussion.messages[0]
message2.discussion = discussion2
for message in discussion2.messages:
self.assertEqual(message.name, "[%s] %s" % (name2, message.author.name))
def test_12_recursive(self):
""" test recursively dependent fields """
Category = self.env['test_new_api.category']
abel = Category.create({'name': 'Abel'})
beth = Category.create({'name': 'Bethany'})
cath = Category.create({'name': 'Catherine'})
dean = Category.create({'name': 'Dean'})
ewan = Category.create({'name': 'Ewan'})
finn = Category.create({'name': 'Finnley'})
gabe = Category.create({'name': 'Gabriel'})
cath.parent = finn.parent = gabe
abel.parent = beth.parent = cath
dean.parent = ewan.parent = finn
self.assertEqual(abel.display_name, "Gabriel / Catherine / Abel")
self.assertEqual(beth.display_name, "Gabriel / Catherine / Bethany")
self.assertEqual(cath.display_name, "Gabriel / Catherine")
self.assertEqual(dean.display_name, "Gabriel / Finnley / Dean")
self.assertEqual(ewan.display_name, "Gabriel / Finnley / Ewan")
self.assertEqual(finn.display_name, "Gabriel / Finnley")
self.assertEqual(gabe.display_name, "Gabriel")
ewan.parent = cath
self.assertEqual(ewan.display_name, "Gabriel / Catherine / Ewan")
cath.parent = finn
self.assertEqual(ewan.display_name, "Gabriel / Finnley / Catherine / Ewan")
def test_12_cascade(self):
""" test computed field depending on computed field """
message = self.env.ref('test_new_api.message_0_0')
message.invalidate_cache()
double_size = message.double_size
self.assertEqual(double_size, message.size)
def test_13_inverse(self):
""" test inverse computation of fields """
Category = self.env['test_new_api.category']
abel = Category.create({'name': 'Abel'})
beth = Category.create({'name': 'Bethany'})
cath = Category.create({'name': 'Catherine'})
dean = Category.create({'name': 'Dean'})
ewan = Category.create({'name': 'Ewan'})
finn = Category.create({'name': 'Finnley'})
gabe = Category.create({'name': 'Gabriel'})
self.assertEqual(ewan.display_name, "Ewan")
ewan.display_name = "Abel / Bethany / Catherine / Erwan"
self.assertEqual(beth.parent, abel)
self.assertEqual(cath.parent, beth)
self.assertEqual(ewan.parent, cath)
self.assertEqual(ewan.name, "Erwan")
def test_14_search(self):
""" test search on computed fields """
discussion = self.env.ref('test_new_api.discussion_0')
# determine message sizes
sizes = set(message.size for message in discussion.messages)
# search for messages based on their size
for size in sizes:
messages0 = self.env['test_new_api.message'].search(
[('discussion', '=', discussion.id), ('size', '<=', size)])
messages1 = self.env['test_new_api.message'].browse()
for message in discussion.messages:
if message.size <= size:
messages1 += message
self.assertEqual(messages0, messages1)
def test_15_constraint(self):
""" test new-style Python constraints """
discussion = self.env.ref('test_new_api.discussion_0')
# remove oneself from discussion participants: we can no longer create
# messages in discussion
discussion.participants -= self.env.user
with self.assertRaises(Exception):
self.env['test_new_api.message'].create({'discussion': discussion.id, 'body': 'Whatever'})
# put back oneself into discussion participants: now we can create
# messages in discussion
discussion.participants += self.env.user
self.env['test_new_api.message'].create({'discussion': discussion.id, 'body': 'Whatever'})
def test_20_float(self):
""" test float fields """
record = self.env['test_new_api.mixed'].create({})
# assign value, and expect rounding
record.write({'number': 2.4999999999999996})
self.assertEqual(record.number, 2.50)
# same with field setter
record.number = 2.4999999999999996
self.assertEqual(record.number, 2.50)
def test_21_date(self):
""" test date fields """
record = self.env['test_new_api.mixed'].create({})
# one may assign False or None
record.date = None
self.assertFalse(record.date)
# one may assign date and datetime objects
record.date = date(2012, 05, 01)
self.assertEqual(record.date, '2012-05-01')
record.date = datetime(2012, 05, 01, 10, 45, 00)
self.assertEqual(record.date, '2012-05-01')
# one may assign dates in the default format, and it must be checked
record.date = '2012-05-01'
self.assertEqual(record.date, '2012-05-01')
with self.assertRaises(ValueError):
record.date = '12-5-1'
def test_22_selection(self):
""" test selection fields """
record = self.env['test_new_api.mixed'].create({})
# one may assign False or None
record.lang = None
self.assertFalse(record.lang)
# one may assign a value, and it must be checked
for language in self.env['res.lang'].search([]):
record.lang = language.code
with self.assertRaises(ValueError):
record.lang = 'zz_ZZ'
def test_23_relation(self):
""" test relation fields """
demo = self.env.ref('base.user_demo')
message = self.env.ref('test_new_api.message_0_0')
# check environment of record and related records
self.assertEqual(message.env, self.env)
self.assertEqual(message.discussion.env, self.env)
demo_env = self.env(user=demo)
self.assertNotEqual(demo_env, self.env)
# check environment of record and related records
self.assertEqual(message.env, self.env)
self.assertEqual(message.discussion.env, self.env)
# "migrate" message into demo_env, and check again
demo_message = message.sudo(demo)
self.assertEqual(demo_message.env, demo_env)
self.assertEqual(demo_message.discussion.env, demo_env)
# assign record's parent to a record in demo_env
message.discussion = message.discussion.copy({'name': 'Copy'})
# both message and its parent field must be in self.env
self.assertEqual(message.env, self.env)
self.assertEqual(message.discussion.env, self.env)
def test_24_reference(self):
""" test reference fields. """
record = self.env['test_new_api.mixed'].create({})
# one may assign False or None
record.reference = None
self.assertFalse(record.reference)
# one may assign a user or a partner...
record.reference = self.env.user
self.assertEqual(record.reference, self.env.user)
record.reference = self.env.user.partner_id
self.assertEqual(record.reference, self.env.user.partner_id)
# ... but no record from a model that starts with 'ir.'
with self.assertRaises(ValueError):
record.reference = self.env['ir.model'].search([], limit=1)
def test_25_related(self):
""" test related fields. """
message = self.env.ref('test_new_api.message_0_0')
discussion = message.discussion
# by default related fields are not stored
field = message._fields['discussion_name']
self.assertFalse(field.store)
self.assertTrue(field.readonly)
# check value of related field
self.assertEqual(message.discussion_name, discussion.name)
# change discussion name, and check result
discussion.name = 'Foo'
self.assertEqual(message.discussion_name, 'Foo')
# change discussion name via related field, and check result
message.discussion_name = 'Bar'
self.assertEqual(discussion.name, 'Bar')
self.assertEqual(message.discussion_name, 'Bar')
# search on related field, and check result
search_on_related = self.env['test_new_api.message'].search([('discussion_name', '=', 'Bar')])
search_on_regular = self.env['test_new_api.message'].search([('discussion.name', '=', 'Bar')])
self.assertEqual(search_on_related, search_on_regular)
# check that field attributes are copied
message_field = message.fields_get(['discussion_name'])['discussion_name']
discussion_field = discussion.fields_get(['name'])['name']
self.assertEqual(message_field['help'], discussion_field['help'])
def test_26_inherited(self):
""" test inherited fields. """
# a bunch of fields are inherited from res_partner
for user in self.env['res.users'].search([]):
partner = user.partner_id
for field in ('is_company', 'name', 'email', 'country_id'):
self.assertEqual(getattr(user, field), getattr(partner, field))
self.assertEqual(user[field], partner[field])
def test_30_read(self):
""" test computed fields as returned by read(). """
discussion = self.env.ref('test_new_api.discussion_0')
for message in discussion.messages:
display_name = message.display_name
size = message.size
data = message.read(['display_name', 'size'])[0]
self.assertEqual(data['display_name'], display_name)
self.assertEqual(data['size'], size)
def test_40_new(self):
""" test new records. """
discussion = self.env.ref('test_new_api.discussion_0')
# create a new message
message = self.env['test_new_api.message'].new()
self.assertFalse(message.id)
# assign some fields; should have no side effect
message.discussion = discussion
message.body = BODY = "May the Force be with you."
self.assertEqual(message.discussion, discussion)
self.assertEqual(message.body, BODY)
self.assertNotIn(message, discussion.messages)
# check computed values of fields
user = self.env.user
self.assertEqual(message.author, user)
self.assertEqual(message.name, "[%s] %s" % (discussion.name, user.name))
self.assertEqual(message.size, len(BODY))
def test_41_defaults(self):
""" test default values. """
fields = ['discussion', 'body', 'author', 'size']
defaults = self.env['test_new_api.message'].default_get(fields)
self.assertEqual(defaults, {'author': self.env.uid, 'size': 0})
defaults = self.env['test_new_api.mixed'].default_get(['number'])
self.assertEqual(defaults, {'number': 3.14})
class TestMagicFields(common.TransactionCase):
def test_write_date(self):
record = self.env['test_new_api.discussion'].create({'name': 'Booba'})
self.assertEqual(record.create_uid, self.env.user)
self.assertEqual(record.write_uid, self.env.user)
class TestInherits(common.TransactionCase):
def test_inherits(self):
""" Check that a many2one field with delegate=True adds an entry in _inherits """
Talk = self.env['test_new_api.talk']
self.assertEqual(Talk._inherits, {'test_new_api.discussion': 'parent'})
self.assertIn('name', Talk._fields)
self.assertEqual(Talk._fields['name'].related, ('parent', 'name'))
talk = Talk.create({'name': 'Foo'})
discussion = talk.parent
self.assertTrue(discussion)
self.assertEqual(talk._name, 'test_new_api.talk')
self.assertEqual(discussion._name, 'test_new_api.discussion')
self.assertEqual(talk.name, discussion.name)
| agpl-3.0 | -2,057,183,693,867,115,800 | 38.592875 | 102 | 0.622172 | false |
QinerTech/QinerApps | openerp/addons/decimal_precision/decimal_precision.py | 47 | 2671 | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import openerp
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import orm, fields
from openerp.modules.registry import RegistryManager
class decimal_precision(orm.Model):
_name = 'decimal.precision'
_columns = {
'name': fields.char('Usage', select=True, required=True),
'digits': fields.integer('Digits', required=True),
}
_defaults = {
'digits': 2,
}
_sql_constraints = [
('name_uniq', 'unique (name)', """Only one value can be defined for each given usage!"""),
]
@tools.ormcache('application')
def precision_get(self, cr, uid, application):
cr.execute('select digits from decimal_precision where name=%s', (application,))
res = cr.fetchone()
return res[0] if res else 2
def clear_cache(self, cr):
""" Deprecated, use `clear_caches` instead. """
self.clear_caches()
def create(self, cr, uid, data, context=None):
res = super(decimal_precision, self).create(cr, uid, data, context=context)
self.clear_caches()
return res
def unlink(self, cr, uid, ids, context=None):
res = super(decimal_precision, self).unlink(cr, uid, ids, context=context)
self.clear_caches()
return res
def write(self, cr, uid, ids, data, *args, **argv):
res = super(decimal_precision, self).write(cr, uid, ids, data, *args, **argv)
self.clear_caches()
return res
def get_precision(application):
def change_digit(cr):
decimal_precision = openerp.registry(cr.dbname)['decimal.precision']
res = decimal_precision.precision_get(cr, SUPERUSER_ID, application)
return (16, res)
return change_digit
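# Illustrative usage from another module's field definition (the import alias and
# the field/usage names are assumptions for the example, not part of this module):
#     import openerp.addons.decimal_precision as dp
#     'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')),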
class DecimalPrecisionFloat(orm.AbstractModel):
""" Override qweb.field.float to add a `decimal_precision` domain option
and use that instead of the column's own value if it is specified
"""
_inherit = 'ir.qweb.field.float'
def precision(self, cr, uid, field, options=None, context=None):
dp = options and options.get('decimal_precision')
if dp:
return self.pool['decimal.precision'].precision_get(
cr, uid, dp)
return super(DecimalPrecisionFloat, self).precision(
cr, uid, field, options=options, context=context)
class DecimalPrecisionTestModel(orm.Model):
_name = 'decimal.precision.test'
_columns = {
'float': fields.float(),
'float_2': fields.float(digits=(16, 2)),
'float_4': fields.float(digits=(16, 4)),
}
| gpl-3.0 | 7,741,261,408,718,611,000 | 32.3875 | 98 | 0.635717 | false |
psztorc/Truthcoin | lib-other/pylib/consensus/custommath.py | 2 | 4765 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Supporting math for the consensus mechanism.
"""
from __future__ import division
from numpy import *
from numpy.linalg import *
def WeightedMedian(data, weights):
"""Calculate a weighted median.
Args:
data (list or numpy.array): data
weights (list or numpy.array): weights
"""
data, weights = array(data).squeeze(), array(weights).squeeze()
s_data, s_weights = map(array, zip(*sorted(zip(data, weights))))
midpoint = 0.5 * sum(s_weights)
if any(weights > midpoint):
w_median = median(data[weights == max(weights)])
else:
cs_weights = cumsum(s_weights)
idx = where(cs_weights <= midpoint)[0][-1]
if cs_weights[idx] == midpoint:
w_median = mean(s_data[idx:idx+2])
else:
w_median = s_data[idx+1]
return w_median
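# Worked example (hand-checked against the branch logic above):
#     WeightedMedian([7, 1, 2, 4, 10], [1, 1/3, 1/3, 1/3, 1])  ->  7
# The cumulative weight first exceeds the midpoint (1.5) at the value 7.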
def Rescale(UnscaledMatrix, Scales):
"""Forces a matrix of raw (user-supplied) information
(for example, # of House Seats, or DJIA) to conform to
svd-appropriate range.
Practically, this is done by subtracting min and dividing by
scaled-range (which itself is max-min).
"""
    # Calculate multiplicative factors
InvSpan = []
for scale in Scales:
InvSpan.append(1 / float(scale["max"] - scale["min"]))
# Recenter
OutMatrix = ma.copy(UnscaledMatrix)
cols = UnscaledMatrix.shape[1]
for i in range(cols):
OutMatrix[:,i] -= Scales[i]["min"]
# Rescale
NaIndex = isnan(OutMatrix)
OutMatrix[NaIndex] = 0
OutMatrix = dot(OutMatrix, diag(InvSpan))
OutMatrix[NaIndex] = nan
return OutMatrix
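# Minimal sketch of the expected behavior (the two decision scales are assumptions):
#     Rescale(ma.array([[5., 20.]]), [{"min": 0, "max": 10}, {"min": 0, "max": 40}])
#     ->  array([[ 0.5,  0.5]])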
def MeanNa(Vec):
"""Takes masked array, replaces missing values with array mean."""
MM = mean(Vec)
Vec[where(Vec.mask)] = MM
return(Vec)
def GetWeight(Vec, AddMean=0):
"""Takes an array (vector in practice), and returns proportional distance from zero."""
New = abs(Vec) #Absolute Value
if AddMean == 1: #Add the mean to each element of the vector
New = New + mean(New)
if sum(New) == 0: #Catch an error here
New = New + 1
New = New/sum(New) #Normalize
return(New)
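# Example: GetWeight(array([1., 1., 1., 1.]))  ->  array([ 0.25,  0.25,  0.25,  0.25])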
def Catch(X,Tolerance=0):
"""Forces continuous values into bins at 0, .5, and 1"""
if X < (.5-(Tolerance/2)):
return(0)
elif X > (.5+(Tolerance/2)):
return(1)
else:
return(.5)
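# Examples: Catch(.4) -> 0,  Catch(.6) -> 1,  Catch(.5) -> .5
#           Catch(.6, Tolerance=.3) -> .5   (0.6 falls inside the band 0.35..0.65)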
def Influence(Weight):
"""Takes a normalized Vector (one that sums to 1), and computes relative strength of the indicators."""
N = len(Weight)
Expected = [[1/N]]*N
Out = []
for i in range(1, N):
Out.append(Weight[i]/Expected[i])
return(Out)
def ReWeight(Vec):
"""Get the relative influence of numbers, treat NaN as influence-less."""
Out = Vec
Exclude = isnan(Vec)
Out[Exclude] = 0 #set missing to 0
Out = Out / sum(Out) #normalize
return(Out)
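# Example: ReWeight(array([1., nan, 1.]))  ->  array([ 0.5,  0. ,  0.5])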
def ReverseMatrix(Mat): #technically an array now, sorry about the terminology confusion
return( (Mat-1) * -1 )
def DemocracyCoin(Mat):
"""For testing, easier to assume uniform coin distribution."""
# print("NOTE: No coin distribution given, assuming democracy [one row, one vote].")
Rep = GetWeight( array([[1]]*len(Mat) )) #Uniform weights if none were provided.
return( Rep )
def WeightedCov(Mat,Rep=-1):
"""Takes 1] a masked array, and 2] an [n x 1] dimentional array of weights, and computes the weighted covariance
matrix and center of a given array.
Taken from http://stats.stackexchange.com/questions/61225/correct-equation-for-weighted-unbiased-sample-covariance"""
if type(Rep) is int:
Rep = DemocracyCoin(Mat)
Coins = ma.copy(Rep)
for i in range(len(Rep)):
Coins[i] = (int( (Rep[i] * 1000000)[0] ))
Mean = ma.average(Mat, axis=0, weights=hstack(Coins)) # Computing the weighted sample mean (fast, efficient and precise)
XM = matrix( Mat-Mean ) # xm = X diff to mean
sigma2 = matrix( 1/(sum(Coins)-1) * ma.multiply(XM, Coins).T.dot(XM) ); # Compute the unbiased weighted sample covariance
return( {'Cov':array(sigma2), 'Center':array(XM) } )
def WeightedPrinComp(Mat,Rep=-1):
"""Takes a matrix and row-weights and manually computes the statistical procedure known as Principal Components Analysis (PCA)
This version of the procedure is so basic, that it can also be thought of as merely a singular-value decomposition on a weighted covariance matrix."""
wCVM = WeightedCov(Mat,Rep)
SVD = svd(wCVM['Cov'])
L = SVD[0].T[0] #First loading
S = dot(wCVM['Center'],SVD[0]).T[0] #First Score
return(L,S)
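# Illustrative usage (the 3x2 vote matrix is an assumption; reputation defaults to
# the uniform DemocracyCoin distribution):
#     Votes = array([[1, 1], [1, 0], [0, 0]])
#     Loadings, Scores = WeightedPrinComp(Votes)
# Scores holds each row's coordinate along the first principal component;
# Loadings holds the first-component weight of each column.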
if __name__ == "__main__":
pass
| mit | 695,838,901,312,724,700 | 30.979866 | 160 | 0.623295 | false |
felipenaselva/repo.felipe | plugin.video.uwc/tubepornclassic.py | 1 | 3359 | '''
Ultimate Whitecream
Copyright (C) 2015 mortael
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib, re
import xbmc, xbmcplugin, xbmcgui, xbmcaddon
import utils
progress = utils.progress
def Main():
utils.addDir('[COLOR hotpink]Categories[/COLOR]','http://www.tubepornclassic.com/categories/', 363, '', '')
utils.addDir('[COLOR hotpink]Top Rated[/COLOR]','http://www.tubepornclassic.com/top-rated/', 361, '', '')
utils.addDir('[COLOR hotpink]Most Viewed[/COLOR]','http://www.tubepornclassic.com/most-popular/', 361, '', '')
utils.addDir('[COLOR hotpink]Search[/COLOR]','http://www.tubepornclassic.com/search/', 364, '', '')
List('http://www.tubepornclassic.com/latest-updates/')
xbmcplugin.endOfDirectory(utils.addon_handle)
def List(url):
listhtml = utils.getHtml(url, '')
match = re.compile('<a href="([^"]+)" title="([^"]+)".*?original="([^"]+)".*?duration">([^<]+)<', re.DOTALL | re.IGNORECASE).findall(listhtml)
for videopage, name, img, duration in match:
name = utils.cleantext(name)
name = name + " [COLOR deeppink]" + duration + "[/COLOR]"
utils.addDownLink(name, videopage, 362, img, '')
try:
nextp = re.compile('<a href="([^"]+)"[^>]+>Next', re.DOTALL | re.IGNORECASE).findall(listhtml)
utils.addDir('Next Page', 'http://www.tubepornclassic.com/' + nextp[0], 361,'')
except: pass
xbmcplugin.endOfDirectory(utils.addon_handle)
def Search(url, keyword=None):
searchUrl = url
if not keyword:
utils.searchDir(url, 364)
else:
title = keyword.replace(' ','%20')
searchUrl = searchUrl + title + "/"
print "Searching URL: " + searchUrl
List(searchUrl)
def Cat(url):
listhtml = utils.getHtml(url, '')
match = re.compile('<a class="item" href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)".*?videos">([^<]+)<', re.DOTALL | re.IGNORECASE).findall(listhtml)
for catpage, name, img, videos in match:
name = utils.cleantext(name) + " [COLOR deeppink]" + videos + "[/COLOR]"
utils.addDir(name, catpage, 361, img, '')
xbmcplugin.endOfDirectory(utils.addon_handle)
def Playvid(url, name, download=None):
videopage = utils.getHtml(url, '')
videourl = re.compile("video_url: '([^']+)", re.DOTALL | re.IGNORECASE).findall(videopage)[0]
videourl = utils.getVideoLink(videourl, url)
if download == 1:
utils.downloadVideo(videourl, name)
else:
iconimage = xbmc.getInfoImage("ListItem.Thumb")
listitem = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
listitem.setInfo('video', {'Title': name, 'Genre': 'Porn'})
xbmc.Player().play(videourl, listitem) | gpl-2.0 | 877,605,947,504,740,400 | 41.531646 | 162 | 0.648407 | false |
ebsaral/django-rest-framework | rest_framework/permissions.py | 71 | 6444 | """
Provides a set of pluggable permission policies.
"""
from __future__ import unicode_literals
from django.http import Http404
from rest_framework.compat import get_model_name
SAFE_METHODS = ('GET', 'HEAD', 'OPTIONS')
class BasePermission(object):
"""
A base class from which all permission classes should inherit.
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return True
def has_object_permission(self, request, view, obj):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return True
class AllowAny(BasePermission):
"""
Allow any access.
This isn't strictly required, since you could use an empty
permission_classes list, but it's useful because it makes the intention
more explicit.
"""
def has_permission(self, request, view):
return True
class IsAuthenticated(BasePermission):
"""
Allows access only to authenticated users.
"""
def has_permission(self, request, view):
return request.user and request.user.is_authenticated()
class IsAdminUser(BasePermission):
"""
Allows access only to admin users.
"""
def has_permission(self, request, view):
return request.user and request.user.is_staff
class IsAuthenticatedOrReadOnly(BasePermission):
"""
The request is authenticated as a user, or is a read-only request.
"""
def has_permission(self, request, view):
return (
request.method in SAFE_METHODS or
request.user and
request.user.is_authenticated()
)
class DjangoModelPermissions(BasePermission):
"""
The request is authenticated using `django.contrib.auth` permissions.
See: https://docs.djangoproject.com/en/dev/topics/auth/#permissions
It ensures that the user is authenticated, and has the appropriate
`add`/`change`/`delete` permissions on the model.
This permission can only be applied against view classes that
provide a `.queryset` attribute.
"""
# Map methods into required permission codes.
# Override this if you need to also provide 'view' permissions,
# or if you want to provide custom permission codes.
perms_map = {
'GET': [],
'OPTIONS': [],
'HEAD': [],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
authenticated_users_only = True
def get_required_permissions(self, method, model_cls):
"""
Given a model and an HTTP method, return the list of permission
codes that the user is required to have.
"""
kwargs = {
'app_label': model_cls._meta.app_label,
'model_name': get_model_name(model_cls)
}
return [perm % kwargs for perm in self.perms_map[method]]
def has_permission(self, request, view):
# Workaround to ensure DjangoModelPermissions are not applied
# to the root view when using DefaultRouter.
if getattr(view, '_ignore_model_permissions', False):
return True
try:
queryset = view.get_queryset()
except AttributeError:
queryset = getattr(view, 'queryset', None)
assert queryset is not None, (
'Cannot apply DjangoModelPermissions on a view that '
'does not have `.queryset` property or overrides the '
'`.get_queryset()` method.')
perms = self.get_required_permissions(request.method, queryset.model)
return (
request.user and
(request.user.is_authenticated() or not self.authenticated_users_only) and
request.user.has_perms(perms)
)
class DjangoModelPermissionsOrAnonReadOnly(DjangoModelPermissions):
"""
Similar to DjangoModelPermissions, except that anonymous users are
allowed read-only access.
"""
authenticated_users_only = False
class DjangoObjectPermissions(DjangoModelPermissions):
"""
The request is authenticated using Django's object-level permissions.
It requires an object-permissions-enabled backend, such as Django Guardian.
It ensures that the user is authenticated, and has the appropriate
`add`/`change`/`delete` permissions on the object using .has_perms.
This permission can only be applied against view classes that
provide a `.queryset` attribute.
"""
perms_map = {
'GET': [],
'OPTIONS': [],
'HEAD': [],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
def get_required_object_permissions(self, method, model_cls):
kwargs = {
'app_label': model_cls._meta.app_label,
'model_name': get_model_name(model_cls)
}
return [perm % kwargs for perm in self.perms_map[method]]
def has_object_permission(self, request, view, obj):
try:
queryset = view.get_queryset()
except AttributeError:
queryset = getattr(view, 'queryset', None)
assert queryset is not None, (
'Cannot apply DjangoObjectPermissions on a view that '
'does not have `.queryset` property or overrides the '
'`.get_queryset()` method.')
model_cls = queryset.model
user = request.user
perms = self.get_required_object_permissions(request.method, model_cls)
if not user.has_perms(perms, obj):
# If the user does not have permissions we need to determine if
# they have read permissions to see 403, or not, and simply see
# a 404 response.
if request.method in SAFE_METHODS:
# Read permissions already checked and failed, no need
# to make another lookup.
raise Http404
read_perms = self.get_required_object_permissions('GET', model_cls)
if not user.has_perms(read_perms, obj):
raise Http404
# Has read permissions.
return False
return True
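# Illustrative usage in a view (the viewset, queryset and serializer names are
# assumptions, not part of this module):
#     class ExampleViewSet(viewsets.ModelViewSet):
#         queryset = Example.objects.all()
#         serializer_class = ExampleSerializer
#         permission_classes = [IsAuthenticated, DjangoObjectPermissions]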
| bsd-2-clause | 204,754,534,900,979,840 | 30.281553 | 86 | 0.619957 | false |
jcoady9/beets | test/testall.py | 1 | 1320 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
import os
import re
import sys
from test._common import unittest
pkgpath = os.path.dirname(__file__) or '.'
sys.path.append(pkgpath)
os.chdir(pkgpath)
def suite():
s = unittest.TestSuite()
# Get the suite() of every module in this directory beginning with
# "test_".
for fname in os.listdir(pkgpath):
match = re.match(r'(test_\S+)\.py$', fname)
if match:
modname = match.group(1)
s.addTest(__import__(modname).suite())
return s
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| mit | 5,019,083,917,032,204,000 | 29 | 71 | 0.696212 | false |
kkdd/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_with.py | 53 | 23715 | #!/usr/bin/env python
"""Unit tests for the with statement specified in PEP 343."""
__author__ = "Mike Bland"
__email__ = "mbland at acm dot org"
import sys
import unittest
from collections import deque
from contextlib import GeneratorContextManager, contextmanager
from test.test_support import run_unittest
class MockContextManager(GeneratorContextManager):
def __init__(self, gen):
GeneratorContextManager.__init__(self, gen)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return GeneratorContextManager.__enter__(self)
def __exit__(self, type, value, traceback):
self.exit_called = True
self.exit_args = (type, value, traceback)
return GeneratorContextManager.__exit__(self, type,
value, traceback)
def mock_contextmanager(func):
def helper(*args, **kwds):
return MockContextManager(func(*args, **kwds))
return helper
class MockResource(object):
def __init__(self):
self.yielded = False
self.stopped = False
@mock_contextmanager
def mock_contextmanager_generator():
mock = MockResource()
try:
mock.yielded = True
yield mock
finally:
mock.stopped = True
class Nested(object):
def __init__(self, *managers):
self.managers = managers
self.entered = None
def __enter__(self):
if self.entered is not None:
raise RuntimeError("Context is not reentrant")
self.entered = deque()
vars = []
try:
for mgr in self.managers:
vars.append(mgr.__enter__())
self.entered.appendleft(mgr)
except:
if not self.__exit__(*sys.exc_info()):
raise
return vars
def __exit__(self, *exc_info):
# Behave like nested with statements
# first in, last out
# New exceptions override old ones
ex = exc_info
for mgr in self.entered:
try:
if mgr.__exit__(*ex):
ex = (None, None, None)
except:
ex = sys.exc_info()
self.entered = None
if ex is not exc_info:
raise ex[0], ex[1], ex[2]
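# Illustrative use of the helper above (mirrors the deprecated contextlib.nested):
#     with Nested(cm_a, cm_b) as (a, b):
#         ...  # a and b are the values returned by each manager's __enter__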
class MockNested(Nested):
def __init__(self, *managers):
Nested.__init__(self, *managers)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return Nested.__enter__(self)
def __exit__(self, *exc_info):
self.exit_called = True
self.exit_args = exc_info
return Nested.__exit__(self, *exc_info)
class FailureTestCase(unittest.TestCase):
def testNameError(self):
def fooNotDeclared():
with foo: pass
self.assertRaises(NameError, fooNotDeclared)
def testEnterAttributeError(self):
class LacksEnter(object):
def __exit__(self, type, value, traceback):
pass
def fooLacksEnter():
foo = LacksEnter()
with foo: pass
self.assertRaises(AttributeError, fooLacksEnter)
def testExitAttributeError(self):
class LacksExit(object):
def __enter__(self):
pass
def fooLacksExit():
foo = LacksExit()
with foo: pass
self.assertRaises(AttributeError, fooLacksExit)
def assertRaisesSyntaxError(self, codestr):
def shouldRaiseSyntaxError(s):
compile(s, '', 'single')
self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
def testAssignmentToNoneError(self):
self.assertRaisesSyntaxError('with mock as None:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None):\n'
' pass')
def testAssignmentToEmptyTupleError(self):
self.assertRaisesSyntaxError(
'with mock as ():\n'
' pass')
def testAssignmentToTupleOnlyContainingNoneError(self):
self.assertRaisesSyntaxError('with mock as None,:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None,):\n'
' pass')
def testAssignmentToTupleContainingNoneError(self):
self.assertRaisesSyntaxError(
'with mock as (foo, None, bar):\n'
' pass')
def testEnterThrows(self):
class EnterThrows(object):
def __enter__(self):
raise RuntimeError("Enter threw")
def __exit__(self, *args):
pass
def shouldThrow():
ct = EnterThrows()
self.foo = None
with ct as self.foo:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertEqual(self.foo, None)
def testExitThrows(self):
class ExitThrows(object):
def __enter__(self):
return
def __exit__(self, *args):
raise RuntimeError(42)
def shouldThrow():
with ExitThrows():
pass
self.assertRaises(RuntimeError, shouldThrow)
class ContextmanagerAssertionMixin(object):
TEST_EXCEPTION = RuntimeError("test exception")
def assertInWithManagerInvariants(self, mock_manager):
self.assertTrue(mock_manager.enter_called)
self.assertFalse(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, None)
def assertAfterWithManagerInvariants(self, mock_manager, exit_args):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, exit_args)
def assertAfterWithManagerInvariantsNoError(self, mock_manager):
self.assertAfterWithManagerInvariants(mock_manager,
(None, None, None))
def assertInWithGeneratorInvariants(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertFalse(mock_generator.stopped)
def assertAfterWithGeneratorInvariantsNoError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
def raiseTestException(self):
raise self.TEST_EXCEPTION
def assertAfterWithManagerInvariantsWithError(self, mock_manager):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args[0], RuntimeError)
self.assertEqual(mock_manager.exit_args[1], self.TEST_EXCEPTION)
def assertAfterWithGeneratorInvariantsWithError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
class NonexceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin):
def testInlineGeneratorSyntax(self):
with mock_contextmanager_generator():
pass
def testUnboundGenerator(self):
mock = mock_contextmanager_generator()
with mock:
pass
self.assertAfterWithManagerInvariantsNoError(mock)
def testInlineGeneratorBoundSyntax(self):
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
# FIXME: In the future, we'll try to keep the bound names from leaking
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToExistingVariable(self):
foo = None
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToDottedVariable(self):
with mock_contextmanager_generator() as self.foo:
self.assertInWithGeneratorInvariants(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.foo)
def testBoundGenerator(self):
mock = mock_contextmanager_generator()
with mock as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertInWithManagerInvariants(mock)
self.assertAfterWithGeneratorInvariantsNoError(foo)
self.assertAfterWithManagerInvariantsNoError(mock)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
with mock_a as foo:
mock_b = mock_contextmanager_generator()
with mock_b as bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(foo)
self.assertInWithGeneratorInvariants(bar)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsNoError(bar)
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithManagerInvariantsNoError(mock_a)
self.assertAfterWithGeneratorInvariantsNoError(foo)
class NestedNonexceptionalTestCase(unittest.TestCase,
ContextmanagerAssertionMixin):
def testSingleArgInlineGeneratorSyntax(self):
with Nested(mock_contextmanager_generator()):
pass
def testSingleArgUnbound(self):
mock_contextmanager = mock_contextmanager_generator()
mock_nested = MockNested(mock_contextmanager)
with mock_nested:
self.assertInWithManagerInvariants(mock_contextmanager)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(mock_contextmanager)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testSingleArgBoundToNonTuple(self):
m = mock_contextmanager_generator()
# This will bind all the arguments to nested() into a single list
# assigned to foo.
with Nested(m) as foo:
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToSingleElementParenthesizedList(self):
m = mock_contextmanager_generator()
# This will bind all the arguments to nested() into a single list
# assigned to foo.
with Nested(m) as (foo):
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToMultipleElementTupleError(self):
def shouldThrowValueError():
with Nested(mock_contextmanager_generator()) as (foo, bar):
pass
self.assertRaises(ValueError, shouldThrowValueError)
def testSingleArgUnbound(self):
mock_contextmanager = mock_contextmanager_generator()
mock_nested = MockNested(mock_contextmanager)
with mock_nested:
self.assertInWithManagerInvariants(mock_contextmanager)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(mock_contextmanager)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgUnbound(self):
m = mock_contextmanager_generator()
n = mock_contextmanager_generator()
o = mock_contextmanager_generator()
mock_nested = MockNested(m, n, o)
with mock_nested:
self.assertInWithManagerInvariants(m)
self.assertInWithManagerInvariants(n)
self.assertInWithManagerInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(m)
self.assertAfterWithManagerInvariantsNoError(n)
self.assertAfterWithManagerInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgBound(self):
mock_nested = MockNested(mock_contextmanager_generator(),
mock_contextmanager_generator(), mock_contextmanager_generator())
with mock_nested as (m, n, o):
self.assertInWithGeneratorInvariants(m)
self.assertInWithGeneratorInvariants(n)
self.assertInWithGeneratorInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithGeneratorInvariantsNoError(m)
self.assertAfterWithGeneratorInvariantsNoError(n)
self.assertAfterWithGeneratorInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
class ExceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin):
def testSingleResource(self):
cm = mock_contextmanager_generator()
def shouldThrow():
with cm as self.resource:
self.assertInWithManagerInvariants(cm)
self.assertInWithGeneratorInvariants(self.resource)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm)
self.assertAfterWithGeneratorInvariantsWithError(self.resource)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsWithError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsWithError(self.bar)
def testMultipleResourcesInSingleStatement(self):
cm_a = mock_contextmanager_generator()
cm_b = mock_contextmanager_generator()
mock_nested = MockNested(cm_a, cm_b)
def shouldThrow():
with mock_nested as (self.resource_a, self.resource_b):
self.assertInWithManagerInvariants(cm_a)
self.assertInWithManagerInvariants(cm_b)
self.assertInWithManagerInvariants(mock_nested)
self.assertInWithGeneratorInvariants(self.resource_a)
self.assertInWithGeneratorInvariants(self.resource_b)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm_a)
self.assertAfterWithManagerInvariantsWithError(cm_b)
self.assertAfterWithManagerInvariantsWithError(mock_nested)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_a)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_b)
def testNestedExceptionBeforeInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
self.bar = None
def shouldThrow():
with mock_a as self.foo:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(self.foo)
self.raiseTestException()
with mock_b as self.bar:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
# The inner statement stuff should never have been touched
self.assertEqual(self.bar, None)
self.assertFalse(mock_b.enter_called)
self.assertFalse(mock_b.exit_called)
self.assertEqual(mock_b.exit_args, None)
def testNestedExceptionAfterInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.bar)
def testRaisedStopIteration1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration2(self):
# From bug 1462485
class cm(object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration3(self):
# Another variant where the exception hasn't been instantiated
# From bug 1705170
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise iter([]).next()
self.assertRaises(StopIteration, shouldThrow)
def testRaisedGeneratorExit1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testRaisedGeneratorExit2(self):
# From bug 1462485
class cm (object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testErrorsInBool(self):
# issue4589: __exit__ return code may raise an exception
# when looking at its truth value.
class cm(object):
def __init__(self, bool_conversion):
class Bool:
def __nonzero__(self):
return bool_conversion()
self.exit_result = Bool()
def __enter__(self):
return 3
def __exit__(self, a, b, c):
return self.exit_result
def trueAsBool():
with cm(lambda: True):
self.fail("Should NOT see this")
trueAsBool()
def falseAsBool():
with cm(lambda: False):
self.fail("Should raise")
self.assertRaises(AssertionError, falseAsBool)
def failAsBool():
with cm(lambda: 1//0):
self.fail("Should NOT see this")
self.assertRaises(ZeroDivisionError, failAsBool)
class NonLocalFlowControlTestCase(unittest.TestCase):
def testWithBreak(self):
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
break
counter += 100 # Not reached
self.assertEqual(counter, 11)
def testWithContinue(self):
counter = 0
while True:
counter += 1
if counter > 2:
break
with mock_contextmanager_generator():
counter += 10
continue
counter += 100 # Not reached
self.assertEqual(counter, 12)
def testWithReturn(self):
def foo():
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
return counter
counter += 100 # Not reached
self.assertEqual(foo(), 11)
def testWithYield(self):
def gen():
with mock_contextmanager_generator():
yield 12
yield 13
x = list(gen())
self.assertEqual(x, [12, 13])
def testWithRaise(self):
counter = 0
try:
counter += 1
with mock_contextmanager_generator():
counter += 10
raise RuntimeError
counter += 100 # Not reached
except RuntimeError:
self.assertEqual(counter, 11)
else:
self.fail("Didn't raise RuntimeError")
class AssignmentTargetTestCase(unittest.TestCase):
def testSingleComplexTarget(self):
targets = {1: [0, 1, 2]}
with mock_contextmanager_generator() as targets[1][0]:
self.assertEqual(targets.keys(), [1])
self.assertEqual(targets[1][0].__class__, MockResource)
with mock_contextmanager_generator() as targets.values()[0][1]:
self.assertEqual(targets.keys(), [1])
self.assertEqual(targets[1][1].__class__, MockResource)
with mock_contextmanager_generator() as targets[2]:
keys = targets.keys()
keys.sort()
self.assertEqual(keys, [1, 2])
class C: pass
blah = C()
with mock_contextmanager_generator() as blah.foo:
self.assertEqual(hasattr(blah, "foo"), True)
def testMultipleComplexTargets(self):
class C:
def __enter__(self): return 1, 2, 3
def __exit__(self, t, v, tb): pass
targets = {1: [0, 1, 2]}
with C() as (targets[1][0], targets[1][1], targets[1][2]):
self.assertEqual(targets, {1: [1, 2, 3]})
with C() as (targets.values()[0][2], targets.values()[0][1], targets.values()[0][0]):
self.assertEqual(targets, {1: [3, 2, 1]})
with C() as (targets[1], targets[2], targets[3]):
self.assertEqual(targets, {1: 1, 2: 2, 3: 3})
class B: pass
blah = B()
with C() as (blah.one, blah.two, blah.three):
self.assertEqual(blah.one, 1)
self.assertEqual(blah.two, 2)
self.assertEqual(blah.three, 3)
class ExitSwallowsExceptionTestCase(unittest.TestCase):
def testExitTrueSwallowsException(self):
class AfricanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return True
try:
with AfricanSwallow():
1/0
except ZeroDivisionError:
self.fail("ZeroDivisionError should have been swallowed")
def testExitFalseDoesntSwallowException(self):
class EuropeanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return False
try:
with EuropeanSwallow():
1/0
except ZeroDivisionError:
pass
else:
self.fail("ZeroDivisionError should have been raised")
def test_main():
run_unittest(FailureTestCase, NonexceptionalTestCase,
NestedNonexceptionalTestCase, ExceptionalTestCase,
NonLocalFlowControlTestCase,
AssignmentTargetTestCase,
ExitSwallowsExceptionTestCase)
if __name__ == '__main__':
test_main()
| apache-2.0 | 333,787,565,870,434,100 | 34.608108 | 93 | 0.62315 | false |
BaconPancakes/valor | lib/youtube_dl/extractor/mitele.py | 9 | 8002 | # coding: utf-8
from __future__ import unicode_literals
import uuid
from .common import InfoExtractor
from .ooyala import OoyalaIE
from ..compat import (
compat_str,
compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
int_or_none,
extract_attributes,
determine_ext,
smuggle_url,
parse_duration,
)
class MiTeleBaseIE(InfoExtractor):
def _get_player_info(self, url, webpage):
player_data = extract_attributes(self._search_regex(
r'(?s)(<ms-video-player.+?</ms-video-player>)',
webpage, 'ms video player'))
video_id = player_data['data-media-id']
if player_data.get('data-cms-id') == 'ooyala':
return self.url_result(
'ooyala:%s' % video_id, ie=OoyalaIE.ie_key(), video_id=video_id)
config_url = compat_urlparse.urljoin(url, player_data['data-config'])
config = self._download_json(
config_url, video_id, 'Downloading config JSON')
mmc_url = config['services']['mmc']
duration = None
formats = []
for m_url in (mmc_url, mmc_url.replace('/flash.json', '/html5.json')):
mmc = self._download_json(
m_url, video_id, 'Downloading mmc JSON')
if not duration:
duration = int_or_none(mmc.get('duration'))
for location in mmc['locations']:
gat = self._proto_relative_url(location.get('gat'), 'http:')
bas = location.get('bas')
loc = location.get('loc')
ogn = location.get('ogn')
if None in (gat, bas, loc, ogn):
continue
token_data = {
'bas': bas,
'icd': loc,
'ogn': ogn,
'sta': '0',
}
media = self._download_json(
'%s/?%s' % (gat, compat_urllib_parse_urlencode(token_data)),
video_id, 'Downloading %s JSON' % location['loc'])
file_ = media.get('file')
if not file_:
continue
ext = determine_ext(file_)
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
file_ + '&hdcore=3.2.0&plugin=aasp-3.2.0.77.18',
video_id, f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
file_, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'thumbnail': player_data.get('data-poster') or config.get('poster', {}).get('imageUrl'),
'duration': duration,
}
class MiTeleIE(InfoExtractor):
IE_DESC = 'mitele.es'
_VALID_URL = r'https?://(?:www\.)?mitele\.es/(?:[^/]+/)+(?P<id>[^/]+)/player'
_TESTS = [{
'url': 'http://www.mitele.es/programas-tv/diario-de/57b0dfb9c715da65618b4afa/player',
'info_dict': {
'id': '57b0dfb9c715da65618b4afa',
'ext': 'mp4',
'title': 'Tor, la web invisible',
'description': 'md5:3b6fce7eaa41b2d97358726378d9369f',
'series': 'Diario de',
'season': 'La redacción',
'season_number': 14,
'season_id': 'diario_de_t14_11981',
'episode': 'Programa 144',
'episode_number': 3,
'thumbnail': r're:(?i)^https?://.*\.jpg$',
'duration': 2913,
},
'add_ie': ['Ooyala'],
}, {
# no explicit title
'url': 'http://www.mitele.es/programas-tv/cuarto-milenio/57b0de3dc915da14058b4876/player',
'info_dict': {
'id': '57b0de3dc915da14058b4876',
'ext': 'mp4',
'title': 'Cuarto Milenio Temporada 6 Programa 226',
'description': 'md5:5ff132013f0cd968ffbf1f5f3538a65f',
'series': 'Cuarto Milenio',
'season': 'Temporada 6',
'season_number': 6,
'season_id': 'cuarto_milenio_t06_12715',
'episode': 'Programa 226',
'episode_number': 24,
'thumbnail': r're:(?i)^https?://.*\.jpg$',
'duration': 7313,
},
'params': {
'skip_download': True,
},
'add_ie': ['Ooyala'],
}, {
'url': 'http://www.mitele.es/series-online/la-que-se-avecina/57aac5c1c915da951a8b45ed/player',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
gigya_url = self._search_regex(
r'<gigya-api>[^>]*</gigya-api>[^>]*<script\s+src="([^"]*)">[^>]*</script>',
webpage, 'gigya', default=None)
gigya_sc = self._download_webpage(
compat_urlparse.urljoin('http://www.mitele.es/', gigya_url),
video_id, 'Downloading gigya script')
        # Get an appKey/uuid for getting the session key
appKey_var = self._search_regex(
r'value\s*\(\s*["\']appGridApplicationKey["\']\s*,\s*([0-9a-f]+)',
gigya_sc, 'appKey variable')
appKey = self._search_regex(
r'var\s+%s\s*=\s*["\']([0-9a-f]+)' % appKey_var, gigya_sc, 'appKey')
session_json = self._download_json(
'https://appgrid-api.cloud.accedo.tv/session',
video_id, 'Downloading session keys', query={
'appKey': appKey,
'uuid': compat_str(uuid.uuid4()),
})
paths = self._download_json(
'https://appgrid-api.cloud.accedo.tv/metadata/general_configuration,%20web_configuration',
video_id, 'Downloading paths JSON',
query={'sessionKey': compat_str(session_json['sessionKey'])})
ooyala_s = paths['general_configuration']['api_configuration']['ooyala_search']
source = self._download_json(
'http://%s%s%s/docs/%s' % (
ooyala_s['base_url'], ooyala_s['full_path'],
ooyala_s['provider_id'], video_id),
video_id, 'Downloading data JSON', query={
'include_titles': 'Series,Season',
'product_name': 'test',
'format': 'full',
})['hits']['hits'][0]['_source']
embedCode = source['offers'][0]['embed_codes'][0]
titles = source['localizable_titles'][0]
title = titles.get('title_medium') or titles['title_long']
description = titles.get('summary_long') or titles.get('summary_medium')
def get(key1, key2):
value1 = source.get(key1)
if not value1 or not isinstance(value1, list):
return
if not isinstance(value1[0], dict):
return
return value1[0].get(key2)
series = get('localizable_titles_series', 'title_medium')
season = get('localizable_titles_season', 'title_medium')
season_number = int_or_none(source.get('season_number'))
season_id = source.get('season_id')
episode = titles.get('title_sort_name')
episode_number = int_or_none(source.get('episode_number'))
duration = parse_duration(get('videos', 'duration'))
return {
'_type': 'url_transparent',
# for some reason only HLS is supported
'url': smuggle_url('ooyala:' + embedCode, {'supportedformats': 'm3u8,dash'}),
'id': video_id,
'title': title,
'description': description,
'series': series,
'season': season,
'season_number': season_number,
'season_id': season_id,
'episode': episode,
'episode_number': episode_number,
'duration': duration,
'thumbnail': get('images', 'url'),
}
| gpl-3.0 | 6,421,910,630,682,222,000 | 37.282297 | 102 | 0.515936 | false |
LaboratoireMecaniqueLille/Ximea | old/ximea_display_REC.py | 1 | 8856 | import numpy as np
import cv2
import time
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import rcParams
import SimpleITK as sitk
from multiprocessing import Process, Pipe, Value
from matplotlib.widgets import Slider, Button
rcParams['font.family'] = 'serif'
#ps aux | grep python # KILL python process ...
#kill -9 insert_here_the_python_thread_number # ... try it if ximea won't open again.
plt.close('all')
############################## Parameters
nbr_images=400 # enter here the number of images you need to save.
save_directory="/home/corentin/Bureau/ximea/" # path to the save directory. BE AWARE that this script will erase previous images without regrets or remorse.
exposure= 10000 # exposure time, in microseconds
gain=2
height=1024 # reducing this one allows one to increase the FPS
width=1024 # doesn't work for this one
data_format=6 #0=8 bits, 1=16(10)bits, 5=8bits RAW, 6=16(10)bits RAW
external_trigger= False #set to True if you trigger with an external source (arduino...). BE AWARE there is a 10s waiting time for the ximea, meaning if you wait more than 10 sec to trigger, the ximea will return an error and stop working.
set_FPS=False # set to True if you want to manually set the frame rate. It has 0.1 FPS precision @88FPS. If you need more precision, please use an external trigger with arduino.
FPS=50 # set here the frame rate you need. This parameter will only work if set_FPS =True.
numdevice = 0 # Set the number of the camera (if several cameras plugged)
##############################
rec_send , rec_recv = Pipe()
anim_send, anim_recv = Pipe()
rec_signal=Value('i',0)
plot_signal=Value('i',0)
#cap = cv2.VideoCapture(cv2.CAP_XIAPI) # open the ximea device
#cap = cv2.VideoCapture(cv2.CAP_XIAPI + numdevice) # open the ximea device Ximea devices start at 1100. 1100 => device 0, 1101 => device 1
#if external_trigger==True: # this condition activate the trigger mode
#cap.set(cv2.CAP_PROP_XI_TRG_SOURCE,1)
#cap.set(cv2.CAP_PROP_XI_GPI_SELECTOR,1)
#cap.set(cv2.CAP_PROP_XI_GPI_MODE,1)
#cap.set(cv2.CAP_PROP_XI_DATA_FORMAT,data_format) #0=8 bits, 1=16(10)bits, 5=8bits RAW, 6=16(10)bits RAW
#if data_format ==1 or data_format==6: #increase the FPS in 10 bits
#cap.set(cv2.CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH,10)
#cap.set(cv2.CAP_PROP_XI_DATA_PACKING,1)
#cap.set(cv2.CAP_PROP_XI_AEAG,0)#auto gain auto exposure
#cap.set(cv2.CAP_PROP_FRAME_WIDTH,width); # doesn't work for this one
##cap.set(cv2.CAP_PROP_XI_OFFSET_X,640);
#cap.set(cv2.CAP_PROP_FRAME_HEIGHT,height); # reducing this one allows one to increase the FPS
##cap.set(cv2.CAP_PROP_XI_DOWNSAMPLING,0) # activate this one if you need to downsample your images, i.e if you need a very high FPS and other options are not enough
##print cap.get(cv2.CAP_PROP_FRAME_WIDTH)
##print cap.get(cv2.CAP_PROP_FRAME_HEIGHT);
#cap.set(cv2.CAP_PROP_EXPOSURE,exposure) # setting up exposure
#cap.set(cv2.CAP_PROP_GAIN,gain) #setting up gain
#ret, frame = cap.read() # read a frame
### initialising the histogram
#if cap.get(cv2.CAP_PROP_XI_DATA_FORMAT)==0 or cap.get(cv2.CAP_PROP_XI_DATA_FORMAT)==5:
#x=np.arange(0,256,4)
#if cap.get(cv2.CAP_PROP_XI_DATA_FORMAT)==1 or cap.get(cv2.CAP_PROP_XI_DATA_FORMAT)==6:
#x=np.arange(0,1024,4)
#hist=np.ones(np.shape(x))
### initialising graph and axes
rat = 0.7
Width=7
Height=7.
rat = 0.7
Width=7
Height=7.
fig=plt.figure(figsize=(Height, Width))
frame=np.zeros((height,width))
axim = fig.add_axes([0.15, 0.135, rat, rat*(Height/Width)]) # Image frame
im = axim.imshow(frame,cmap=plt.cm.gray,interpolation='nearest') # display the first image
RECax = plt.axes([0.01, (0.15+rat)/2, 0.05, 0.05]) # define size and position
button = Button(RECax, 'REC', color='red', hovercolor='0.975') # define button
#fig=plt.figure(figsize=(Height, Width))
#ax=fig.add_subplot(111)
##axim = fig.add_axes([0.15, 0.135, rat, rat*(Height/Width)]) # Image frame
##cax = fig.add_axes([0.17+rat, 0.135, 0.02, rat*(Height/Width)]) # colorbar frame
##axhist=fig.add_axes([0.15,(0.17+rat),rat,0.1]) # histogram frame
##axhist.set_xlim([0,max(x)]) #set histogram limit in x...
##axhist.set_ylim([0,1]) # ... and y
#frame=np.zeros((height,width))
#im = ax.imshow(frame,cmap=plt.cm.gray,interpolation='nearest') # display the first image
##li,= axhist.plot(x,hist) #plot first histogram
##cb = fig.colorbar(im, cax=cax) #plot colorbar
##cax.axis('off')
#fig.canvas.draw()
#plt.show(block=False)
### define cursors here
#axcolor = 'lightgoldenrodyellow'
#axExp = plt.axes([0.15, 0.02,rat, 0.03], axisbg=axcolor) # define position and size
#sExp = Slider(axExp, 'Exposure', 200, 50000, valinit=exposure) #Exposition max = 1000000 # define slider with previous position and size
#axGain= plt.axes([0.15, 0.07,rat, 0.03], axisbg=axcolor)
#sGain = Slider(axGain, 'Gain', -1, 6, valinit=gain)
#def update(val): # this function updates the exposure and gain values
#cap.set(cv2.CAP_PROP_EXPOSURE,sExp.val)
#cap.set(cv2.CAP_PROP_GAIN,sGain.val)
#fig.canvas.draw_idle()
#sExp.on_changed(update) # call for update everytime the cursors change
#sGain.on_changed(update)
### define buttons here
#RECax = plt.axes([0.01, (0.15+rat)/2, 0.05, 0.05]) # define size and position
#button = Button(RECax, 'REC', color='red', hovercolor='0.975') # define button
def REC(): # when called, reads "nbr_images" frames and saves them as .tiff in save_directory
while True:
while rec_signal.value!=1:
indent=True
t0=time.time()
last_t=0
i=0
while(i<nbr_images):
if set_FPS==True and last_t!=0: #This loop is used to set the FPS
while (time.time()-last_t) < 1./FPS:
indent=True
last_t=time.time()
frame = rec_recv.recv()
image=sitk.GetImageFromArray(frame)
sitk.WriteImage(image,save_directory+"img_%.5d.tiff" %i) ### works fast in 8 or 16 bit, always use sitk.
i+=1
rec_signal.value=0
t=time.time()-t0
print "FPS = %s"%(nbr_images/t)
#def REC_one(event): # when called, read 1 image and save it as .tiff in save_directory with a timestamp, so the next REC will not erase the previous one
#ret, frame = cap.read()
#image=sitk.GetImageFromArray(frame)
#sitk.WriteImage(image,save_directory+"img_%.5d.tiff" %(time.time())) ### works fast in 8 or 16 bit, always use sitk.
def REC2(event):
rec_signal.value=1
#button.on_clicked(REC2) # on click, call the REC function
### Main
def function(i):
print "function aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
plot_signal.value=1
frame=anim_recv.recv() # read a frame
plot_signal.value=0
print "received!"
print frame[0]
im.set_data(frame)
return axim
def get_frame():
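    # Opens the Ximea camera with the settings defined at the top of the file,
    # then loops forever: a frame is pushed through anim_send whenever the
    # display asks for one (plot_signal) and through rec_send while a
    # recording is in progress (rec_signal).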
cap = cv2.VideoCapture(cv2.CAP_XIAPI + numdevice) # open the ximea device Ximea devices start at 1100. 1100 => device 0, 1101 => device 1
if external_trigger==True: # this condition activate the trigger mode
cap.set(cv2.CAP_PROP_XI_TRG_SOURCE,1)
cap.set(cv2.CAP_PROP_XI_GPI_SELECTOR,1)
cap.set(cv2.CAP_PROP_XI_GPI_MODE,1)
cap.set(cv2.CAP_PROP_XI_DATA_FORMAT,data_format) #0=8 bits, 1=16(10)bits, 5=8bits RAW, 6=16(10)bits RAW
if data_format ==1 or data_format==6: #increase the FPS in 10 bits
cap.set(cv2.CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH,10)
cap.set(cv2.CAP_PROP_XI_DATA_PACKING,1)
cap.set(cv2.CAP_PROP_XI_AEAG,0)#auto gain auto exposure
cap.set(cv2.CAP_PROP_FRAME_WIDTH,width); # doesn't work for this one
#cap.set(cv2.CAP_PROP_XI_OFFSET_X,640);
cap.set(cv2.CAP_PROP_FRAME_HEIGHT,height); # reducing this one allows one to increase the FPS
#cap.set(cv2.CAP_PROP_XI_DOWNSAMPLING,0) # activate this one if you need to downsample your images, i.e if you need a very high FPS and other options are not enough
#print cap.get(cv2.CAP_PROP_FRAME_WIDTH)
#print cap.get(cv2.CAP_PROP_FRAME_HEIGHT);
cap.set(cv2.CAP_PROP_EXPOSURE,exposure) # setting up exposure
cap.set(cv2.CAP_PROP_GAIN,gain) #setting up gain
while True:
ret, frame=cap.read()
print "this is Patrick"
print frame[0]
print plot_signal.value
if plot_signal.value==1:
anim_send.send(frame)
print "sended"
print rec_signal.value
if rec_signal.value==1:
rec_send.send(frame)
Get_frame=Process(target=get_frame,args=())
time.sleep(1)
#Rec=Process(target=REC,args=())
#Ani=Process(target=ani,args=())
Get_frame.start()
time.sleep(1)
#Ani.start()
#Rec.start()
#ani = animation.FuncAnimation(fig, anim, interval=20, frames=20, blit=False) # This function call the anim function to update averything in the figure.
#plt.show()
Get_frame.join()
time.sleep(1)
#Ani.join()
animation.FuncAnimation(fig, function, interval=20, frames=20, blit=False) # This function call the anim function to update averything in the figure.
plt.show()
#Rec.join()
| gpl-2.0 | 2,651,419,275,315,394,000 | 38.535714 | 227 | 0.704042 | false |
jeasoft/odoo | addons/purchase_requisition/__openerp__.py | 260 | 2424 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Purchase Requisitions',
'version': '0.1',
'author': 'OpenERP SA',
'category': 'Purchase Management',
'website': 'https://www.odoo.com/page/purchase',
'description': """
This module allows you to manage your Purchase Requisition.
===========================================================
When a purchase order is created, you now have the opportunity to save the
related requisition. This new object regroups the orders and allows you to
easily keep track of and order all your purchase orders.
""",
'depends' : ['purchase'],
'demo': ['purchase_requisition_demo.xml'],
'data': ['views/purchase_requisition.xml',
'security/purchase_tender.xml',
'wizard/purchase_requisition_partner_view.xml',
'wizard/bid_line_qty_view.xml',
'purchase_requisition_data.xml',
'purchase_requisition_view.xml',
'purchase_requisition_report.xml',
'purchase_requisition_workflow.xml',
'security/ir.model.access.csv','purchase_requisition_sequence.xml',
'views/report_purchaserequisition.xml',
],
'auto_install': False,
'test': [
'test/purchase_requisition_users.yml',
'test/purchase_requisition_demo.yml',
'test/cancel_purchase_requisition.yml',
'test/purchase_requisition.yml',
],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -9,032,068,168,876,607,000 | 41.526316 | 81 | 0.608498 | false |
chenc10/Spark-PAF | dist/ec2/lib/boto-2.34.0/boto/vpc/networkacl.py | 151 | 4976 | # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a Network ACL
"""
from boto.ec2.ec2object import TaggedEC2Object
from boto.resultset import ResultSet
class Icmp(object):
"""
Defines the ICMP code and type.
"""
def __init__(self, connection=None):
self.code = None
self.type = None
def __repr__(self):
        return 'Icmp::(code:%s, type:%s)' % (self.code, self.type)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'code':
self.code = value
elif name == 'type':
self.type = value
class NetworkAcl(TaggedEC2Object):
def __init__(self, connection=None):
super(NetworkAcl, self).__init__(connection)
self.id = None
self.vpc_id = None
self.network_acl_entries = []
self.associations = []
def __repr__(self):
return 'NetworkAcl:%s' % self.id
def startElement(self, name, attrs, connection):
result = super(NetworkAcl, self).startElement(name, attrs, connection)
if result is not None:
# Parent found an interested element, just return it
return result
if name == 'entrySet':
self.network_acl_entries = ResultSet([('item', NetworkAclEntry)])
return self.network_acl_entries
elif name == 'associationSet':
self.associations = ResultSet([('item', NetworkAclAssociation)])
return self.associations
else:
return None
def endElement(self, name, value, connection):
if name == 'networkAclId':
self.id = value
elif name == 'vpcId':
self.vpc_id = value
else:
setattr(self, name, value)
class NetworkAclEntry(object):
def __init__(self, connection=None):
self.rule_number = None
self.protocol = None
self.rule_action = None
self.egress = None
self.cidr_block = None
self.port_range = PortRange()
self.icmp = Icmp()
def __repr__(self):
return 'Acl:%s' % self.rule_number
def startElement(self, name, attrs, connection):
if name == 'portRange':
return self.port_range
elif name == 'icmpTypeCode':
return self.icmp
else:
return None
def endElement(self, name, value, connection):
if name == 'cidrBlock':
self.cidr_block = value
elif name == 'egress':
self.egress = value
elif name == 'protocol':
self.protocol = value
elif name == 'ruleAction':
self.rule_action = value
elif name == 'ruleNumber':
self.rule_number = value
class NetworkAclAssociation(object):
def __init__(self, connection=None):
self.id = None
self.subnet_id = None
self.network_acl_id = None
def __repr__(self):
return 'NetworkAclAssociation:%s' % self.id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'networkAclAssociationId':
self.id = value
elif name == 'networkAclId':
self.network_acl_id = value
elif name == 'subnetId':
self.subnet_id = value
class PortRange(object):
"""
Define the port range for the ACL entry if it is tcp / udp
"""
def __init__(self, connection=None):
self.from_port = None
self.to_port = None
def __repr__(self):
return 'PortRange:(%s-%s)' % ( self.from_port, self.to_port)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'from':
self.from_port = value
elif name == 'to':
self.to_port = value
| apache-2.0 | -1,707,816,727,419,553,300 | 29.341463 | 78 | 0.611133 | false |
uselessfire/altaire | libs/xmpp/commands.py | 2 | 14199 | ## $Id: commands.py,v 1.17 2007/08/28 09:54:15 normanr Exp $
## Ad-Hoc Command manager
## Mike Albon (c) 5th January 2005
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
"""This module is a ad-hoc command processor for xmpppy. It uses the plug-in mechanism like most of the core library. It depends on a DISCO browser manager.
There are 3 classes here, a command processor Commands like the Browser, and a command template plugin Command, and an example command.
To use this module:
Instantiate the module with the parent transport and disco browser manager as parameters.
'Plug in' commands using the command template.
The command feature must be added to existing disco replies where necessary.
What it supplies:
Automatic command registration with the disco browser manager.
Automatic listing of commands in the public command list.
A means of handling requests, by redirection through the command manager.
"""
from protocol import *
from client import PlugIn
class Commands(PlugIn):
"""Commands is an ancestor of PlugIn and can be attached to any session.
    The commands class provides a lookup and browse mechanism. It follows the same principle as the Browser class: for Service Discovery to provide the list of commands, it adds the 'list' disco type to your existing disco handler function.
How it works:
        The commands are added into the existing Browser on the correct nodes. When the command list is built, the supplied discovery handler function needs to support a 'list' option in its type argument. The handlers are then enumerated, and all results returned as None are ignored.
        The command executed is then called using its Execute method. All session management is handled by the command itself.
"""
def __init__(self, browser):
"""Initialises class and sets up local variables"""
PlugIn.__init__(self)
DBG_LINE='commands'
self._exported_methods=[]
self._handlers={'':{}}
self._browser = browser
def plugin(self, owner):
"""Makes handlers within the session"""
# Plug into the session and the disco manager
# We only need get and set, results are not needed by a service provider, only a service user.
owner.RegisterHandler('iq',self._CommandHandler,typ='set',ns=NS_COMMANDS)
owner.RegisterHandler('iq',self._CommandHandler,typ='get',ns=NS_COMMANDS)
self._browser.setDiscoHandler(self._DiscoHandler,node=NS_COMMANDS,jid='')
def plugout(self):
"""Removes handlers from the session"""
# unPlug from the session and the disco manager
self._owner.UnregisterHandler('iq',self._CommandHandler,ns=NS_COMMANDS)
for jid in self._handlers:
self._browser.delDiscoHandler(self._DiscoHandler,node=NS_COMMANDS)
def _CommandHandler(self,conn,request):
"""The internal method to process the routing of command execution requests"""
# This is the command handler itself.
# We must:
# Pass on command execution to command handler
# (Do we need to keep session details here, or can that be done in the command?)
jid = str(request.getTo())
try:
node = request.getTagAttr('command','node')
except:
conn.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
if self._handlers.has_key(jid):
if self._handlers[jid].has_key(node):
self._handlers[jid][node]['execute'](conn,request)
else:
conn.send(Error(request,ERR_ITEM_NOT_FOUND))
raise NodeProcessed
elif self._handlers[''].has_key(node):
self._handlers[''][node]['execute'](conn,request)
else:
conn.send(Error(request,ERR_ITEM_NOT_FOUND))
raise NodeProcessed
def _DiscoHandler(self,conn,request,typ):
"""The internal method to process service discovery requests"""
# This is the disco manager handler.
if typ == 'items':
# We must:
# Generate a list of commands and return the list
# * This handler does not handle individual commands disco requests.
# Pseudo:
# Enumerate the 'item' disco of each command for the specified jid
            # Build response and send
# To make this code easy to write we add an 'list' disco type, it returns a tuple or 'none' if not advertised
list = []
items = []
jid = str(request.getTo())
# Get specific jid based results
if self._handlers.has_key(jid):
for each in self._handlers[jid].keys():
items.append((jid,each))
else:
# Get generic results
for each in self._handlers[''].keys():
items.append(('',each))
if items != []:
for each in items:
i = self._handlers[each[0]][each[1]]['disco'](conn,request,'list')
if i != None:
list.append(Node(tag='item',attrs={'jid':i[0],'node':i[1],'name':i[2]}))
iq = request.buildReply('result')
if request.getQuerynode(): iq.setQuerynode(request.getQuerynode())
iq.setQueryPayload(list)
conn.send(iq)
else:
conn.send(Error(request,ERR_ITEM_NOT_FOUND))
raise NodeProcessed
elif typ == 'info':
return {'ids':[{'category':'automation','type':'command-list'}],'features':[]}
def addCommand(self,name,cmddisco,cmdexecute,jid=''):
"""The method to call if adding a new command to the session, the requred parameters of cmddisco and cmdexecute are the methods to enable that command to be executed"""
# This command takes a command object and the name of the command for registration
# We must:
# Add item into disco
# Add item into command list
if not self._handlers.has_key(jid):
self._handlers[jid]={}
self._browser.setDiscoHandler(self._DiscoHandler,node=NS_COMMANDS,jid=jid)
if self._handlers[jid].has_key(name):
raise NameError,'Command Exists'
else:
self._handlers[jid][name]={'disco':cmddisco,'execute':cmdexecute}
# Need to add disco stuff here
self._browser.setDiscoHandler(cmddisco,node=name,jid=jid)
def delCommand(self,name,jid=''):
"""Removed command from the session"""
# This command takes a command object and the name used for registration
# We must:
# Remove item from disco
# Remove item from command list
if not self._handlers.has_key(jid):
raise NameError,'Jid not found'
if not self._handlers[jid].has_key(name):
raise NameError, 'Command not found'
else:
#Do disco removal here
command = self.getCommand(name,jid)['disco']
del self._handlers[jid][name]
self._browser.delDiscoHandler(command,node=name,jid=jid)
def getCommand(self,name,jid=''):
"""Returns the command tuple"""
# This gets the command object with name
# We must:
# Return item that matches this name
if not self._handlers.has_key(jid):
raise NameError,'Jid not found'
elif not self._handlers[jid].has_key(name):
raise NameError,'Command not found'
else:
return self._handlers[jid][name]
class Command_Handler_Prototype(PlugIn):
"""This is a prototype command handler, as each command uses a disco method
and execute method you can implement it any way you like, however this is
my first attempt at making a generic handler that you can hang process
stages on too. There is an example command below.
The parameters are as follows:
name : the name of the command within the jabber environment
description : the natural language description
discofeatures : the features supported by the command
    initial : the initial command in the form of {'execute':commandname}
All stages set the 'actions' dictionary for each session to represent the possible options available.
"""
name = 'examplecommand'
count = 0
description = 'an example command'
discofeatures = [NS_COMMANDS,NS_DATA]
# This is the command template
def __init__(self,jid=''):
"""Set up the class"""
PlugIn.__init__(self)
DBG_LINE='command'
self.sessioncount = 0
self.sessions = {}
# Disco information for command list pre-formatted as a tuple
self.discoinfo = {'ids':[{'category':'automation','type':'command-node','name':self.description}],'features': self.discofeatures}
self._jid = jid
def plugin(self,owner):
"""Plug command into the commands class"""
# The owner in this instance is the Command Processor
self._commands = owner
self._owner = owner._owner
self._commands.addCommand(self.name,self._DiscoHandler,self.Execute,jid=self._jid)
def plugout(self):
"""Remove command from the commands class"""
self._commands.delCommand(self.name,self._jid)
def getSessionID(self):
"""Returns an id for the command session"""
self.count = self.count+1
return 'cmd-%s-%d'%(self.name,self.count)
def Execute(self,conn,request):
"""The method that handles all the commands, and routes them to the correct method for that stage."""
# New request or old?
try:
session = request.getTagAttr('command','sessionid')
except:
session = None
try:
action = request.getTagAttr('command','action')
except:
action = None
if action == None: action = 'execute'
# Check session is in session list
if self.sessions.has_key(session):
if self.sessions[session]['jid']==request.getFrom():
                # Check action is valid
if self.sessions[session]['actions'].has_key(action):
# Execute next action
self.sessions[session]['actions'][action](conn,request)
else:
# Stage not presented as an option
self._owner.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
else:
# Jid and session don't match. Go away imposter
self._owner.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
elif session != None:
# Not on this sessionid you won't.
self._owner.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
else:
# New session
self.initial[action](conn,request)
def _DiscoHandler(self,conn,request,type):
"""The handler for discovery events"""
if type == 'list':
return (request.getTo(),self.name,self.description)
elif type == 'items':
return []
elif type == 'info':
return self.discoinfo
class TestCommand(Command_Handler_Prototype):
""" Example class. You should read source if you wish to understate how it works.
Generally, it presents a "master" that giudes user through to calculate something.
"""
name = 'testcommand'
description = 'a noddy example command'
def __init__(self,jid=''):
""" Init internal constants. """
Command_Handler_Prototype.__init__(self,jid)
self.initial = {'execute':self.cmdFirstStage}
def cmdFirstStage(self,conn,request):
""" Determine """
# This is the only place this should be repeated as all other stages should have SessionIDs
try:
session = request.getTagAttr('command','sessionid')
except:
session = None
if session == None:
session = self.getSessionID()
self.sessions[session]={'jid':request.getFrom(),'actions':{'cancel':self.cmdCancel,'next':self.cmdSecondStage,'execute':self.cmdSecondStage},'data':{'type':None}}
# As this is the first stage we only send a form
reply = request.buildReply('result')
form = DataForm(title='Select type of operation',data=['Use the combobox to select the type of calculation you would like to do, then click Next',DataField(name='calctype',desc='Calculation Type',value=self.sessions[session]['data']['type'],options=[['circlediameter','Calculate the Diameter of a circle'],['circlearea','Calculate the area of a circle']],typ='list-single',required=1)])
replypayload = [Node('actions',attrs={'execute':'next'},payload=[Node('next')]),form]
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':session,'status':'executing'},payload=replypayload)
self._owner.send(reply)
raise NodeProcessed
def cmdSecondStage(self,conn,request):
form = DataForm(node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA))
self.sessions[request.getTagAttr('command','sessionid')]['data']['type']=form.getField('calctype').getValue()
self.sessions[request.getTagAttr('command','sessionid')]['actions']={'cancel':self.cmdCancel,None:self.cmdThirdStage,'previous':self.cmdFirstStage,'execute':self.cmdThirdStage,'next':self.cmdThirdStage}
# The form generation is split out to another method as it may be called by cmdThirdStage
self.cmdSecondStageReply(conn,request)
def cmdSecondStageReply(self,conn,request):
reply = request.buildReply('result')
form = DataForm(title = 'Enter the radius', data=['Enter the radius of the circle (numbers only)',DataField(desc='Radius',name='radius',typ='text-single')])
replypayload = [Node('actions',attrs={'execute':'complete'},payload=[Node('complete'),Node('prev')]),form]
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'executing'},payload=replypayload)
self._owner.send(reply)
raise NodeProcessed
def cmdThirdStage(self,conn,request):
form = DataForm(node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA))
try:
num = float(form.getField('radius').getValue())
except:
self.cmdSecondStageReply(conn,request)
from math import pi
if self.sessions[request.getTagAttr('command','sessionid')]['data']['type'] == 'circlearea':
result = (num**2)*pi
else:
result = num*2*pi
reply = request.buildReply('result')
form = DataForm(typ='result',data=[DataField(desc='result',name='result',value=result)])
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'completed'},payload=[form])
self._owner.send(reply)
raise NodeProcessed
def cmdCancel(self,conn,request):
reply = request.buildReply('result')
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'cancelled'})
self._owner.send(reply)
del self.sessions[request.getTagAttr('command','sessionid')]
| apache-2.0 | 6,745,296,311,923,583,000 | 42.422018 | 388 | 0.719699 | false |
egbertbouman/tribler-g | Tribler/Main/Build/Win32/setuptribler.py | 1 | 1721 | # Written by ABC authors and Arno Bakker
# see LICENSE.txt for license information
import sys
import os
try:
import py2exe.mf as modulefinder
import win32com
for p in win32com.__path__[1:]:
modulefinder.AddPackagePath("win32com", p)
for extra in ["win32com.shell"]:
__import__(extra)
m = sys.modules[extra]
for p in m.__path__[1:]:
modulefinder.AddPackagePath(extra, p)
except ImportError:
pass
from distutils.core import setup
import py2exe
################################################################
#
# Setup script used for py2exe
#
# *** Important note: ***
# Setting Python's optimize flag when building disables
# "assert" statements, which are used throughout the
# BitTornado core for error-handling.
#
################################################################
mainfile = os.path.join('Tribler','Main','tribler.py')
progicofile = os.path.join('Tribler','Images','tribler.ico')
target = {
"script": mainfile,
"icon_resources": [(1, progicofile)],
}
# gui panels to include (=those not found by py2exe from imports)
includePanels=[
"TopSearchPanel", "home", "games", "list", "settingsDialog"
]
#packages = ["Tribler.Core","encodings"] + ["Tribler.Main.vwxGUI.%s" % x for x in includePanels]
packages = ["encodings"] + ["Tribler.Main.vwxGUI.%s" % x for x in includePanels]
setup(
# (Disabling bundle_files for now -- apparently causes some issues with Win98)
# options = {"py2exe": {"bundle_files": 1}},
# zipfile = None,
options = {"py2exe": {"packages": packages,"optimize": 2}},
data_files = [("installdir",[])],
windows = [target],
)
| lgpl-2.1 | -7,421,252,580,503,476,000 | 28.192982 | 96 | 0.589773 | false |
BenMotz/cubetoolkit | toolkit/diary/templatetags/noprefix_url.py | 1 | 1089 | import logging
import urllib
from django import template
from django.template.defaulttags import url, URLNode
from django.urls import get_script_prefix
register = template.Library()
logger = logging.getLogger(__name__)
class NoPrefixURLNode(URLNode):
def __init__(self, url_node):
super(NoPrefixURLNode, self).__init__(
url_node.view_name, url_node.args, url_node.kwargs, url_node.asvar)
def render(self, context):
text = super(NoPrefixURLNode, self).render(context)
prefix = get_script_prefix()
parts = urllib.parse.urlsplit(text)
if not parts.path.startswith(prefix):
logger.error("Path %s doesn't start with prefix %s", text, prefix)
new_parts = list(parts)
new_parts[2] = parts.path[len(prefix) - 1:]
return urllib.parse.urlunsplit(new_parts)
@register.tag
def noprefix_url(parser, token):
"""
Returns an absolute URL matching given view with its parameters, with any
path prefix from the WSGI request stripped.
"""
return NoPrefixURLNode(url(parser, token))
| agpl-3.0 | -3,562,881,094,150,982,000 | 28.432432 | 79 | 0.677686 | false |
mozilla/captain | vendor/lib/python/django/core/servers/fastcgi.py | 241 | 6638 | """
FastCGI (or SCGI, or AJP1.3 ...) server that implements the WSGI protocol.
Uses the flup python package: http://www.saddi.com/software/flup/
This is an adaptation of the flup package to add FastCGI server support
to run Django apps from Web servers that support the FastCGI protocol.
This module can be run standalone or from the django-admin / manage.py
scripts using the "runfcgi" directive.
Run with the extra option "help" for a list of additional options you can
pass to this server.
"""
import os
import sys
from django.utils import importlib
__version__ = "0.1"
__all__ = ["runfastcgi"]
FASTCGI_OPTIONS = {
'protocol': 'fcgi',
'host': None,
'port': None,
'socket': None,
'method': 'fork',
'daemonize': None,
'workdir': '/',
'pidfile': None,
'maxspare': 5,
'minspare': 2,
'maxchildren': 50,
'maxrequests': 0,
'debug': None,
'outlog': None,
'errlog': None,
'umask': None,
}
FASTCGI_HELP = r"""
Run this project as a fastcgi (or some other protocol supported
by flup) application. To do this, the flup package from
http://www.saddi.com/software/flup/ is required.
runfcgi [options] [fcgi settings]
Optional Fcgi settings: (setting=value)
protocol=PROTOCOL fcgi, scgi, ajp, ... (default %(protocol)s)
host=HOSTNAME hostname to listen on.
port=PORTNUM port to listen on.
socket=FILE UNIX socket to listen on.
method=IMPL prefork or threaded (default %(method)s).
maxrequests=NUMBER number of requests a child handles before it is
killed and a new child is forked (0 = no limit).
maxspare=NUMBER max number of spare processes / threads (default %(maxspare)s).
minspare=NUMBER min number of spare processes / threads (default %(minspare)s).
maxchildren=NUMBER hard limit number of processes / threads (default %(maxchildren)s).
daemonize=BOOL whether to detach from terminal.
pidfile=FILE write the spawned process-id to this file.
workdir=DIRECTORY change to this directory when daemonizing (default %(workdir)s).
debug=BOOL set to true to enable flup tracebacks.
outlog=FILE write stdout to this file.
errlog=FILE write stderr to this file.
umask=UMASK umask to use when daemonizing, in octal notation (default 022).
Examples:
Run a "standard" fastcgi process on a file-descriptor
(for Web servers which spawn your processes for you)
$ manage.py runfcgi method=threaded
Run a scgi server on a TCP host/port
$ manage.py runfcgi protocol=scgi method=prefork host=127.0.0.1 port=8025
Run a fastcgi server on a UNIX domain socket (posix platforms only)
$ manage.py runfcgi method=prefork socket=/tmp/fcgi.sock
Run a fastCGI as a daemon and write the spawned PID in a file
$ manage.py runfcgi socket=/tmp/fcgi.sock method=prefork \
daemonize=true pidfile=/var/run/django-fcgi.pid
""" % FASTCGI_OPTIONS
def fastcgi_help(message=None):
print(FASTCGI_HELP)
if message:
print(message)
return False
def runfastcgi(argset=[], **kwargs):
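    """Run the WSGI application under flup, configured from "key=value"
    strings in ``argset`` (see FASTCGI_HELP for the recognised settings),
    e.g. runfastcgi(["method=prefork", "host=127.0.0.1", "port=8025"]).
    """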
options = FASTCGI_OPTIONS.copy()
options.update(kwargs)
for x in argset:
if "=" in x:
k, v = x.split('=', 1)
else:
k, v = x, True
options[k.lower()] = v
if "help" in options:
return fastcgi_help()
try:
import flup
except ImportError as e:
sys.stderr.write("ERROR: %s\n" % e)
sys.stderr.write(" Unable to load the flup package. In order to run django\n")
sys.stderr.write(" as a FastCGI application, you will need to get flup from\n")
sys.stderr.write(" http://www.saddi.com/software/flup/ If you've already\n")
sys.stderr.write(" installed flup, then make sure you have it in your PYTHONPATH.\n")
return False
flup_module = 'server.' + options['protocol']
if options['method'] in ('prefork', 'fork'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxChildren': int(options["maxchildren"]),
'maxRequests': int(options["maxrequests"]),
}
flup_module += '_fork'
elif options['method'] in ('thread', 'threaded'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxThreads': int(options["maxchildren"]),
}
else:
return fastcgi_help("ERROR: Implementation must be one of prefork or "
"thread.")
wsgi_opts['debug'] = options['debug'] is not None
try:
module = importlib.import_module('.%s' % flup_module, 'flup')
WSGIServer = module.WSGIServer
except Exception:
print("Can't import flup." + flup_module)
return False
# Prep up and go
from django.core.servers.basehttp import get_internal_wsgi_application
if options["host"] and options["port"] and not options["socket"]:
wsgi_opts['bindAddress'] = (options["host"], int(options["port"]))
elif options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = options["socket"]
elif not options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = None
else:
return fastcgi_help("Invalid combination of host, port, socket.")
if options["daemonize"] is None:
# Default to daemonizing if we're running on a socket/named pipe.
daemonize = (wsgi_opts['bindAddress'] is not None)
else:
if options["daemonize"].lower() in ('true', 'yes', 't'):
daemonize = True
elif options["daemonize"].lower() in ('false', 'no', 'f'):
daemonize = False
else:
return fastcgi_help("ERROR: Invalid option for daemonize "
"parameter.")
daemon_kwargs = {}
if options['outlog']:
daemon_kwargs['out_log'] = options['outlog']
if options['errlog']:
daemon_kwargs['err_log'] = options['errlog']
if options['umask']:
daemon_kwargs['umask'] = int(options['umask'], 8)
if daemonize:
from django.utils.daemonize import become_daemon
become_daemon(our_home_dir=options["workdir"], **daemon_kwargs)
if options["pidfile"]:
with open(options["pidfile"], "w") as fp:
fp.write("%d\n" % os.getpid())
WSGIServer(get_internal_wsgi_application(), **wsgi_opts).run()
if __name__ == '__main__':
runfastcgi(sys.argv[1:])
| mpl-2.0 | -5,673,734,926,278,020,000 | 34.881081 | 94 | 0.626695 | false |
rileymcdowell/genomic-neuralnet | genomic_neuralnet/methods/generic_keras_net.py | 1 | 6998 | from __future__ import print_function
import os
import time
import json
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Lambda
from keras.optimizers import Nadam as Trainer
#from keras.optimizers import Adam as Trainer
from keras.regularizers import WeightRegularizer
from keras.callbacks import EarlyStopping, Callback, LearningRateScheduler
from sklearn.preprocessing import MinMaxScaler
from genomic_neuralnet.util import get_is_time_stats, get_should_plot
TIMING_EPOCHS = 12000
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_epoch_end(self, epoch, logs={}):
self.losses.append(logs.get('loss'))
class NeuralNetContainer(object):
def __init__(self):
self.model = None
self.learning_rate = None
self.weight_decay = 0.0
self.dropout_prob = 0.0
self.epochs = 25
self.hidden_layers = (10,)
self.verbose = False
self.plot = False
def clone(self):
if not self.model is None:
            raise NotImplementedError('Cannot clone container after building model')
clone = NeuralNetContainer()
clone.learning_rate = self.learning_rate
clone.weight_decay = self.weight_decay
clone.dropout_prob = self.dropout_prob
clone.epochs = self.epochs
clone.hidden_layers = self.hidden_layers
clone.verbose = self.verbose
clone.plot = self.plot
return clone
def _build_nn(net_container, n_features):
model = Sequential()
# Change scale from (-1, 1) to (0, 1)
model.add(Lambda(lambda x: (x + 1) / 2, input_shape=(n_features,), output_shape=(n_features,)))
if net_container.weight_decay > 0.0:
weight_regularizer = WeightRegularizer(net_container.weight_decay)
else:
weight_regularizer = None
last_dim = n_features
for lidx, n_nodes in enumerate(net_container.hidden_layers):
# Layer, activation, and dropout, in that order.
model.add(Dense(output_dim=n_nodes, input_dim=last_dim, W_regularizer=weight_regularizer))
model.add(Activation('sigmoid'))
if net_container.dropout_prob > 0.0:
model.add(Dropout(net_container.dropout_prob))
last_dim = n_nodes
model.add(Dense(output_dim=1, input_dim=last_dim, bias=False))
model.add(Activation('linear'))
if not net_container.learning_rate is None:
optimizer = Trainer(lr=net_container.learning_rate)
else:
#optimizer = Trainer(lr=0.0001)
optimizer = Trainer()
model.compile( optimizer=optimizer
, loss='mean_squared_error'
)
net_container.model = model
def _train_net(container, X, y, override_epochs=None, is_check_train=False):
"""
Given a container, X (inputs), and y (outputs) train the network in the container.
* If override_epochs is an integer, just run that many epochs.
* The is_check_train parameter signifies that this training is a quick check to make
sure that the network is properly initialized and that the output error
is decreasing. The best "check trained" network will be passed in again
for an additional full set of training epochs.
"""
model = container.model
epochs = override_epochs if (not override_epochs is None) else container.epochs
verbose = int(container.verbose)
def rate_func(epoch):
if epochs - epoch == 2000:
# Settle down during last 2000 epochs.
model.optimizer.lr.set_value(model.optimizer.lr.get_value()/4.0)
if epochs - epoch == 500:
# Go a bit further in last 500 epochs.
model.optimizer.lr.set_value(model.optimizer.lr.get_value()/4.0)
return float(model.optimizer.lr.get_value())
lr_scheduler = LearningRateScheduler(rate_func)
loss_history = LossHistory()
callbacks = [loss_history, lr_scheduler]
model.fit( X,
y,
nb_epoch=epochs,
batch_size=X.shape[0] / 4,
verbose=verbose,
callbacks=callbacks
)
if (isinstance(override_epochs, int)) and (not is_check_train) and container.plot:
        # Plot the loss history, but only for a full (non-check) training run
        # where an explicit epoch count was given.
import matplotlib.pyplot as plt
plt.plot(range(len(loss_history.losses)), loss_history.losses)
plt.show()
return loss_history.losses[-1]
def _predict(container, X):
model = container.model
return model.predict(X)
_NET_TRIES = 2
def _get_initial_net(container, n_features, X, y):
"""
Create a few networks. Start the training process for a few epochs, then take
the best one to continue training. This eliminates networks that are poorly
initialized and will not converge.
"""
candidates = []
for _ in range(_NET_TRIES):
cont = container.clone()
_build_nn(cont, n_features)
candidates.append(cont)
losses = []
for candidate in candidates:
# Train each candidate for 100 epochs.
loss = _train_net(candidate, X, y, override_epochs=100, is_check_train=True)
losses.append(loss)
best_idx = np.argmin(losses)
return candidates[best_idx]
def get_net_prediction( train_data, train_truth, test_data, test_truth
, hidden=(5,), weight_decay=0.0, dropout_prob=0.0
, learning_rate=None, epochs=25, verbose=False
, iter_id=None
):
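    """Train a feed-forward network on train_data/train_truth and return
    predictions for test_data, rescaled to the original target range. The
    targets are scaled to (-1, 1), a few candidate networks are briefly
    check-trained and the best one kept (_get_initial_net), then the winner
    is fully trained and used for prediction. test_truth and iter_id are
    accepted for interface compatibility but are not used here.
    """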
container = NeuralNetContainer()
container.learning_rate = learning_rate
container.dropout_prob = dropout_prob
container.weight_decay = weight_decay
container.epochs = epochs
container.hidden_layers = hidden
container.verbose = verbose
container.plot = get_should_plot()
mms = MinMaxScaler(feature_range= (-1, 1)) # Scale output from -1 to 1.
train_y = mms.fit_transform(train_truth[:,np.newaxis])
n_features = train_data.shape[1]
collect_time_stats = get_is_time_stats()
if collect_time_stats:
start = time.time()
# Find and return an effectively initialized network to start.
container = _get_initial_net(container, n_features, train_data, train_y)
# Train the network.
if collect_time_stats:
# Train a specific time, never terminating early.
_train_net(container, train_data, train_y, override_epochs=TIMING_EPOCHS, is_check_train=False)
else:
# Normal training, enable all heuristics.
_train_net(container, train_data, train_y)
if collect_time_stats:
end = time.time()
print('Fitting took {} seconds'.format(end - start))
print(json.dumps({'seconds': end - start, 'hidden': container.hidden_layers}))
# Unsupervised (test) dataset.
predicted = _predict(container, test_data)
predicted = mms.inverse_transform(predicted)
return predicted.ravel()
| mit | 6,255,983,787,568,090,000 | 33.472906 | 103 | 0.648042 | false |
eliksir/mailmojo-python-sdk | mailmojo_sdk/models/category.py | 1 | 2937 | # coding: utf-8
"""
MailMojo API
v1 of the MailMojo API # noqa: E501
OpenAPI spec version: 1.1.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Category(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str'
}
attribute_map = {
'name': 'name'
}
def __init__(self, name=None): # noqa: E501
"""Category - a model defined in Swagger""" # noqa: E501
self._name = None
self.discriminator = None
if name is not None:
self.name = name
@property
def name(self):
"""Gets the name of this Category. # noqa: E501
:return: The name of this Category. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Category.
:param name: The name of this Category. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Category, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Category):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| apache-2.0 | -7,411,837,408,233,760,000 | 24.53913 | 80 | 0.520259 | false |
Mozhuowen/brython | www/src/Lib/test/test_quopri.py | 171 | 7715 | from test import support
import unittest
import sys, os, io, subprocess
import quopri
ENCSAMPLE = b"""\
Here's a bunch of special=20
=A1=A2=A3=A4=A5=A6=A7=A8=A9
=AA=AB=AC=AD=AE=AF=B0=B1=B2=B3
=B4=B5=B6=B7=B8=B9=BA=BB=BC=BD=BE
=BF=C0=C1=C2=C3=C4=C5=C6
=C7=C8=C9=CA=CB=CC=CD=CE=CF
=D0=D1=D2=D3=D4=D5=D6=D7
=D8=D9=DA=DB=DC=DD=DE=DF
=E0=E1=E2=E3=E4=E5=E6=E7
=E8=E9=EA=EB=EC=ED=EE=EF
=F0=F1=F2=F3=F4=F5=F6=F7
=F8=F9=FA=FB=FC=FD=FE=FF
characters... have fun!
"""
# First line ends with a space
DECSAMPLE = b"Here's a bunch of special \n" + \
b"""\
\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9
\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3
\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe
\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6
\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf
\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7
\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf
\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7
\xe8\xe9\xea\xeb\xec\xed\xee\xef
\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7
\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff
characters... have fun!
"""
def withpythonimplementation(testfunc):
def newtest(self):
# Test default implementation
testfunc(self)
# Test Python implementation
if quopri.b2a_qp is not None or quopri.a2b_qp is not None:
oldencode = quopri.b2a_qp
olddecode = quopri.a2b_qp
try:
quopri.b2a_qp = None
quopri.a2b_qp = None
testfunc(self)
finally:
quopri.b2a_qp = oldencode
quopri.a2b_qp = olddecode
newtest.__name__ = testfunc.__name__
return newtest
class QuopriTestCase(unittest.TestCase):
# Each entry is a tuple of (plaintext, encoded string). These strings are
# used in the "quotetabs=0" tests.
STRINGS = (
# Some normal strings
(b'hello', b'hello'),
(b'''hello
there
world''', b'''hello
there
world'''),
(b'''hello
there
world
''', b'''hello
there
world
'''),
(b'\201\202\203', b'=81=82=83'),
# Add some trailing MUST QUOTE strings
(b'hello ', b'hello=20'),
(b'hello\t', b'hello=09'),
# Some long lines. First, a single line of 108 characters
(b'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\xd8\xd9\xda\xdb\xdc\xdd\xde\xdfxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
b'''xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=D8=D9=DA=DB=DC=DD=DE=DFx=
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'''),
# A line of exactly 76 characters, no soft line break should be needed
(b'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy',
b'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy'),
# A line of 77 characters, forcing a soft line break at position 75,
# and a second line of exactly 2 characters (because the soft line
# break `=' sign counts against the line length limit).
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
b'''zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=
zz'''),
# A line of 151 characters, forcing a soft line break at position 75,
# with a second line of exactly 76 characters and no trailing =
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
b'''zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'''),
# A string containing a hard line break, but which the first line is
# 151 characters and the second line is exactly 76 characters. This
# should leave us with three lines, the first which has a soft line
# break, and which the second and third do not.
(b'''yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz''',
b'''yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy=
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'''),
# Now some really complex stuff ;)
(DECSAMPLE, ENCSAMPLE),
)
# These are used in the "quotetabs=1" tests.
ESTRINGS = (
(b'hello world', b'hello=20world'),
(b'hello\tworld', b'hello=09world'),
)
# These are used in the "header=1" tests.
HSTRINGS = (
(b'hello world', b'hello_world'),
(b'hello_world', b'hello=5Fworld'),
)
@withpythonimplementation
def test_encodestring(self):
for p, e in self.STRINGS:
self.assertEqual(quopri.encodestring(p), e)
@withpythonimplementation
def test_decodestring(self):
for p, e in self.STRINGS:
self.assertEqual(quopri.decodestring(e), p)
@withpythonimplementation
def test_idempotent_string(self):
for p, e in self.STRINGS:
self.assertEqual(quopri.decodestring(quopri.encodestring(e)), e)
@withpythonimplementation
def test_encode(self):
for p, e in self.STRINGS:
infp = io.BytesIO(p)
outfp = io.BytesIO()
quopri.encode(infp, outfp, quotetabs=False)
self.assertEqual(outfp.getvalue(), e)
@withpythonimplementation
def test_decode(self):
for p, e in self.STRINGS:
infp = io.BytesIO(e)
outfp = io.BytesIO()
quopri.decode(infp, outfp)
self.assertEqual(outfp.getvalue(), p)
@withpythonimplementation
def test_embedded_ws(self):
for p, e in self.ESTRINGS:
self.assertEqual(quopri.encodestring(p, quotetabs=True), e)
self.assertEqual(quopri.decodestring(e), p)
@withpythonimplementation
def test_encode_header(self):
for p, e in self.HSTRINGS:
self.assertEqual(quopri.encodestring(p, header=True), e)
@withpythonimplementation
def test_decode_header(self):
for p, e in self.HSTRINGS:
self.assertEqual(quopri.decodestring(e, header=True), p)
def test_scriptencode(self):
(p, e) = self.STRINGS[-1]
process = subprocess.Popen([sys.executable, "-mquopri"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(process.stdout.close)
cout, cerr = process.communicate(p)
# On Windows, Python will output the result to stdout using
# CRLF, as the mode of stdout is text mode. To compare this
# with the expected result, we need to do a line-by-line comparison.
cout = cout.decode('latin-1').splitlines()
e = e.decode('latin-1').splitlines()
self.assertEqual(len(cout), len(e))
for i in range(len(cout)):
self.assertEqual(cout[i], e[i])
self.assertEqual(cout, e)
def test_scriptdecode(self):
(p, e) = self.STRINGS[-1]
process = subprocess.Popen([sys.executable, "-mquopri", "-d"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(process.stdout.close)
cout, cerr = process.communicate(e)
cout = cout.decode('latin-1')
p = p.decode('latin-1')
self.assertEqual(cout.splitlines(), p.splitlines())
def test_main():
support.run_unittest(QuopriTestCase)
if __name__ == "__main__":
test_main()
| bsd-3-clause | 8,384,330,332,231,103,000 | 36.091346 | 164 | 0.659624 | false |
linuxdeepin/deepin-media-player | src/widget/window.py | 1 | 5101 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Deepin, Inc.
# 2013 Hailong Qiu
#
# Author: Hailong Qiu <[email protected]>
# Maintainer: Hailong Qiu <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from utils import propagate_expose
import math
import cairo
import gtk
class MenuWindow(gtk.Window):
def __init__(self):
gtk.Window.__init__(self, gtk.WINDOW_POPUP)
self.__init_values()
self.__init_settings()
self.__init_events()
def __init_values(self):
self.on_paint_expose_event = None
self.alpha = 1.0
# Shadow settings.
self.__sahow_check = True
self.__sahow_value = 2
self.__sahow_color = ("#FFFFFF", 0.5)
#
self.__surface = None
#
self.__old_w, self.__old_h = 0, 0
def get_sahow_value(self):
return self.__sahow_value
def __init_settings(self):
self.set_colormap(gtk.gdk.Screen().get_rgba_colormap())
self.set_decorated(False)
self.set_app_paintable(True)
self.set_skip_pager_hint(True)
self.set_skip_taskbar_hint(True)
self.set_position(gtk.WIN_POS_NONE)
self.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_MENU)
self.set_opacity(self.alpha)
def __init_events(self):
self.add_events(gtk.gdk.ALL_EVENTS_MASK)
self.connect("size-allocate", self.__on_size_allocate)
self.connect("expose-event", self.__expose_event)
self.connect("destroy", lambda w : gtk.main_quit())
def __on_size_allocate(self, widget, alloc):
x, y, w, h = self.allocation
# Avoid rebuilding the surface if the size has not changed.
if (self.__old_w == w and self.__old_h == h):
return False
self.__surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h)
self.__surface_context = cairo.Context(self.__surface)
self.__compute_shadow(w, h)
#
self.__old_w = w
self.__old_h = h
def __compute_shadow(self, w, h):
#cr = self.__surface_context
x, y = 0, 0
'''
self.on_draw_rectangle(x, y, w, h)
cr.set_source_rgba(*alpha_color_hex_to_cairo((self.__sahow_color)))
cr.fill_preserve()
gaussian_blur(self.__surface, self.__sahow_value)
# Draw the outer border.
cr.clip()
self.on_draw_rectangle(x, y, w, h)
self.__border_out_color = ("#000000", 1.0)
cr.set_source_rgba(*alpha_color_hex_to_cairo((self.__border_out_color)))
cr.fill_preserve()
# Draw the inner border.
cr.clip()
self.on_draw_rectangle(x + 0.5, y + 0.5, w, h)
self.__border_out_color = ("#FFFFFF", 0.9)
cr.set_source_rgba(*alpha_color_hex_to_cairo((self.__border_out_color)))
cr.fill_preserve()
'''
def on_draw_rectangle(self, x, y, w, h):
cr = self.__surface_context
radius = 5
x += radius
y += radius
w = w - x * 2
h = h - y * 2
# Arc set: (center_x, center_y, radius, start_angle, end_angle) for each rounded corner.
arc_set = [
(x + radius, y + radius, radius, 1 * math.pi, 1.5 * math.pi),
(x + w - radius, y + radius, radius, math.pi * 1.5, math.pi * 2.0),
(x + w - radius, y + h - radius, radius, 0, math.pi * 0.5),
(x + radius, y + h - radius, radius, math.pi * 0.5, math.pi)
]
#
for x, y, r, start, end in arc_set:
cr.arc(x, y, r, start, end)
cr.close_path()
def __expose_event(self, widget, event):
cr = widget.window.cairo_create()
rect = widget.allocation
#
cr.rectangle(*rect)
cr.set_source_rgba(1, 1, 1, 0)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
#
cr = widget.window.cairo_create()
#
'''
if self.__sahow_check: # Whether the shadow should be drawn.
self.draw_surface_expose_event(cr)
else: # Shadow disabled: just paint a plain white background.
cr.set_source_rgba(1, 1, 1, 1.0)
cr.paint()
#
'''
if self.on_paint_expose_event:
self.on_paint_expose_event(widget, event)
#
propagate_expose(widget, event)
return True
def draw_surface_expose_event(self, cr):
if self.__surface:
cr.set_source_surface(self.__surface, 0, 0)
cr.paint()
if __name__ == "__main__":
win = MenuWindow()
win.move(300, 300)
win.set_size_request(300, 300)
win.show_all()
gtk.main()
| gpl-3.0 | -4,402,501,843,107,886,600 | 30.679245 | 84 | 0.55271 | false |
grenade/password-store-fork | contrib/importers/kwallet2pass.py | 37 | 3626 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Juhamatti Niemelä <[email protected]>. All Rights Reserved.
# Copyright (C) 2014 Diggory Hardy <[email protected]>. All Rights Reserved.
# This file is licensed under the GPLv2+. Please see COPYING for more information.
import sys
import re
from subprocess import Popen, PIPE
from xml.etree import ElementTree
HEAD = '/passwords/'
def insert_data(path,text):
""" Insert data into the password store.
(1) removes HEAD from path
(2) ensures text ends with a new line and encodes in UTF-8
(3) inserts it into the store via 'pass insert --multiline --force'
"""
global HEAD
if path.startswith(HEAD):
path = path[len(HEAD):]
if not text.endswith('\n'):
text = text + '\n'
text = text.encode('utf8')
#print "Import: " + path + ": " + text
proc = Popen(['pass', 'insert', '--multiline', '--force', path],
stdin=PIPE, stdout=PIPE)
proc.communicate(text)
proc.wait()
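# Illustrative example (not part of the original script): with HEAD as above,
# insert_data('/passwords/web/example.org', 'hunter2\nusername: alice')
# runs `pass insert --multiline --force web/example.org` and feeds the two
# lines to it on stdin. The site name and credentials here are made up.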
def space_to_camelcase(value):
output = ""
first_word_passed = False
for word in value.split(" "):
if not word:
output += "_"
continue
if first_word_passed:
output += word.capitalize()
else:
output += word.lower()
first_word_passed = True
return output
def cleanTitle(title):
# make the title more command line friendly
title = re.sub("(\\|\||\(|\)|/)", "-", title)
title = re.sub("-$", "", title)
title = re.sub("\@", "At", title)
title = re.sub("'", "", title)
return title
def path_for(element, path=''):
""" Generate path name from elements title and current path """
title = cleanTitle(space_to_camelcase(element.attrib['name']))
return '/'.join([path, title])
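# Worked example (illustrative): for an element named "Web Mail" and
# path='/passwords', space_to_camelcase gives "webMail", cleanTitle leaves it
# unchanged, and path_for returns '/passwords/webMail'.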
def unexpected(element, path):
print "Unexpected element: " + path + '/' + element.tag + "\tAttributes: " + str(element.attrib)
def import_map(element, path):
npath = path_for(element, path)
nEntries = 0
text = 'Map'
for child in element:
if child.tag == 'mapentry':
name = child.attrib['name']
text = text + '\n\n' + name + '\n' + child.text
nEntries += 1
for child2 in child:
unexpected(child, path_for(child, npath))
else:
unexpected(child, npath)
insert_data(npath, text)
print "Map " + npath + " [" + str(nEntries) + " entries]"
def import_password(element, path=''):
""" Import new password entry to password-store using pass insert
command """
npath = path_for(element, path)
text = element.text
if text == None:
print "Password " + npath + ": no text"
text = ""
insert_data(npath, text)
for child in element:
unexpected(child, npath)
def import_folder(element, path=''):
""" Import all entries and folders from given folder """
npath = path_for(element, path)
print "Importing folder " + npath
nPasswords = 0
for child in element:
if child.tag == 'folder':
import_folder(child, npath)
elif child.tag == 'password':
import_password(child, npath)
nPasswords += 1
elif child.tag == 'map':
import_map(child, npath)
else:
unexpected(child, npath)
if nPasswords > 0:
print "[" + str(nPasswords) + " passwords]"
def main(xml_file):
""" Parse XML entries from a KWallet """
element = ElementTree.parse(xml_file).getroot()
assert element.tag == 'wallet'
import_folder(element)
if __name__ == '__main__':
main(sys.argv[1])
| gpl-2.0 | 2,012,227,413,242,666,200 | 28.958678 | 100 | 0.58069 | false |
Pistachitos/Sick-Beard | lib/hachoir_parser/image/gif.py | 90 | 8192 | """
GIF picture parser.
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
Enum, UInt8, UInt16,
Bit, Bits, NullBytes,
String, PascalString8, Character,
NullBits, RawBytes)
from lib.hachoir_parser.image.common import PaletteRGB
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.tools import humanDuration
from lib.hachoir_core.text_handler import textHandler, displayHandler, hexadecimal
# Maximum image dimension (in pixels)
MAX_WIDTH = 6000
MAX_HEIGHT = MAX_WIDTH
MAX_FILE_SIZE = 100 * 1024 * 1024
class Image(FieldSet):
def createFields(self):
yield UInt16(self, "left", "Left")
yield UInt16(self, "top", "Top")
yield UInt16(self, "width", "Width")
yield UInt16(self, "height", "Height")
yield Bits(self, "bpp", 3, "Bits / pixel minus one")
yield NullBits(self, "nul", 2)
yield Bit(self, "sorted", "Sorted??")
yield Bit(self, "interlaced", "Interlaced?")
yield Bit(self, "has_local_map", "Use local color map?")
if self["has_local_map"].value:
nb_color = 1 << (1 + self["bpp"].value)
yield PaletteRGB(self, "local_map", nb_color, "Local color map")
yield UInt8(self, "code_size", "LZW Minimum Code Size")
while True:
blen = UInt8(self, "block_len[]", "Block Length")
yield blen
if blen.value != 0:
yield RawBytes(self, "data[]", blen.value, "Image Data")
else:
break
def createDescription(self):
return "Image: %ux%u pixels at (%u,%u)" % (
self["width"].value, self["height"].value,
self["left"].value, self["top"].value)
DISPOSAL_METHOD = {
0: "No disposal specified",
1: "Do not dispose",
2: "Restore to background color",
3: "Restore to previous",
}
NETSCAPE_CODE = {
1: "Loop count",
}
def parseApplicationExtension(parent):
yield PascalString8(parent, "app_name", "Application name")
yield UInt8(parent, "size")
size = parent["size"].value
if parent["app_name"].value == "NETSCAPE2.0" and size == 3:
yield Enum(UInt8(parent, "netscape_code"), NETSCAPE_CODE)
if parent["netscape_code"].value == 1:
yield UInt16(parent, "loop_count")
else:
yield RawBytes(parent, "raw", 2)
else:
yield RawBytes(parent, "raw", size)
yield NullBytes(parent, "terminator", 1, "Terminator (0)")
def parseGraphicControl(parent):
yield UInt8(parent, "size", "Block size (4)")
yield Bit(parent, "has_transp", "Has transparency")
yield Bit(parent, "user_input", "User input")
yield Enum(Bits(parent, "disposal_method", 3), DISPOSAL_METHOD)
yield NullBits(parent, "reserved[]", 3)
if parent["size"].value != 4:
raise ParserError("Invalid graphic control size")
yield displayHandler(UInt16(parent, "delay", "Delay time in millisecond"), humanDuration)
yield UInt8(parent, "transp", "Transparent color index")
yield NullBytes(parent, "terminator", 1, "Terminator (0)")
def parseComments(parent):
while True:
field = PascalString8(parent, "comment[]", strip=" \0\r\n\t")
yield field
if field.length == 0:
break
def parseTextExtension(parent):
yield UInt8(parent, "block_size", "Block Size")
yield UInt16(parent, "left", "Text Grid Left")
yield UInt16(parent, "top", "Text Grid Top")
yield UInt16(parent, "width", "Text Grid Width")
yield UInt16(parent, "height", "Text Grid Height")
yield UInt8(parent, "cell_width", "Character Cell Width")
yield UInt8(parent, "cell_height", "Character Cell Height")
yield UInt8(parent, "fg_color", "Foreground Color Index")
yield UInt8(parent, "bg_color", "Background Color Index")
while True:
field = PascalString8(parent, "comment[]", strip=" \0\r\n\t")
yield field
if field.length == 0:
break
def defaultExtensionParser(parent):
while True:
size = UInt8(parent, "size[]", "Size (in bytes)")
yield size
if 0 < size.value:
yield RawBytes(parent, "content[]", size.value)
else:
break
class Extension(FieldSet):
ext_code = {
0xf9: ("graphic_ctl[]", parseGraphicControl, "Graphic control"),
0xfe: ("comments[]", parseComments, "Comments"),
0xff: ("app_ext[]", parseApplicationExtension, "Application extension"),
0x01: ("text_ext[]", parseTextExtension, "Plain text extension")
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
code = self["code"].value
if code in self.ext_code:
self._name, self.parser, self._description = self.ext_code[code]
else:
self.parser = defaultExtensionParser
def createFields(self):
yield textHandler(UInt8(self, "code", "Extension code"), hexadecimal)
for field in self.parser(self):
yield field
def createDescription(self):
return "Extension: function %s" % self["code"].display
class ScreenDescriptor(FieldSet):
def createFields(self):
yield UInt16(self, "width", "Width")
yield UInt16(self, "height", "Height")
yield Bits(self, "bpp", 3, "Bits per pixel minus one")
yield Bit(self, "reserved", "(reserved)")
yield Bits(self, "color_res", 3, "Color resolution minus one")
yield Bit(self, "global_map", "Has global map?")
yield UInt8(self, "background", "Background color")
yield UInt8(self, "pixel_aspect_ratio", "Pixel Aspect Ratio")
def createDescription(self):
colors = 1 << (self["bpp"].value+1)
return "Screen descriptor: %ux%u pixels %u colors" \
% (self["width"].value, self["height"].value, colors)
class GifFile(Parser):
endian = LITTLE_ENDIAN
separator_name = {
"!": "Extension",
",": "Image",
";": "Terminator"
}
PARSER_TAGS = {
"id": "gif",
"category": "image",
"file_ext": ("gif",),
"mime": (u"image/gif",),
"min_size": (6 + 7 + 1 + 9)*8, # signature + screen + separator + image
"magic": (("GIF87a", 0), ("GIF89a", 0)),
"description": "GIF picture"
}
def validate(self):
if self.stream.readBytes(0, 6) not in ("GIF87a", "GIF89a"):
return "Wrong header"
if self["screen/width"].value == 0 or self["screen/height"].value == 0:
return "Invalid image size"
if MAX_WIDTH < self["screen/width"].value:
return "Image width too big (%u)" % self["screen/width"].value
if MAX_HEIGHT < self["screen/height"].value:
return "Image height too big (%u)" % self["screen/height"].value
return True
def createFields(self):
# Header
yield String(self, "magic", 3, "File magic code", charset="ASCII")
yield String(self, "version", 3, "GIF version", charset="ASCII")
yield ScreenDescriptor(self, "screen")
if self["screen/global_map"].value:
bpp = (self["screen/bpp"].value+1)
yield PaletteRGB(self, "color_map", 1 << bpp, "Color map")
self.color_map = self["color_map"]
else:
self.color_map = None
self.images = []
while True:
code = Enum(Character(self, "separator[]", "Separator code"), self.separator_name)
yield code
code = code.value
if code == "!":
yield Extension(self, "extensions[]")
elif code == ",":
yield Image(self, "image[]")
elif code == ";":
# GIF Terminator
break
else:
raise ParserError("Wrong GIF image separator: 0x%02X" % ord(code))
def createContentSize(self):
field = self["image[0]"]
start = field.absolute_address + field.size
end = start + MAX_FILE_SIZE*8
pos = self.stream.searchBytes("\0;", start, end)
if pos:
return pos + 16
return None
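# Illustrative usage sketch (an assumption, not part of the original module):
# hachoir parsers are normally obtained through the generic factory, e.g.
# from lib.hachoir_parser import createParser
# parser = createParser(u"sample.gif") # yields a GifFile for GIF input
# if parser:
# print parser["screen"].description # e.g. "Screen descriptor: ..."
# The file name "sample.gif" is hypothetical.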
| gpl-3.0 | 8,210,896,480,294,096,000 | 35.088106 | 94 | 0.588501 | false |
floydhub/dockerfiles | dl/tensorflow/tests/1.5.0/mnist_eager.py | 1 | 9530 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
Sample usage:
python mnist.py --help
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import os
import sys
import time
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = None
class MNISTModel(tfe.Network):
"""MNIST Network.
Network structure is equivalent to:
https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/examples/tutorials/mnist/mnist_deep.py
and
https://github.com/tensorflow/models/blob/master/tutorials/image/mnist/convolutional.py
But written using the tf.layers API.
"""
def __init__(self, data_format):
"""Creates a model for classifying a hand-written digit.
Args:
data_format: Either 'channels_first' or 'channels_last'.
'channels_first' is typically faster on GPUs while 'channels_last' is
typically faster on CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
"""
super(MNISTModel, self).__init__(name='')
if data_format == 'channels_first':
self._input_shape = [-1, 1, 28, 28]
else:
assert data_format == 'channels_last'
self._input_shape = [-1, 28, 28, 1]
self.conv1 = self.track_layer(
tf.layers.Conv2D(32, 5, data_format=data_format, activation=tf.nn.relu))
self.conv2 = self.track_layer(
tf.layers.Conv2D(64, 5, data_format=data_format, activation=tf.nn.relu))
self.fc1 = self.track_layer(tf.layers.Dense(1024, activation=tf.nn.relu))
self.fc2 = self.track_layer(tf.layers.Dense(10))
self.dropout = self.track_layer(tf.layers.Dropout(0.5))
self.max_pool2d = self.track_layer(
tf.layers.MaxPooling2D(
(2, 2), (2, 2), padding='SAME', data_format=data_format))
def call(self, inputs, training):
"""Computes labels from inputs.
Users should invoke __call__ to run the network, which delegates to this
method (and not call this method directly).
Args:
inputs: A batch of images as a Tensor with shape [batch_size, 784].
training: True if invoked in the context of training (causing dropout to
be applied). False otherwise.
Returns:
A Tensor with shape [batch_size, 10] containing the predicted logits
for each image in the batch, for each of the 10 classes.
"""
x = tf.reshape(inputs, self._input_shape)
x = self.conv1(x)
x = self.max_pool2d(x)
x = self.conv2(x)
x = self.max_pool2d(x)
x = tf.layers.flatten(x)
x = self.fc1(x)
if training:
x = self.dropout(x)
x = self.fc2(x)
return x
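# Illustrative sketch (not in the original file): with eager execution
# enabled, the network maps a batch of flattened images to class logits, e.g.
# model = MNISTModel(data_format='channels_last')
# logits = model(tf.zeros([8, 784]), training=False) # -> shape [8, 10]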
def loss(predictions, labels):
return tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits=predictions, labels=labels))
def compute_accuracy(predictions, labels):
return tf.reduce_sum(
tf.cast(
tf.equal(
tf.argmax(predictions, axis=1,
output_type=tf.int64),
tf.argmax(labels, axis=1,
output_type=tf.int64)),
dtype=tf.float32)) / float(predictions.shape[0].value)
def train_one_epoch(model, optimizer, dataset, log_interval=None):
"""Trains model on `dataset` using `optimizer`."""
tf.train.get_or_create_global_step()
def model_loss(labels, images):
prediction = model(images, training=True)
loss_value = loss(prediction, labels)
tf.contrib.summary.scalar('loss', loss_value)
tf.contrib.summary.scalar('accuracy',
compute_accuracy(prediction, labels))
return loss_value
for (batch, (images, labels)) in enumerate(tfe.Iterator(dataset)):
with tf.contrib.summary.record_summaries_every_n_global_steps(10):
batch_model_loss = functools.partial(model_loss, labels, images)
optimizer.minimize(
batch_model_loss, global_step=tf.train.get_global_step())
if log_interval and batch % log_interval == 0:
print('Batch #%d\tLoss: %.6f' % (batch, batch_model_loss()))
def test(model, dataset):
"""Perform an evaluation of `model` on the examples from `dataset`."""
avg_loss = tfe.metrics.Mean('loss')
accuracy = tfe.metrics.Accuracy('accuracy')
for (images, labels) in tfe.Iterator(dataset):
predictions = model(images, training=False)
avg_loss(loss(predictions, labels))
accuracy(tf.argmax(predictions, axis=1, output_type=tf.int64),
tf.argmax(labels, axis=1, output_type=tf.int64))
print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' %
(avg_loss.result(), 100 * accuracy.result()))
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar('loss', avg_loss.result())
tf.contrib.summary.scalar('accuracy', accuracy.result())
def load_data(data_dir):
"""Returns training and test tf.data.Dataset objects."""
data = input_data.read_data_sets(data_dir, one_hot=True)
train_ds = tf.data.Dataset.from_tensor_slices((data.train.images,
data.train.labels))
test_ds = tf.data.Dataset.from_tensors((data.test.images, data.test.labels))
return (train_ds, test_ds)
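# Sketch of how the returned datasets are consumed elsewhere in this file
# (the path and batch size below are illustrative):
# (train_ds, test_ds) = load_data('/tmp/tensorflow/mnist/input_data')
# for images, labels in tfe.Iterator(train_ds.batch(32)):
# pass # images: [32, 784] floats, labels: [32, 10] one-hot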
def main(_):
tfe.enable_eager_execution()
# Log Info
print("-" * 64)
print("TEST INFO - EAGER")
print("-" * 64)
print("TF version:\t {}".format(tf.__version__))
print("Dataset:\t MNIST")
print("Model:\t CNN")
(device, data_format) = ('/gpu:0', 'channels_first')
if FLAGS.no_gpu or tfe.num_gpus() <= 0:
(device, data_format) = ('/cpu:0', 'channels_last')
print('Device:\t {}'.format(device))
if data_format == 'channels_first':
print("Data format:\t NCHW (channel first)")
else:
print("Data format:\t NHWC (channel last)")
print("=" * 64)
# Load the datasets
(train_ds, test_ds) = load_data(FLAGS.data_dir)
train_ds = train_ds.shuffle(60000).batch(FLAGS.batch_size)
# Create the model and optimizer
model = MNISTModel(data_format)
optimizer = tf.train.MomentumOptimizer(FLAGS.lr, FLAGS.momentum)
if FLAGS.output_dir:
train_dir = os.path.join(FLAGS.output_dir, 'train')
test_dir = os.path.join(FLAGS.output_dir, 'eval')
tf.gfile.MakeDirs(FLAGS.output_dir)
else:
train_dir = None
test_dir = None
summary_writer = tf.contrib.summary.create_file_writer(
train_dir, flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_file_writer(
test_dir, flush_millis=10000, name='test')
checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir, 'ckpt')
with tf.device(device):
for epoch in range(1, 6):
with tfe.restore_variables_on_create(
tf.train.latest_checkpoint(FLAGS.checkpoint_dir)):
global_step = tf.train.get_or_create_global_step()
start = time.time()
with summary_writer.as_default():
train_one_epoch(model, optimizer, train_ds, FLAGS.log_interval)
end = time.time()
print('\nTrain time for epoch #%d (global step %d): %f' % (
epoch, global_step.numpy(), end - start))
with test_summary_writer.as_default():
test(model, test_ds)
all_variables = (
model.variables
+ optimizer.variables()
+ [global_step])
tfe.Saver(all_variables).save(
checkpoint_prefix, global_step=global_step)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-dir',
type=str,
default='/tmp/tensorflow/mnist/input_data',
help='Directory for storing input data')
parser.add_argument(
'--batch-size',
type=int,
default=100,
metavar='N',
help='input batch size for training (default: 100)')
parser.add_argument(
'--log-interval',
type=int,
default=10,
metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument(
'--output_dir',
type=str,
default=None,
metavar='N',
help='Directory to write TensorBoard summaries')
parser.add_argument(
'--checkpoint_dir',
type=str,
default='/tmp/tensorflow/mnist/checkpoints/',
metavar='N',
help='Directory to save checkpoints in (once per epoch)')
parser.add_argument(
'--lr',
type=float,
default=0.01,
metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument(
'--momentum',
type=float,
default=0.5,
metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument(
'--no-gpu',
action='store_true',
default=False,
help='disables GPU usage even if a GPU is available')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | -3,333,237,684,150,214,700 | 32.438596 | 102 | 0.645331 | false |
vefimova/rally | tests/unit/plugins/openstack/scenarios/nova/test_utils.py | 1 | 41162 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from rally import exceptions as rally_exceptions
from rally.plugins.openstack.scenarios.nova import utils
from tests.unit import fakes
from tests.unit import test
BM_UTILS = "rally.task.utils"
NOVA_UTILS = "rally.plugins.openstack.scenarios.nova.utils"
CONF = cfg.CONF
class NovaScenarioTestCase(test.ScenarioTestCase):
def setUp(self):
super(NovaScenarioTestCase, self).setUp()
self.server = mock.Mock()
self.server1 = mock.Mock()
self.volume = mock.Mock()
self.floating_ip = mock.Mock()
self.image = mock.Mock()
self.keypair = mock.Mock()
def test__list_servers(self):
servers_list = []
self.clients("nova").servers.list.return_value = servers_list
nova_scenario = utils.NovaScenario()
return_servers_list = nova_scenario._list_servers(True)
self.assertEqual(servers_list, return_servers_list)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.list_servers")
@mock.patch(NOVA_UTILS + ".NovaScenario._generate_random_name",
return_value="foo_server_name")
def test__boot_server(self, mock__generate_random_name):
self.clients("nova").servers.create.return_value = self.server
nova_scenario = utils.NovaScenario(context={})
return_server = nova_scenario._boot_server("image_id",
"flavor_id")
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_boot_poll_interval,
timeout=CONF.benchmark.nova_server_boot_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_server)
self.clients("nova").servers.create.assert_called_once_with(
"foo_server_name", "image_id", "flavor_id")
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.boot_server")
@mock.patch(NOVA_UTILS + ".NovaScenario._generate_random_name",
return_value="foo_server_name")
def test__boot_server_with_network(self, mock__generate_random_name):
self.clients("nova").servers.create.return_value = self.server
networks = [{"id": "foo_id", "external": False},
{"id": "bar_id", "external": False}]
self.clients("nova").networks.list.return_value = networks
nova_scenario = utils.NovaScenario(context={
"iteration": 3,
"config": {"users": {"tenants": 2}},
"tenant": {"networks": networks}})
return_server = nova_scenario._boot_server("image_id",
"flavor_id",
auto_assign_nic=True)
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_boot_poll_interval,
timeout=CONF.benchmark.nova_server_boot_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self.clients("nova").servers.create.assert_called_once_with(
"foo_server_name", "image_id", "flavor_id",
nics=[{"net-id": "bar_id"}])
self.assertEqual(self.mock_wait_for.mock.return_value, return_server)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.boot_server")
def test__boot_server_with_network_exception(self):
self.clients("nova").servers.create.return_value = self.server
nova_scenario = utils.NovaScenario(
context={"tenant": {"networks": None}})
self.assertRaises(TypeError, nova_scenario._boot_server,
"image_id", "flavor_id",
auto_assign_nic=True)
@mock.patch(NOVA_UTILS + ".NovaScenario._generate_random_name",
return_value="foo_server_name")
def test__boot_server_with_ssh(self, mock__generate_random_name):
self.clients("nova").servers.create.return_value = self.server
nova_scenario = utils.NovaScenario(context={
"user": {
"secgroup": {"name": "test"},
"endpoint": mock.MagicMock()
}}
)
return_server = nova_scenario._boot_server("image_id", "flavor_id")
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_boot_poll_interval,
timeout=CONF.benchmark.nova_server_boot_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_server)
self.clients("nova").servers.create.assert_called_once_with(
"foo_server_name", "image_id", "flavor_id",
security_groups=["test"])
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.boot_server")
@mock.patch(NOVA_UTILS + ".NovaScenario._generate_random_name",
return_value="foo_server_name")
def test__boot_server_with_sec_group(self, mock__generate_random_name):
self.clients("nova").servers.create.return_value = self.server
nova_scenario = utils.NovaScenario(context={
"user": {
"secgroup": {"name": "new"},
"endpoint": mock.MagicMock()
}
})
return_server = nova_scenario._boot_server(
"image_id", "flavor_id",
security_groups=["test"])
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_boot_poll_interval,
timeout=CONF.benchmark.nova_server_boot_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_server)
self.clients("nova").servers.create.assert_called_once_with(
"foo_server_name", "image_id", "flavor_id",
security_groups=["test", "new"])
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.boot_server")
@mock.patch(NOVA_UTILS + ".NovaScenario._generate_random_name",
return_value="foo_server_name")
def test__boot_server_with_similar_sec_group(self,
mock__generate_random_name):
self.clients("nova").servers.create.return_value = self.server
nova_scenario = utils.NovaScenario(context={
"user": {
"secgroup": {"name": "test1"},
"endpoint": mock.MagicMock()
}}
)
return_server = nova_scenario._boot_server(
"image_id", "flavor_id",
security_groups=["test1"])
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_boot_poll_interval,
timeout=CONF.benchmark.nova_server_boot_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_server)
self.clients("nova").servers.create.assert_called_once_with(
"foo_server_name", "image_id", "flavor_id",
security_groups=["test1"])
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.boot_server")
def test__suspend_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._suspend_server(self.server)
self.server.suspend.assert_called_once_with()
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_suspend_poll_interval,
timeout=CONF.benchmark.nova_server_suspend_timeout)
self.mock_resource_is.mock.assert_called_once_with("SUSPENDED")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.suspend_server")
def test__resume_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._resume_server(self.server)
self.server.resume.assert_called_once_with()
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_resume_poll_interval,
timeout=CONF.benchmark.nova_server_resume_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.resume_server")
def test__pause_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._pause_server(self.server)
self.server.pause.assert_called_once_with()
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_pause_poll_interval,
timeout=CONF.benchmark.nova_server_pause_timeout)
self.mock_resource_is.mock.assert_called_once_with("PAUSED")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.pause_server")
def test__unpause_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._unpause_server(self.server)
self.server.unpause.assert_called_once_with()
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_unpause_poll_interval,
timeout=CONF.benchmark.nova_server_unpause_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.unpause_server")
def test__shelve_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._shelve_server(self.server)
self.server.shelve.assert_called_once_with()
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_shelve_poll_interval,
timeout=CONF.benchmark.nova_server_shelve_timeout)
self.mock_resource_is.mock.assert_called_once_with("SHELVED_OFFLOADED")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.shelve_server")
def test__unshelve_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._unshelve_server(self.server)
self.server.unshelve.assert_called_once_with()
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_unshelve_poll_interval,
timeout=CONF.benchmark.nova_server_unshelve_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.unshelve_server")
def test__create_image(self):
self.clients("nova").images.get.return_value = self.image
nova_scenario = utils.NovaScenario()
return_image = nova_scenario._create_image(self.server)
self.mock_wait_for.mock.assert_called_once_with(
self.image,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.
nova_server_image_create_poll_interval,
timeout=CONF.benchmark.nova_server_image_create_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_image)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.create_image")
def test__default_delete_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._delete_server(self.server)
self.server.delete.assert_called_once_with()
self.mock_wait_for_delete.mock.assert_called_once_with(
self.server,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_delete_poll_interval,
timeout=CONF.benchmark.nova_server_delete_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.delete_server")
def test__force_delete_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._delete_server(self.server, force=True)
self.server.force_delete.assert_called_once_with()
self.mock_wait_for_delete.mock.assert_called_once_with(
self.server,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_delete_poll_interval,
timeout=CONF.benchmark.nova_server_delete_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.force_delete_server")
def test__reboot_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._reboot_server(self.server)
self.server.reboot.assert_called_once_with(reboot_type="HARD")
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_reboot_poll_interval,
timeout=CONF.benchmark.nova_server_reboot_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.reboot_server")
def test__soft_reboot_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._soft_reboot_server(self.server)
self.server.reboot.assert_called_once_with(reboot_type="SOFT")
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_reboot_poll_interval,
timeout=CONF.benchmark.nova_server_reboot_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.soft_reboot_server")
def test__rebuild_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._rebuild_server(self.server, "img", fakearg="fakearg")
self.server.rebuild.assert_called_once_with("img", fakearg="fakearg")
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_rebuild_poll_interval,
timeout=CONF.benchmark.nova_server_rebuild_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.rebuild_server")
def test__start_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._start_server(self.server)
self.server.start.assert_called_once_with()
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_start_poll_interval,
timeout=CONF.benchmark.nova_server_start_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.start_server")
def test__stop_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._stop_server(self.server)
self.server.stop.assert_called_once_with()
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_stop_poll_interval,
timeout=CONF.benchmark.nova_server_stop_timeout)
self.mock_resource_is.mock.assert_called_once_with("SHUTOFF")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.stop_server")
def test__rescue_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._rescue_server(self.server)
self.server.rescue.assert_called_once_with()
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_rescue_poll_interval,
timeout=CONF.benchmark.nova_server_rescue_timeout)
self.mock_resource_is.mock.assert_called_once_with("RESCUE")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.rescue_server")
def test__unrescue_server(self):
nova_scenario = utils.NovaScenario()
nova_scenario._unrescue_server(self.server)
self.server.unrescue.assert_called_once_with()
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_unrescue_poll_interval,
timeout=CONF.benchmark.nova_server_unrescue_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.unrescue_server")
def _test_delete_servers(self, force=False):
servers = [self.server, self.server1]
nova_scenario = utils.NovaScenario()
nova_scenario._delete_servers(servers, force=force)
check_interval = CONF.benchmark.nova_server_delete_poll_interval
expected = []
for server in servers:
expected.append(mock.call(
server,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=check_interval,
timeout=CONF.benchmark.nova_server_delete_timeout))
if force:
server.force_delete.assert_called_once_with()
self.assertFalse(server.delete.called)
else:
server.delete.assert_called_once_with()
self.assertFalse(server.force_delete.called)
self.mock_wait_for_delete.mock.assert_has_calls(expected)
timer_name = "nova.%sdelete_servers" % ("force_" if force else "")
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
timer_name)
def test__default_delete_servers(self):
self._test_delete_servers()
def test__force_delete_servers(self):
self._test_delete_servers(force=True)
def test__delete_image(self):
nova_scenario = utils.NovaScenario()
nova_scenario._delete_image(self.image)
self.image.delete.assert_called_once_with()
self.mock_wait_for_delete.mock.assert_called_once_with(
self.image,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.
nova_server_image_delete_poll_interval,
timeout=CONF.benchmark.nova_server_image_delete_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.delete_image")
def test__boot_servers(self):
self.clients("nova").servers.list.return_value = [self.server,
self.server1]
nova_scenario = utils.NovaScenario()
nova_scenario._boot_servers("image", "flavor", 2)
expected = [
mock.call(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_boot_poll_interval,
timeout=CONF.benchmark.nova_server_boot_timeout
),
mock.call(
self.server1,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_boot_poll_interval,
timeout=CONF.benchmark.nova_server_boot_timeout
)
]
self.mock_wait_for.mock.assert_has_calls(expected)
self.mock_resource_is.mock.assert_has_calls([mock.call("ACTIVE"),
mock.call("ACTIVE")])
self.mock_get_from_manager.mock.assert_has_calls([mock.call(),
mock.call()])
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.boot_servers")
def test__associate_floating_ip(self):
nova_scenario = utils.NovaScenario()
nova_scenario._associate_floating_ip(self.server, self.floating_ip)
self.server.add_floating_ip.assert_called_once_with(self.floating_ip,
fixed_address=None)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.associate_floating_ip")
def test__dissociate_floating_ip(self):
nova_scenario = utils.NovaScenario()
nova_scenario._dissociate_floating_ip(self.server, self.floating_ip)
self.server.remove_floating_ip.assert_called_once_with(
self.floating_ip)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.dissociate_floating_ip")
def test__check_ip_address(self):
nova_scenario = utils.NovaScenario()
fake_server = fakes.FakeServerManager().create("test_server",
"image_id_01",
"flavor_id_01")
fake_server.addresses = {
"private": [
{"version": 4, "addr": "1.2.3.4"},
]}
floating_ip = fakes.FakeFloatingIP()
floating_ip.ip = "10.20.30.40"
# Also test function check_ip_address accept a string as attr
self.assertFalse(
nova_scenario.check_ip_address(floating_ip.ip)(fake_server))
self.assertTrue(
nova_scenario.check_ip_address(floating_ip.ip, must_exist=False)
(fake_server))
fake_server.addresses["private"].append(
{"version": 4, "addr": floating_ip.ip}
)
# Also test function check_ip_address accept an object with attr ip
self.assertTrue(
nova_scenario.check_ip_address(floating_ip)
(fake_server))
self.assertFalse(
nova_scenario.check_ip_address(floating_ip, must_exist=False)
(fake_server))
def test__list_networks(self):
network_list = []
self.clients("nova").networks.list.return_value = network_list
nova_scenario = utils.NovaScenario()
return_network_list = nova_scenario._list_networks()
self.assertEqual(network_list, return_network_list)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.list_networks")
def test__resize(self):
nova_scenario = utils.NovaScenario()
to_flavor = mock.Mock()
nova_scenario._resize(self.server, to_flavor)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.resize")
def test__resize_confirm(self):
nova_scenario = utils.NovaScenario()
nova_scenario._resize_confirm(self.server)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.resize_confirm")
def test__resize_revert(self):
nova_scenario = utils.NovaScenario()
nova_scenario._resize_revert(self.server)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.resize_revert")
def test__attach_volume(self):
self.clients("nova").volumes.create_server_volume.return_value = None
nova_scenario = utils.NovaScenario()
nova_scenario._attach_volume(self.server, self.volume)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.attach_volume")
def test__detach_volume(self):
self.clients("nova").volumes.delete_server_volume.return_value = None
nova_scenario = utils.NovaScenario()
nova_scenario._detach_volume(self.server, self.volume)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.detach_volume")
def test__live_migrate_server(self):
fake_host = mock.MagicMock()
self.admin_clients("nova").servers.get(return_value=self.server)
nova_scenario = utils.NovaScenario()
nova_scenario._live_migrate(self.server,
fake_host,
block_migration=False,
disk_over_commit=False,
skip_host_check=True)
self.mock_wait_for.mock.assert_called_once_with(
self.server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.
nova_server_live_migrate_poll_interval,
timeout=CONF.benchmark.nova_server_live_migrate_timeout)
self.mock_resource_is.mock.assert_called_once_with("ACTIVE")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.live_migrate")
def test__find_host_to_migrate(self):
fake_server = self.server
fake_host = {"nova-compute": {"available": True}}
self.admin_clients("nova").servers.get.return_value = fake_server
self.admin_clients("nova").availability_zones.list.return_value = [
mock.MagicMock(zoneName="a",
hosts={"a1": fake_host, "a2": fake_host,
"a3": fake_host}),
mock.MagicMock(zoneName="b",
hosts={"b1": fake_host, "b2": fake_host,
"b3": fake_host}),
mock.MagicMock(zoneName="c",
hosts={"c1": fake_host,
"c2": fake_host, "c3": fake_host})
]
setattr(fake_server, "OS-EXT-SRV-ATTR:host", "b2")
setattr(fake_server, "OS-EXT-AZ:availability_zone", "b")
nova_scenario = utils.NovaScenario()
self.assertIn(
nova_scenario._find_host_to_migrate(fake_server), ["b1", "b3"])
def test__migrate_server(self):
fake_server = self.server
setattr(fake_server, "OS-EXT-SRV-ATTR:host", "a1")
self.clients("nova").servers.get(return_value=fake_server)
nova_scenario = utils.NovaScenario()
nova_scenario._migrate(fake_server, skip_host_check=True)
self.mock_wait_for.mock.assert_called_once_with(
fake_server,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_migrate_poll_interval,
timeout=CONF.benchmark.nova_server_migrate_timeout)
self.mock_resource_is.mock.assert_called_once_with("VERIFY_RESIZE")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.migrate")
self.assertRaises(rally_exceptions.MigrateException,
nova_scenario._migrate,
fake_server, skip_host_check=False)
def test__create_security_groups(self):
nova_scenario = utils.NovaScenario()
nova_scenario._generate_random_name = mock.MagicMock()
security_group_count = 5
sec_groups = nova_scenario._create_security_groups(
security_group_count)
self.assertEqual(security_group_count, len(sec_groups))
self.assertEqual(security_group_count,
nova_scenario._generate_random_name.call_count)
self.assertEqual(
security_group_count,
self.clients("nova").security_groups.create.call_count)
self._test_atomic_action_timer(
nova_scenario.atomic_actions(),
"nova.create_%s_security_groups" % security_group_count)
def test__create_rules_for_security_group(self):
nova_scenario = utils.NovaScenario()
fake_secgroups = [fakes.FakeSecurityGroup(None, None, 1, "uuid1"),
fakes.FakeSecurityGroup(None, None, 2, "uuid2")]
rules_per_security_group = 10
nova_scenario._create_rules_for_security_group(
fake_secgroups, rules_per_security_group)
self.assertEqual(
len(fake_secgroups) * rules_per_security_group,
self.clients("nova").security_group_rules.create.call_count)
self._test_atomic_action_timer(
nova_scenario.atomic_actions(),
"nova.create_%s_rules" %
(rules_per_security_group * len(fake_secgroups)))
def test__delete_security_groups(self):
nova_scenario = utils.NovaScenario()
fake_secgroups = [fakes.FakeSecurityGroup(None, None, 1, "uuid1"),
fakes.FakeSecurityGroup(None, None, 2, "uuid2")]
nova_scenario._delete_security_groups(fake_secgroups)
self.assertSequenceEqual(
map(lambda x: mock.call(x.id), fake_secgroups),
self.clients("nova").security_groups.delete.call_args_list)
self._test_atomic_action_timer(
nova_scenario.atomic_actions(),
"nova.delete_%s_security_groups" % len(fake_secgroups))
def test__list_security_groups(self):
nova_scenario = utils.NovaScenario()
nova_scenario._list_security_groups()
self.clients("nova").security_groups.list.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.list_security_groups")
def test__list_keypairs(self):
keypairs_list = ["foo_keypair"]
self.clients("nova").keypairs.list.return_value = keypairs_list
nova_scenario = utils.NovaScenario()
return_keypairs_list = nova_scenario._list_keypairs()
self.assertEqual(keypairs_list, return_keypairs_list)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.list_keypairs")
def test__create_keypair(self):
self.clients("nova").keypairs.create.return_value.name = self.keypair
nova_scenario = utils.NovaScenario()
return_keypair = nova_scenario._create_keypair()
self.assertEqual(self.keypair, return_keypair)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.create_keypair")
def test__delete_keypair(self):
nova_scenario = utils.NovaScenario()
nova_scenario._delete_keypair(self.keypair)
self.clients("nova").keypairs.delete.assert_called_once_with(
self.keypair)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.delete_keypair")
def test__list_floating_ips_bulk(self):
floating_ips_bulk_list = ["foo_floating_ips_bulk"]
self.admin_clients("nova").floating_ips_bulk.list.return_value = (
floating_ips_bulk_list)
nova_scenario = utils.NovaScenario()
return_floating_ips_bulk_list = nova_scenario._list_floating_ips_bulk()
self.assertEqual(floating_ips_bulk_list, return_floating_ips_bulk_list)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.list_floating_ips_bulk")
@mock.patch(NOVA_UTILS + ".network_wrapper.generate_cidr")
def test__create_floating_ips_bulk(self, mock_generate_cidr):
fake_cidr = "10.2.0.0/24"
fake_pool = "test1"
fake_floating_ips_bulk = mock.MagicMock()
fake_floating_ips_bulk.ip_range = fake_cidr
fake_floating_ips_bulk.pool = fake_pool
self.admin_clients("nova").floating_ips_bulk.create.return_value = (
fake_floating_ips_bulk)
nova_scenario = utils.NovaScenario()
return_iprange = nova_scenario._create_floating_ips_bulk(fake_cidr)
mock_generate_cidr.assert_called_once_with(start_cidr=fake_cidr)
self.assertEqual(return_iprange, fake_floating_ips_bulk)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.create_floating_ips_bulk")
def test__delete_floating_ips_bulk(self):
fake_cidr = "10.2.0.0/24"
nova_scenario = utils.NovaScenario()
nova_scenario._delete_floating_ips_bulk(fake_cidr)
self.admin_clients(
"nova").floating_ips_bulk.delete.assert_called_once_with(fake_cidr)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.delete_floating_ips_bulk")
def test__list_hypervisors(self):
nova_scenario = utils.NovaScenario()
nova_scenario._list_hypervisors(detailed=False)
self.admin_clients("nova").hypervisors.list.assert_called_once_with(
False)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.list_hypervisors")
def test__lock_server(self):
server = mock.Mock()
nova_scenario = utils.NovaScenario()
nova_scenario._lock_server(server)
server.lock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.lock_server")
def test__unlock_server(self):
server = mock.Mock()
nova_scenario = utils.NovaScenario()
nova_scenario._unlock_server(server)
server.unlock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.unlock_server")
def test__delete_network(self):
fake_netlabel = "test1"
nova_scenario = utils.NovaScenario()
nova_scenario._delete_network(fake_netlabel)
self.admin_clients("nova").networks.delete.assert_called_once_with(
fake_netlabel)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.delete_network")
@mock.patch(NOVA_UTILS + ".network_wrapper.generate_cidr")
def test__create_network(self, mock_generate_cidr):
fake_cidr = "10.2.0.0/24"
fake_net = mock.MagicMock()
fake_net.cidr = fake_cidr
self.admin_clients("nova").networks.create.return_value = (fake_net)
nova_scenario = utils.NovaScenario()
nova_scenario._generate_random_name = mock.Mock(
return_value="rally_novanet_fake")
return_netlabel = nova_scenario._create_network(fake_cidr,
fakearg="fakearg")
mock_generate_cidr.assert_called_once_with(start_cidr=fake_cidr)
self.admin_clients("nova").networks.create.assert_called_once_with(
label="rally_novanet_fake", cidr=mock_generate_cidr.return_value,
fakearg="fakearg")
self.assertEqual(return_netlabel, fake_net)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.create_network")
| apache-2.0 | 6,694,983,293,901,197,000 | 48.71256 | 79 | 0.60981 | false |
vitaliykomarov/NEUCOGAR | nest/noradrenaline/nest-2.10.0/examples/nest/music/msgtest.py | 13 | 1200 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# msgtest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
nest.sli_run("statusdict/have_music ::")
if not nest.spp():
import sys
print("NEST was not compiled with support for MUSIC, not running.")
sys.exit()
mmip = nest.Create('music_message_in_proxy')
nest.SetStatus(mmip, {'port_name' : 'msgdata'})
# Simulate and get message data with a granularity of 10 ms:
time = 0
while time < 1000:
nest.Simulate (10)
data = nest.GetStatus(mmip, 'data')
    print(data)
time += 10
| gpl-2.0 | 8,782,903,406,521,920,000 | 29 | 71 | 0.710833 | false |
seaotterman/tensorflow | tensorflow/python/saved_model/signature_def_utils.py | 89 | 1583 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SignatureDef utility functions.
Utility functions for constructing SignatureDef protos.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def
from tensorflow.python.saved_model.signature_def_utils_impl import classification_signature_def
from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def
from tensorflow.python.saved_model.signature_def_utils_impl import regression_signature_def
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"build_signature_def",
"classification_signature_def",
"predict_signature_def",
"regression_signature_def",
]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 | -7,277,540,993,253,692,000 | 40.657895 | 95 | 0.742893 | false |
kevin8909/xjerp | openerp/addons/account/project/wizard/__init__.py | 427 | 1337 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_analytic_journal_report
import account_analytic_balance_report
import account_analytic_inverted_balance_report
import account_analytic_cost_ledger_report
import account_analytic_cost_ledger_for_journal_report
import project_account_analytic_line
import account_analytic_chart
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,914,894,981,712,608,000 | 43.566667 | 78 | 0.666417 | false |
Cube777/dotgit | tests/test_plugins_plain.py | 1 | 2081 | import os
from dotgit.plugins.plain import PlainPlugin
class TestPlainPlugin:
def test_apply(self, tmp_path):
plugin = PlainPlugin(str(tmp_path / 'data'))
data = 'test data'
with open(tmp_path / 'file', 'w') as f:
f.write(data)
plugin.apply(tmp_path / 'file', tmp_path / 'file2')
assert (tmp_path / 'file').exists()
assert (tmp_path / 'file2').exists()
assert not (tmp_path / 'file').is_symlink()
assert not (tmp_path / 'file2').is_symlink()
with open(tmp_path / 'file2', 'r') as f:
assert f.read() == data
def test_remove(self, tmp_path):
plugin = PlainPlugin(str(tmp_path / 'data'))
open(tmp_path / 'file', 'w').close()
plugin.remove(tmp_path / 'file', tmp_path / 'file2')
assert (tmp_path / 'file').exists()
assert (tmp_path / 'file2').exists()
assert not (tmp_path / 'file').is_symlink()
assert (tmp_path / 'file2').is_symlink()
assert (tmp_path / 'file').samefile(tmp_path / 'file2')
def test_samefile_link(self, tmp_path):
plugin = PlainPlugin(str(tmp_path / 'data'))
open(tmp_path / 'file', 'w').close()
os.symlink(tmp_path / 'file', tmp_path / 'file2')
assert plugin.samefile(tmp_path / 'file', tmp_path / 'file2')
def test_samefile_copy(self, tmp_path):
plugin = PlainPlugin(str(tmp_path / 'data'))
open(tmp_path / 'file', 'w').close()
open(tmp_path / 'file2', 'w').close()
assert not plugin.samefile(tmp_path / 'file', tmp_path / 'file2')
def test_hard_mode(self, tmp_path):
plugin = PlainPlugin(str(tmp_path / 'data'), hard=True)
open(tmp_path / 'file', 'w').close()
plugin.remove(tmp_path / 'file', tmp_path / 'file2')
assert (tmp_path / 'file').exists()
assert (tmp_path / 'file2').exists()
assert not (tmp_path / 'file').is_symlink()
assert not (tmp_path / 'file2').is_symlink()
assert not (tmp_path / 'file').samefile(tmp_path / 'file2')
| gpl-2.0 | -7,911,380,932,798,663,000 | 32.031746 | 73 | 0.56223 | false |
hal0x2328/neo-python | neo/Core/Utils.py | 1 | 1665 | import base58
from neo.Settings import settings
from neo.Core.Fixed8 import Fixed8
from typing import Tuple
def isValidPublicAddress(address: str) -> bool:
"""Check if address is a valid NEO address"""
valid = False
if len(address) == 34 and address[0] == 'A':
try:
base58.b58decode_check(address.encode())
valid = True
except ValueError:
# checksum mismatch
valid = False
return valid
def validate_simple_policy(tx) -> Tuple[bool, str]:
"""
Validate transaction policies
Args:
tx: Transaction object
Returns:
tuple:
result: True if it passes the policy checks. False otherwise.
error_msg: empty str if policy passes, otherwise reason for failure.
"""
# verify the maximum tx size is not exceeded
if tx.Size() > tx.MAX_TX_SIZE:
return False, f"Transaction cancelled. The tx size ({tx.Size()}) exceeds the maximum tx size ({tx.MAX_TX_SIZE})."
# calculate and verify the required network fee for the tx
fee = tx.NetworkFee()
if tx.Size() > settings.MAX_FREE_TX_SIZE and not tx.Type == b'\x02': # Claim Transactions are High Priority
req_fee = Fixed8.FromDecimal(settings.FEE_PER_EXTRA_BYTE * (tx.Size() - settings.MAX_FREE_TX_SIZE))
if req_fee < settings.LOW_PRIORITY_THRESHOLD:
req_fee = settings.LOW_PRIORITY_THRESHOLD
if fee < req_fee:
return False, f'Transaction cancelled. The tx size ({tx.Size()}) exceeds the max free tx size ({settings.MAX_FREE_TX_SIZE}).\nA network fee of {req_fee.ToString()} GAS is required.'
return True, ""
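# Editor's note (illustrative only; the exact thresholds come from Settings and are
# deployment specific): for a hypothetical 2000-byte non-claim transaction with
# MAX_FREE_TX_SIZE = 1024, the required fee would be
# FEE_PER_EXTRA_BYTE * (2000 - 1024), raised to LOW_PRIORITY_THRESHOLD if smaller,
# and the transaction is rejected when its declared network fee falls below that.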
| mit | -3,247,313,663,172,657,000 | 35.195652 | 193 | 0.640841 | false |
sunlianqiang/kbengine | kbe/src/lib/python/Lib/encodings/utf_16_le.py | 860 | 1037 | """ Python 'utf-16-le' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_16_le_encode
def decode(input, errors='strict'):
return codecs.utf_16_le_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_16_le_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_16_le_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_16_le_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_16_le_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16-le',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| lgpl-3.0 | -5,593,843,268,516,763,000 | 23.690476 | 61 | 0.712633 | false |
BubuLK/sfepy | examples/linear_elasticity/shell10x_cantilever.py | 5 | 3878 | r"""
Bending of a long thin cantilever beam computed using the
:class:`dw_shell10x <sfepy.terms.terms_shells.Shell10XTerm>` term.
Find displacements of the central plane :math:`\ul{u}`, and rotations
:math:`\ul{\alpha}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}, \ul{\beta})
e_{kl}(\ul{u}, \ul{\alpha})
= - \int_{\Gamma_{right}} \ul{v} \cdot \ul{f}
\;, \quad \forall \ul{v} \;,
where :math:`D_{ijkl}` is the isotropic elastic tensor, given using the Young's
modulus :math:`E` and the Poisson's ratio :math:`\nu`.
The variable ``u`` below holds both :math:`\ul{u}` and :math:`\ul{\alpha}`
DOFs. For visualization, it is saved as two fields ``u_disp`` and ``u_rot``,
corresponding to :math:`\ul{u}` and :math:`\ul{\alpha}`, respectively.
See also :ref:`linear_elasticity-shell10x_cantilever_interactive` example.
View the results using::
python postproc.py shell10x.vtk -d 'u_disp,plot_displacements,rel_scaling=1.0' --opacity='wireframe=0.5' -b --wireframe
"""
from __future__ import absolute_import
from sfepy.base.base import output
from sfepy.discrete.fem.meshio import UserMeshIO
from sfepy.discrete import Integral
import sfepy.mechanics.shell10x as sh
import examples.linear_elasticity.shell10x_cantilever_interactive as sci
# Beam dimensions.
dims = [0.2, 0.01, 0.001]
thickness = dims[2]
transform = 'bend' # None, 'bend' or 'twist'
# Mesh resolution: increase to improve accuracy.
shape = [11, 2]
# Material parameters.
young = 210e9
poisson = 0.3
# Loading force.
force = -1.0
def mesh_hook(mesh, mode):
"""
Generate the beam mesh.
"""
if mode == 'read':
mesh = sci.make_mesh(dims[:2], shape, transform=transform)
return mesh
def post_process(out, problem, state, extend=False):
u = problem.get_variables()['u']
gamma2 = problem.domain.regions['Gamma2']
dofs = u.get_state_in_region(gamma2)
output('DOFs along the loaded edge:')
output('\n%s' % dofs)
if transform != 'twist':
label, ii = {None : ('u_3', 2), 'bend' : ('u_1', 0)}[transform]
u_exact = sci.get_analytical_displacement(dims, young, force,
transform=transform)
output('max. %s displacement:' % label, dofs[0, ii])
output('analytical value:', u_exact)
return out
filename_mesh = UserMeshIO(mesh_hook)
options = {
'nls' : 'newton',
'ls' : 'ls',
'post_process_hook' : 'post_process',
}
if transform is None:
pload = [[0.0, 0.0, force / shape[1], 0.0, 0.0, 0.0]] * shape[1]
elif transform == 'bend':
pload = [[force / shape[1], 0.0, 0.0, 0.0, 0.0, 0.0]] * shape[1]
elif transform == 'twist':
pload = [[0.0, force / shape[1], 0.0, 0.0, 0.0, 0.0]] * shape[1]
materials = {
'm' : ({
'D' : sh.create_elastic_tensor(young=young, poisson=poisson),
'.drill' : 1e-7,
},),
'load' : ({
'.val' : pload,
},)
}
xmin = (-0.5 + 1e-12) * dims[0]
xmax = (0.5 - 1e-12) * dims[0]
regions = {
'Omega' : 'all',
'Gamma1' : ('vertices in (x < %.14f)' % xmin, 'facet'),
'Gamma2' : ('vertices in (x > %.14f)' % xmax, 'facet'),
}
fields = {
'fu': ('real', 6, 'Omega', 1, 'H1', 'shell10x'),
}
variables = {
'u' : ('unknown field', 'fu', 0),
'v' : ('test field', 'fu', 'u'),
}
ebcs = {
'fix' : ('Gamma1', {'u.all' : 0.0}),
}
# Custom integral.
aux = Integral('i', order=3)
qp_coors, qp_weights = aux.get_qp('3_8')
qp_coors[:, 2] = thickness * (qp_coors[:, 2] - 0.5)
qp_weights *= thickness
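# Editor's note: aux.get_qp('3_8') returns quadrature points on the reference cell with
# the third coordinate in [0, 1]; the two lines above shift that coordinate to
# [-thickness/2, thickness/2] and rescale the weights so the custom integral spans the
# physical shell thickness.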
integrals = {
'i' : ('custom', qp_coors, qp_weights),
}
equations = {
'elasticity' :
"""dw_shell10x.i.Omega(m.D, m.drill, v, u)
= dw_point_load.i.Gamma2(load.val, v)""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-7,
}),
}
| bsd-3-clause | 162,297,522,961,934,000 | 25.202703 | 121 | 0.58097 | false |
Zhongqilong/mykbengineer | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py | 1730 | 3405 | """A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to do
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - A generic builder for DOM implementations, defaulting to
a xml.dom.minidom based implementation.
"etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to
xml.etree.cElementTree if available and
xml.etree.ElementTree if not.
"lxml" - A etree-based builder for lxml.etree, handling
limitations of lxml's implementation.
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or xml.etree.cElementTree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
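# Editor's illustrative sketch (not part of the original module): typical client-side
# use of getTreeBuilder, assuming the stdlib ElementTree backend is available.
#
#     from html5lib.treebuilders import getTreeBuilder
#     TreeBuilderClass = getTreeBuilder("etree")
#     builder = TreeBuilderClass(namespaceHTMLElements=True)
#     # html5lib.HTMLParser(tree=TreeBuilderClass) wires the builder into parsing.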
| lgpl-3.0 | 1,670,213,874,503,822,600 | 43.802632 | 79 | 0.681938 | false |
DDelon/youtube-dl | youtube_dl/extractor/blinkx.py | 199 | 3217 | from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
remove_start,
int_or_none,
)
class BlinkxIE(InfoExtractor):
_VALID_URL = r'(?:https?://(?:www\.)blinkx\.com/#?ce/|blinkx:)(?P<id>[^?]+)'
IE_NAME = 'blinkx'
_TEST = {
'url': 'http://www.blinkx.com/ce/Da0Gw3xc5ucpNduzLuDDlv4WC9PuI4fDi1-t6Y3LyfdY2SZS5Urbvn-UPJvrvbo8LTKTc67Wu2rPKSQDJyZeeORCR8bYkhs8lI7eqddznH2ofh5WEEdjYXnoRtj7ByQwt7atMErmXIeYKPsSDuMAAqJDlQZ-3Ff4HJVeH_s3Gh8oQ',
'md5': '337cf7a344663ec79bf93a526a2e06c7',
'info_dict': {
'id': 'Da0Gw3xc',
'ext': 'mp4',
'title': 'No Daily Show for John Oliver; HBO Show Renewed - IGN News',
'uploader': 'IGN News',
'upload_date': '20150217',
'timestamp': 1424215740,
'description': 'HBO has renewed Last Week Tonight With John Oliver for two more seasons.',
'duration': 47.743333,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
display_id = video_id[:8]
api_url = ('https://apib4.blinkx.com/api.php?action=play_video&' +
'video=%s' % video_id)
data_json = self._download_webpage(api_url, display_id)
data = json.loads(data_json)['api']['results'][0]
duration = None
thumbnails = []
formats = []
for m in data['media']:
if m['type'] == 'jpg':
thumbnails.append({
'url': m['link'],
'width': int(m['w']),
'height': int(m['h']),
})
elif m['type'] == 'original':
duration = float(m['d'])
elif m['type'] == 'youtube':
yt_id = m['link']
self.to_screen('Youtube video detected: %s' % yt_id)
return self.url_result(yt_id, 'Youtube', video_id=yt_id)
elif m['type'] in ('flv', 'mp4'):
vcodec = remove_start(m['vcodec'], 'ff')
acodec = remove_start(m['acodec'], 'ff')
vbr = int_or_none(m.get('vbr') or m.get('vbitrate'), 1000)
abr = int_or_none(m.get('abr') or m.get('abitrate'), 1000)
tbr = vbr + abr if vbr and abr else None
format_id = '%s-%sk-%s' % (vcodec, tbr, m['w'])
formats.append({
'format_id': format_id,
'url': m['link'],
'vcodec': vcodec,
'acodec': acodec,
'abr': abr,
'vbr': vbr,
'tbr': tbr,
'width': int_or_none(m.get('w')),
'height': int_or_none(m.get('h')),
})
self._sort_formats(formats)
return {
'id': display_id,
'fullid': video_id,
'title': data['title'],
'formats': formats,
'uploader': data['channel_name'],
'timestamp': data['pubdate_epoch'],
'description': data.get('description'),
'thumbnails': thumbnails,
'duration': duration,
}
| unlicense | 3,884,145,687,458,209,300 | 36.406977 | 216 | 0.482437 | false |
kfeiWang/pythonUtils | estimator.py | 1 | 1971 | # -*- coding:utf8 -*-
from __future__ import print_function
import codecs
def caluPAndRAndF1(file1, file2):
'''
    Compute precision p, recall r and the F1 score.
    file1: reference file
    file2: segmented (system output) file
'''
with codecs.open(file1, 'r', 'utf8') as fin1:
with codecs.open(file2, 'r', 'utf8') as fin2:
line1 = fin1.readline()
line2 = fin2.readline()
totalWordCount = 0
rightWordCount = 0
splitWordCount = 0
while line1 and line2:
line1Arr = line1.strip().split(' ')
line2Arr = line2.strip().split(' ')
if len(line1Arr) != len(line2Arr):
                    raise Exception('sentence word counts do not match')
                for w1, w2 in zip(line1Arr, line2Arr): # iterate over the aligned words of the sentence
                    set1 = packSet(w1) # split the word on '/' and collect the pieces into a set
                    set2 = packSet(w2) # split the word on '/' and collect the pieces into a set
                    #print('w1:', w1, len(set1), 'set1:', set1, 'w2', w2, len(set2), 'set2:', set2)
                    totalWordCount += len(set1) # total number of word segments in the reference file
                    splitWordCount += len(set2) # total number of word segments in the segmented output
                    rightWordCount += len(set1.intersection(set2)) # segments shared by the reference and the segmented output
line1 = fin1.readline()
line2 = fin2.readline()
            p = rightWordCount*1.0/totalWordCount # precision
            r = rightWordCount*1.0/splitWordCount # recall
            f1 = p*r*2/(p+r) # F1 score
return p,r,f1
def packSet(word):
'''
    word: segments separated by '/'
'''
setR = set()
wArr = word.split('/')
for w in wArr:
setR.add(w)
return setR
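# Editor's illustrative sketch (hypothetical data, not part of the original script):
# walks one aligned word pair through the same counting used in caluPAndRAndF1 above.
def exampleScores():
    refSet = packSet('ab/c') # reference segments {'ab', 'c'}
    outSet = packSet('a/b/c') # system segments {'a', 'b', 'c'}
    right = len(refSet.intersection(outSet)) # 1, only 'c' matches
    p = right*1.0/len(refSet) # 1/2
    r = right*1.0/len(outSet) # 1/3
    return p, r, p*r*2/(p+r) # f1 == 0.4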
def testCaseCaluPAndRAndF1():
p,r,f1 = caluPAndRAndF1('testData/srcFile', 'testData/splitFile')
print('p:', p, 'r:', r, 'f1:', f1)
if __name__=='__main__':
testCaseCaluPAndRAndF1() | mit | -6,780,598,785,652,810,000 | 31.407407 | 99 | 0.531161 | false |
radicalbit/ambari | ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_linux.py | 2 | 22595 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
import status_params
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons import yaml_utils
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.get_bare_principal import get_bare_principal
from resource_management.libraries.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
from resource_management.libraries.functions import is_empty
from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_root = status_params.stack_root
sudo = AMBARI_SUDO_BINARY
limits_conf_dir = "/etc/security/limits.d"
# Needed since this is an Atlas Hook service.
cluster_name = config['clusterName']
stack_name = status_params.stack_name
upgrade_direction = default("/commandParams/upgrade_direction", None)
version = default("/commandParams/version", None)
agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
storm_component_home_dir = status_params.storm_component_home_dir
conf_dir = status_params.conf_dir
stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted = status_params.stack_version_formatted
stack_supports_ru = stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted)
stack_supports_storm_kerberos = stack_version_formatted and check_stack_feature(StackFeature.STORM_KERBEROS, stack_version_formatted)
stack_supports_storm_ams = stack_version_formatted and check_stack_feature(StackFeature.STORM_AMS, stack_version_formatted)
stack_supports_core_site_for_ranger_plugin = check_stack_feature(StackFeature.CORE_SITE_FOR_RANGER_PLUGINS_SUPPORT, stack_version_formatted)
# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)
stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
# default hadoop params
rest_lib_dir = "/usr/lib/storm/contrib/storm-rest"
storm_bin_dir = "/usr/bin"
storm_lib_dir = "/usr/lib/storm/lib/"
# hadoop parameters for 2.2+
if stack_supports_ru:
rest_lib_dir = format("{storm_component_home_dir}/contrib/storm-rest")
storm_bin_dir = format("{storm_component_home_dir}/bin")
storm_lib_dir = format("{storm_component_home_dir}/lib")
log4j_dir = format("{storm_component_home_dir}/log4j2")
storm_user = config['configurations']['storm-env']['storm_user']
log_dir = config['configurations']['storm-env']['storm_log_dir']
pid_dir = status_params.pid_dir
local_dir = config['configurations']['storm-site']['storm.local.dir']
user_group = config['configurations']['cluster-env']['user_group']
java64_home = config['hostLevelParams']['java_home']
jps_binary = format("{java64_home}/bin/jps")
nimbus_port = config['configurations']['storm-site']['nimbus.thrift.port']
storm_zookeeper_root_dir = default('/configurations/storm-site/storm.zookeeper.root', None)
storm_zookeeper_servers = config['configurations']['storm-site']['storm.zookeeper.servers']
storm_zookeeper_port = config['configurations']['storm-site']['storm.zookeeper.port']
storm_logs_supported = config['configurations']['storm-env']['storm_logs_supported']
# nimbus.seeds is supported in HDP 2.3.0.0 and higher
nimbus_seeds_supported = default('/configurations/storm-env/nimbus_seeds_supported', False)
nimbus_host = default('/configurations/storm-site/nimbus.host', None)
nimbus_seeds = default('/configurations/storm-site/nimbus.seeds', None)
default_topology_max_replication_wait_time_sec = default('/configurations/storm-site/topology.max.replication.wait.time.sec.default', -1)
nimbus_hosts = default("/clusterHostInfo/nimbus_hosts", [])
default_topology_min_replication_count = default('/configurations/storm-site/topology.min.replication.count.default', 1)
#Calculate topology.max.replication.wait.time.sec and topology.min.replication.count
if len(nimbus_hosts) > 1:
# for HA Nimbus
actual_topology_max_replication_wait_time_sec = -1
actual_topology_min_replication_count = len(nimbus_hosts) / 2 + 1
else:
# for non-HA Nimbus
actual_topology_max_replication_wait_time_sec = default_topology_max_replication_wait_time_sec
actual_topology_min_replication_count = default_topology_min_replication_count
if 'topology.max.replication.wait.time.sec.default' in config['configurations']['storm-site']:
del config['configurations']['storm-site']['topology.max.replication.wait.time.sec.default']
if 'topology.min.replication.count.default' in config['configurations']['storm-site']:
del config['configurations']['storm-site']['topology.min.replication.count.default']
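# Editor's note: with the logic above, a three-node HA Nimbus yields
# topology.min.replication.count = 3 / 2 + 1 = 2 and an unlimited replication wait
# (-1), while a single Nimbus keeps the defaults read from storm-site.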
rest_api_port = "8745"
rest_api_admin_port = "8746"
rest_api_conf_file = format("{conf_dir}/config.yaml")
storm_env_sh_template = config['configurations']['storm-env']['content']
jmxremote_port = config['configurations']['storm-env']['jmxremote_port']
if 'ganglia_server_host' in config['clusterHostInfo'] and len(config['clusterHostInfo']['ganglia_server_host'])>0:
ganglia_installed = True
ganglia_server = config['clusterHostInfo']['ganglia_server_host'][0]
ganglia_report_interval = 60
else:
ganglia_installed = False
security_enabled = config['configurations']['cluster-env']['security_enabled']
storm_ui_host = default("/clusterHostInfo/storm_ui_server_hosts", [])
storm_user_nofile_limit = default('/configurations/storm-env/storm_user_nofile_limit', 128000)
storm_user_nproc_limit = default('/configurations/storm-env/storm_user_noproc_limit', 65536)
if security_enabled:
_hostname_lowercase = config['hostname'].lower()
_storm_principal_name = config['configurations']['storm-env']['storm_principal_name']
storm_jaas_principal = _storm_principal_name.replace('_HOST',_hostname_lowercase)
_ambari_principal_name = default('/configurations/cluster-env/ambari_principal_name', None)
storm_keytab_path = config['configurations']['storm-env']['storm_keytab']
if stack_supports_storm_kerberos:
storm_ui_keytab_path = config['configurations']['storm-env']['storm_ui_keytab']
_storm_ui_jaas_principal_name = config['configurations']['storm-env']['storm_ui_principal_name']
storm_ui_jaas_principal = _storm_ui_jaas_principal_name.replace('_HOST',_hostname_lowercase)
storm_bare_jaas_principal = get_bare_principal(_storm_principal_name)
if _ambari_principal_name:
ambari_bare_jaas_principal = get_bare_principal(_ambari_principal_name)
_nimbus_principal_name = config['configurations']['storm-env']['nimbus_principal_name']
nimbus_jaas_principal = _nimbus_principal_name.replace('_HOST', _hostname_lowercase)
nimbus_bare_jaas_principal = get_bare_principal(_nimbus_principal_name)
nimbus_keytab_path = config['configurations']['storm-env']['nimbus_keytab']
kafka_bare_jaas_principal = None
if stack_supports_storm_kerberos:
if security_enabled:
storm_thrift_transport = config['configurations']['storm-site']['_storm.thrift.secure.transport']
# generate KafkaClient jaas config if kafka is kerberoized
_kafka_principal_name = default("/configurations/kafka-env/kafka_principal_name", None)
kafka_bare_jaas_principal = get_bare_principal(_kafka_principal_name)
else:
storm_thrift_transport = config['configurations']['storm-site']['_storm.thrift.nonsecure.transport']
set_instanceId = "false"
if 'cluster-env' in config['configurations'] and \
'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
set_instanceId = "true"
else:
ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
has_metric_collector = not len(ams_collector_hosts) == 0
metric_collector_port = None
if has_metric_collector:
if 'cluster-env' in config['configurations'] and \
'metrics_collector_external_port' in config['configurations']['cluster-env']:
metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
else:
metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
if metric_collector_web_address.find(':') != -1:
metric_collector_port = metric_collector_web_address.split(':')[1]
else:
metric_collector_port = '6188'
metric_collector_report_interval = 60
metric_collector_app_id = "nimbus"
if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
metric_collector_protocol = 'https'
else:
metric_collector_protocol = 'http'
metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
pass
metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
metric_collector_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink-with-common-*.jar"
metric_collector_legacy_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink-legacy-with-common-*.jar"
host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
# Cluster Zookeeper quorum
zookeeper_quorum = ""
if storm_zookeeper_servers:
storm_zookeeper_servers_list = yaml_utils.get_values_from_yaml_array(storm_zookeeper_servers)
zookeeper_quorum = (":" + storm_zookeeper_port + ",").join(storm_zookeeper_servers_list)
zookeeper_quorum += ":" + storm_zookeeper_port
jar_jvm_opts = ''
########################################################
############# Atlas related params #####################
########################################################
#region Atlas Hooks
storm_atlas_application_properties = default('/configurations/storm-atlas-application.properties', {})
enable_atlas_hook = default('/configurations/storm-env/storm.atlas.hook', False)
atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
if enable_atlas_hook:
# Only append /etc/atlas/conf to classpath if on HDP 2.4.*
if check_stack_feature(StackFeature.ATLAS_CONF_DIR_IN_PATH, stack_version_formatted):
atlas_conf_dir = format('{stack_root}/current/atlas-server/conf')
jar_jvm_opts += '-Datlas.conf=' + atlas_conf_dir
#endregion
storm_ui_port = config['configurations']['storm-site']['ui.port']
#Storm log4j properties
storm_a1_maxfilesize = default('/configurations/storm-cluster-log4j/storm_a1_maxfilesize', 100)
storm_a1_maxbackupindex = default('/configurations/storm-cluster-log4j/storm_a1_maxbackupindex', 9)
storm_wrkr_a1_maxfilesize = default('/configurations/storm-worker-log4j/storm_wrkr_a1_maxfilesize', 100)
storm_wrkr_a1_maxbackupindex = default('/configurations/storm-worker-log4j/storm_wrkr_a1_maxbackupindex', 9)
storm_wrkr_out_maxfilesize = default('/configurations/storm-worker-log4j/storm_wrkr_out_maxfilesize', 100)
storm_wrkr_out_maxbackupindex = default('/configurations/storm-worker-log4j/storm_wrkr_out_maxbackupindex', 4)
storm_wrkr_err_maxfilesize = default('/configurations/storm-worker-log4j/storm_wrkr_err_maxfilesize', 100)
storm_wrkr_err_maxbackupindex = default('/configurations/storm-worker-log4j/storm_wrkr_err_maxbackupindex', 4)
storm_cluster_log4j_content = config['configurations']['storm-cluster-log4j']['content']
storm_worker_log4j_content = config['configurations']['storm-worker-log4j']['content']
# some commands may need to supply the JAAS location when running as storm
storm_jaas_file = format("{conf_dir}/storm_jaas.conf")
# for curl command in ranger plugin to get db connector
jdk_location = config['hostLevelParams']['jdk_location']
# ranger storm plugin start section
# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
# ranger support xml_configuration flag, instead of depending on ranger xml_configurations_supported/ranger-env, using stack feature
xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
# ambari-server hostname
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
# ranger storm plugin enabled property
enable_ranger_storm = default("/configurations/ranger-storm-plugin-properties/ranger-storm-plugin-enabled", "No")
enable_ranger_storm = True if enable_ranger_storm.lower() == 'yes' else False
# ranger storm properties
if enable_ranger_storm:
# get ranger policy url
policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
if xml_configurations_supported:
policymgr_mgr_url = config['configurations']['ranger-storm-security']['ranger.plugin.storm.policy.rest.url']
if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
# ranger audit db user
xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
# ranger storm service name
repo_name = str(config['clusterName']) + '_storm'
repo_name_value = config['configurations']['ranger-storm-security']['ranger.plugin.storm.service.name']
if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
repo_name = repo_name_value
common_name_for_certificate = config['configurations']['ranger-storm-plugin-properties']['common.name.for.certificate']
repo_config_username = config['configurations']['ranger-storm-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
# ranger-env config
ranger_env = config['configurations']['ranger-env']
# create ranger-env config having external ranger credential properties
if not has_ranger_admin and enable_ranger_storm:
external_admin_username = default('/configurations/ranger-storm-plugin-properties/external_admin_username', 'admin')
external_admin_password = default('/configurations/ranger-storm-plugin-properties/external_admin_password', 'admin')
external_ranger_admin_username = default('/configurations/ranger-storm-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
external_ranger_admin_password = default('/configurations/ranger-storm-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
ranger_env = {}
ranger_env['admin_username'] = external_admin_username
ranger_env['admin_password'] = external_admin_password
ranger_env['ranger_admin_username'] = external_ranger_admin_username
ranger_env['ranger_admin_password'] = external_ranger_admin_password
ranger_plugin_properties = config['configurations']['ranger-storm-plugin-properties']
policy_user = storm_user
repo_config_password = config['configurations']['ranger-storm-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
xa_audit_db_password = ''
if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
repo_config_password = config['configurations']['ranger-storm-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
downloaded_custom_connector = None
previous_jdbc_jar_name = None
driver_curl_source = None
driver_curl_target = None
previous_jdbc_jar = None
if has_ranger_admin and stack_supports_ranger_audit_db:
xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
driver_curl_target = format("{storm_component_home_dir}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
previous_jdbc_jar = format("{storm_component_home_dir}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
sql_connector_jar = ''
storm_ranger_plugin_config = {
'username': repo_config_username,
'password': repo_config_password,
'nimbus.url': 'http://' + storm_ui_host[0].lower() + ':' + str(storm_ui_port),
'commonNameForCertificate': common_name_for_certificate
}
storm_ranger_plugin_repo = {
'isActive': 'true',
'config': json.dumps(storm_ranger_plugin_config),
'description': 'storm repo',
'name': repo_name,
'repositoryType': 'storm',
'assetType': '6'
}
custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
if len(custom_ranger_service_config) > 0:
storm_ranger_plugin_config.update(custom_ranger_service_config)
if stack_supports_ranger_kerberos and security_enabled:
policy_user = format('{storm_user},{storm_bare_jaas_principal}')
storm_ranger_plugin_config['policy.download.auth.users'] = policy_user
storm_ranger_plugin_config['tag.download.auth.users'] = policy_user
storm_ranger_plugin_config['ambari.service.check.user'] = policy_user
storm_ranger_plugin_repo = {
'isEnabled': 'true',
'configs': storm_ranger_plugin_config,
'description': 'storm repo',
'name': repo_name,
'type': 'storm'
}
ranger_storm_principal = None
ranger_storm_keytab = None
if stack_supports_ranger_kerberos and security_enabled:
ranger_storm_principal = storm_jaas_principal
ranger_storm_keytab = storm_keytab_path
xa_audit_db_is_enabled = False
if xml_configurations_supported and stack_supports_ranger_audit_db:
xa_audit_db_is_enabled = config['configurations']['ranger-storm-audit']['xasecure.audit.destination.db']
xa_audit_hdfs_is_enabled = default('/configurations/ranger-storm-audit/xasecure.audit.destination.hdfs', False)
ssl_keystore_password = config['configurations']['ranger-storm-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
ssl_truststore_password = config['configurations']['ranger-storm-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
# for SQLA explicitly disable audit to DB for Ranger
if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
xa_audit_db_is_enabled = False
# ranger storm plugin end section
namenode_hosts = default("/clusterHostInfo/namenode_host", [])
has_namenode = not len(namenode_hosts) == 0
hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources()
)
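# Editor's note (illustrative, assuming the usual Ambari resource pattern): with the
# partial above, service code can declare HDFS paths without repeating the common
# arguments, e.g.
#   params.HdfsResource(format("/user/{storm_user}"), type="directory",
#                       action="create_on_execute", owner=storm_user)
#   params.HdfsResource(None, action="execute")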
| apache-2.0 | -3,615,344,942,602,335,700 | 52.542654 | 176 | 0.756406 | false |
hacksterio/pygments.rb | vendor/pygments-main/pygments/lexers/r.py | 47 | 23755 | # -*- coding: utf-8 -*-
"""
pygments.lexers.r
~~~~~~~~~~~~~~~~~
Lexers for the R/S languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, words, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['RConsoleLexer', 'SLexer', 'RdLexer']
line_re = re.compile('.*?\n')
class RConsoleLexer(Lexer):
"""
For R console transcripts or R CMD BATCH output files.
"""
name = 'RConsole'
aliases = ['rconsole', 'rout']
filenames = ['*.Rout']
def get_tokens_unprocessed(self, text):
slexer = SLexer(**self.options)
current_code_block = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>') or line.startswith('+'):
# Colorize the prompt as such,
# then put rest of line into current_code_block
insertions.append((len(current_code_block),
[(0, Generic.Prompt, line[:2])]))
current_code_block += line[2:]
else:
# We have reached a non-prompt line!
# If we have stored prompt lines, need to process them first.
if current_code_block:
# Weave together the prompts and highlight code.
for item in do_insertions(
insertions, slexer.get_tokens_unprocessed(current_code_block)):
yield item
# Reset vars for next code block.
current_code_block = ''
insertions = []
# Now process the actual line itself, this is output from R.
yield match.start(), Generic.Output, line
# If we happen to end on a code block with nothing after it, need to
# process the last code block. This is neither elegant nor DRY so
# should be changed.
if current_code_block:
for item in do_insertions(
insertions, slexer.get_tokens_unprocessed(current_code_block)):
yield item
class SLexer(RegexLexer):
"""
For S, S-plus, and R source code.
.. versionadded:: 0.10
"""
name = 'S'
aliases = ['splus', 's', 'r']
filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron']
mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r',
'text/x-R', 'text/x-r-history', 'text/x-r-profile']
builtins_base = (
'Arg', 'Conj', 'Cstack_info', 'Encoding', 'FALSE',
'Filter', 'Find', 'I', 'ISOdate', 'ISOdatetime', 'Im', 'Inf',
'La.svd', 'Map', 'Math.Date', 'Math.POSIXt', 'Math.data.frame',
'Math.difftime', 'Math.factor', 'Mod', 'NA_character_',
'NA_complex_', 'NA_real_', 'NCOL', 'NROW', 'NULLNA_integer_', 'NaN',
'Negate', 'NextMethod', 'Ops.Date', 'Ops.POSIXt', 'Ops.data.frame',
'Ops.difftime', 'Ops.factor', 'Ops.numeric_version', 'Ops.ordered',
'Position', 'R.Version', 'R.home', 'R.version', 'R.version.string',
'RNGkind', 'RNGversion', 'R_system_version', 'Re', 'Recall',
'Reduce', 'Summary.Date', 'Summary.POSIXct', 'Summary.POSIXlt',
'Summary.data.frame', 'Summary.difftime', 'Summary.factor',
'Summary.numeric_version', 'Summary.ordered', 'Sys.Date',
'Sys.chmod', 'Sys.getenv', 'Sys.getlocale', 'Sys.getpid',
'Sys.glob', 'Sys.info', 'Sys.localeconv', 'Sys.readlink',
'Sys.setFileTime', 'Sys.setenv', 'Sys.setlocale', 'Sys.sleep',
'Sys.time', 'Sys.timezone', 'Sys.umask', 'Sys.unsetenv',
'Sys.which', 'TRUE', 'UseMethod', 'Vectorize', 'abbreviate', 'abs',
'acos', 'acosh', 'addNA', 'addTaskCallback', 'agrep', 'alist',
'all', 'all.equal', 'all.equal.POSIXct', 'all.equal.character',
'all.equal.default', 'all.equal.factor', 'all.equal.formula',
'all.equal.language', 'all.equal.list', 'all.equal.numeric',
'all.equal.raw', 'all.names', 'all.vars', 'any', 'anyDuplicated',
'anyDuplicated.array', 'anyDuplicated.data.frame',
'anyDuplicated.default', 'anyDuplicated.matrix', 'aperm',
'aperm.default', 'aperm.table', 'append', 'apply', 'args',
'arrayInd', 'as.Date', 'as.Date.POSIXct', 'as.Date.POSIXlt',
'as.Date.character', 'as.Date.date', 'as.Date.dates',
'as.Date.default', 'as.Date.factor', 'as.Date.numeric',
'as.POSIXct', 'as.POSIXct.Date', 'as.POSIXct.POSIXlt',
'as.POSIXct.date', 'as.POSIXct.dates', 'as.POSIXct.default',
'as.POSIXct.numeric', 'as.POSIXlt', 'as.POSIXlt.Date',
'as.POSIXlt.POSIXct', 'as.POSIXlt.character', 'as.POSIXlt.date',
'as.POSIXlt.dates', 'as.POSIXlt.default', 'as.POSIXlt.factor',
'as.POSIXlt.numeric', 'as.array', 'as.array.default', 'as.call',
'as.character', 'as.character.Date', 'as.character.POSIXt',
'as.character.condition', 'as.character.default',
'as.character.error', 'as.character.factor', 'as.character.hexmode',
'as.character.numeric_version', 'as.character.octmode',
'as.character.srcref', 'as.complex', 'as.data.frame',
'as.data.frame.AsIs', 'as.data.frame.Date', 'as.data.frame.POSIXct',
'as.data.frame.POSIXlt', 'as.data.frame.array',
'as.data.frame.character', 'as.data.frame.complex',
'as.data.frame.data.frame', 'as.data.frame.default',
'as.data.frame.difftime', 'as.data.frame.factor',
'as.data.frame.integer', 'as.data.frame.list',
'as.data.frame.logical', 'as.data.frame.matrix',
'as.data.frame.model.matrix', 'as.data.frame.numeric',
'as.data.frame.numeric_version', 'as.data.frame.ordered',
'as.data.frame.raw', 'as.data.frame.table', 'as.data.frame.ts',
'as.data.frame.vector', 'as.difftime', 'as.double',
'as.double.POSIXlt', 'as.double.difftime', 'as.environment',
'as.expression', 'as.expression.default', 'as.factor',
'as.function', 'as.function.default', 'as.hexmode', 'as.integer',
'as.list', 'as.list.Date', 'as.list.POSIXct', 'as.list.data.frame',
'as.list.default', 'as.list.environment', 'as.list.factor',
'as.list.function', 'as.list.numeric_version', 'as.logical',
'as.logical.factor', 'as.matrix', 'as.matrix.POSIXlt',
'as.matrix.data.frame', 'as.matrix.default', 'as.matrix.noquote',
'as.name', 'as.null', 'as.null.default', 'as.numeric',
'as.numeric_version', 'as.octmode', 'as.ordered',
'as.package_version', 'as.pairlist', 'as.qr', 'as.raw', 'as.single',
'as.single.default', 'as.symbol', 'as.table', 'as.table.default',
'as.vector', 'as.vector.factor', 'asNamespace', 'asS3', 'asS4',
'asin', 'asinh', 'assign', 'atan', 'atan2', 'atanh',
'attachNamespace', 'attr', 'attr.all.equal', 'attributes',
'autoload', 'autoloader', 'backsolve', 'baseenv', 'basename',
'besselI', 'besselJ', 'besselK', 'besselY', 'beta',
'bindingIsActive', 'bindingIsLocked', 'bindtextdomain', 'bitwAnd',
'bitwNot', 'bitwOr', 'bitwShiftL', 'bitwShiftR', 'bitwXor', 'body',
'bquote', 'browser', 'browserCondition', 'browserSetDebug',
'browserText', 'builtins', 'by', 'by.data.frame', 'by.default',
'bzfile', 'c.Date', 'c.POSIXct', 'c.POSIXlt', 'c.noquote',
'c.numeric_version', 'call', 'callCC', 'capabilities', 'casefold',
'cat', 'category', 'cbind', 'cbind.data.frame', 'ceiling',
'char.expand', 'charToRaw', 'charmatch', 'chartr', 'check_tzones',
'chol', 'chol.default', 'chol2inv', 'choose', 'class',
'clearPushBack', 'close', 'close.connection', 'close.srcfile',
'close.srcfilealias', 'closeAllConnections', 'col', 'colMeans',
'colSums', 'colnames', 'commandArgs', 'comment', 'computeRestarts',
'conditionCall', 'conditionCall.condition', 'conditionMessage',
'conditionMessage.condition', 'conflicts', 'contributors', 'cos',
'cosh', 'crossprod', 'cummax', 'cummin', 'cumprod', 'cumsum', 'cut',
'cut.Date', 'cut.POSIXt', 'cut.default', 'dQuote', 'data.class',
'data.matrix', 'date', 'debug', 'debugonce',
'default.stringsAsFactors', 'delayedAssign', 'deparse', 'det',
'determinant', 'determinant.matrix', 'dget', 'diag', 'diff',
'diff.Date', 'diff.POSIXt', 'diff.default', 'difftime', 'digamma',
'dim', 'dim.data.frame', 'dimnames', 'dimnames.data.frame', 'dir',
'dir.create', 'dirname', 'do.call', 'dput', 'drop', 'droplevels',
'droplevels.data.frame', 'droplevels.factor', 'dump', 'duplicated',
'duplicated.POSIXlt', 'duplicated.array', 'duplicated.data.frame',
'duplicated.default', 'duplicated.matrix',
'duplicated.numeric_version', 'dyn.load', 'dyn.unload', 'eapply',
'eigen', 'else', 'emptyenv', 'enc2native', 'enc2utf8',
'encodeString', 'enquote', 'env.profile', 'environment',
'environmentIsLocked', 'environmentName', 'eval', 'eval.parent',
'evalq', 'exists', 'exp', 'expand.grid', 'expm1', 'expression',
'factor', 'factorial', 'fifo', 'file', 'file.access', 'file.append',
'file.choose', 'file.copy', 'file.create', 'file.exists',
'file.info', 'file.link', 'file.path', 'file.remove', 'file.rename',
'file.show', 'file.symlink', 'find.package', 'findInterval',
'findPackageEnv', 'findRestart', 'floor', 'flush',
'flush.connection', 'force', 'formals', 'format',
'format.AsIs', 'format.Date', 'format.POSIXct', 'format.POSIXlt',
'format.data.frame', 'format.default', 'format.difftime',
'format.factor', 'format.hexmode', 'format.info',
'format.libraryIQR', 'format.numeric_version', 'format.octmode',
'format.packageInfo', 'format.pval', 'format.summaryDefault',
'formatC', 'formatDL', 'forwardsolve', 'gamma', 'gc', 'gc.time',
'gcinfo', 'gctorture', 'gctorture2', 'get', 'getAllConnections',
'getCallingDLL', 'getCallingDLLe', 'getConnection',
'getDLLRegisteredRoutines', 'getDLLRegisteredRoutines.DLLInfo',
'getDLLRegisteredRoutines.character', 'getElement',
'getExportedValue', 'getHook', 'getLoadedDLLs', 'getNamespace',
'getNamespaceExports', 'getNamespaceImports', 'getNamespaceInfo',
'getNamespaceName', 'getNamespaceUsers', 'getNamespaceVersion',
'getNativeSymbolInfo', 'getOption', 'getRversion', 'getSrcLines',
'getTaskCallbackNames', 'geterrmessage', 'gettext', 'gettextf',
'getwd', 'gl', 'globalenv', 'gregexpr', 'grep', 'grepRaw', 'grepl',
'gsub', 'gzcon', 'gzfile', 'head', 'iconv', 'iconvlist',
'icuSetCollate', 'identical', 'identity', 'ifelse', 'importIntoEnv',
'in', 'inherits', 'intToBits', 'intToUtf8', 'interaction', 'interactive',
'intersect', 'inverse.rle', 'invisible', 'invokeRestart',
'invokeRestartInteractively', 'is.R', 'is.array', 'is.atomic',
'is.call', 'is.character', 'is.complex', 'is.data.frame',
'is.double', 'is.element', 'is.environment', 'is.expression',
'is.factor', 'is.finite', 'is.function', 'is.infinite',
'is.integer', 'is.language', 'is.list', 'is.loaded', 'is.logical',
'is.matrix', 'is.na', 'is.na.POSIXlt', 'is.na.data.frame',
'is.na.numeric_version', 'is.name', 'is.nan', 'is.null',
'is.numeric', 'is.numeric.Date', 'is.numeric.POSIXt',
'is.numeric.difftime', 'is.numeric_version', 'is.object',
'is.ordered', 'is.package_version', 'is.pairlist', 'is.primitive',
'is.qr', 'is.raw', 'is.recursive', 'is.single', 'is.symbol',
'is.table', 'is.unsorted', 'is.vector', 'isBaseNamespace',
'isIncomplete', 'isNamespace', 'isOpen', 'isRestart', 'isS4',
'isSeekable', 'isSymmetric', 'isSymmetric.matrix', 'isTRUE',
'isatty', 'isdebugged', 'jitter', 'julian', 'julian.Date',
'julian.POSIXt', 'kappa', 'kappa.default', 'kappa.lm', 'kappa.qr',
'kronecker', 'l10n_info', 'labels', 'labels.default', 'lapply',
'lazyLoad', 'lazyLoadDBexec', 'lazyLoadDBfetch', 'lbeta', 'lchoose',
'length', 'length.POSIXlt', 'letters', 'levels', 'levels.default',
'lfactorial', 'lgamma', 'library.dynam', 'library.dynam.unload',
'licence', 'license', 'list.dirs', 'list.files', 'list2env', 'load',
'loadNamespace', 'loadedNamespaces', 'loadingNamespaceInfo',
'local', 'lockBinding', 'lockEnvironment', 'log', 'log10', 'log1p',
'log2', 'logb', 'lower.tri', 'ls', 'make.names', 'make.unique',
'makeActiveBinding', 'mapply', 'margin.table', 'mat.or.vec',
'match', 'match.arg', 'match.call', 'match.fun', 'max', 'max.col',
'mean', 'mean.Date', 'mean.POSIXct', 'mean.POSIXlt', 'mean.default',
'mean.difftime', 'mem.limits', 'memCompress', 'memDecompress',
'memory.profile', 'merge', 'merge.data.frame', 'merge.default',
'message', 'mget', 'min', 'missing', 'mode', 'month.abb',
'month.name', 'months', 'months.Date', 'months.POSIXt',
'months.abb', 'months.nameletters', 'names', 'names.POSIXlt',
'namespaceExport', 'namespaceImport', 'namespaceImportClasses',
'namespaceImportFrom', 'namespaceImportMethods', 'nargs', 'nchar',
'ncol', 'new.env', 'ngettext', 'nlevels', 'noquote', 'norm',
'normalizePath', 'nrow', 'numeric_version', 'nzchar', 'objects',
'oldClass', 'on.exit', 'open', 'open.connection', 'open.srcfile',
'open.srcfilealias', 'open.srcfilecopy', 'options', 'order',
'ordered', 'outer', 'packBits', 'packageEvent',
'packageHasNamespace', 'packageStartupMessage', 'package_version',
'pairlist', 'parent.env', 'parent.frame', 'parse',
'parseNamespaceFile', 'paste', 'paste0', 'path.expand',
'path.package', 'pipe', 'pmatch', 'pmax', 'pmax.int', 'pmin',
'pmin.int', 'polyroot', 'pos.to.env', 'pretty', 'pretty.default',
'prettyNum', 'print', 'print.AsIs', 'print.DLLInfo',
'print.DLLInfoList', 'print.DLLRegisteredRoutines', 'print.Date',
'print.NativeRoutineList', 'print.POSIXct', 'print.POSIXlt',
'print.by', 'print.condition', 'print.connection',
'print.data.frame', 'print.default', 'print.difftime',
'print.factor', 'print.function', 'print.hexmode',
'print.libraryIQR', 'print.listof', 'print.noquote',
'print.numeric_version', 'print.octmode', 'print.packageInfo',
'print.proc_time', 'print.restart', 'print.rle',
'print.simple.list', 'print.srcfile', 'print.srcref',
'print.summary.table', 'print.summaryDefault', 'print.table',
'print.warnings', 'prmatrix', 'proc.time', 'prod', 'prop.table',
'provideDimnames', 'psigamma', 'pushBack', 'pushBackLength', 'q',
'qr', 'qr.Q', 'qr.R', 'qr.X', 'qr.coef', 'qr.default', 'qr.fitted',
'qr.qty', 'qr.qy', 'qr.resid', 'qr.solve', 'quarters',
'quarters.Date', 'quarters.POSIXt', 'quit', 'quote', 'range',
'range.default', 'rank', 'rapply', 'raw', 'rawConnection',
'rawConnectionValue', 'rawShift', 'rawToBits', 'rawToChar', 'rbind',
'rbind.data.frame', 'rcond', 'read.dcf', 'readBin', 'readChar',
'readLines', 'readRDS', 'readRenviron', 'readline', 'reg.finalizer',
'regexec', 'regexpr', 'registerS3method', 'registerS3methods',
'regmatches', 'remove', 'removeTaskCallback', 'rep', 'rep.Date',
'rep.POSIXct', 'rep.POSIXlt', 'rep.factor', 'rep.int',
'rep.numeric_version', 'rep_len', 'replace', 'replicate',
'requireNamespace', 'restartDescription', 'restartFormals',
'retracemem', 'rev', 'rev.default', 'rle', 'rm', 'round',
'round.Date', 'round.POSIXt', 'row', 'row.names',
'row.names.data.frame', 'row.names.default', 'rowMeans', 'rowSums',
'rownames', 'rowsum', 'rowsum.data.frame', 'rowsum.default',
'sQuote', 'sample', 'sample.int', 'sapply', 'save', 'save.image',
'saveRDS', 'scale', 'scale.default', 'scan', 'search',
'searchpaths', 'seek', 'seek.connection', 'seq', 'seq.Date',
'seq.POSIXt', 'seq.default', 'seq.int', 'seq_along', 'seq_len',
'sequence', 'serialize', 'set.seed', 'setHook', 'setNamespaceInfo',
'setSessionTimeLimit', 'setTimeLimit', 'setdiff', 'setequal',
'setwd', 'shQuote', 'showConnections', 'sign', 'signalCondition',
'signif', 'simpleCondition', 'simpleError', 'simpleMessage',
'simpleWarning', 'simplify2array', 'sin', 'single',
'sinh', 'sink', 'sink.number', 'slice.index', 'socketConnection',
'socketSelect', 'solve', 'solve.default', 'solve.qr', 'sort',
'sort.POSIXlt', 'sort.default', 'sort.int', 'sort.list', 'split',
'split.Date', 'split.POSIXct', 'split.data.frame', 'split.default',
'sprintf', 'sqrt', 'srcfile', 'srcfilealias', 'srcfilecopy',
'srcref', 'standardGeneric', 'stderr', 'stdin', 'stdout', 'stop',
'stopifnot', 'storage.mode', 'strftime', 'strptime', 'strsplit',
'strtoi', 'strtrim', 'structure', 'strwrap', 'sub', 'subset',
'subset.data.frame', 'subset.default', 'subset.matrix',
'substitute', 'substr', 'substring', 'sum', 'summary',
'summary.Date', 'summary.POSIXct', 'summary.POSIXlt',
'summary.connection', 'summary.data.frame', 'summary.default',
'summary.factor', 'summary.matrix', 'summary.proc_time',
'summary.srcfile', 'summary.srcref', 'summary.table',
'suppressMessages', 'suppressPackageStartupMessages',
'suppressWarnings', 'svd', 'sweep', 'sys.call', 'sys.calls',
'sys.frame', 'sys.frames', 'sys.function', 'sys.load.image',
'sys.nframe', 'sys.on.exit', 'sys.parent', 'sys.parents',
'sys.save.image', 'sys.source', 'sys.status', 'system',
'system.file', 'system.time', 'system2', 't', 't.data.frame',
't.default', 'table', 'tabulate', 'tail', 'tan', 'tanh', 'tapply',
'taskCallbackManager', 'tcrossprod', 'tempdir', 'tempfile',
'testPlatformEquivalence', 'textConnection', 'textConnectionValue',
'toString', 'toString.default', 'tolower', 'topenv', 'toupper',
'trace', 'traceback', 'tracemem', 'tracingState', 'transform',
'transform.data.frame', 'transform.default', 'trigamma', 'trunc',
'trunc.Date', 'trunc.POSIXt', 'truncate', 'truncate.connection',
'try', 'tryCatch', 'typeof', 'unclass', 'undebug', 'union',
'unique', 'unique.POSIXlt', 'unique.array', 'unique.data.frame',
'unique.default', 'unique.matrix', 'unique.numeric_version',
'units', 'units.difftime', 'unix.time', 'unlink', 'unlist',
'unloadNamespace', 'unlockBinding', 'unname', 'unserialize',
'unsplit', 'untrace', 'untracemem', 'unz', 'upper.tri', 'url',
'utf8ToInt', 'vapply', 'version', 'warning', 'warnings', 'weekdays',
'weekdays.Date', 'weekdays.POSIXt', 'which', 'which.max',
'which.min', 'with', 'with.default', 'withCallingHandlers',
'withRestarts', 'withVisible', 'within', 'within.data.frame',
'within.list', 'write', 'write.dcf', 'writeBin', 'writeChar',
'writeLines', 'xor', 'xor.hexmode', 'xor.octmode',
'xpdrows.data.frame', 'xtfrm', 'xtfrm.AsIs', 'xtfrm.Date',
'xtfrm.POSIXct', 'xtfrm.POSIXlt', 'xtfrm.Surv', 'xtfrm.default',
'xtfrm.difftime', 'xtfrm.factor', 'xtfrm.numeric_version', 'xzfile',
'zapsmall'
)
tokens = {
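        # Added note (not in the original source): each key below names a
        # RegexLexer state; include('name') splices that state's rules in
        # place, and '#pop' in a rule returns to the previous state (used by
        # the string states at the bottom).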
'comments': [
(r'#.*$', Comment.Single),
],
'valid_name': [
(r'[a-zA-Z][\w.]*', Text),
# can begin with ., but not if that is followed by a digit
(r'\.[a-zA-Z_][\w.]*', Text),
],
'punctuation': [
(r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation),
],
'keywords': [
(words(builtins_base, suffix=r'(?![\w. =])'),
Keyword.Pseudo),
(r'(if|else|for|while|repeat|in|next|break|return|switch|function)'
r'(?![\w.])',
Keyword.Reserved),
(r'(array|category|character|complex|double|function|integer|list|'
r'logical|matrix|numeric|vector|data.frame|c)'
r'(?![\w.])',
Keyword.Type),
(r'(library|require|attach|detach|source)'
r'(?![\w.])',
Keyword.Namespace)
],
'operators': [
(r'<<?-|->>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator),
(r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator)
],
'builtin_symbols': [
(r'(NULL|NA(_(integer|real|complex|character)_)?|'
r'letters|LETTERS|Inf|TRUE|FALSE|NaN|pi|\.\.(\.|[0-9]+))'
r'(?![\w.])',
Keyword.Constant),
(r'(T|F)\b', Name.Builtin.Pseudo),
],
'numbers': [
# hex number
(r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex),
# decimal number
(r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[Li]?',
Number),
],
'statements': [
include('comments'),
# whitespaces
(r'\s+', Text),
(r'`.*?`', String.Backtick),
(r'\'', String, 'string_squote'),
(r'\"', String, 'string_dquote'),
include('builtin_symbols'),
include('numbers'),
include('keywords'),
include('punctuation'),
include('operators'),
include('valid_name'),
],
'root': [
include('statements'),
# blocks:
(r'\{|\}', Punctuation),
# (r'\{', Punctuation, 'block'),
(r'.', Text),
],
# 'block': [
# include('statements'),
# ('\{', Punctuation, '#push'),
# ('\}', Punctuation, '#pop')
# ],
'string_squote': [
(r'([^\'\\]|\\.)*\'', String, '#pop'),
],
'string_dquote': [
(r'([^"\\]|\\.)*"', String, '#pop'),
],
}
def analyse_text(text):
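        # Added note (not in the original source): an R-style assignment such
        # as "x <- 1" is taken as weak evidence of R/S code; the lookahead
        # avoids matching longer arrows like "<--". 0.11 is a low confidence
        # score for pygments' lexer guessing.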
if re.search(r'[a-z0-9_\])\s]<-(?!-)', text):
            return 0.11


class RdLexer(RegexLexer):
"""
    Pygments Lexer for R documentation (Rd) files

    This is a very minimal implementation, highlighting little more
    than the macros. A description of Rd syntax is found in `Writing R
    Extensions <http://cran.r-project.org/doc/manuals/R-exts.html>`_
    and `Parsing Rd files <http://developer.r-project.org/parseRd.pdf>`_.

    .. versionadded:: 1.6
"""
name = 'Rd'
aliases = ['rd']
filenames = ['*.Rd']
mimetypes = ['text/x-r-doc']
# To account for verbatim / LaTeX-like / and R-like areas
# would require parsing.
tokens = {
'root': [
# catch escaped brackets and percent sign
(r'\\[\\{}%]', String.Escape),
# comments
(r'%.*$', Comment),
# special macros with no arguments
(r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant),
# macros
(r'\\[a-zA-Z]+\b', Keyword),
# special preprocessor macros
(r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc),
# non-escaped brackets
(r'[{}]', Name.Builtin),
# everything else
(r'[^\\%\n{}]+', Text),
(r'.', Text),
]
}
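
# ---------------------------------------------------------------------------
# Added usage sketch -- not part of the original pygments module. A hedged,
# minimal example of driving RdLexer directly; `highlight` and
# TerminalFormatter are standard pygments APIs, and the sample Rd fragment is
# purely illustrative. Run the module as a script to try it.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    sample_rd = '\\name{example}\n\\title{A tiny Rd fragment}\n% a comment\n'
    print(highlight(sample_rd, RdLexer(), TerminalFormatter()))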
| mit | -7,440,505,547,704,729,000 | 51.439294 | 91 | 0.56426 | false |
danakj/chromium | tools/perf/page_sets/tough_layout_cases.py | 5 | 1694 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from telemetry.page import cache_temperature as cache_temperature_module
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story


class ToughLayoutCasesPage(page_module.Page):
def __init__(self, url, page_set, cache_temperature=None):
super(ToughLayoutCasesPage, self).__init__(
url=url, page_set=page_set, credentials_path = 'data/credentials.json',
shared_page_state_class=shared_page_state.SharedDesktopPageState,
cache_temperature=cache_temperature)
    self.archive_data_file = 'data/tough_layout_cases.json'


class ToughLayoutCasesPageSet(story.StorySet):
"""
The slowest layouts observed in the alexa top 1 million sites in July 2013.
"""
def __init__(self, cache_temperatures=None):
super(ToughLayoutCasesPageSet, self).__init__(
archive_data_file='data/tough_layout_cases.json',
cloud_storage_bucket=story.PARTNER_BUCKET)
if cache_temperatures is None:
cache_temperatures = [cache_temperature_module.ANY]
urls_list = [
'http://oilevent.com',
'http://www.muzoboss.ru',
'http://natunkantha.com',
'http://www.mossiella.com',
'http://bookish.com',
'http://mydiyclub.com',
'http://amarchoti.blogspot.com',
'http://picarisimo.es',
'http://chinaapache.com',
'http://indoritel.com'
]
for url in urls_list:
for temp in cache_temperatures:
self.AddStory(ToughLayoutCasesPage(url, self, cache_temperature=temp))
| bsd-3-clause | -1,261,042,277,175,611,600 | 34.291667 | 79 | 0.697757 | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.