arun6582/django | django/db/models/sql/compiler.py | bsd-3-clause
import re
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import OrderBy, Random, RawSQL, Ref
from django.db.models.query_utils import QueryWrapper, select_related_descend
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError
FORCE = object()
class SQLCompiler:
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
        # The select, klass_info, and annotations are needed by
        # QuerySet.iterator(); these are set as a side-effect of executing the
        # query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self.ordering_parts = re.compile(r'(.*)\s(ASC|DESC)(.*)')
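        # Illustrative note (editor's addition, not in the original file):
        # ordering_parts splits a compiled ORDER BY term into the expression
        # and its direction, e.g.
        #   self.ordering_parts.search('"app_book"."name" DESC').group(1)
        # returns '"app_book"."name"', which get_order_by() and
        # get_extra_select() use to deduplicate terms that differ only in
        # direction.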
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.tables):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
self.where, self.having = self.query.where.split_having()
extra_select = self.get_extra_select(order_by, self.select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, 'as_sql'):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
        # set to group by. So, we need to add cols in select, order_by, and
        # having into the GROUP BY clause in any case.
for expr, _, _ in select:
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for expr, (sql, params, is_ref) in order_by:
if expr.contains_aggregate:
continue
# We can skip References to select clause, as all expressions in
# the select clause are already part of the group by.
if is_ref:
continue
expressions.extend(expr.get_source_expressions())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
sql, params = self.compile(expr)
if (sql, tuple(params)) not in seen:
result.append((sql, params))
seen.add((sql, tuple(params)))
return result
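    # Worked example (editor's addition, assuming a hypothetical Book model
    # with a `name` field; not part of the original file):
    #
    #   Book.objects.values('name').annotate(total=Count('pk'))
    #
    # reaches this method with query.group_by holding the 'name' reference and
    # the Count annotation in `select`; aggregates contribute no columns via
    # get_group_by_cols(), so the result is roughly
    #
    #   [('"app_book"."name"', [])]
    #
    # i.e. GROUP BY "app_book"."name".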
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# The logic here is: if the main model's primary key is in the
# query, then set new_expressions to that field. If that happens,
# then also add having expressions to group by.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (getattr(expr, 'target', None) == self.query.model._meta.pk and
getattr(expr, 'alias', None) == self.query.tables[0]):
pk = expr
break
if pk:
# MySQLism: Columns in HAVING clause must be added to the GROUP BY.
expressions = [pk] + [expr for expr in expressions if expr in having]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
pks = {expr for expr in expressions if hasattr(expr, 'target') and expr.target.primary_key}
aliases = {expr.alias for expr in pks}
expressions = [
expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases
]
return expressions
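    # Illustrative effect (editor's addition): on a backend with
    # allows_group_by_pk (MySQL), a clause such as
    #   GROUP BY "book"."id", "book"."name", "book"."pages"
    # collapses to just "book"."id" (plus any columns referenced in HAVING),
    # while allows_group_by_selected_pks backends (e.g. PostgreSQL) only drop
    # the non-pk columns of tables whose primary key is already grouped.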
def get_select(self):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- Which model to instantiate
- Which columns for that model are present in the query (by
position of the select clause).
        - related_klass_infos: [f, klass_info] to descend into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
select_list = []
for c in self.get_default_columns():
select_list.append(select_idx)
select.append((c, None))
select_idx += 1
klass_info = {
'model': self.query.model,
'select_fields': select_list,
}
# self.query.select is a special case. These columns never go to
# any model.
for col in self.query.select:
select.append((col, None))
select_idx += 1
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] +
ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
try:
sql, params = self.compile(col, select_format=True)
except EmptyResultSet:
# Select a predicate that's always False.
sql, params = '0', ()
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def get_order_by(self):
"""
Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by or self.query.get_meta().ordering or [])
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
order_by = []
for field in ordering:
if hasattr(field, 'resolve_expression'):
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field.reverse_ordering()
order_by.append((field, False))
continue
if field == '?': # random
order_by.append((OrderBy(Random()), False))
continue
col, order = get_order_dir(field, asc)
            descending = order == 'DESC'
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
order_by.append((
OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending),
True))
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT clause
order_by.append((
OrderBy(self.query.annotations[col], descending=descending),
False))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
order_by.append((
OrderBy(
RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
descending=descending
), False))
continue
if not self.query._extra or col not in self.query._extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
order_by.extend(self.find_ordering_name(
field, self.query.get_meta(), default_order=asc))
else:
if col not in self.query.extra_select:
order_by.append((
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False))
else:
order_by.append((
OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending),
True))
result = []
seen = set()
for expr, is_ref in order_by:
if self.query.combinator:
src = expr.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias:
continue
if src == sel_expr:
expr.set_source_expressions([RawSQL('%d' % (idx + 1), ())])
break
else:
raise DatabaseError('ORDER BY term does not match any column in the result set.')
resolved = expr.resolve_expression(
self.query, allow_joins=True, reuse=None)
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql).group(1)
if (without_ordering, tuple(params)) in seen:
continue
seen.add((without_ordering, tuple(params)))
result.append((resolved, (sql, params, is_ref)))
return result
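    # Illustrative inputs (editor's addition) for the `ordering` entries
    # handled above, e.g. for a hypothetical Book model:
    #
    #   Book.objects.order_by(Lower('title').desc(), '?', '-author__name')
    #
    # yields, in order: a resolvable OrderBy expression, a Random() term, and
    # a 'field1__field2' path routed through find_ordering_name().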
def get_extra_select(self, order_by, select):
extra_select = []
select_sql = [t[1] for t in select]
if self.query.distinct and not self.query.distinct_fields:
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql).group(1)
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or (
name in self.query.external_aliases and name not in self.query.table_map)):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node, select_format=False):
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
if select_format is FORCE or (select_format and not self.query.subquery):
return node.output_field.select_format(self, sql, params)
return sql, params
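    # Illustrative sketch (editor's addition, hypothetical expression class):
    # an expression can override compilation per backend; compile() looks up
    # 'as_' + connection.vendor before falling back to as_sql():
    #
    #   class Position(Func):
    #       function = 'POSITION'
    #
    #       def as_mysql(self, compiler, connection):
    #           # Used only when connection.vendor == 'mysql'.
    #           return super().as_sql(compiler, connection, function='LOCATE')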
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection)
for query in self.query.combined_queries
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.')
if compiler.get_order_by():
raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.')
parts = (compiler.as_sql() for compiler in compilers)
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == 'union':
combinator_sql += ' ALL'
braces = '({})' if features.supports_slicing_ordering_in_compound else '{}'
sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts))
result = [' {} '.format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
for_update_part = None
where, w_params = self.compile(self.where) if self.where is not None else ("", [])
having, h_params = self.compile(self.having) if self.having is not None else ("", [])
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, 'supports_select_{}'.format(combinator)):
raise DatabaseError('{} not supported on this database backend.'.format(combinator))
result, params = self.get_combinator_sql(combinator, self.query.combinator_all)
else:
result = ['SELECT']
params = []
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
elif with_col_aliases:
s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result.append(', '.join(out_cols))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError('select_for_update cannot be used outside of a transaction.')
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
# If it's a NOWAIT/SKIP LOCKED query but the backend
# doesn't support it, raise a DatabaseError to prevent a
# possible deadlock.
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
elif skip_locked and not self.connection.features.has_select_for_update_skip_locked:
raise DatabaseError('SKIP LOCKED is not supported on this database backend.')
for_update_part = self.connection.ops.for_update_sql(nowait=nowait, skip_locked=skip_locked)
if for_update_part and self.connection.features.for_update_after_from:
result.append(for_update_part)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError('annotate() + distinct(fields) is not implemented.')
if not order_by:
order_by = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if for_update_part and not self.connection.features.for_update_after_from:
result.append(for_update_part)
return ' '.join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Return a list of column expressions, one per concrete field, suitable
        for use in the SELECT clause; callers compile these to appropriately
        quoted SQL.
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if from_parent and model is not None and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(name)
else:
result.append("%s.%s" % (qn(alias), qn2(target.column)))
return result
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
        descending = order == 'DESC'
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(OrderBy(t.get_col(alias), descending=descending), False) for t in targets]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
        get_order_by() and get_distinct() must produce same target columns on
        same input, as the prefixes of get_order_by() and get_distinct() must
match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, path = self.query.setup_joins(
pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts
def get_from_clause(self):
"""
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Subclasses, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices)
related_klass_infos = []
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.remote_field.model,
'field': f,
'reverse': False,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
_, _, _, joins, _ = self.query.setup_joins([related_field_name], opts, root_alias)
alias = joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
'model': model,
'field': f,
'reverse': True,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested.keys()).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def deferred_to_columns(self):
"""
Convert the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Return the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, row, converters):
row = list(row)
for pos, (convs, expression) in converters.items():
value = row[pos]
for converter in convs:
value = converter(value, expression, self.connection, self.query.context)
row[pos] = value
return tuple(row)
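    # Illustrative sketch (editor's addition, hypothetical converter):
    # converters are plain callables applied per column position, e.g.
    #
    #   def to_bool(value, expression, connection, context):
    #       # SQLite has no BOOLEAN type; booleans come back as 0/1.
    #       return bool(value)
    #
    # apply_converters() would then replace row[pos] with
    # to_bool(row[pos], expression, connection, context) for each result row.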
def results_iter(self, results=None):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(MULTI)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
for rows in results:
for row in rows:
if converters:
row = self.apply_converters(row, converters)
yield row
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI, chunked_fetch=False):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
if not result_type:
result_type = NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Caller didn't specify a result_type, so just give them back the
# cursor to process (and close).
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor, self.connection.features.empty_fetchmany_value,
self.col_count
)
if not chunked_fetch and not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
def __init__(self, *args, **kwargs):
self.return_id = False
super().__init__(*args, **kwargs)
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, 'as_sql'):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = '%s', [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
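    # Illustrative outcomes (editor's addition) of the branches above, using
    # hypothetical values:
    #   field is None, val == 'NOW()'        -> ('NOW()', [])
    #   val has as_sql (an expression)       -> (compiled sql, its params)
    #   field has get_placeholder (geo etc.) -> ('ST_GeomFromText(%s)', [val])
    #   common case, val == 42               -> ('%s', [42])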
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(self.query, allow_joins=False, for_save=True)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
'can only be used to update, not to insert.' % (value, field)
)
if value.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
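    # Worked example (editor's addition): for two objects over fields (a, b)
    # where every field uses the plain '%s' placeholder, this returns
    #   placeholder_rows = (('%s', '%s'), ('%s', '%s'))
    #   param_rows       = [[a1, b1], [a2, b2]]
    # i.e. one flattened parameter list per VALUES (...) row.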
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if has_fields:
value_rows = [
[self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (not self.return_id and self.connection.features.has_bulk_insert)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
if self.return_id and self.connection.features.can_return_id_from_insert:
if self.connection.features.can_return_ids_from_bulk_insert:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += [r_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, return_id=False):
assert not (
return_id and len(self.query.objs) != 1 and
not self.connection.features.can_return_ids_from_bulk_insert
)
self.return_id = return_id
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_ids_from_bulk_insert and len(self.query.objs) > 1:
return self.connection.ops.fetch_returned_insert_ids(cursor)
if self.connection.features.can_return_id_from_insert:
assert len(self.query.objs) == 1
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(
cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column
)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
assert len([t for t in self.query.tables if self.query.alias_refcount[t] > 0]) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
elif hasattr(val, 'prepare_database_save'):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
table = self.query.tables[0]
result = [
'UPDATE %s SET' % qn(table),
', '.join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.clone(klass=Query)
query.select_related = False
query.clear_ordering(True)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super().pre_sql_setup()
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation, select_format=FORCE)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
def cursor_iter(cursor, sentinel, col_count):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[0:col_count] for r in rows]
finally:
cursor.close()
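# Illustrative usage (editor's addition, not part of the original module):
#
#   for chunk in cursor_iter(cursor,
#                            connection.features.empty_fetchmany_value,
#                            compiler.col_count):
#       for row in chunk:  # each row is truncated to col_count columns
#           process(row)
#
# Iteration stops when fetchmany() returns the backend's sentinel value, and
# the finally clause closes the cursor even if the consumer exits early.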
arthurfurlan/django-shortim | src/shortim/migrations/0004_auto__add_field_shorturl_collect_tries.py | gpl-3.0
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ShortURL.collect_tries'
db.add_column('shortim_shorturl', 'collect_tries', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
def backwards(self, orm):
# Deleting field 'ShortURL.collect_tries'
db.delete_column('shortim_shorturl', 'collect_tries')
models = {
'shortim.shorturl': {
'Meta': {'ordering': "['-id']", 'object_name': 'ShortURL'},
'canonical_url': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'collect_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'collect_tries': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'remote_user': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'db_index': 'True'})
},
'shortim.shorturlhit': {
'Meta': {'ordering': "['-date']", 'object_name': 'ShortURLHit'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remote_user': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'shorturl': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hits'", 'to': "orm['shortim.ShortURL']"})
}
}
complete_apps = ['shortim']
kbidarkar/robottelo | tests/foreman/rhai/test_rhai.py | gpl-3.0
"""Tests for Red Hat Access Insights
:Requirement: Rhai
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: UI
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import time
from fauxfactory import gen_string
from nailgun import entities
from robottelo import manifests
from robottelo.api.utils import upload_manifest
from robottelo.constants import DEFAULT_SUBSCRIPTION_NAME
from robottelo.constants import DISTRO_RHEL6, DISTRO_RHEL7
from robottelo.decorators import run_in_one_thread, skip_if_not_set
from robottelo.test import UITestCase
from robottelo.ui.locators import locators
from robottelo.ui.navigator import Navigator
from robottelo.ui.session import Session
from robottelo.vm import VirtualMachine
@run_in_one_thread
class RHAITestCase(UITestCase):
@classmethod
def setUpClass(cls): # noqa
super(RHAITestCase, cls).setUpClass()
# Create a new organization with prefix 'insights'
org = entities.Organization(
name='insights_{0}'.format(gen_string('alpha', 6))
).create()
# Upload manifest
with manifests.clone() as manifest:
upload_manifest(org.id, manifest.content)
# Create activation key using default CV and library environment
activation_key = entities.ActivationKey(
auto_attach=True,
content_view=org.default_content_view.id,
environment=org.library.id,
name=gen_string('alpha'),
organization=org,
).create()
# Walk through the list of subscriptions.
# Find the "Red Hat Employee Subscription" and attach it to the
# recently-created activation key.
for subs in entities.Subscription(organization=org).search():
if subs.read_json()['product_name'] == DEFAULT_SUBSCRIPTION_NAME:
# 'quantity' must be 1, not subscription['quantity']. Greater
# values produce this error: "RuntimeError: Error: Only pools
# with multi-entitlement product subscriptions can be added to
# the activation key with a quantity greater than one."
activation_key.add_subscriptions(data={
'quantity': 1,
'subscription_id': subs.id,
})
break
cls.org_label = org.label
cls.ak_name = activation_key.name
cls.org_name = org.name
@skip_if_not_set('clients')
def test_positive_register_client_to_rhai(self):
"""Check client registration to redhat-access-insights service.
:id: f3aefdb3-ac99-402d-afd9-e53e9ee1e8d7
:expectedresults: Registered client should appear in the Systems sub-
menu of Red Hat Access Insights
"""
# Register a VM to Access Insights Service
with VirtualMachine(distro=DISTRO_RHEL6) as vm:
try:
vm.configure_rhai_client(self.ak_name, self.org_label,
DISTRO_RHEL6)
with Session(self) as session:
# view clients registered to Red Hat Access Insights
session.nav.go_to_select_org(self.org_name)
Navigator(self.browser).go_to_insights_systems()
result = self.rhai.view_registered_systems()
self.assertIn("1", result,
'Registered clients are not listed')
finally:
vm.get('/var/log/redhat-access-insights/'
'redhat-access-insights.log',
'./insights_client_registration.log')
def test_negative_org_not_selected(self):
"""Verify that user attempting to access RHAI is directed to select an
Organization if there is no organization selected
:id: 6ddfdb29-eeb5-41a4-8851-ad19130b112c
:expectedresults: 'Organization Selection Required' message must be
displayed if the user tries to view Access Insights overview
without selecting an org
"""
with Session(self) as session:
# Given that the user does not specify any Organization
session.nav.go_to_select_org("Any Organization")
session.nav.go_to_insights_overview()
# 'Organization Selection Required' message must be present
result = session.nav.wait_until_element(
locators['insights.org_selection_msg']).text
self.assertIn("Organization Selection Required", result)
@skip_if_not_set('clients')
def test_positive_unregister_client_from_rhai(self):
"""Verify that 'Unregister' a system from RHAI works correctly then the
system should not be able to use the service.
:id: 580f9704-8c6d-4f63-b027-68a6ac97af77
:expectedresults: Once the system is unregistered from the RHAI web
interface then the unregistered system should return `1` on running
the service 'redhat-access-insights'
"""
# Register a VM to Access Insights Service
with VirtualMachine(distro=DISTRO_RHEL7) as vm:
try:
vm.configure_rhai_client(self.ak_name, self.org_label,
DISTRO_RHEL7)
with Session(self) as session:
session.nav.go_to_select_org(self.org_name)
Navigator(self.browser).go_to_insights_systems()
# Click on the unregister icon 'X' in the table against the
# registered system listed.
strategy, value = locators['insights.unregister_system']
session.nav.click(
(strategy, value % vm.hostname),
wait_for_ajax=True,
ajax_timeout=40,
)
# Confirm selection for clicking on 'Yes' to unregister the
# system
session.nav.click(
locators['insights.unregister_button']
)
self.browser.refresh()
time.sleep(60)
self.browser.refresh()
result = vm.run('redhat-access-insights')
self.assertEqual(result.return_code, 1,
"System has not been unregistered")
finally:
vm.get('/var/log/redhat-access-insights/'
'redhat-access-insights.log',
'./insights_unregister.log')
mancoast/CPythonPyc_test | cpython/221_test_cl.py | gpl-3.0
#! /usr/bin/env python
"""Whimpy test script for the cl module
Roger E. Masse
"""
import cl
from test_support import verbose
clattrs = ['ADDED_ALGORITHM_ERROR', 'ALAW', 'ALGORITHM_ID',
'ALGORITHM_VERSION', 'AUDIO', 'AWARE_ERROR', 'AWARE_MPEG_AUDIO',
'AWARE_MULTIRATE', 'AWCMP_CONST_QUAL', 'AWCMP_FIXED_RATE',
'AWCMP_INDEPENDENT', 'AWCMP_JOINT_STEREO', 'AWCMP_LOSSLESS',
'AWCMP_MPEG_LAYER_I', 'AWCMP_MPEG_LAYER_II', 'AWCMP_STEREO',
'Algorithm', 'AlgorithmNumber', 'AlgorithmType', 'AudioFormatName',
'BAD_ALGORITHM_NAME', 'BAD_ALGORITHM_TYPE', 'BAD_BLOCK_SIZE',
'BAD_BOARD', 'BAD_BUFFERING', 'BAD_BUFFERLENGTH_NEG',
'BAD_BUFFERLENGTH_ODD', 'BAD_BUFFER_EXISTS', 'BAD_BUFFER_HANDLE',
'BAD_BUFFER_POINTER', 'BAD_BUFFER_QUERY_SIZE', 'BAD_BUFFER_SIZE',
'BAD_BUFFER_SIZE_POINTER', 'BAD_BUFFER_TYPE',
'BAD_COMPRESSION_SCHEME', 'BAD_COMPRESSOR_HANDLE',
'BAD_COMPRESSOR_HANDLE_POINTER', 'BAD_FRAME_SIZE',
'BAD_FUNCTIONALITY', 'BAD_FUNCTION_POINTER', 'BAD_HEADER_SIZE',
'BAD_INITIAL_VALUE', 'BAD_INTERNAL_FORMAT', 'BAD_LICENSE',
'BAD_MIN_GT_MAX', 'BAD_NO_BUFFERSPACE', 'BAD_NUMBER_OF_BLOCKS',
'BAD_PARAM', 'BAD_PARAM_ID_POINTER', 'BAD_PARAM_TYPE', 'BAD_POINTER',
'BAD_PVBUFFER', 'BAD_SCHEME_POINTER', 'BAD_STREAM_HEADER',
'BAD_STRING_POINTER', 'BAD_TEXT_STRING_PTR', 'BEST_FIT',
'BIDIRECTIONAL', 'BITRATE_POLICY', 'BITRATE_TARGET',
'BITS_PER_COMPONENT', 'BLENDING', 'BLOCK_SIZE', 'BOTTOM_UP',
'BUFFER_NOT_CREATED', 'BUF_DATA', 'BUF_FRAME', 'BytesPerPixel',
'BytesPerSample', 'CHANNEL_POLICY', 'CHROMA_THRESHOLD', 'CODEC',
'COMPONENTS', 'COMPRESSED_BUFFER_SIZE', 'COMPRESSION_RATIO',
'COMPRESSOR', 'CONTINUOUS_BLOCK', 'CONTINUOUS_NONBLOCK',
'CompressImage', 'DATA', 'DECOMPRESSOR', 'DecompressImage',
'EDGE_THRESHOLD', 'ENABLE_IMAGEINFO', 'END_OF_SEQUENCE', 'ENUM_VALUE',
'EXACT_COMPRESSION_RATIO', 'EXTERNAL_DEVICE', 'FLOATING_ENUM_VALUE',
'FLOATING_RANGE_VALUE', 'FRAME', 'FRAME_BUFFER_SIZE',
'FRAME_BUFFER_SIZE_ZERO', 'FRAME_RATE', 'FRAME_TYPE', 'G711_ALAW',
'G711_ULAW', 'GRAYSCALE', 'GetAlgorithmName', 'HDCC',
'HDCC_SAMPLES_PER_TILE', 'HDCC_TILE_THRESHOLD', 'HEADER_START_CODE',
'IMAGE_HEIGHT', 'IMAGE_WIDTH', 'INTERNAL_FORMAT',
'INTERNAL_IMAGE_HEIGHT', 'INTERNAL_IMAGE_WIDTH', 'INTRA', 'JPEG',
'JPEG_ERROR', 'JPEG_NUM_PARAMS', 'JPEG_QUALITY_FACTOR',
'JPEG_QUANTIZATION_TABLES', 'JPEG_SOFTWARE', 'JPEG_STREAM_HEADERS',
'KEYFRAME', 'LAST_FRAME_INDEX', 'LAYER', 'LUMA_THRESHOLD',
'MAX_NUMBER_OF_AUDIO_ALGORITHMS', 'MAX_NUMBER_OF_ORIGINAL_FORMATS',
'MAX_NUMBER_OF_PARAMS', 'MAX_NUMBER_OF_VIDEO_ALGORITHMS', 'MONO',
'MPEG_VIDEO', 'MVC1', 'MVC2', 'MVC2_BLENDING', 'MVC2_BLENDING_OFF',
'MVC2_BLENDING_ON', 'MVC2_CHROMA_THRESHOLD', 'MVC2_EDGE_THRESHOLD',
'MVC2_ERROR', 'MVC2_LUMA_THRESHOLD', 'NEXT_NOT_AVAILABLE',
'NOISE_MARGIN', 'NONE', 'NUMBER_OF_FRAMES', 'NUMBER_OF_PARAMS',
'ORIENTATION', 'ORIGINAL_FORMAT', 'OpenCompressor',
'OpenDecompressor', 'PARAM_OUT_OF_RANGE', 'PREDICTED', 'PREROLL',
'ParamID', 'ParamNumber', 'ParamType', 'QUALITY_FACTOR',
'QUALITY_LEVEL', 'QueryAlgorithms', 'QueryMaxHeaderSize',
'QueryScheme', 'QuerySchemeFromName', 'RANGE_VALUE', 'RGB', 'RGB332',
'RGB8', 'RGBA', 'RGBX', 'RLE', 'RLE24', 'RTR', 'RTR1',
'RTR_QUALITY_LEVEL', 'SAMPLES_PER_TILE', 'SCHEME_BUSY',
'SCHEME_NOT_AVAILABLE', 'SPEED', 'STEREO_INTERLEAVED',
'STREAM_HEADERS', 'SetDefault', 'SetMax', 'SetMin', 'TILE_THRESHOLD',
'TOP_DOWN', 'ULAW', 'UNCOMPRESSED', 'UNCOMPRESSED_AUDIO',
'UNCOMPRESSED_VIDEO', 'UNKNOWN_SCHEME', 'VIDEO', 'VideoFormatName',
'Y', 'YCbCr', 'YCbCr422', 'YCbCr422DC', 'YCbCr422HC', 'YUV', 'YUV422',
'YUV422DC', 'YUV422HC', '__doc__', '__name__', 'cvt_type', 'error']
# This is a very inobtrusive test for the existence of the cl
# module and all its attributes.
def main():
    # touch all the attributes of cl without doing anything
if verbose:
print 'Touching cl module attributes...'
for attr in clattrs:
if verbose:
print 'touching: ', attr
getattr(cl, attr)
main()
mancoast/CPythonPyc_test | cpython/272_test_slice.py | gpl-3.0
# tests for slice objects; in particular the indices method.
import unittest
from test import test_support
from cPickle import loads, dumps
import sys
class SliceTest(unittest.TestCase):
def test_constructor(self):
self.assertRaises(TypeError, slice)
self.assertRaises(TypeError, slice, 1, 2, 3, 4)
def test_repr(self):
self.assertEqual(repr(slice(1, 2, 3)), "slice(1, 2, 3)")
def test_hash(self):
# Verify clearing of SF bug #800796
self.assertRaises(TypeError, hash, slice(5))
self.assertRaises(TypeError, slice(5).__hash__)
def test_cmp(self):
s1 = slice(1, 2, 3)
s2 = slice(1, 2, 3)
s3 = slice(1, 2, 4)
self.assertEqual(s1, s2)
self.assertNotEqual(s1, s3)
class Exc(Exception):
pass
class BadCmp(object):
def __eq__(self, other):
raise Exc
__hash__ = None # Silence Py3k warning
s1 = slice(BadCmp())
s2 = slice(BadCmp())
self.assertRaises(Exc, cmp, s1, s2)
self.assertEqual(s1, s1)
s1 = slice(1, BadCmp())
s2 = slice(1, BadCmp())
self.assertEqual(s1, s1)
self.assertRaises(Exc, cmp, s1, s2)
s1 = slice(1, 2, BadCmp())
s2 = slice(1, 2, BadCmp())
self.assertEqual(s1, s1)
self.assertRaises(Exc, cmp, s1, s2)
def test_members(self):
s = slice(1)
self.assertEqual(s.start, None)
self.assertEqual(s.stop, 1)
self.assertEqual(s.step, None)
s = slice(1, 2)
self.assertEqual(s.start, 1)
self.assertEqual(s.stop, 2)
self.assertEqual(s.step, None)
s = slice(1, 2, 3)
self.assertEqual(s.start, 1)
self.assertEqual(s.stop, 2)
self.assertEqual(s.step, 3)
class AnyClass:
pass
obj = AnyClass()
s = slice(obj)
self.assertTrue(s.stop is obj)
def test_indices(self):
self.assertEqual(slice(None ).indices(10), (0, 10, 1))
self.assertEqual(slice(None, None, 2).indices(10), (0, 10, 2))
self.assertEqual(slice(1, None, 2).indices(10), (1, 10, 2))
self.assertEqual(slice(None, None, -1).indices(10), (9, -1, -1))
self.assertEqual(slice(None, None, -2).indices(10), (9, -1, -2))
self.assertEqual(slice(3, None, -2).indices(10), (3, -1, -2))
# issue 3004 tests
self.assertEqual(slice(None, -9).indices(10), (0, 1, 1))
self.assertEqual(slice(None, -10).indices(10), (0, 0, 1))
self.assertEqual(slice(None, -11).indices(10), (0, 0, 1))
self.assertEqual(slice(None, -10, -1).indices(10), (9, 0, -1))
self.assertEqual(slice(None, -11, -1).indices(10), (9, -1, -1))
self.assertEqual(slice(None, -12, -1).indices(10), (9, -1, -1))
self.assertEqual(slice(None, 9).indices(10), (0, 9, 1))
self.assertEqual(slice(None, 10).indices(10), (0, 10, 1))
self.assertEqual(slice(None, 11).indices(10), (0, 10, 1))
self.assertEqual(slice(None, 8, -1).indices(10), (9, 8, -1))
self.assertEqual(slice(None, 9, -1).indices(10), (9, 9, -1))
self.assertEqual(slice(None, 10, -1).indices(10), (9, 9, -1))
self.assertEqual(
slice(-100, 100 ).indices(10),
slice(None).indices(10)
)
self.assertEqual(
slice(100, -100, -1).indices(10),
slice(None, None, -1).indices(10)
)
self.assertEqual(slice(-100L, 100L, 2L).indices(10), (0, 10, 2))
self.assertEqual(range(10)[::sys.maxint - 1], [0])
self.assertRaises(OverflowError, slice(None).indices, 1L<<100)
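    # Note on the assertions above: indices(length) first resolves negative
    # start/stop values relative to length, then clamps them into the valid
    # range (down to -1 for negative steps) and fills in defaults, which is
    # why slice(None, -11, -1).indices(10) yields (9, -1, -1): iterate
    # 9, 8, ..., 0.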
def test_setslice_without_getslice(self):
tmp = []
class X(object):
def __setslice__(self, i, j, k):
tmp.append((i, j, k))
x = X()
with test_support.check_py3k_warnings():
x[1:2] = 42
self.assertEqual(tmp, [(1, 2, 42)])
def test_pickle(self):
s = slice(10, 20, 3)
for protocol in (0,1,2):
t = loads(dumps(s, protocol))
self.assertEqual(s, t)
self.assertEqual(s.indices(15), t.indices(15))
self.assertNotEqual(id(s), id(t))
def test_main():
test_support.run_unittest(SliceTest)
if __name__ == "__main__":
test_main()
| gpl-3.0 | -5,527,695,207,957,464,000 | 31.932836 | 73 | 0.544528 | false |
RCHG/blog_backup | _vendor/bundle/ruby/2.1.0/gems/pygments.rb-0.6.0/vendor/pygments-main/scripts/get_vimkw.py | 38 | 1478 | from __future__ import print_function
import re
r_line = re.compile(r"^(syn keyword vimCommand contained|syn keyword vimOption "
r"contained|syn keyword vimAutoEvent contained)\s+(.*)")
r_item = re.compile(r"(\w+)(?:\[(\w+)\])?")
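# r_line picks out the "syn keyword ..." definition lines, and r_item splits
# each entry such as "fu[nction]" into ('fu', 'nction'); getkw() below joins
# the two groups back into (shortest form, full name) pairs like
# ('fu', 'function').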
def getkw(input, output):
out = file(output, 'w')
output_info = {'command': [], 'option': [], 'auto': []}
for line in file(input):
m = r_line.match(line)
if m:
# Decide which output gets mapped to d
if 'vimCommand' in m.group(1):
d = output_info['command']
elif 'AutoEvent' in m.group(1):
d = output_info['auto']
else:
d = output_info['option']
# Extract all the shortened versions
for i in r_item.finditer(m.group(2)):
d.append('(%r,%r)' %
(i.group(1), "%s%s" % (i.group(1), i.group(2) or '')))
output_info['option'].append("('nnoremap','nnoremap')")
output_info['option'].append("('inoremap','inoremap')")
output_info['option'].append("('vnoremap','vnoremap')")
for a, b in output_info.items():
b.sort()
print('%s=[%s]' % (a, ','.join(b)), file=out)
def is_keyword(w, keywords):
for i in range(len(w), 0, -1):
if w[:i] in keywords:
return keywords[w[:i]][:len(w)] == w
return False
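# A minimal sketch of how is_keyword() resolves Vim's abbreviated keywords
# (the mapping below is hypothetical, not produced by getkw above): keys are
# the shortest accepted prefixes, values the full names, and any prefix of
# the full name that is at least as long as a key matches.
#
#   kw = {'fun': 'function'}
#   is_keyword('func', kw)   # -> True ('func' extends 'fun' toward 'function')
#   is_keyword('funk', kw)   # -> False (diverges from 'function')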
if __name__ == "__main__":
getkw("/usr/share/vim/vim73/syntax/vim.vim", "temp.py")
| mit | 2,092,608,149,730,451,500 | 33.372093 | 80 | 0.518268 | false |
cernops/nova | nova/db/sqlalchemy/migrate_repo/versions/273_sqlite_foreign_keys.py | 79 | 4690 | # Copyright 2014 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import ForeignKeyConstraint, UniqueConstraint
from oslo_db.sqlalchemy import utils
from sqlalchemy import MetaData, schema, Table
FKEYS = [
('fixed_ips', 'instance_uuid', 'instances', 'uuid',
'fixed_ips_instance_uuid_fkey'),
('block_device_mapping', 'instance_uuid', 'instances', 'uuid',
'block_device_mapping_instance_uuid_fkey'),
('instance_info_caches', 'instance_uuid', 'instances', 'uuid',
'instance_info_caches_instance_uuid_fkey'),
('instance_metadata', 'instance_uuid', 'instances', 'uuid',
'instance_metadata_instance_uuid_fkey'),
('instance_system_metadata', 'instance_uuid', 'instances', 'uuid',
'instance_system_metadata_ibfk_1'),
('instance_type_projects', 'instance_type_id', 'instance_types', 'id',
'instance_type_projects_ibfk_1'),
('iscsi_targets', 'volume_id', 'volumes', 'id',
'iscsi_targets_volume_id_fkey'),
('reservations', 'usage_id', 'quota_usages', 'id',
'reservations_ibfk_1'),
('security_group_instance_association', 'instance_uuid',
'instances', 'uuid',
'security_group_instance_association_instance_uuid_fkey'),
('security_group_instance_association', 'security_group_id',
'security_groups', 'id',
'security_group_instance_association_ibfk_1'),
('virtual_interfaces', 'instance_uuid', 'instances', 'uuid',
'virtual_interfaces_instance_uuid_fkey'),
('compute_nodes', 'service_id', 'services', 'id',
'fk_compute_nodes_service_id'),
('instance_actions', 'instance_uuid', 'instances', 'uuid',
'fk_instance_actions_instance_uuid'),
('instance_faults', 'instance_uuid', 'instances', 'uuid',
'fk_instance_faults_instance_uuid'),
('migrations', 'instance_uuid', 'instances', 'uuid',
'fk_migrations_instance_uuid'),
]
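# Each FKEYS entry is consumed by the loop in upgrade() below as
# (source table, source column, referenced table, referenced column,
# constraint name).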
UNIQUES = [
('compute_nodes', 'uniq_compute_nodes0host0hypervisor_hostname',
['host', 'hypervisor_hostname']),
('fixed_ips', 'uniq_fixed_ips0address0deleted',
['address', 'deleted']),
('instance_info_caches', 'uniq_instance_info_caches0instance_uuid',
['instance_uuid']),
('instance_type_projects',
'uniq_instance_type_projects0instance_type_id0project_id0deleted',
['instance_type_id', 'project_id', 'deleted']),
('pci_devices', 'uniq_pci_devices0compute_node_id0address0deleted',
['compute_node_id', 'address', 'deleted']),
('virtual_interfaces', 'uniq_virtual_interfaces0address0deleted',
['address', 'deleted']),
]
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name == 'sqlite':
# SQLite is also missing this one index
if not utils.index_exists(migrate_engine, 'fixed_ips', 'address'):
utils.add_index(migrate_engine, 'fixed_ips', 'address',
['address'])
for src_table, src_column, dst_table, dst_column, name in FKEYS:
src_table = Table(src_table, meta, autoload=True)
if name in set(fk.name for fk in src_table.foreign_keys):
continue
src_column = src_table.c[src_column]
dst_table = Table(dst_table, meta, autoload=True)
dst_column = dst_table.c[dst_column]
fkey = ForeignKeyConstraint(columns=[src_column],
refcolumns=[dst_column],
name=name)
fkey.create()
# SQLAlchemy versions < 1.0.0 don't reflect unique constraints
# for SQLite correctly causing sqlalchemy-migrate to recreate
# some tables with missing unique constraints. Re-add some
# potentially missing unique constraints as a workaround.
for table_name, name, column_names in UNIQUES:
table = Table(table_name, meta, autoload=True)
if name in set(c.name for c in table.constraints
if isinstance(table, schema.UniqueConstraint)):
continue
uc = UniqueConstraint(*column_names, table=table, name=name)
uc.create()
| apache-2.0 | 4,451,525,298,448,156,700 | 42.425926 | 78 | 0.641365 | false |
engdan77/edoAutoHomeMobile | twisted/runner/procmontap.py | 65 | 2298 | # -*- test-case-name: twisted.runner.test.test_procmontap -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Support for creating a service which runs a process monitor.
"""
from twisted.python import usage
from twisted.runner.procmon import ProcessMonitor
class Options(usage.Options):
"""
Define the options accepted by the I{twistd procmon} plugin.
"""
synopsis = "[procmon options] commandline"
optParameters = [["threshold", "t", 1, "How long a process has to live "
"before the death is considered instant, in seconds.",
float],
["killtime", "k", 5, "How long a process being killed "
"has to get its affairs in order before it gets killed "
"with an unmaskable signal.",
float],
["minrestartdelay", "m", 1, "The minimum time (in "
"seconds) to wait before attempting to restart a "
"process", float],
["maxrestartdelay", "M", 3600, "The maximum time (in "
"seconds) to wait before attempting to restart a "
"process", float]]
optFlags = []
longdesc = """\
procmon runs processes, monitors their progress, and restarts them when they
die.
procmon will not attempt to restart a process that appears to die instantly;
with each "instant" death (less than 1 second, by default), it will delay
approximately twice as long before restarting it. A successful run will reset
the counter.
Eg twistd procmon sleep 10"""
def parseArgs(self, *args):
"""
Grab the command line that is going to be started and monitored
"""
self['args'] = args
def postOptions(self):
"""
Check for dependencies.
"""
if len(self["args"]) < 1:
raise usage.UsageError("Please specify a process commandline")
def makeService(config):
s = ProcessMonitor()
s.threshold = config["threshold"]
s.killTime = config["killtime"]
s.minRestartDelay = config["minrestartdelay"]
s.maxRestartDelay = config["maxrestartdelay"]
s.addProcess(" ".join(config["args"]), config["args"])
return s
| mit | -4,269,868,125,983,666,700 | 30.479452 | 78 | 0.601828 | false |
mandeepdhami/neutron | neutron/db/migration/alembic_migrations/firewall_init_ops.py | 61 | 3536 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial schema operations for firewall service plugin
from alembic import op
import sqlalchemy as sa
action_types = sa.Enum('allow', 'deny', name='firewallrules_action')
def upgrade():
op.create_table(
'firewall_policies',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('description', sa.String(length=1024), nullable=True),
sa.Column('shared', sa.Boolean(), nullable=True),
sa.Column('audited', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'firewalls',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('description', sa.String(length=1024), nullable=True),
sa.Column('shared', sa.Boolean(), nullable=True),
sa.Column('admin_state_up', sa.Boolean(), nullable=True),
sa.Column('status', sa.String(length=16), nullable=True),
sa.Column('firewall_policy_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['firewall_policy_id'],
['firewall_policies.id'],
name='firewalls_ibfk_1'),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'firewall_rules',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('description', sa.String(length=1024), nullable=True),
sa.Column('firewall_policy_id', sa.String(length=36), nullable=True),
sa.Column('shared', sa.Boolean(), nullable=True),
sa.Column('protocol', sa.String(length=40), nullable=True),
sa.Column('ip_version', sa.Integer(), nullable=False),
sa.Column('source_ip_address', sa.String(length=46), nullable=True),
sa.Column('destination_ip_address', sa.String(length=46),
nullable=True),
sa.Column('source_port_range_min', sa.Integer(), nullable=True),
sa.Column('source_port_range_max', sa.Integer(), nullable=True),
sa.Column('destination_port_range_min', sa.Integer(), nullable=True),
sa.Column('destination_port_range_max', sa.Integer(), nullable=True),
sa.Column('action', action_types, nullable=True),
sa.Column('enabled', sa.Boolean(), nullable=True),
sa.Column('position', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['firewall_policy_id'],
['firewall_policies.id'],
name='firewall_rules_ibfk_1'),
sa.PrimaryKeyConstraint('id'))
| apache-2.0 | 7,335,430,847,341,203,000 | 46.146667 | 78 | 0.63405 | false |
0x7678/youtube-dl | youtube_dl/extractor/slideshare.py | 16 | 2025 | from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
from ..utils import (
ExtractorError,
)
class SlideshareIE(InfoExtractor):
_VALID_URL = r'https?://www\.slideshare\.net/[^/]+?/(?P<title>.+?)($|\?)'
_TEST = {
'url': 'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity',
'info_dict': {
'id': '25665706',
'ext': 'mp4',
'title': 'Managing Scale and Complexity',
'description': 'This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). This presentation was given by Adrian Cockcroft from Netflix.',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
page_title = mobj.group('title')
webpage = self._download_webpage(url, page_title)
slideshare_obj = self._search_regex(
r'var slideshare_object = ({.*?}); var user_info =',
webpage, 'slideshare object')
info = json.loads(slideshare_obj)
if info['slideshow']['type'] != 'video':
raise ExtractorError('Webpage type is "%s": only video extraction is supported for Slideshare' % info['slideshow']['type'], expected=True)
doc = info['doc']
bucket = info['jsplayer']['video_bucket']
ext = info['jsplayer']['video_extension']
video_url = compat_urlparse.urljoin(bucket, doc + '-SD.' + ext)
description = self._html_search_regex(
r'<p\s+(?:style="[^"]*"\s+)?class=".*?description.*?"[^>]*>(.*?)</p>', webpage,
'description', fatal=False)
return {
'_type': 'video',
'id': info['slideshow']['id'],
'title': info['slideshow']['title'],
'ext': ext,
'url': video_url,
'thumbnail': info['slideshow']['pin_image_url'],
'description': description,
}
| unlicense | 1,844,048,444,652,582,100 | 35.818182 | 191 | 0.56642 | false |
foreni-packages/golismero | misc/old_tests/plugin_tests/ui/test.py | 8 | 4271 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: https://github.com/golismero
Golismero project mail: [email protected]
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from golismero.api.audit import get_audit_count
from golismero.api.config import Config
from golismero.api.data import Data
from golismero.api.data.db import Database
from golismero.api.plugin import UIPlugin
from golismero.main.console import colorize
from golismero.messaging.codes import MessageType, MessageCode, MessagePriority
from golismero.messaging.message import Message
import time
import warnings
#------------------------------------------------------------------------------
class TestUIPlugin(UIPlugin):
"""
Test UI plugin.
"""
#--------------------------------------------------------------------------
def run(self, info):
if not isinstance(info, Data):
raise TypeError("Expected Data, got %r instead" % type(info))
print "-" * 79
print "ID: %s" % info.identity
print "Data: %r" % info
history = Database.get_plugin_history(info.identity)
if history:
print "History:"
for plugin_id in history:
print " " + plugin_id
print
#--------------------------------------------------------------------------
def recv_msg(self, message):
if not isinstance(message, Message):
raise TypeError("Expected Message, got %r instead" % type(message))
print "-" * 79
print "Message:"
print " Timestamp: %s" % time.ctime(message.timestamp)
print " Audit: %s" % message.audit_name
print " Plugin: %s" % message.plugin_id
print " Type: %s" % MessageType.get_name_from_value(message.message_type)
print " Code: %s" % MessageCode.get_name_from_value_and_type(message.message_code, message.message_type)
print " Priority: %s" % MessagePriority.get_name_from_value(message.priority)
print " Payload: %r" % (message.message_info,)
print
if message.message_type == MessageType.MSG_TYPE_CONTROL:
if message.message_code == MessageCode.MSG_CONTROL_STOP_AUDIT:
if get_audit_count() == 1:
Config._context.send_msg(
message_type = MessageType.MSG_TYPE_CONTROL,
message_code = MessageCode.MSG_CONTROL_STOP,
message_info = True,
priority = MessagePriority.MSG_PRIORITY_LOW
)
elif message.message_code == MessageCode.MSG_CONTROL_LOG:
(text, level, is_error) = message.message_info
if is_error:
print colorize(text, "magenta")
else:
print colorize(text, "cyan")
elif message.message_code == MessageCode.MSG_CONTROL_ERROR:
(description, traceback) = message.message_info
print colorize(description, "magenta")
print colorize(traceback, "magenta")
elif message.message_code == MessageCode.MSG_CONTROL_WARNING:
for w in message.message_info:
formatted = warnings.formatwarning(w.message, w.category, w.filename, w.lineno, w.line)
print colorize(formatted, "yellow")
#--------------------------------------------------------------------------
def get_accepted_types(self):
pass
| gpl-2.0 | -2,678,954,885,038,705,000 | 38.915888 | 118 | 0.584641 | false |
micropython/micropython | examples/asmled.py | 15 | 1679 | # flash LED #1 using inline assembler
# this version is overly verbose and uses word stores
@micropython.asm_thumb
def flash_led(r0):
movw(r1, (stm.GPIOA + stm.GPIO_BSRRL) & 0xFFFF)
movt(r1, ((stm.GPIOA + stm.GPIO_BSRRL) >> 16) & 0x7FFF)
movw(r2, 1 << 13)
movt(r2, 0)
movw(r3, 0)
movt(r3, 1 << 13)
b(loop_entry)
label(loop1)
# turn LED on
str(r2, [r1, 0])
# delay for a bit
movw(r4, 5599900 & 0xFFFF)
movt(r4, (5599900 >> 16) & 0xFFFF)
label(delay_on)
sub(r4, r4, 1)
cmp(r4, 0)
bgt(delay_on)
# turn LED off
str(r3, [r1, 0])
# delay for a bit
movw(r4, 5599900 & 0xFFFF)
movt(r4, (5599900 >> 16) & 0xFFFF)
label(delay_off)
sub(r4, r4, 1)
cmp(r4, 0)
bgt(delay_off)
# loop r0 times
sub(r0, r0, 1)
label(loop_entry)
cmp(r0, 0)
bgt(loop1)
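# Note on the word stores above: a 32-bit write at GPIOA+BSRRL covers the
# whole BSRR set/reset register pair, so r2 (pin mask in the low half) turns
# the LED on and r3 (the same mask in the high half) turns it off.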
# flash LED #2 using inline assembler
# this version uses half-word stores, and the convenience assembler operation 'movwt'
@micropython.asm_thumb
def flash_led_v2(r0):
# get the GPIOA address in r1
movwt(r1, stm.GPIOA)
# get the bit mask for PA14 (the pin LED #2 is on)
movw(r2, 1 << 14)
b(loop_entry)
label(loop1)
# turn LED on
strh(r2, [r1, stm.GPIO_BSRRL])
# delay for a bit
movwt(r4, 5599900)
label(delay_on)
sub(r4, r4, 1)
cmp(r4, 0)
bgt(delay_on)
# turn LED off
strh(r2, [r1, stm.GPIO_BSRRH])
# delay for a bit
movwt(r4, 5599900)
label(delay_off)
sub(r4, r4, 1)
cmp(r4, 0)
bgt(delay_off)
# loop r0 times
sub(r0, r0, 1)
label(loop_entry)
cmp(r0, 0)
bgt(loop1)
flash_led(5)
flash_led_v2(5)
| mit | -8,229,000,693,064,108,000 | 18.298851 | 85 | 0.575342 | false |
saadqc/Sub-Merge | VideoPlayer.py | 1 | 7603 | #! /usr/bin/python
#
# Qt example for VLC Python bindings
# Copyright (C) 2009-2010 the VideoLAN team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
#
import sys
import vlc
from PyQt4 import QtGui, QtCore
from Messenger import Messenger
class Player(QtGui.QMainWindow):
"""A simple Media Player using VLC and Qt
"""
def __init__(self, master=None):
QtGui.QMainWindow.__init__(self, master)
self.setWindowTitle("Media Player")
# creating a basic vlc instance
self.instance = vlc.Instance()
# creating an empty vlc media player
self.mediaplayer = self.instance.media_player_new()
self.createUI()
self.isPaused = False
def createUI(self):
"""Set up the user interface, signals & slots
"""
self.widget = QtGui.QWidget(self)
self.setCentralWidget(self.widget)
# In this widget, the video will be drawn
if sys.platform == "darwin": # for MacOS
self.videoframe = QtGui.QMacCocoaViewContainer(0)
else:
self.videoframe = QtGui.QFrame()
self.palette = self.videoframe.palette()
self.palette.setColor (QtGui.QPalette.Window,
QtGui.QColor(0,0,0))
self.videoframe.setPalette(self.palette)
self.videoframe.setAutoFillBackground(True)
self.positionslider = QtGui.QSlider(QtCore.Qt.Horizontal, self)
self.positionslider.setToolTip("Position")
self.positionslider.setMaximum(1000)
self.connect(self.positionslider,
QtCore.SIGNAL("sliderMoved(int)"), self.setPosition)
self.hbuttonbox = QtGui.QHBoxLayout()
self.playbutton = QtGui.QPushButton("Play")
self.hbuttonbox.addWidget(self.playbutton)
self.connect(self.playbutton, QtCore.SIGNAL("clicked()"),
self.PlayPause)
self.stopbutton = QtGui.QPushButton("Stop")
self.hbuttonbox.addWidget(self.stopbutton)
self.connect(self.stopbutton, QtCore.SIGNAL("clicked()"),
self.Stop)
self.hbuttonbox.addStretch(1)
self.volumeslider = QtGui.QSlider(QtCore.Qt.Horizontal, self)
self.volumeslider.setMaximum(100)
self.volumeslider.setValue(self.mediaplayer.audio_get_volume())
self.volumeslider.setToolTip("Volume")
self.hbuttonbox.addWidget(self.volumeslider)
self.connect(self.volumeslider,
QtCore.SIGNAL("valueChanged(int)"),
self.setVolume)
self.vboxlayout = QtGui.QVBoxLayout()
self.vboxlayout.addWidget(self.videoframe)
self.vboxlayout.addWidget(self.positionslider)
self.vboxlayout.addLayout(self.hbuttonbox)
self.widget.setLayout(self.vboxlayout)
self.timer = QtCore.QTimer(self)
self.timer.setInterval(200)
self.connect(self.timer, QtCore.SIGNAL("timeout()"),
self.updateUI)
def PlayPause(self):
"""Toggle play/pause status
"""
if self.mediaplayer.is_playing():
self.mediaplayer.pause()
self.playbutton.setText("Play")
self.isPaused = True
else:
if self.mediaplayer.play() == -1:
self.OpenFile()
return
self.mediaplayer.play()
self.playbutton.setText("Pause")
self.timer.start()
self.isPaused = False
def isPaused(self):
"""
:return:
"""
return not self.mediaplayer.is_playing()
def closeEvent(self, QCloseEvent):
"""
Stop media player on window close event
:param QCloseEvent:
:return:
"""
self.mediaplayer.stop()
self.mediaplayer.release()
Messenger.main_window.player = None
def Stop(self):
"""Stop player
"""
self.mediaplayer.stop()
self.playbutton.setText("Play")
def OpenFile(self, filename=None):
"""Open a media file in a MediaPlayer
"""
# create the media
if sys.version < '3':
filename = unicode(filename)
self.media = self.instance.media_new(filename)
# put the media in the media player
self.mediaplayer.set_media(self.media)
# parse the metadata of the file
self.media.parse()
# set the title of the track as window title
self.setWindowTitle(self.media.get_meta(0))
# the media player has to be 'connected' to the QFrame
        # (otherwise a video would be displayed in its own window)
# this is platform specific!
# you have to give the id of the QFrame (or similar object) to
# vlc, different platforms have different functions for this
if sys.platform.startswith('linux'): # for Linux using the X Server
self.mediaplayer.set_xwindow(self.videoframe.winId())
elif sys.platform == "win32": # for Windows
self.mediaplayer.set_hwnd(self.videoframe.winId())
elif sys.platform == "darwin": # for MacOS
self.mediaplayer.set_nsobject(self.videoframe.winId())
self.PlayPause()
def setVolume(self, Volume):
"""Set the volume
"""
self.mediaplayer.audio_set_volume(Volume)
def setPosition(self, position):
"""Set the position
"""
# setting the position to where the slider was dragged
self.mediaplayer.set_position(position / 1000.0)
# the vlc MediaPlayer needs a float value between 0 and 1, Qt
# uses integer variables, so you need a factor; the higher the
# factor, the more precise are the results
# (1000 should be enough)
def setTime(self, position):
"""
Set position of video in ms
:param position:
:return:
"""
self.mediaplayer.set_time(position)
def getPosition(self):
"""
Get Video Position
"""
return self.mediaplayer.get_time()
def getLength(self):
"""
Get video length
:return:
"""
return self.mediaplayer.get_length()
def getVideoSize(self):
"""
Return video size
:return:
"""
video_size = self.mediaplayer.video_get_size()
return video_size
def updateUI(self):
"""updates the user interface"""
# setting the slider to the desired position
self.positionslider.setValue(self.mediaplayer.get_position() * 1000)
if not self.mediaplayer.is_playing():
# no need to call this function if nothing is played
self.timer.stop()
if not self.isPaused:
# after the video finished, the play button stills shows
# "Pause", not the desired behavior of a media player
# this will fix it
self.Stop()
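# A minimal usage sketch (assumptions: the module is run as a standalone
# script, and 'movie.mp4' is a placeholder path, not shipped with this code):
#
#   if __name__ == '__main__':
#       app = QtGui.QApplication(sys.argv)
#       player = Player()
#       player.show()
#       player.resize(640, 480)
#       player.OpenFile('movie.mp4')
#       sys.exit(app.exec_())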
| gpl-3.0 | -4,836,117,426,121,215,000 | 33.09417 | 79 | 0.61502 | false |
bluedynamics/node.ext.python | src/node/ext/python/parser.py | 1 | 23088 | import os
import _ast
import ast
import types
import copy
import exceptions
from odict import odict
from zope.component import provideHandler
from node.ext.directory.interfaces import IFileAddedEvent
from utils import get_dotted_name_from_astnode
from node.ext.python.interfaces import (
CODESECTION_STARTTOKEN,
CODESECTION_ENDTOKEN,
Call,
IModule,
IFunction,
IDocstring,
IImport,
IAttribute,
IDecorator,
)
from node.ext.python.nodes import (
Module,
Docstring,
ProtectedSection,
Import,
Attribute,
Decorator,
Function,
Class,
Block,
)
CODESECTION_STARTTOKEN = '##code-section '
CODESECTION_ENDTOKEN = '##/code-section '
POSITION_INSERT = 0
POSITION_AFTER = 1
POSITION_BEFORE = -1
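# Return protocol of BaseParser._findnodeposition() below: POSITION_INSERT
# means "add as a child of the returned node", while POSITION_BEFORE and
# POSITION_AFTER mean "insert as a sibling before/after the returned node"
# (see ModuleParser._hookchildren for the consumer).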
class BaseParser(object):
def __init__(self, model):
self.model = model
def __call__(self):
raise NotImplemented(u'BaseParser does not implement ``__call__``')
def _createastchild(self, astnode):
if hasattr(astnode, 'lineno'):
if astnode.lineno - 1 in self.model.readlines:
return
if isinstance(astnode, _ast.Import) \
or isinstance(astnode, _ast.ImportFrom):
import_ = Import(None, [], astnode, self.model.buffer)
import_.readlines = self.model.readlines
self.model[str(import_.uuid)] = import_
elif isinstance(astnode, _ast.FunctionDef):
function = Function(None, astnode, self.model.buffer)
function.readlines = self.model.readlines
self.model[str(function.uuid)] = function
for childastnode in astnode.body:
function.parser._createastchild(childastnode)
function.initdecorators()
elif isinstance(astnode, _ast.ClassDef):
class_ = Class(None, astnode, self.model.buffer)
class_.readlines = self.model.readlines
self.model[str(class_.uuid)] = class_
for childastnode in astnode.body:
class_.parser._createastchild(childastnode)
class_.initdecorators()
elif isinstance(astnode, _ast.Expr) \
and isinstance(astnode.value, _ast.Str):
docstring = Docstring(None, astnode, self.model.buffer)
docstring.readlines = self.model.readlines
self.model[str(docstring.uuid)] = docstring
elif isinstance(astnode, _ast.Assign):
if not IFunction.providedBy(self.model):
attribute = Attribute([], None, astnode, self.model.buffer)
attribute.readlines = self.model.readlines
self.model[str(attribute.uuid)] = attribute
def _marklines(self, *args):
for arg in args:
if not arg in self.model.readlines:
self.model.readlines.append(arg)
def _findbodyend(self, node):
if not hasattr(node, '_fields'):
return
for fieldname in node._fields:
fields = getattr(node, fieldname)
if type(fields) is not types.ListType:
fields = [fields]
for field in fields:
if hasattr(field, 'lineno'):
if field.lineno > self.model.bufend:
self.model.bufend = field.lineno
self._findbodyend(field)
def _checkbodyendsmultilined(self):
pointer = self.model.bufend
buflen = len(self.model.buffer)
source = ''
while True:
if buflen - 1 <= pointer:
break
line = self.model.buffer[pointer].strip()
source = '%s\n%s' % (source, line)
try:
compile(source, '<string>', 'exec')
break
except SyntaxError, e:
pointer += 1
self.model.bufend = pointer
def _checkbodyendsprotected(self):
pointer = self.model.bufend
if pointer < len(self.model.buffer) - 1:
next = self.model.buffer[pointer].strip()
if next.startswith(CODESECTION_ENDTOKEN):
self.model.bufend += 1
def _findnodeposition(self, startlineno, endlineno, indent):
values = [v for v in self.model.values() \
if not IDecorator.providedBy(v)]
if not values:
return self.model, POSITION_INSERT
last = None
for child in values:
# inrange case
if child.startlineno <= startlineno \
and child.endlineno >= endlineno:
return child.parser._findnodeposition(startlineno,
endlineno,
indent)
# before case
if endlineno < child.startlineno:
return child, POSITION_BEFORE
last = child
# after case - indent check
if last.indent == indent:
return last, POSITION_AFTER
return self.model, POSITION_AFTER
def _findindent(self, lines):
indent = None
for line in lines:
if not line.strip():
continue
curindent = 0
for char in line:
if char != u' ':
break
curindent += 1
if indent is None or curindent < indent:
indent = curindent
if indent is None:
return None
return indent / 4 # XXX improve
def _cutline(self, line):
return line[self.model.indent * 4:] # XXX improve
def _resolvearg(self, arg):
if isinstance(arg, _ast.Str):
return repr(arg.s)
elif isinstance(arg, _ast.Num):
return arg.n
elif isinstance(arg, _ast.Name):
return arg.id
elif isinstance(arg, _ast.Call):
args = list()
for a in arg.args:
args.append(self._resolvearg(a))
kwargs = odict()
for keyword in arg.keywords:
kwargs[keyword.arg] = self._resolvearg(keyword.value)
try:
return Call(name=arg.func.id, args=args, kwargs=kwargs)
except AttributeError:
return Call(name=arg.func.attr, args=args, kwargs=kwargs)
elif isinstance(arg, _ast.Tuple) or isinstance(arg, _ast.List):
ret = list()
for a in arg.elts:
ret.append(self._resolvearg(a))
if isinstance(arg, _ast.Tuple):
ret = tuple(ret)
return ret
elif isinstance(arg, _ast.Dict):
ret = dict()
pointer = 0
for key in arg.keys:
key = self._resolvearg(key)
ret[key] = self._resolvearg(arg.values[pointer])
pointer += 1
return ret
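    # Sketch of what _resolvearg() produces, for orientation: parsing the
    # (hypothetical) source ``foo(1, bar='x')`` into an _ast.Call and feeding
    # it in yields Call(name='foo', args=[1], kwargs={'bar': "'x'"}), with
    # string literals coming back repr()'d via the _ast.Str branch above.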
def parsedecorators(self, astnode):
for dec in astnode.decorator_list:
decorator = Decorator(None, dec)
decorator.buffer = self.model.buffer
decorator.readlines = self.model.readlines
self.model._decorators.append(decorator)
def parse_module_handler(obj, event):
"""Called, if ``Module`` is created and added to ``Directory`` node.
"""
obj.parser()
provideHandler(parse_module_handler, [IModule, IFileAddedEvent])
class ModuleParser(BaseParser):
def __call__(self):
path = self.model.filepath
self.model._buffer = list()
if not os.path.exists(path):
return
if self.model._do_parse:
self._parse()
def _parse(self):
file = open(self.model.filepath, 'r')
cont = file.read()
        # Leading and trailing blank lines cause problems in the builtin
        # "compile" function, so we strip them. In order to provide correct
        # line numbers we store the offset, which we use in case of an
        # exception.
before = len(cont.split(os.linesep))
cont = cont.lstrip()
after = len(cont.split(os.linesep))
cont = cont.rstrip()
self.model._buffer = cont.split(os.linesep)
offset = before - after
file.close()
self.model.readlines = list()
self._extractencoding()
self.model.bufstart = 0
self.model.bufend = len(self.model._buffer)
self.model.bufoffset = offset
try:
self.model.astnode = ast.parse(
os.linesep.join(self.model.buffer).strip(),
self.model.filepath)
except SyntaxError, e:
            # Since the Python source files are stripped we have to add
            # an offset to the line number reported by compile().
ex = exceptions.SyntaxError((e[0], \
(e[1][0], e[1][1] + offset, e[1][2], e[1][3])))
# <- don't read that
raise ex
except TypeError, e:
# We don't have to modify TypeErrors since they don't contain
# line numbers.
raise e
children = self._protectedsections()
for node in children:
self._marklines(*range(node.bufstart, node.bufend))
# for i in xrange(len(self.model.astnode.body)):
# astnode = self.model.astnode.body
for astnode in self.model.astnode.body:
self._createastchild(astnode)
self._markastrelated(self.model)
children += self._parsecodeblocks()
self._hookchildren(children)
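    # _parse() pipeline, for orientation: protected sections are collected
    # first, then AST-backed children (imports, classes, functions,
    # docstrings, attributes) are created and their buffer lines marked,
    # the remaining unmarked lines become Block nodes, and finally all
    # non-AST children are hooked into the tree at the right position.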
def _extractencoding(self):
if len(self.model.buffer) == 0:
return
line = self.model.buffer[0].strip()
if line.startswith(u'# -*- coding:') \
and line.endswith(u'-*-'):
encoding = line[14:len(line) - 3].strip()
self.model.encoding = unicode(encoding)
self.model.readlines.append(0)
def _markastrelated(self, node):
for child in node.values():
if IDocstring.providedBy(child) \
or IImport.providedBy(child) \
or IAttribute.providedBy(child) \
or IDecorator.providedBy(child):
self._marklines(*range(child.bufstart, child.bufend))
else:
self._marklines(*range(child.bufstart, child.defendlineno))
self._markastrelated(child)
def _protectedsections(self):
i = 0
currentnode = None
in_protected_section = False
allnodes = list()
for line in self.model.buffer:
line_strip = line.strip()
if line_strip.startswith('#'):
if line_strip.startswith(CODESECTION_STARTTOKEN):
if in_protected_section:
print "WARNING: Nested protected sections"
continue
# Protected section is starting here
in_protected_section = True
name = line_strip[len(CODESECTION_STARTTOKEN):]
node = ProtectedSection(name, self.model.buffer)
node.sectionname = name
node.readlines = self.model.readlines
node.bufstart = i
currentnode = node
elif line_strip.startswith(CODESECTION_ENDTOKEN):
if not in_protected_section:
raise RuntimeError, \
"ERROR: Protected section closed without open"
if line_strip != CODESECTION_ENDTOKEN + \
currentnode.__name__:
# Protected section is continuing here
currentnode.lines.append(line)
continue
# Protected section is ending here
currentnode.bufend = i + 1
allnodes.append(currentnode)
in_protected_section = False
currentnode = None
i += 1
if in_protected_section:
raise RuntimeError, \
"ERROR: Protected section did not close"
return allnodes
def _parsecodeblocks(self):
blocks = list()
start = end = 0
curline = 0
for line in self.model.buffer:
if curline in self.model.readlines:
if start != end:
blocks += self._createcodeblocks(start, end)
start = end + 1
else:
start = curline + 1
curline += 1
end = curline
blocks += self._createcodeblocks(start, end)
return blocks
def _createcodeblocks(self, start, end):
lines = self.model.buffer[start:end]
if not ''.join(lines).strip():
return []
previndent = None
pointer = 0
ret = []
for line in lines:
pointer += 1
if not line.strip() or line.strip().startswith('#'):
continue
if previndent is None:
previndent = self._findindent([self.model.buffer[start]])
curindent = self._findindent([line])
if curindent >= previndent:
continue
elif curindent < previndent:
block = Block(None, self.model.buffer)
block.readlines = self.model.readlines
block.bufstart = start
block.bufend = start + pointer - 1
ret.append(block)
start = start + pointer - 1
previndent = curindent
block = Block(None, self.model.buffer)
block.readlines = self.model.readlines
block.bufstart = start
block.bufend = end
ret.append(block)
return ret
def _hookchildren(self, children):
for child in children:
if not child.__name__:
child.__name__ = str(child.uuid)
child.__parent__ = self.model
node, position = self._findnodeposition(child.startlineno,
child.endlineno,
child.indent)
child.__parent__ = None
if position == POSITION_INSERT:
node[child.__name__] = child
elif position == POSITION_BEFORE:
node.__parent__.insertbefore(child, node)
elif position == POSITION_AFTER:
try:
node.__parent__.insertafter(child, node)
except AttributeError:
                    # XXX: handle this problem correctly.
                    # For now a descriptive message is raised so that the
                    # user knows how to work around the problem.
                    if isinstance(child, Block):
                        code = '\n'.join(child.buffer)
                        raise ValueError(
                            'This should not have happened; the parser '
                            'currently has problems when a function or '
                            'class ends with a comment.\n'
                            'Please check whether your block ends with a '
                            'comment and remove it if necessary.\n'
                            'See the code:\n%s' % code)
else:
raise
class ImportParser(BaseParser):
def __call__(self):
astnode = self.model.astnode
if isinstance(astnode, _ast.ImportFrom):
self.model.fromimport = unicode(astnode.module)
for name in astnode.names:
asname = name.asname is not None and unicode(name.asname) or None
self.model.names.append([unicode(name.name), asname])
self.model._fromimport_orgin = copy.deepcopy(self.model.fromimport)
self.model._names_orgin = copy.deepcopy(self.model.names)
def _definitionends(self, bufno):
if len(self.model.buffer) < bufno:
return True
if len(self.model.buffer) <= bufno + 1:
return True
line = self.model.buffer[bufno + 1].strip()
for term in [u'from ', u'import ', u'if ', u'for ', u'while ', u'try ',
u'with ', u'class ', u'def ', u'@', u'#', u'"""',
u'\'\'\'']:
if line.startswith(term):
return True
if line == u'' or line.find(u'=') != -1:
return True
return False
class AttributeParser(BaseParser):
def __call__(self):
astnode = self.model.astnode
for target in astnode.targets:
if isinstance(target, _ast.Tuple):
for name in target.elts:
self.model.targets.append(name.id)
elif isinstance(target, _ast.Subscript):
self.model.targets.append(get_dotted_name_from_astnode(target.value))
else:
try:
self.model.targets.append(target.id)
except AttributeError:
self.model.targets.append(target.value.id)
self.model._targets_orgin = copy.deepcopy(self.model.targets)
self._findattributeend()
self._extractvalue()
self._parseastargs(astnode)
self.model._args_orgin = copy.deepcopy(self.model.args)
self.model._kwargs_orgin = copy.deepcopy(self.model.kwargs)
def _findattributeend(self):
pointer = self.model.bufstart
buflen = len(self.model.buffer)
source = ''
while True:
#if pointer + 1 == buflen:
if pointer == buflen:
break
line = self.model.buffer[pointer].strip()
source = '%s\n%s' % (source, line)
try:
compile(source, '<string>', 'exec')
pointer += 1
break
except SyntaxError, e:
pointer += 1
self.model.bufend = pointer
def _extractvalue(self):
lines = self.model.buffer[self.model.bufstart:self.model.bufend]
if not lines:
lines.append(self.model.buffer[self.model.bufstart])
lines[0] = lines[0][lines[0].find('=') + 1:].strip()
for i in range(1, len(lines)):
lines[i] = self._cutline(lines[i])
self.model.value = '\n'.join(lines)
self.model._value_orgin = '\n'.join(lines)
def _parseastargs(self, astnode):
if not hasattr(astnode.value, 'args'):
return
for arg in astnode.value.args:
self.model.args.append(self._resolvearg(arg))
for keyword in astnode.value.keywords:
self.model.kwargs[keyword.arg] = self._resolvearg(keyword.value)
class DecoratorParser(BaseParser):
def __call__(self):
astnode = self.model.astnode
if isinstance(astnode, _ast.Name) or isinstance(astnode, _ast.Attribute):
#the case where the decorator has no parameters
if not getattr(astnode, 'id', None):
# XXX: added by phil because sometimes astnode.id is None
astnode.id = get_dotted_name_from_astnode(astnode)
self.model.decoratorname = astnode.id
self.model._decoratorname_orgin = astnode.id
return
#the decorator has parameters
self.model.is_callable=True
if not getattr(astnode.func, 'id', None):
astnode.func.id=get_dotted_name_from_astnode(astnode.func)
self.model.decoratorname = astnode.func.id
self.model._decoratorname_orgin = astnode.func.id
self._parseastargs(astnode)
self.model._args_orgin = copy.deepcopy(self.model.args)
self.model._kwargs_orgin = copy.deepcopy(self.model.kwargs)
def _parseastargs(self, astnode):
for arg in astnode.args:
self.model.args.append(self._resolvearg(arg))
for keyword in astnode.keywords:
self.model.kwargs[keyword.arg] = self._resolvearg(keyword.value)
def _definitionends(self, bufno):
if len(self.model.buffer) <= bufno:
return True
line = self.model.buffer[bufno + 1].strip()
for term in [u'class ', u'def ', u'@']:
if line.startswith(term):
return True
return False
class FunctionParser(BaseParser):
def __call__(self):
astnode = self.model.astnode
self.model.functionname = astnode.name
self._findbodyend(astnode)
self._checkbodyendsmultilined()
self._checkbodyendsprotected()
self._parseastargs(astnode)
self.model._args_orgin = copy.deepcopy(self.model.args)
self.model._kwargs_orgin = copy.deepcopy(self.model.kwargs)
self.parsedecorators(astnode)
def _parseastargs(self, astnode):
all = list()
for arg in astnode.args.args:
all.append(self._resolvearg(arg))
args = all[:len(all) - len(astnode.args.defaults)]
kwargs = all[len(all) - len(astnode.args.defaults):]
for arg in astnode.args.args:
resolved = self._resolvearg(arg)
if resolved in args:
self.model.args.append(resolved)
pointer = 0
for kwarg in astnode.args.defaults:
self.model.kwargs[kwargs[pointer]] = self._resolvearg(kwarg)
pointer += 1
if astnode.args.vararg:
self.model.args.append('*%s' % astnode.args.vararg)
if astnode.args.kwarg:
self.model.kwargs['**%s' % astnode.args.kwarg] = None
def _definitionends(self, bufno):
if len(self.model.buffer) <= bufno:
return True
line = self.model.buffer[bufno].strip()
if line.find(u'#') > 0:
line = line[0:line.find(u'#')].strip()
if line.endswith(u'\\') \
or line.endswith(u','):
return False
if line.endswith(u':'):
return True
return False
class ClassParser(BaseParser):
def __call__(self):
astnode = self.model.astnode
self.model.classname = astnode.name
self._findbodyend(astnode)
self._checkbodyendsmultilined()
self._checkbodyendsprotected()
def base_name(astnode):
name = list()
while True:
if isinstance(astnode, _ast.Attribute):
name.append(astnode.attr)
astnode = astnode.value
else:
name.append(astnode.id)
break
name.reverse()
return '.'.join(name)
self.model.bases = [base_name(base) for base in astnode.bases]
self.model._bases_orgin = copy.deepcopy(self.model.bases)
self.parsedecorators(astnode)
def _definitionends(self, bufno):
if len(self.model.buffer) <= bufno:
return True
line = self.model.buffer[bufno].strip()
if line.find(u'#') > 0:
line = line[0:line.find(u'#')].strip()
if line.endswith(u'\\') \
or line.endswith(u','):
return False
if line.endswith(u':'):
return True
return False
| bsd-3-clause | -3,549,827,161,211,386,000 | 36.178744 | 92 | 0.545825 | false |
kouaw/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ndr.py | 9 | 3029 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
qualities,
)
class NDRIE(InfoExtractor):
IE_NAME = 'ndr'
IE_DESC = 'NDR.de - Mediathek'
_VALID_URL = r'https?://www\.ndr\.de/.+?(?P<id>\d+)\.html'
_TESTS = [
{
'url': 'http://www.ndr.de/fernsehen/media/dienordreportage325.html',
'md5': '4a4eeafd17c3058b65f0c8f091355855',
'note': 'Video file',
'info_dict': {
'id': '325',
'ext': 'mp4',
'title': 'Blaue Bohnen aus Blocken',
'description': 'md5:190d71ba2ccddc805ed01547718963bc',
'duration': 1715,
},
},
{
'url': 'http://www.ndr.de/info/audio51535.html',
'md5': 'bb3cd38e24fbcc866d13b50ca59307b8',
'note': 'Audio file',
'info_dict': {
'id': '51535',
'ext': 'mp3',
'title': 'La Valette entgeht der Hinrichtung',
'description': 'md5:22f9541913a40fe50091d5cdd7c9f536',
'duration': 884,
}
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
page = self._download_webpage(url, video_id, 'Downloading page')
title = self._og_search_title(page).strip()
description = self._og_search_description(page)
if description:
description = description.strip()
duration = int_or_none(self._html_search_regex(r'duration: (\d+),\n', page, 'duration', fatal=False))
formats = []
mp3_url = re.search(r'''\{src:'(?P<audio>[^']+)', type:"audio/mp3"},''', page)
if mp3_url:
formats.append({
'url': mp3_url.group('audio'),
'format_id': 'mp3',
})
thumbnail = None
video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.hi\.mp4', type:"video/mp4"},''', page)
if video_url:
thumbnails = re.findall(r'''\d+: \{src: "([^"]+)"(?: \|\| '[^']+')?, quality: '([^']+)'}''', page)
if thumbnails:
quality_key = qualities(['xs', 's', 'm', 'l', 'xl'])
largest = max(thumbnails, key=lambda thumb: quality_key(thumb[1]))
thumbnail = 'http://www.ndr.de' + largest[0]
for format_id in 'lo', 'hi', 'hq':
formats.append({
'url': '%s.%s.mp4' % (video_url.group('video'), format_id),
'format_id': format_id,
})
if not formats:
raise ExtractorError('No media links available for %s' % video_id)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
} | gpl-3.0 | -8,857,106,535,945,729,000 | 31.234043 | 110 | 0.482998 | false |
solashirai/edx-platform | common/djangoapps/student/tests/test_roles.py | 147 | 7798 | """
Tests of student.roles
"""
import ddt
from django.test import TestCase
from courseware.tests.factories import UserFactory, StaffFactory, InstructorFactory
from student.tests.factories import AnonymousUserFactory
from student.roles import (
GlobalStaff, CourseRole, CourseStaffRole, CourseInstructorRole,
OrgStaffRole, OrgInstructorRole, RoleCache, CourseBetaTesterRole
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class RolesTestCase(TestCase):
"""
Tests of student.roles
"""
def setUp(self):
super(RolesTestCase, self).setUp()
self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
self.course_loc = self.course_key.make_usage_key('course', '2012_Fall')
self.anonymous_user = AnonymousUserFactory()
self.student = UserFactory()
self.global_staff = UserFactory(is_staff=True)
self.course_staff = StaffFactory(course_key=self.course_key)
self.course_instructor = InstructorFactory(course_key=self.course_key)
def test_global_staff(self):
self.assertFalse(GlobalStaff().has_user(self.student))
self.assertFalse(GlobalStaff().has_user(self.course_staff))
self.assertFalse(GlobalStaff().has_user(self.course_instructor))
self.assertTrue(GlobalStaff().has_user(self.global_staff))
def test_group_name_case_sensitive(self):
uppercase_course_id = "ORG/COURSE/NAME"
lowercase_course_id = uppercase_course_id.lower()
uppercase_course_key = SlashSeparatedCourseKey.from_deprecated_string(uppercase_course_id)
lowercase_course_key = SlashSeparatedCourseKey.from_deprecated_string(lowercase_course_id)
role = "role"
lowercase_user = UserFactory()
CourseRole(role, lowercase_course_key).add_users(lowercase_user)
uppercase_user = UserFactory()
CourseRole(role, uppercase_course_key).add_users(uppercase_user)
self.assertTrue(CourseRole(role, lowercase_course_key).has_user(lowercase_user))
self.assertFalse(CourseRole(role, uppercase_course_key).has_user(lowercase_user))
self.assertFalse(CourseRole(role, lowercase_course_key).has_user(uppercase_user))
self.assertTrue(CourseRole(role, uppercase_course_key).has_user(uppercase_user))
def test_course_role(self):
"""
Test that giving a user a course role enables access appropriately
"""
self.assertFalse(
CourseStaffRole(self.course_key).has_user(self.student),
"Student has premature access to {}".format(self.course_key)
)
CourseStaffRole(self.course_key).add_users(self.student)
self.assertTrue(
CourseStaffRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
# remove access and confirm
CourseStaffRole(self.course_key).remove_users(self.student)
self.assertFalse(
CourseStaffRole(self.course_key).has_user(self.student),
"Student still has access to {}".format(self.course_key)
)
def test_org_role(self):
"""
Test that giving a user an org role enables access appropriately
"""
self.assertFalse(
OrgStaffRole(self.course_key.org).has_user(self.student),
"Student has premature access to {}".format(self.course_key.org)
)
OrgStaffRole(self.course_key.org).add_users(self.student)
self.assertTrue(
OrgStaffRole(self.course_key.org).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key.org))
)
# remove access and confirm
OrgStaffRole(self.course_key.org).remove_users(self.student)
if hasattr(self.student, '_roles'):
del self.student._roles
self.assertFalse(
OrgStaffRole(self.course_key.org).has_user(self.student),
"Student still has access to {}".format(self.course_key.org)
)
def test_org_and_course_roles(self):
"""
        Test that org-wide roles and course roles don't interfere with each other
"""
OrgInstructorRole(self.course_key.org).add_users(self.student)
CourseInstructorRole(self.course_key).add_users(self.student)
self.assertTrue(
OrgInstructorRole(self.course_key.org).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key.org))
)
self.assertTrue(
CourseInstructorRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
# remove access and confirm
OrgInstructorRole(self.course_key.org).remove_users(self.student)
self.assertFalse(
OrgInstructorRole(self.course_key.org).has_user(self.student),
"Student still has access to {}".format(self.course_key.org)
)
self.assertTrue(
CourseInstructorRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
# ok now keep org role and get rid of course one
OrgInstructorRole(self.course_key.org).add_users(self.student)
CourseInstructorRole(self.course_key).remove_users(self.student)
self.assertTrue(
OrgInstructorRole(self.course_key.org).has_user(self.student),
"Student lost has access to {}".format(self.course_key.org)
)
self.assertFalse(
CourseInstructorRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
def test_get_user_for_role(self):
"""
test users_for_role
"""
role = CourseStaffRole(self.course_key)
role.add_users(self.student)
self.assertGreater(len(role.users_with_role()), 0)
def test_add_users_doesnt_add_duplicate_entry(self):
"""
Tests that calling add_users multiple times before a single call
to remove_users does not result in the user remaining in the group.
"""
role = CourseStaffRole(self.course_key)
role.add_users(self.student)
self.assertTrue(role.has_user(self.student))
# Call add_users a second time, then remove just once.
role.add_users(self.student)
role.remove_users(self.student)
self.assertFalse(role.has_user(self.student))
@ddt.ddt
class RoleCacheTestCase(TestCase):
IN_KEY = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
NOT_IN_KEY = SlashSeparatedCourseKey('edX', 'toy', '2013_Fall')
ROLES = (
(CourseStaffRole(IN_KEY), ('staff', IN_KEY, 'edX')),
(CourseInstructorRole(IN_KEY), ('instructor', IN_KEY, 'edX')),
(OrgStaffRole(IN_KEY.org), ('staff', None, 'edX')),
(OrgInstructorRole(IN_KEY.org), ('instructor', None, 'edX')),
(CourseBetaTesterRole(IN_KEY), ('beta_testers', IN_KEY, 'edX')),
)
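    # Each ROLES entry pairs a role instance with the (role name, course key,
    # org) triple that RoleCache.has_role() is expected to report for it.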
def setUp(self):
super(RoleCacheTestCase, self).setUp()
self.user = UserFactory()
@ddt.data(*ROLES)
@ddt.unpack
def test_only_in_role(self, role, target):
role.add_users(self.user)
cache = RoleCache(self.user)
self.assertTrue(cache.has_role(*target))
for other_role, other_target in self.ROLES:
if other_role == role:
continue
self.assertFalse(cache.has_role(*other_target))
@ddt.data(*ROLES)
@ddt.unpack
def test_empty_cache(self, role, target):
cache = RoleCache(self.user)
self.assertFalse(cache.has_role(*target))
| agpl-3.0 | 1,205,417,160,697,388,500 | 38.989744 | 98 | 0.649141 | false |
tadeo/xhtml2pdf | xhtml2pdf/w3c/cssParser.py | 53 | 39118 | #!/usr/bin/env python
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~ Copyright (C) 2002-2004 TechGame Networks, LLC.
##~
##~ This library is free software; you can redistribute it and/or
##~ modify it under the terms of the BSD style License as found in the
##~ LICENSE file included with this distribution.
##
## Modified by Dirk Holtwick <[email protected]>, 2007-2008
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""CSS-2.1 parser.
The CSS 2.1 Specification this parser was derived from can be found at http://www.w3.org/TR/CSS21/
Primary Classes:
* CSSParser
Parses CSS source forms into results using a Builder Pattern. Must
        provide a concrete implementation of CSSBuilderAbstract.
* CSSBuilderAbstract
        Outlines the interface between CSSParser and its rule-builder.
Compose CSSParser with a concrete implementation of the builder to get
usable results from the CSS parser.
Dependencies:
python 2.3 (or greater)
re
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Imports
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import re
import cssSpecial
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Definitions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def isAtRuleIdent(src, ident):
return re.match(r'^@' + ident + r'\s*', src)
def stripAtRuleIdent(src):
return re.sub(r'^@[a-z\-]+\s*', '', src)
class CSSSelectorAbstract(object):
"""Outlines the interface between CSSParser and it's rule-builder for selectors.
CSSBuilderAbstract.selector and CSSBuilderAbstract.combineSelectors must
return concrete implementations of this abstract.
See css.CSSMutableSelector for an example implementation.
"""
def addHashId(self, hashId):
raise NotImplementedError('Subclass responsibility')
def addClass(self, class_):
raise NotImplementedError('Subclass responsibility')
def addAttribute(self, attrName):
raise NotImplementedError('Subclass responsibility')
def addAttributeOperation(self, attrName, op, attrValue):
raise NotImplementedError('Subclass responsibility')
def addPseudo(self, name):
raise NotImplementedError('Subclass responsibility')
def addPseudoFunction(self, name, value):
raise NotImplementedError('Subclass responsibility')
class CSSBuilderAbstract(object):
"""Outlines the interface between CSSParser and it's rule-builder. Compose
CSSParser with a concrete implementation of the builder to get usable
results from the CSS parser.
See css.CSSBuilder for an example implementation
"""
def setCharset(self, charset):
raise NotImplementedError('Subclass responsibility')
#~ css results ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def beginStylesheet(self):
raise NotImplementedError('Subclass responsibility')
def stylesheet(self, elements):
raise NotImplementedError('Subclass responsibility')
def endStylesheet(self):
raise NotImplementedError('Subclass responsibility')
def beginInline(self):
raise NotImplementedError('Subclass responsibility')
def inline(self, declarations):
raise NotImplementedError('Subclass responsibility')
def endInline(self):
raise NotImplementedError('Subclass responsibility')
def ruleset(self, selectors, declarations):
raise NotImplementedError('Subclass responsibility')
#~ css namespaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def resolveNamespacePrefix(self, nsPrefix, name):
raise NotImplementedError('Subclass responsibility')
#~ css @ directives ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def atCharset(self, charset):
raise NotImplementedError('Subclass responsibility')
def atImport(self, import_, mediums, cssParser):
raise NotImplementedError('Subclass responsibility')
def atNamespace(self, nsPrefix, uri):
raise NotImplementedError('Subclass responsibility')
def atMedia(self, mediums, ruleset):
raise NotImplementedError('Subclass responsibility')
def atPage(self, page, pseudopage, declarations):
raise NotImplementedError('Subclass responsibility')
def atFontFace(self, declarations):
raise NotImplementedError('Subclass responsibility')
def atIdent(self, atIdent, cssParser, src):
return src, NotImplemented
#~ css selectors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def combineSelectors(self, selectorA, combiner, selectorB):
"""Return value must implement CSSSelectorAbstract"""
raise NotImplementedError('Subclass responsibility')
def selector(self, name):
"""Return value must implement CSSSelectorAbstract"""
raise NotImplementedError('Subclass responsibility')
#~ css declarations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def property(self, name, value, important=False):
raise NotImplementedError('Subclass responsibility')
def combineTerms(self, termA, combiner, termB):
raise NotImplementedError('Subclass responsibility')
def termIdent(self, value):
raise NotImplementedError('Subclass responsibility')
def termNumber(self, value, units=None):
raise NotImplementedError('Subclass responsibility')
def termRGB(self, value):
raise NotImplementedError('Subclass responsibility')
def termURI(self, value):
raise NotImplementedError('Subclass responsibility')
def termString(self, value):
raise NotImplementedError('Subclass responsibility')
def termUnicodeRange(self, value):
raise NotImplementedError('Subclass responsibility')
def termFunction(self, name, value):
raise NotImplementedError('Subclass responsibility')
def termUnknown(self, src):
raise NotImplementedError('Subclass responsibility')
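# Illustrative sketch (added for clarity, not part of the original module): a
# toy builder that is just enough for CSSParser.parseInline() on simple
# declarations such as 'color: red'.  The class name and the returned shapes
# are assumptions for the example; the real implementation is css.CSSBuilder.
class _ToyInlineBuilder(CSSBuilderAbstract):
    def beginInline(self):
        pass
    def endInline(self):
        pass
    def inline(self, declarations):
        return dict(declarations)
    def property(self, name, value, important=False):
        return (name, value)
    def termIdent(self, value):
        return value
    def termNumber(self, value, units=None):
        return value + (units or '')
    def combineTerms(self, termA, combiner, termB):
        return '%s%s%s' % (termA, combiner or ' ', termB)
# Expected usage once CSSParser (defined below) is available:
#   CSSParser(_ToyInlineBuilder()).parseInline('color: red')  ->  {'color': 'red'}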
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Parser
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSParseError(Exception):
src = None
ctxsrc = None
fullsrc = None
inline = False
srcCtxIdx = None
srcFullIdx = None
ctxsrcFullIdx = None
def __init__(self, msg, src, ctxsrc=None):
Exception.__init__(self, msg)
self.src = src
self.ctxsrc = ctxsrc or src
if self.ctxsrc:
self.srcCtxIdx = self.ctxsrc.find(self.src)
if self.srcCtxIdx < 0:
del self.srcCtxIdx
def __str__(self):
if self.ctxsrc:
return Exception.__str__(self) + ':: (' + repr(self.ctxsrc[:self.srcCtxIdx]) + ', ' + repr(self.ctxsrc[self.srcCtxIdx:self.srcCtxIdx+20]) + ')'
else:
return Exception.__str__(self) + ':: ' + repr(self.src[:40])
def setFullCSSSource(self, fullsrc, inline=False):
self.fullsrc = fullsrc
if inline:
self.inline = inline
if self.fullsrc:
self.srcFullIdx = self.fullsrc.find(self.src)
if self.srcFullIdx < 0:
del self.srcFullIdx
self.ctxsrcFullIdx = self.fullsrc.find(self.ctxsrc)
if self.ctxsrcFullIdx < 0:
del self.ctxsrcFullIdx
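# Added note (not part of the original module): with made-up values such as
#   err = CSSParseError('Unexpected token', '} body', 'a {color:} body')
# the context machinery above renders str(err) as
#   "Unexpected token:: ('a {color:', '} body')"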
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSParser(object):
"""CSS-2.1 parser dependent only upon the re module.
Implemented directly from http://www.w3.org/TR/CSS21/grammar.html
Tested with some existing CSS stylesheets for portability.
CSS Parsing API:
* setCSSBuilder()
To set your concrete implementation of CSSBuilderAbstract
* parseFile()
Use to parse external stylesheets using a file-like object
>>> cssFile = open('test.css', 'r')
>>> stylesheets = myCSSParser.parseFile(cssFile)
* parse()
Use to parse embedded stylesheets using source string
>>> cssSrc = '''
body,body.body {
font: 110%, "Times New Roman", Arial, Verdana, Helvetica, serif;
background: White;
color: Black;
}
a {text-decoration: underline;}
'''
>>> stylesheets = myCSSParser.parse(cssSrc)
* parseInline()
Use to parse inline stylesheets using attribute source string
>>> style = 'font: 110%, "Times New Roman", Arial, Verdana, Helvetica, serif; background: White; color: Black'
>>> stylesheets = myCSSParser.parseInline(style)
* parseAttributes()
Use to parse attribute string values into inline stylesheets
>>> stylesheets = myCSSParser.parseAttributes(
font='110%, "Times New Roman", Arial, Verdana, Helvetica, serif',
background='White',
color='Black')
* parseSingleAttr()
Use to parse a single string value into a CSS expression
>>> fontValue = myCSSParser.parseSingleAttr('110%, "Times New Roman", Arial, Verdana, Helvetica, serif')
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Constants / Variables / Etc.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ParseError = CSSParseError
AttributeOperators = ['=', '~=', '|=', '&=', '^=', '!=', '<>']
SelectorQualifiers = ('#', '.', '[', ':')
SelectorCombiners = ['+', '>']
ExpressionOperators = ('/', '+', ',')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Regular expressions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if True: # makes the following code foldable
_orRule = lambda *args: '|'.join(args)
_reflags = re.I | re.M | re.U
i_hex = '[0-9a-fA-F]'
i_nonascii = u'[\200-\377]'
i_unicode = '\\\\(?:%s){1,6}\s?' % i_hex
i_escape = _orRule(i_unicode, u'\\\\[ -~\200-\377]')
# i_nmstart = _orRule('[A-Za-z_]', i_nonascii, i_escape)
i_nmstart = _orRule('\-[^0-9]|[A-Za-z_]', i_nonascii, i_escape) # XXX Added hyphen, http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier
i_nmchar = _orRule('[-0-9A-Za-z_]', i_nonascii, i_escape)
i_ident = '((?:%s)(?:%s)*)' % (i_nmstart,i_nmchar)
re_ident = re.compile(i_ident, _reflags)
i_element_name = '((?:%s)|\*)' % (i_ident[1:-1],)
re_element_name = re.compile(i_element_name, _reflags)
i_namespace_selector = '((?:%s)|\*|)\|(?!=)' % (i_ident[1:-1],)
re_namespace_selector = re.compile(i_namespace_selector, _reflags)
i_class = '\\.' + i_ident
re_class = re.compile(i_class, _reflags)
i_hash = '#((?:%s)+)' % i_nmchar
re_hash = re.compile(i_hash, _reflags)
i_rgbcolor = '(#%s{6}|#%s{3})' % (i_hex, i_hex)
re_rgbcolor = re.compile(i_rgbcolor, _reflags)
i_nl = u'\n|\r\n|\r|\f'
i_escape_nl = u'\\\\(?:%s)' % i_nl
i_string_content = _orRule(u'[\t !#$%&(-~]', i_escape_nl, i_nonascii, i_escape)
i_string1 = u'\"((?:%s|\')*)\"' % i_string_content
i_string2 = u'\'((?:%s|\")*)\'' % i_string_content
i_string = _orRule(i_string1, i_string2)
re_string = re.compile(i_string, _reflags)
i_uri = (u'url\\(\s*(?:(?:%s)|((?:%s)+))\s*\\)'
% (i_string, _orRule('[!#$%&*-~]', i_nonascii, i_escape)))
# XXX For now
# i_uri = u'(url\\(.*?\\))'
re_uri = re.compile(i_uri, _reflags)
i_num = u'(([-+]?[0-9]+(?:\\.[0-9]+)?)|([-+]?\\.[0-9]+))' # XXX Added outer parentheses, because e.g. .5em was not parsed correctly
re_num = re.compile(i_num, _reflags)
i_unit = '(%%|%s)?' % i_ident
re_unit = re.compile(i_unit, _reflags)
i_function = i_ident + '\\('
re_function = re.compile(i_function, _reflags)
i_functionterm = u'[-+]?' + i_function
re_functionterm = re.compile(i_functionterm, _reflags)
i_unicoderange1 = "(?:U\\+%s{1,6}-%s{1,6})" % (i_hex, i_hex)
i_unicoderange2 = "(?:U\\+\?{1,6}|{h}(\?{0,5}|{h}(\?{0,4}|{h}(\?{0,3}|{h}(\?{0,2}|{h}(\??|{h}))))))"
i_unicoderange = i_unicoderange1 # u'(%s|%s)' % (i_unicoderange1, i_unicoderange2)
re_unicoderange = re.compile(i_unicoderange, _reflags)
# i_comment = u'(?:\/\*[^*]*\*+([^/*][^*]*\*+)*\/)|(?://.*)'
# gabriel: only C convention for comments is allowed in CSS
i_comment = u'(?:\/\*[^*]*\*+([^/*][^*]*\*+)*\/)'
re_comment = re.compile(i_comment, _reflags)
i_important = u'!\s*(important)'
re_important = re.compile(i_important, _reflags)
del _orRule
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Public
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, cssBuilder=None):
self.setCSSBuilder(cssBuilder)
#~ CSS Builder to delegate to ~~~~~~~~~~~~~~~~~~~~~~~~
def getCSSBuilder(self):
"""A concrete instance implementing CSSBuilderAbstract"""
return self._cssBuilder
def setCSSBuilder(self, cssBuilder):
"""A concrete instance implementing CSSBuilderAbstract"""
self._cssBuilder = cssBuilder
cssBuilder = property(getCSSBuilder, setCSSBuilder)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Public CSS Parsing API
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def parseFile(self, srcFile, closeFile=False):
"""Parses CSS file-like objects using the current cssBuilder.
Use for external stylesheets."""
try:
result = self.parse(srcFile.read())
finally:
if closeFile:
srcFile.close()
return result
def parse(self, src):
"""Parses CSS string source using the current cssBuilder.
Use for embedded stylesheets."""
self.cssBuilder.beginStylesheet()
try:
# XXX Some simple preprocessing
src = cssSpecial.cleanupCSS(src)
try:
src, stylesheet = self._parseStylesheet(src)
except self.ParseError, err:
err.setFullCSSSource(src)
raise
finally:
self.cssBuilder.endStylesheet()
return stylesheet
def parseInline(self, src):
"""Parses CSS inline source string using the current cssBuilder.
Use to parse a tag's 'style'-like attribute."""
self.cssBuilder.beginInline()
try:
try:
src, properties = self._parseDeclarationGroup(src.strip(), braces=False)
except self.ParseError, err:
err.setFullCSSSource(src, inline=True)
raise
result = self.cssBuilder.inline(properties)
finally:
self.cssBuilder.endInline()
return result
def parseAttributes(self, attributes={}, **kwAttributes):
"""Parses CSS attribute source strings, and return as an inline stylesheet.
Use to parse a tag's highly CSS-based attributes like 'font'.
See also: parseSingleAttr
"""
if attributes:
kwAttributes.update(attributes)
self.cssBuilder.beginInline()
try:
properties = []
try:
for propertyName, src in kwAttributes.iteritems():
src, property = self._parseDeclarationProperty(src.strip(), propertyName)
properties.append(property)
except self.ParseError, err:
err.setFullCSSSource(src, inline=True)
raise
result = self.cssBuilder.inline(properties)
finally:
self.cssBuilder.endInline()
return result
def parseSingleAttr(self, attrValue):
"""Parse a single CSS attribute source string, and returns the built CSS expression.
Use to parse a tag's highly CSS-based attributes like 'font'.
See also: parseAttributes
"""
results = self.parseAttributes(temp=attrValue)
if 'temp' in results[1]:
return results[1]['temp']
else:
return results[0]['temp']
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Internal _parse methods
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _parseStylesheet(self, src):
"""stylesheet
: [ CHARSET_SYM S* STRING S* ';' ]?
[S|CDO|CDC]* [ import [S|CDO|CDC]* ]*
[ [ ruleset | media | page | font_face ] [S|CDO|CDC]* ]*
;
"""
# Get rid of the comments
src = self.re_comment.sub(u'', src)
# [ CHARSET_SYM S* STRING S* ';' ]?
src = self._parseAtCharset(src)
# [S|CDO|CDC]*
src = self._parseSCDOCDC(src)
# [ import [S|CDO|CDC]* ]*
src, stylesheetImports = self._parseAtImports(src)
# [ namespace [S|CDO|CDC]* ]*
src = self._parseAtNamespace(src)
stylesheetElements = []
# [ [ ruleset | atkeywords ] [S|CDO|CDC]* ]*
while src: # due to ending with ]*
if src.startswith('@'):
# @media, @page, @font-face
src, atResults = self._parseAtKeyword(src)
if atResults is not None:
stylesheetElements.extend(atResults)
else:
# ruleset
src, ruleset = self._parseRuleset(src)
stylesheetElements.append(ruleset)
# [S|CDO|CDC]*
src = self._parseSCDOCDC(src)
stylesheet = self.cssBuilder.stylesheet(stylesheetElements, stylesheetImports)
return src, stylesheet
def _parseSCDOCDC(self, src):
"""[S|CDO|CDC]*"""
while 1:
src = src.lstrip()
if src.startswith('<!--'):
src = src[4:]
elif src.startswith('-->'):
src = src[3:]
else:
break
return src
#~ CSS @ directives ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _parseAtCharset(self, src):
"""[ CHARSET_SYM S* STRING S* ';' ]?"""
if isAtRuleIdent(src, 'charset'):
ctxsrc = src
src = stripAtRuleIdent(src)
charset, src = self._getString(src)
src = src.lstrip()
if src[:1] != ';':
raise self.ParseError('@charset expected a terminating \';\'', src, ctxsrc)
src = src[1:].lstrip()
self.cssBuilder.atCharset(charset)
return src
def _parseAtImports(self, src):
"""[ import [S|CDO|CDC]* ]*"""
result = []
while isAtRuleIdent(src, 'import'):
ctxsrc = src
src = stripAtRuleIdent(src)
import_, src = self._getStringOrURI(src)
if import_ is None:
raise self.ParseError('Import expecting string or url', src, ctxsrc)
mediums = []
medium, src = self._getIdent(src.lstrip())
while medium is not None:
mediums.append(medium)
if src[:1] == ',':
src = src[1:].lstrip()
medium, src = self._getIdent(src)
else:
break
# XXX If no medium is given, "all" is assumed (the CSS default)
if not mediums:
mediums = ["all"]
if src[:1] != ';':
raise self.ParseError('@import expected a terminating \';\'', src, ctxsrc)
src = src[1:].lstrip()
stylesheet = self.cssBuilder.atImport(import_, mediums, self)
if stylesheet is not None:
result.append(stylesheet)
src = self._parseSCDOCDC(src)
return src, result
def _parseAtNamespace(self, src):
"""namespace :
@namespace S* [IDENT S*]? [STRING|URI] S* ';' S*
"""
src = self._parseSCDOCDC(src)
while isAtRuleIdent(src, 'namespace'):
ctxsrc = src
src = stripAtRuleIdent(src)
namespace, src = self._getStringOrURI(src)
if namespace is None:
nsPrefix, src = self._getIdent(src)
if nsPrefix is None:
raise self.ParseError('@namespace expected an identifier or a URI', src, ctxsrc)
namespace, src = self._getStringOrURI(src.lstrip())
if namespace is None:
raise self.ParseError('@namespace expected a URI', src, ctxsrc)
else:
nsPrefix = None
src = src.lstrip()
if src[:1] != ';':
raise self.ParseError('@namespace expected a terminating \';\'', src, ctxsrc)
src = src[1:].lstrip()
self.cssBuilder.atNamespace(nsPrefix, namespace)
src = self._parseSCDOCDC(src)
return src
def _parseAtKeyword(self, src):
"""[media | page | font_face | unknown_keyword]"""
ctxsrc = src
if isAtRuleIdent(src, 'media'):
src, result = self._parseAtMedia(src)
elif isAtRuleIdent(src, 'page'):
src, result = self._parseAtPage(src)
elif isAtRuleIdent(src, 'font-face'):
src, result = self._parseAtFontFace(src)
# XXX added @import, was missing!
elif isAtRuleIdent(src, 'import'):
src, result = self._parseAtImports(src)
elif isAtRuleIdent(src, 'frame'):
src, result = self._parseAtFrame(src)
elif src.startswith('@'):
src, result = self._parseAtIdent(src)
else:
raise self.ParseError('Unknown state in atKeyword', src, ctxsrc)
return src, result
def _parseAtMedia(self, src):
"""media
: MEDIA_SYM S* medium [ ',' S* medium ]* '{' S* ruleset* '}' S*
;
"""
ctxsrc = src
src = src[len('@media '):].lstrip()
mediums = []
while src and src[0] != '{':
medium, src = self._getIdent(src)
if medium is None:
raise self.ParseError('@media rule expected media identifier', src, ctxsrc)
mediums.append(medium)
if src[0] == ',':
src = src[1:].lstrip()
else:
src = src.lstrip()
if not src.startswith('{'):
raise self.ParseError('Ruleset opening \'{\' not found', src, ctxsrc)
src = src[1:].lstrip()
stylesheetElements = []
#while src and not src.startswith('}'):
# src, ruleset = self._parseRuleset(src)
# stylesheetElements.append(ruleset)
# src = src.lstrip()
# Also handle @-rules nested inside this block, which the loop above missed
while src and not src.startswith('}'):
if src.startswith('@'):
# @media, @page, @font-face
src, atResults = self._parseAtKeyword(src)
if atResults is not None:
stylesheetElements.extend(atResults)
else:
# ruleset
src, ruleset = self._parseRuleset(src)
stylesheetElements.append(ruleset)
src = src.lstrip()
if not src.startswith('}'):
raise self.ParseError('Ruleset closing \'}\' not found', src, ctxsrc)
else:
src = src[1:].lstrip()
result = self.cssBuilder.atMedia(mediums, stylesheetElements)
return src, result
def _parseAtPage(self, src):
"""page
: PAGE_SYM S* IDENT? pseudo_page? S*
'{' S* declaration [ ';' S* declaration ]* '}' S*
;
"""
ctxsrc = src
src = src[len('@page '):].lstrip()
page, src = self._getIdent(src)
if src[:1] == ':':
pseudopage, src = self._getIdent(src[1:])
page = page + '_' + pseudopage
else:
pseudopage = None
#src, properties = self._parseDeclarationGroup(src.lstrip())
# Also handle @-rules nested inside the page block
stylesheetElements = []
src = src.lstrip()
properties = []
# XXX Extended for PDF use
if not src.startswith('{'):
raise self.ParseError('Ruleset opening \'{\' not found', src, ctxsrc)
else:
src = src[1:].lstrip()
while src and not src.startswith('}'):
if src.startswith('@'):
# @media, @page, @font-face
src, atResults = self._parseAtKeyword(src)
if atResults is not None:
stylesheetElements.extend(atResults)
else:
src, nproperties = self._parseDeclarationGroup(src.lstrip(), braces=False)
properties += nproperties
src = src.lstrip()
result = [self.cssBuilder.atPage(page, pseudopage, properties)]
return src[1:].lstrip(), result
def _parseAtFrame(self, src):
"""
XXX Proprietary for PDF
"""
ctxsrc = src
src = src[len('@frame '):].lstrip()
box, src = self._getIdent(src)
src, properties = self._parseDeclarationGroup(src.lstrip())
result = [self.cssBuilder.atFrame(box, properties)]
return src.lstrip(), result
def _parseAtFontFace(self, src):
ctxsrc = src
src = src[len('@font-face '):].lstrip()
src, properties = self._parseDeclarationGroup(src)
result = [self.cssBuilder.atFontFace(properties)]
return src, result
def _parseAtIdent(self, src):
ctxsrc = src
atIdent, src = self._getIdent(src[1:])
if atIdent is None:
raise self.ParseError('At-rule expected an identifier for the rule', src, ctxsrc)
src, result = self.cssBuilder.atIdent(atIdent, self, src)
if result is NotImplemented:
# An at-rule consists of everything up to and including the next semicolon (;) or the next block, whichever comes first
semiIdx = src.find(';')
if semiIdx < 0:
semiIdx = None
blockIdx = src[:semiIdx].find('{')
if blockIdx < 0:
blockIdx = None
if semiIdx is not None and (blockIdx is None or semiIdx < blockIdx):
src = src[semiIdx+1:].lstrip()
elif blockIdx is None:
# consume the rest of the content since we didn't find a block or a semicolon
src = ''
elif blockIdx is not None:
# expecting a block...
src = src[blockIdx:]
try:
# try to parse it as a declarations block
src, declarations = self._parseDeclarationGroup(src)
except self.ParseError:
# try to parse it as a stylesheet block
src, stylesheet = self._parseStylesheet(src)
else:
raise self.ParseError('Unable to ignore @-rule block', src, ctxsrc)
return src.lstrip(), result
#~ ruleset - see selector and declaration groups ~~~~
def _parseRuleset(self, src):
"""ruleset
: selector [ ',' S* selector ]*
'{' S* declaration [ ';' S* declaration ]* '}' S*
;
"""
src, selectors = self._parseSelectorGroup(src)
src, properties = self._parseDeclarationGroup(src.lstrip())
result = self.cssBuilder.ruleset(selectors, properties)
return src, result
#~ selector parsing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _parseSelectorGroup(self, src):
selectors = []
while src[:1] not in ('{','}', ']','(',')', ';', ''):
src, selector = self._parseSelector(src)
if selector is None:
break
selectors.append(selector)
if src.startswith(','):
src = src[1:].lstrip()
return src, selectors
def _parseSelector(self, src):
"""selector
: simple_selector [ combinator simple_selector ]*
;
"""
src, selector = self._parseSimpleSelector(src)
srcLen = len(src) # XXX
while src[:1] not in ('', ',', ';', '{','}', '[',']','(',')'):
for combiner in self.SelectorCombiners:
if src.startswith(combiner):
src = src[len(combiner):].lstrip()
break
else:
combiner = ' '
src, selectorB = self._parseSimpleSelector(src)
# XXX Fix a bug that occurred here e.g. : .1 {...}
if len(src) >= srcLen:
src = src[1:]
while src and (src[:1] not in ('', ',', ';', '{','}', '[',']','(',')')):
src = src[1:]
return src.lstrip(), None
selector = self.cssBuilder.combineSelectors(selector, combiner, selectorB)
return src.lstrip(), selector
def _parseSimpleSelector(self, src):
"""simple_selector
: [ namespace_selector ]? element_name? [ HASH | class | attrib | pseudo ]* S*
;
"""
ctxsrc = src.lstrip()
nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
name, src = self._getMatchResult(self.re_element_name, src)
if name:
pass # already *successfully* assigned
elif src[:1] in self.SelectorQualifiers:
name = '*'
else:
raise self.ParseError('Selector name or qualifier expected', src, ctxsrc)
name = self.cssBuilder.resolveNamespacePrefix(nsPrefix, name)
selector = self.cssBuilder.selector(name)
while src and src[:1] in self.SelectorQualifiers:
hash_, src = self._getMatchResult(self.re_hash, src)
if hash_ is not None:
selector.addHashId(hash_)
continue
class_, src = self._getMatchResult(self.re_class, src)
if class_ is not None:
selector.addClass(class_)
continue
if src.startswith('['):
src, selector = self._parseSelectorAttribute(src, selector)
elif src.startswith(':'):
src, selector = self._parseSelectorPseudo(src, selector)
else:
break
return src.lstrip(), selector
def _parseSelectorAttribute(self, src, selector):
"""attrib
: '[' S* [ namespace_selector ]? IDENT S* [ [ '=' | INCLUDES | DASHMATCH ] S*
[ IDENT | STRING ] S* ]? ']'
;
"""
ctxsrc = src
if not src.startswith('['):
raise self.ParseError('Selector Attribute opening \'[\' not found', src, ctxsrc)
src = src[1:].lstrip()
nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
attrName, src = self._getIdent(src)
src = src.lstrip()
if attrName is None:
raise self.ParseError('Expected a selector attribute name', src, ctxsrc)
if nsPrefix is not None:
attrName = self.cssBuilder.resolveNamespacePrefix(nsPrefix, attrName)
for op in self.AttributeOperators:
if src.startswith(op):
break
else:
op = ''
src = src[len(op):].lstrip()
if op:
attrValue, src = self._getIdent(src)
if attrValue is None:
attrValue, src = self._getString(src)
if attrValue is None:
raise self.ParseError('Expected a selector attribute value', src, ctxsrc)
else:
attrValue = None
if not src.startswith(']'):
raise self.ParseError('Selector Attribute closing \']\' not found', src, ctxsrc)
else:
src = src[1:]
if op:
selector.addAttributeOperation(attrName, op, attrValue)
else:
selector.addAttribute(attrName)
return src, selector
def _parseSelectorPseudo(self, src, selector):
"""pseudo
: ':' [ IDENT | function ]
;
"""
ctxsrc = src
if not src.startswith(':'):
raise self.ParseError('Selector Pseudo \':\' not found', src, ctxsrc)
src = re.search('^:{1,2}(.*)', src, re.M | re.S).group(1)
name, src = self._getIdent(src)
if not name:
raise self.ParseError('Selector Pseudo identifier not found', src, ctxsrc)
if src.startswith('('):
# function
src = src[1:].lstrip()
src, term = self._parseExpression(src, True)
if not src.startswith(')'):
raise self.ParseError('Selector Pseudo Function closing \')\' not found', src, ctxsrc)
src = src[1:]
selector.addPseudoFunction(name, term)
else:
selector.addPseudo(name)
return src, selector
#~ declaration and expression parsing ~~~~~~~~~~~~~~~
def _parseDeclarationGroup(self, src, braces=True):
ctxsrc = src
if src.startswith('{'):
src, braces = src[1:], True
elif braces:
raise self.ParseError('Declaration group opening \'{\' not found', src, ctxsrc)
properties = []
src = src.lstrip()
while src[:1] not in ('', ',', '{','}', '[',']','(',')','@'): # XXX @?
src, property = self._parseDeclaration(src)
# XXX Workaround for styles like "*font: smaller"
if src.startswith("*"):
src = "-nothing-" + src[1:]
continue
if property is None:
break
properties.append(property)
if src.startswith(';'):
src = src[1:].lstrip()
else:
break
if braces:
if not src.startswith('}'):
raise self.ParseError('Declaration group closing \'}\' not found', src, ctxsrc)
src = src[1:]
return src.lstrip(), properties
def _parseDeclaration(self, src):
"""declaration
: ident S* ':' S* expr prio?
| /* empty */
;
"""
# property
propertyName, src = self._getIdent(src)
if propertyName is not None:
src = src.lstrip()
# S* : S*
if src[:1] in (':', '='):
# Note: we are being fairly flexible here... technically, the
# ":" is *required*, but in the name of flexibility we
# support a null transition, as well as an "=" transition
src = src[1:].lstrip()
src, property = self._parseDeclarationProperty(src, propertyName)
else:
property = None
return src, property
def _parseDeclarationProperty(self, src, propertyName):
# expr
src, expr = self._parseExpression(src)
# prio?
important, src = self._getMatchResult(self.re_important, src)
src = src.lstrip()
property = self.cssBuilder.property(propertyName, expr, important)
return src, property
def _parseExpression(self, src, returnList=False):
"""
expr
: term [ operator term ]*
;
"""
src, term = self._parseExpressionTerm(src)
operator = None
while src[:1] not in ('', ';', '{','}', '[',']', ')'):
for operator in self.ExpressionOperators:
if src.startswith(operator):
src = src[len(operator):]
break
else:
operator = ' '
src, term2 = self._parseExpressionTerm(src.lstrip())
if term2 is NotImplemented:
break
else:
term = self.cssBuilder.combineTerms(term, operator, term2)
if operator is None and returnList:
term = self.cssBuilder.combineTerms(term, None, None)
return src, term
else:
return src, term
def _parseExpressionTerm(self, src):
"""term
: unary_operator?
[ NUMBER S* | PERCENTAGE S* | LENGTH S* | EMS S* | EXS S* | ANGLE S* |
TIME S* | FREQ S* | function ]
| STRING S* | IDENT S* | URI S* | RGB S* | UNICODERANGE S* | hexcolor
;
"""
ctxsrc = src
result, src = self._getMatchResult(self.re_num, src)
if result is not None:
units, src = self._getMatchResult(self.re_unit, src)
term = self.cssBuilder.termNumber(result, units)
return src.lstrip(), term
result, src = self._getString(src, self.re_uri)
if result is not None:
# XXX URL!!!!
term = self.cssBuilder.termURI(result)
return src.lstrip(), term
result, src = self._getString(src)
if result is not None:
term = self.cssBuilder.termString(result)
return src.lstrip(), term
result, src = self._getMatchResult(self.re_functionterm, src)
if result is not None:
src, params = self._parseExpression(src, True)
if src[:1] != ')':
raise self.ParseError('Terminal function expression expected closing \')\'', src, ctxsrc)
src = src[1:].lstrip()
term = self.cssBuilder.termFunction(result, params)
return src, term
result, src = self._getMatchResult(self.re_rgbcolor, src)
if result is not None:
term = self.cssBuilder.termRGB(result)
return src.lstrip(), term
result, src = self._getMatchResult(self.re_unicoderange, src)
if result is not None:
term = self.cssBuilder.termUnicodeRange(result)
return src.lstrip(), term
nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
result, src = self._getIdent(src)
if result is not None:
if nsPrefix is not None:
result = self.cssBuilder.resolveNamespacePrefix(nsPrefix, result)
term = self.cssBuilder.termIdent(result)
return src.lstrip(), term
return self.cssBuilder.termUnknown(src)
#~ utility methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _getIdent(self, src, default=None):
return self._getMatchResult(self.re_ident, src, default)
def _getString(self, src, rexpression=None, default=None):
if rexpression is None:
rexpression = self.re_string
result = rexpression.match(src)
if result:
strres = filter(None, result.groups())
if strres:
strres = strres[0]
else:
strres = ''
return strres, src[result.end():]
else:
return default, src
def _getStringOrURI(self, src):
result, src = self._getString(src, self.re_uri)
if result is None:
result, src = self._getString(src)
return result, src
def _getMatchResult(self, rexpression, src, default=None, group=1):
result = rexpression.match(src)
if result:
return result.group(group), src[result.end():]
else:
return default, src
| apache-2.0 | 8,179,135,156,528,571,000 | 35.321263 | 155 | 0.537016 | false |
bilderbuchi/GCode_Z_Splice | splicer_GUI.py | 1 | 3376 | import splicer_GUI_FB
from templatesGCodePanel import GCodePanel
from templatesTransitionPanel import TransitionPanel
import wx
import wx.xrc
import logging
import splicer
logger = logging.getLogger(__name__)
# Logic implementation file for the GUI
class MyFrame( splicer_GUI_FB.mainFrameGUI ):
def __init__( self, parent ):
#GCode Splicer init code
splicer_GUI_FB.mainFrameGUI.__init__( self, parent )
self.FirstGCodePanel.title.SetLabel('G-code file 1')
self.FirstGCodePanel.z_from.SetValue('0')
self.FirstGCodePanel.z_from.Enable(False)
self.FirstTransitionPanel.title.SetLabel('Transition file 1')
self.SecondGCodePanel.title.SetLabel('G-code file 2')
self.SecondGCodePanel.z_from.Enable(False)
self.SecondGCodePanel.z_to.Enable(False)
# self.GCodePanel2 = wx.Panel( self.m_scrolledWindow, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
# self.title = wx.StaticText( self.GCodePanel2, wx.ID_ANY, u"G-code file 2", wx.DefaultPosition, wx.DefaultSize, 0 )
# self.m_scrolledWindow.GetSizer().Add( self.GCodePanel2, 0, wx.ALL|wx.EXPAND, 5 )
# # Construct GUI manually and dynamically from self-defined classes
# self.panel1 = GCodePanelTemplate(self.m_scrolledWindow)
# self.panel1.title.SetLabel('templated Gcodefile 2')
# self.m_scrolledWindow.GetSizer().Add( self.panel1, 0, wx.ALL|wx.EXPAND, 5 )
# self.m_staticline_column = wx.StaticLine( self.m_scrolledWindow, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_VERTICAL )
# self.m_scrolledWindow.GetSizer().Add( self.m_staticline_column, 0, wx.EXPAND, 5 )
# self.panel2 = TransitionPanel(self.m_scrolledWindow)
# self.panel2.title.SetLabel('templated Transitionfile 2')
# self.m_scrolledWindow.GetSizer().Add( self.panel2, 0, wx.ALL|wx.EXPAND, 5 )
# Handlers for mainFrameGUI events.
def OnAddFile( self, event ):
# TODO: Implement OnAddFile
pass
def OnRemoveFile( self, event ):
# TODO: Implement OnRemoveFile
pass
def OnGenerate( self, event ):
logger.info('Generating files')
output=self.resultFilePicker.GetPath()
logger.info('Output file: ' + output)
g_files=[]
z_values=[]
t_files=[]
for c in self.m_scrolledWindow.GetSizer().GetChildren():
# TODO: implement scan over panels
# TODO: make sure this is independent of order
widget = c.GetWindow()
if type(widget) is GCodePanel:
logger.info('Found GCode panel ' + widget.title.GetLabel())
path=widget.filePicker.GetPath()
if path:
logger.debug('path: ' + path + '.')
g_files.append(path)
zval=widget.z_to.GetValue()
if path and zval:
logger.info('Found Z value')
logger.debug(zval)
z_values.append(float(zval))
elif type(widget) is TransitionPanel:
logger.info('Found transition panel ' + widget.title.GetLabel())
path=widget.filePicker.GetPath()
if path and widget.useTransition_checkbox.IsChecked():
logger.debug('path: ' + path +'.')
t_files.append(path)
logger.info(g_files)
logger.info(z_values)
logger.info(t_files)
if splicer.splice_files(output, g_files, z_values, t_files) != 0:
logger.error('An error occurred during splicing!')
logger.info('Finished splicing!')
def OnClose( self, event ):
logger.info('Closing GUI')
self.Close()
def OnResultFileSelected( self, event ):
# TODO: Implement OnResultFileSelected
pass
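# Illustrative sketch (added for clarity, not part of the original file): a
# minimal way to launch this frame.  The logging setup and the wx.App
# arguments are assumptions for the example.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    app = wx.App(False)
    frame = MyFrame(None)
    frame.Show()
    app.MainLoop()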
| agpl-3.0 | 9,203,873,516,078,526,000 | 31.152381 | 132 | 0.716528 | false |
jedie/pypyjs-standalone | website/js/pypy.js-0.3.0/lib/modules/pyrepl/console.py | 13 | 2560 | # Copyright 2000-2004 Michael Hudson-Doyle <[email protected]>
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
class Event:
"""An Event. `evt' is 'key' or somesuch."""
def __init__(self, evt, data, raw=''):
self.evt = evt
self.data = data
self.raw = raw
def __repr__(self):
return 'Event(%r, %r)'%(self.evt, self.data)
class Console:
"""Attributes:
screen,
height,
width,
"""
def refresh(self, screen, xy):
pass
def prepare(self):
pass
def restore(self):
pass
def move_cursor(self, x, y):
pass
def set_cursor_vis(self, vis):
pass
def getheightwidth(self):
"""Return (height, width) where height and width are the height
and width of the terminal window in characters."""
pass
def get_event(self, block=1):
"""Return an Event instance. Returns None if |block| is false
and there is no event pending, otherwise waits for the
completion of an event."""
pass
def beep(self):
pass
def clear(self):
"""Wipe the screen"""
pass
def finish(self):
"""Move the cursor to the end of the display and otherwise get
ready for end. XXX could be merged with restore? Hmm."""
pass
def flushoutput(self):
"""Flush all output to the screen (assuming there's some
buffering going on somewhere)."""
pass
def forgetinput(self):
"""Forget all pending, but not yet processed input."""
pass
def getpending(self):
"""Return the characters that have been typed but not yet
processed."""
pass
def wait(self):
"""Wait for an event."""
pass
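# Illustrative sketch (added for clarity, not part of the original module): a
# trivial in-memory console satisfying the interface above.  Everything here
# is an assumption made for the example; see the real terminal console
# implementations elsewhere in pyrepl for actual behaviour.
class _ScriptedConsole(Console):
    height, width = 24, 80
    screen = []
    def __init__(self, events=()):
        self._events = list(events)
    def getheightwidth(self):
        return self.height, self.width
    def get_event(self, block=1):
        # Replays pre-scripted events; a real console would block on input.
        if self._events:
            return self._events.pop(0)
        return None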
| mit | -7,316,285,660,186,032,000 | 26.526882 | 71 | 0.630469 | false |
googleads/google-ads-python | google/ads/googleads/v7/services/types/detail_placement_view_service.py | 1 | 1273 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v7.services",
marshal="google.ads.googleads.v7",
manifest={"GetDetailPlacementViewRequest",},
)
class GetDetailPlacementViewRequest(proto.Message):
r"""Request message for
[DetailPlacementViewService.GetDetailPlacementView][google.ads.googleads.v7.services.DetailPlacementViewService.GetDetailPlacementView].
Attributes:
resource_name (str):
Required. The resource name of the Detail
Placement view to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -2,607,583,219,530,520,000 | 31.641026 | 140 | 0.725844 | false |
dharmasam9/moose-core | scripts/cmake_sanity_check.py | 1 | 2830 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""cmake_sanity_check.py: Check if Cmake files are ok.
Last modified: Sat Jan 18, 2014 05:01PM
NOTE: Run in this directory only.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "[email protected]"
__status__ = "Development"
import sys
import os
import re
from collections import defaultdict
makefiles = {}
cmakefiles = {}
makedirs = set()
cmakedirs = set()
def check(d):
searchMakefiles(d)
checkMissingCMake()
checkSrcs()
def checkMissingCMake():
if (makedirs - cmakedirs):
print("[Failed] Test 1")
print("Following directories have Makefile but not a CMakeFiles.txt file.")
print("%s" % "\t\n".join(makedirs - cmakedirs))
def searchMakefiles(topdir):
for d, subd, fs in os.walk(topdir):
if "../_build" in d or ".." == d: continue
if "CMakeLists.txt" in fs:
cmakedirs.add(d)
cmakefiles[d] = fs
if "Makefile" in fs:
if "_build" in d:
continue
else:
makedirs.add(d)
makefiles[d] = fs
else: pass
def checkSrcs():
objPat = re.compile(r"\w+\.o")
srcPat = re.compile(r"\w+\.cpp")
srcs = []
csrcs = []
for d in makefiles:
with open(os.path.join(d, "Makefile"), "r") as f:
txt = f.read()
for i in txt.split("\n\n"):
if "OBJ =" in i.upper():
for j in i.split():
if ".o" in j.strip():
srcs.append("%s"%(j.strip()))
try:
with open(os.path.join(d, "CMakeLists.txt"), "r") as f:
txt = f.read()
csrcs = srcPat.findall(txt)
except IOError:
print("Dir {} does not have CMakeLists.txt".format(d))
csrcs = []
#print("[TEST 2] Checking if CMake is creating extra objects")
for csr in csrcs:
objName = csr.replace(".cpp", ".o")
if objName in srcs:
pass
else:
print(" Failed: In dir {}, CMake is creating extra object {}".format(d, objName))
pass
print("[TEST 3] Checking if CMake is missing some objects")
for obj in srcs:
srcName = obj.replace(".o", ".cpp")
if srcName in csrcs: pass
else:
print(" Failed: In dir {}, CMake is missing object {}".format(d,
srcName))
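# Illustrative sketch (added note, not part of the original script): the
# object-extraction idea used in checkSrcs() above, shown on a made-up
# Makefile fragment.  The sample text is an assumption for the example.
def _demo_extract_objs():
    block = "OBJ = main.o \\\n\tutil.o solver.o"
    if "OBJ =" in block.upper():
        # keep the tokens that name object files, as the loop above does
        return [tok for tok in block.split() if ".o" in tok]
    return []
# _demo_extract_objs() -> ['main.o', 'util.o', 'solver.o']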
def main():
test_dir = sys.argv[1]
check(test_dir)
if __name__ == '__main__':
main()
| gpl-3.0 | -4,250,139,840,957,038,600 | 27.877551 | 97 | 0.506714 | false |
laiqiqi886/kbengine | kbe/res/scripts/common/Lib/abc.py | 106 | 8624 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
from _weakrefset import WeakSet
def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractclassmethod(classmethod):
"""
A decorator indicating abstract classmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractclassmethod
def my_abstract_classmethod(cls, ...):
...
'abstractclassmethod' is deprecated. Use 'classmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractstaticmethod(staticmethod):
"""
A decorator indicating abstract staticmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractstaticmethod
def my_abstract_staticmethod(...):
...
'abstractstaticmethod' is deprecated. Use 'staticmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractproperty(property):
"""
A decorator indicating abstract properties.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract properties are overridden.
The abstract properties can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractproperty
def my_abstract_property(self):
...
This defines a read-only property; you can also define a read-write
abstract property using the 'long' form of property declaration:
class C(metaclass=ABCMeta):
def getx(self): ...
def setx(self, value): ...
x = abstractproperty(getx, setx)
'abstractproperty' is deprecated. Use 'property' with 'abstractmethod'
instead.
"""
__isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
# Note: this counter is private. Use `abc.get_cache_token()` for
# external code.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super().__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = {name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False)}
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC.
Returns the subclass, to allow usage as a class decorator.
"""
if not isinstance(subclass, type):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return subclass # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
return subclass
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file)
print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print("%s: %r" % (name, value), file=file)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking
subclass = instance.__class__
if subclass in cls._abc_cache:
return True
subtype = type(instance)
if subtype is subclass:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subclass in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subclass)
return any(cls.__subclasscheck__(c) for c in {subclass, subtype})
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False
class ABC(metaclass=ABCMeta):
"""Helper class that provides a standard way to create an ABC using
inheritance.
"""
pass
def get_cache_token():
"""Returns the current ABC cache token.
The token is an opaque object (supporting equality testing) identifying the
current version of the ABC cache for virtual subclasses. The token changes
with every call to ``register()`` on any ABC.
"""
return ABCMeta._abc_invalidation_counter
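# Illustrative sketch (added for clarity, not part of the module): virtual
# subclassing via register(), as described in the ABCMeta docstring.  The
# Sized ABC below is made up for the example.
if __name__ == '__main__':
    class Sized(ABC):
        @abstractmethod
        def __len__(self):
            ...
    token_before = get_cache_token()
    Sized.register(tuple)            # tuple gains virtual-subclass status...
    assert issubclass(tuple, Sized)  # ...so issubclass() now reports True,
    assert isinstance((), Sized)     # even though Sized is not in tuple's MRO
    assert get_cache_token() != token_before  # register() bumped the token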
| lgpl-3.0 | -1,269,972,387,605,203,500 | 33.774194 | 79 | 0.619666 | false |
caphrim007/ansible | lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase_facts.py | 14 | 5925 | #!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mysqldatabase_facts
version_added: "2.7"
short_description: Get Azure MySQL Database facts.
description:
- Get facts of MySQL Database.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the database.
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Get instance of MySQL Database
azure_rm_mysqldatabase_facts:
resource_group: resource_group_name
server_name: server_name
name: database_name
- name: List instances of MySQL Database
azure_rm_mysqldatabase_facts:
resource_group: resource_group_name
server_name: server_name
'''
RETURN = '''
databases:
description: A list of dictionaries containing facts for MySQL Databases.
returned: always
type: complex
contains:
id:
description:
- Resource ID
returned: always
type: str
sample: "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/TestGroup/providers/Microsoft.DBforMySQL/servers/testser
ver/databases/db1"
resource_group:
description:
- Resource group name.
returned: always
type: str
sample: testrg
server_name:
description:
- Server name.
returned: always
type: str
sample: testserver
name:
description:
- Resource name.
returned: always
type: str
sample: db1
charset:
description:
- The charset of the database.
returned: always
type: str
sample: UTF8
collation:
description:
- The collation of the database.
returned: always
type: str
sample: English_United States.1252
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.rdbms.mysql import MySQLManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMDatabasesFacts(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str'
)
)
# store the results of the module operation
self.results = dict(
changed=False
)
self.resource_group = None
self.server_name = None
self.name = None
super(AzureRMDatabasesFacts, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if (self.resource_group is not None and
self.server_name is not None and
self.name is not None):
self.results['databases'] = self.get()
elif (self.resource_group is not None and
self.server_name is not None):
self.results['databases'] = self.list_by_server()
return self.results
def get(self):
response = None
results = []
try:
response = self.mysql_client.databases.get(resource_group_name=self.resource_group,
server_name=self.server_name,
database_name=self.name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for Databases.')
if response is not None:
results.append(self.format_item(response))
return results
def list_by_server(self):
response = None
results = []
try:
response = self.mysql_client.databases.list_by_server(resource_group_name=self.resource_group,
server_name=self.server_name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))
if response is not None:
for item in response:
results.append(self.format_item(item))
return results
def format_item(self, item):
d = item.as_dict()
d = {
'resource_group': self.resource_group,
'server_name': self.server_name,
'name': d['name'],
'charset': d['charset'],
'collation': d['collation']
}
return d
def main():
AzureRMDatabasesFacts()
if __name__ == '__main__':
main()
| gpl-3.0 | 5,414,794,238,133,418,000 | 28.331683 | 149 | 0.560675 | false |
willkg/phil | phil/check.py | 1 | 5531 | ######################################################################
# This file is part of phil.
#
# Copyright (C) 2011, 2012, 2013 Will Kahn-Greene
#
# phil is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# phil is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with phil. If not, see <http://www.gnu.org/licenses/>.
#######################################################################
import ConfigParser
import datetime
import phil.util
from phil.util import (
out, err, parse_configuration, parse_ics, get_next_date, should_remind,
format_date, generate_date_bits)
class Phil(object):
def __init__(self, quiet=False, debug=False):
self.config = None
self.quiet = quiet
self.debug = debug
def _run(self):
dtstart = datetime.datetime.today()
if not self.quiet:
out('Loading state....')
state = phil.util.load_state(self.config.datadir)
if not self.quiet:
out('Parsing ics file "{0}"....'.format(self.config.icsfile))
events = parse_ics(self.config.icsfile)
for event in events:
if not self.quiet:
out('Looking at event "{0}"....'.format(event.summary))
next_date = get_next_date(dtstart, event.rrule)
previous_remind = state.get(event.event_id)
if previous_remind and previous_remind == str(next_date.date()):
if not self.quiet:
out('Already sent a reminder for this meeting.')
continue
if should_remind(dtstart, next_date, self.config.remind):
if not self.quiet:
out('Sending reminder....')
summary = '{0} ({1})'.format(
event.summary, format_date(next_date))
description = event.description % generate_date_bits(next_date)
if self.debug:
out('From:', self.config.sender)
out('To:', self.config.to_list)
out('Subject:', summary)
out('Body:')
out(description, indent=' ', wrap=False)
else:
phil.util.send_mail_smtp(
self.config.sender, self.config.to_list, summary,
description, self.config.host, self.config.port)
state[event.event_id] = str(next_date.date())
elif not self.quiet:
out('Next reminder should get sent on {0}.'.format(
next_date.date() - datetime.timedelta(self.config.remind)))
phil.util.save_state(self.config.datadir, state)
def run(self, conffile):
if not self.quiet:
out('Parsing config file....')
try:
self.config = parse_configuration(conffile)
except ConfigParser.NoOptionError, noe:
err('Missing option in config file: {0}'.format(noe))
return 1
try:
self._run()
except Exception:
import traceback
err(''.join(traceback.format_exc()), wrap=False)
err('phil has died unexpectedly. If you think this is an error '
'(which it is), then contact phil\'s authors for help.')
return 1
if not self.quiet:
out('Finished!')
return 0
def _next6(self):
# TODO: This is a repeat of _run for the most part.
dtstart = datetime.datetime.today()
out('Loading state....')
state = phil.util.load_state(self.config.datadir)
out('Parsing ics file "{0}"....'.format(self.config.icsfile))
events = parse_ics(self.config.icsfile)
for event in events:
out('Looking at event "{0}"....'.format(event.summary))
next_date = dtstart
for i in range(6):
next_date = get_next_date(next_date, event.rrule)
previous_remind = state.get(event.event_id)
if (previous_remind
and previous_remind == str(next_date.date())):
out('* {0} (sent reminder already)'.format(
next_date.strftime('%c')))
else:
out('* {0}'.format(next_date.strftime('%c')))
next_date = next_date + datetime.timedelta(1)
def next6(self, conffile):
if not self.quiet:
out('Parsing config file....')
try:
self.config = parse_configuration(conffile)
except ConfigParser.NoOptionError, noe:
err('Missing option in config file: {0}'.format(noe))
return 1
try:
self._next6()
except Exception:
import traceback
err(''.join(traceback.format_exc()), wrap=False)
err('phil has died unexpectedly. If you think this is an error '
'(which it is), then contact phil\'s authors for help.')
return 1
if not self.quiet:
out('Finished!')
return 0
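# Illustrative sketch (added note, not part of this file): one plausible shape
# for the phil.util.should_remind helper used above, assuming
# self.config.remind is a number of days.  The real implementation may differ.
def _should_remind_sketch(dtstart, next_date, remind_days):
    return (next_date.date() - dtstart.date()) <= datetime.timedelta(remind_days)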
| gpl-3.0 | 2,489,440,115,486,904,000 | 34.455128 | 79 | 0.542036 | false |
ocadotechnology/boto | boto/fps/response.py | 153 | 7866 | # Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/
# Copyright (c) 2010 Jason R. Coombs http://www.jaraco.com/
# Copyright (c) 2008 Chris Moyer http://coredumped.org/
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from decimal import Decimal
from boto.compat import filter, map
def ResponseFactory(action):
class FPSResponse(Response):
_action = action
_Result = globals().get(action + 'Result', ResponseElement)
# due to nodes receiving their closing tags
def endElement(self, name, value, connection):
if name != action + 'Response':
super(FPSResponse, self).endElement(name, value, connection)
return FPSResponse
class ResponseElement(object):
def __init__(self, connection=None, name=None):
if connection is not None:
self._connection = connection
self._name = name or self.__class__.__name__
@property
def connection(self):
return self._connection
def __repr__(self):
render = lambda pair: '{!s}: {!r}'.format(*pair)
do_show = lambda pair: not pair[0].startswith('_')
attrs = filter(do_show, self.__dict__.items())
return '{0}({1})'.format(self.__class__.__name__,
', '.join(map(render, attrs)))
def startElement(self, name, attrs, connection):
return None
# due to nodes receiving their closing tags
def endElement(self, name, value, connection):
if name != self._name:
setattr(self, name, value)
class Response(ResponseElement):
_action = 'Undefined'
def startElement(self, name, attrs, connection):
if name == 'ResponseMetadata':
setattr(self, name, ResponseElement(name=name))
elif name == self._action + 'Result':
setattr(self, name, self._Result(name=name))
else:
return super(Response, self).startElement(name, attrs, connection)
return getattr(self, name)
class ComplexAmount(ResponseElement):
def __repr__(self):
return '{0} {1}'.format(self.CurrencyCode, self.Value)
def __float__(self):
return float(self.Value)
def __str__(self):
return str(self.Value)
def startElement(self, name, attrs, connection):
if name not in ('CurrencyCode', 'Value'):
message = 'Unrecognized tag {0} in ComplexAmount'.format(name)
raise AssertionError(message)
return super(ComplexAmount, self).startElement(name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Value':
value = Decimal(value)
super(ComplexAmount, self).endElement(name, value, connection)
class AmountCollection(ResponseElement):
def startElement(self, name, attrs, connection):
setattr(self, name, ComplexAmount(name=name))
return getattr(self, name)
class AccountBalance(AmountCollection):
def startElement(self, name, attrs, connection):
if name == 'AvailableBalances':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return super(AccountBalance, self).startElement(name, attrs, connection)
class GetAccountBalanceResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'AccountBalance':
setattr(self, name, AccountBalance(name=name))
return getattr(self, name)
return super(GetAccountBalanceResult, self).startElement(name, attrs,
connection)
class GetTotalPrepaidLiabilityResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'OutstandingPrepaidLiability':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return super(GetTotalPrepaidLiabilityResult, self).startElement(name,
attrs, connection)
class GetPrepaidBalanceResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'PrepaidBalance':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return super(GetPrepaidBalanceResult, self).startElement(name, attrs,
connection)
class GetOutstandingDebtBalanceResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'OutstandingDebt':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return super(GetOutstandingDebtBalanceResult, self).startElement(name,
attrs, connection)
class TransactionPart(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'FeesPaid':
setattr(self, name, ComplexAmount(name=name))
return getattr(self, name)
return super(TransactionPart, self).startElement(name, attrs,
connection)
class Transaction(ResponseElement):
def __init__(self, *args, **kw):
self.TransactionPart = []
super(Transaction, self).__init__(*args, **kw)
def startElement(self, name, attrs, connection):
if name == 'TransactionPart':
getattr(self, name).append(TransactionPart(name=name))
return getattr(self, name)[-1]
if name in ('TransactionAmount', 'FPSFees', 'Balance'):
setattr(self, name, ComplexAmount(name=name))
return getattr(self, name)
return super(Transaction, self).startElement(name, attrs, connection)
class GetAccountActivityResult(ResponseElement):
def __init__(self, *args, **kw):
self.Transaction = []
super(GetAccountActivityResult, self).__init__(*args, **kw)
def startElement(self, name, attrs, connection):
if name == 'Transaction':
getattr(self, name).append(Transaction(name=name))
return getattr(self, name)[-1]
return super(GetAccountActivityResult, self).startElement(name, attrs,
connection)
class GetTransactionResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'Transaction':
setattr(self, name, Transaction(name=name))
return getattr(self, name)
return super(GetTransactionResult, self).startElement(name, attrs,
connection)
class GetTokensResult(ResponseElement):
def __init__(self, *args, **kw):
self.Token = []
super(GetTokensResult, self).__init__(*args, **kw)
def startElement(self, name, attrs, connection):
if name == 'Token':
getattr(self, name).append(ResponseElement(name=name))
return getattr(self, name)[-1]
return super(GetTokensResult, self).startElement(name, attrs,
connection)
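# Illustrative sketch (added note, not part of the original boto module): how
# the SAX-style handlers above assemble a ComplexAmount. It assumes the base
# ResponseElement.endElement (defined earlier in this file) stores character
# data as attributes, and that Decimal is already imported at the top.
def _example_parse_complex_amount():
    amount = ComplexAmount(name='FeesPaid')
    amount.startElement('CurrencyCode', {}, None)
    amount.endElement('CurrencyCode', 'USD', None)
    amount.startElement('Value', {}, None)
    amount.endElement('Value', '1.50', None)
    # repr(amount) -> 'USD 1.50'; float(amount) -> 1.5 (Value becomes Decimal)
    return repr(amount), float(amount)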
| mit | -3,586,349,682,251,648,500 | 37 | 80 | 0.659166 | false |
dancingdan/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/softplus_test.py | 24 | 5768 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.softplus import Softplus
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
rng = np.random.RandomState(42)
class SoftplusBijectorTest(test.TestCase):
"""Tests the correctness of the Y = g(X) = Log[1 + exp(X)] transformation."""
def _softplus(self, x):
return np.log(1 + np.exp(x))
def _softplus_inverse(self, y):
return np.log(np.exp(y) - 1)
def _softplus_ildj_before_reduction(self, y):
"""Inverse log det jacobian, before being reduced."""
return -np.log(1 - np.exp(-y))
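  # Derivation note (added, not in the original test): for y = softplus(x) =
  # log(1 + exp(x)), the inverse is x = log(exp(y) - 1), so
  # dx/dy = exp(y) / (exp(y) - 1) = 1 / (1 - exp(-y)) and the inverse log det
  # Jacobian is log|dx/dy| = -log(1 - exp(-y)), matching the line above.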
def testHingeSoftnessZeroRaises(self):
with self.cached_session():
bijector = Softplus(hinge_softness=0., validate_args=True)
with self.assertRaisesOpError("must be non-zero"):
bijector.forward([1., 1.]).eval()
def testBijectorForwardInverseEventDimsZero(self):
with self.cached_session():
bijector = Softplus()
self.assertEqual("softplus", bijector.name)
x = 2 * rng.randn(2, 10)
y = self._softplus(x)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
def testBijectorForwardInverseWithHingeSoftnessEventDimsZero(self):
with self.cached_session():
bijector = Softplus(hinge_softness=1.5)
x = 2 * rng.randn(2, 10)
y = 1.5 * self._softplus(x / 1.5)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
def testBijectorLogDetJacobianEventDimsZero(self):
with self.cached_session():
bijector = Softplus()
y = 2 * rng.rand(2, 10)
# No reduction needed if event_dims = 0.
ildj = self._softplus_ildj_before_reduction(y)
self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=0).eval())
def testBijectorForwardInverseEventDimsOne(self):
with self.cached_session():
bijector = Softplus()
self.assertEqual("softplus", bijector.name)
x = 2 * rng.randn(2, 10)
y = self._softplus(x)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
def testBijectorLogDetJacobianEventDimsOne(self):
with self.cached_session():
bijector = Softplus()
y = 2 * rng.rand(2, 10)
ildj_before = self._softplus_ildj_before_reduction(y)
ildj = np.sum(ildj_before, axis=1)
self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=1).eval())
def testScalarCongruency(self):
with self.cached_session():
bijector = Softplus()
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def testScalarCongruencyWithPositiveHingeSoftness(self):
with self.cached_session():
bijector = Softplus(hinge_softness=1.3)
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def testScalarCongruencyWithNegativeHingeSoftness(self):
with self.cached_session():
bijector = Softplus(hinge_softness=-1.3)
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def testBijectiveAndFinite32bit(self):
with self.cached_session():
bijector = Softplus()
x = np.linspace(-20., 20., 100).astype(np.float32)
y = np.logspace(-10, 10, 100).astype(np.float32)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-2, atol=1e-2)
def testBijectiveAndFiniteWithPositiveHingeSoftness32Bit(self):
with self.cached_session():
bijector = Softplus(hinge_softness=1.23)
x = np.linspace(-20., 20., 100).astype(np.float32)
y = np.logspace(-10, 10, 100).astype(np.float32)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-2, atol=1e-2)
def testBijectiveAndFiniteWithNegativeHingeSoftness32Bit(self):
with self.cached_session():
bijector = Softplus(hinge_softness=-0.7)
x = np.linspace(-20., 20., 100).astype(np.float32)
y = -np.logspace(-10, 10, 100).astype(np.float32)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-2, atol=1e-2)
def testBijectiveAndFinite16bit(self):
with self.cached_session():
bijector = Softplus()
# softplus(-20) is zero, so we can't use such a large range as in 32bit.
x = np.linspace(-10., 20., 100).astype(np.float16)
# Note that float16 is only in the open set (0, inf) for a smaller
# logspace range. The actual range was (-7, 4), so use something smaller
# for the test.
y = np.logspace(-6, 3, 100).astype(np.float16)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-1, atol=1e-3)
if __name__ == "__main__":
test.main()
| apache-2.0 | 6,750,358,959,788,787,000 | 36.212903 | 94 | 0.66973 | false |
SPriyaJain/studybuddy | env/lib/python2.7/site-packages/click/__init__.py | 135 | 2858 | # -*- coding: utf-8 -*-
"""
click
~~~~~
Click is a simple Python module that wraps the stdlib's optparse to make
writing command line scripts fun. Unlike other modules, it's based around
a simple API that does not come with too much magic and is composable.
In case optparse ever gets removed from the stdlib, it will be shipped by
this module.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
# Core classes
from .core import Context, BaseCommand, Command, MultiCommand, Group, \
CommandCollection, Parameter, Option, Argument
# Globals
from .globals import get_current_context
# Decorators
from .decorators import pass_context, pass_obj, make_pass_decorator, \
command, group, argument, option, confirmation_option, \
password_option, version_option, help_option
# Types
from .types import ParamType, File, Path, Choice, IntRange, Tuple, \
STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED
# Utilities
from .utils import echo, get_binary_stream, get_text_stream, open_file, \
format_filename, get_app_dir, get_os_args
# Terminal functions
from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \
progressbar, clear, style, unstyle, secho, edit, launch, getchar, \
pause
# Exceptions
from .exceptions import ClickException, UsageError, BadParameter, \
FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \
MissingParameter
# Formatting
from .formatting import HelpFormatter, wrap_text
# Parsing
from .parser import OptionParser
__all__ = [
# Core classes
'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group',
'CommandCollection', 'Parameter', 'Option', 'Argument',
# Globals
'get_current_context',
# Decorators
'pass_context', 'pass_obj', 'make_pass_decorator', 'command', 'group',
'argument', 'option', 'confirmation_option', 'password_option',
'version_option', 'help_option',
# Types
'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple', 'STRING',
'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED',
# Utilities
'echo', 'get_binary_stream', 'get_text_stream', 'open_file',
'format_filename', 'get_app_dir', 'get_os_args',
# Terminal functions
'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager',
'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch',
'getchar', 'pause',
# Exceptions
'ClickException', 'UsageError', 'BadParameter', 'FileError',
'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage',
'MissingParameter',
# Formatting
'HelpFormatter', 'wrap_text',
# Parsing
'OptionParser',
]
# Controls if click should emit the warning about the use of unicode
# literals.
disable_unicode_literals_warning = False
__version__ = '6.7'
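# Illustrative sketch (added note, not part of click itself; user code would
# live in its own script, not in this package module): a minimal command
# built from the names exported above.
#
#   @command()
#   @option('--count', default=1, help='Number of greetings.')
#   def hello(count):
#       for _ in range(count):
#           echo('Hello!')
#
#   if __name__ == '__main__':
#       hello()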
| mit | 8,275,908,814,108,918,000 | 28.163265 | 78 | 0.678796 | false |
mrquim/repository.mrquim | script.module.youtube.dl/lib/youtube_dl/extractor/libraryofcongress.py | 44 | 4662 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
parse_filesize,
)
class LibraryOfCongressIE(InfoExtractor):
IE_NAME = 'loc'
IE_DESC = 'Library of Congress'
_VALID_URL = r'https?://(?:www\.)?loc\.gov/(?:item/|today/cyberlc/feature_wdesc\.php\?.*\brec=)(?P<id>[0-9]+)'
_TESTS = [{
# embedded via <div class="media-player"
'url': 'http://loc.gov/item/90716351/',
'md5': '353917ff7f0255aa6d4b80a034833de8',
'info_dict': {
'id': '90716351',
'ext': 'mp4',
'title': "Pa's trip to Mars",
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 0,
'view_count': int,
},
}, {
# webcast embedded via mediaObjectId
'url': 'https://www.loc.gov/today/cyberlc/feature_wdesc.php?rec=5578',
'info_dict': {
'id': '5578',
'ext': 'mp4',
'title': 'Help! Preservation Training Needs Here, There & Everywhere',
'duration': 3765,
'view_count': int,
'subtitles': 'mincount:1',
},
'params': {
'skip_download': True,
},
}, {
# with direct download links
'url': 'https://www.loc.gov/item/78710669/',
'info_dict': {
'id': '78710669',
'ext': 'mp4',
'title': 'La vie et la passion de Jesus-Christ',
'duration': 0,
'view_count': int,
'formats': 'mincount:4',
},
'params': {
'skip_download': True,
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
media_id = self._search_regex(
(r'id=(["\'])media-player-(?P<id>.+?)\1',
r'<video[^>]+id=(["\'])uuid-(?P<id>.+?)\1',
r'<video[^>]+data-uuid=(["\'])(?P<id>.+?)\1',
r'mediaObjectId\s*:\s*(["\'])(?P<id>.+?)\1'),
webpage, 'media id', group='id')
data = self._download_json(
'https://media.loc.gov/services/v1/media?id=%s&context=json' % media_id,
video_id)['mediaObject']
derivative = data['derivatives'][0]
media_url = derivative['derivativeUrl']
title = derivative.get('shortName') or data.get('shortName') or self._og_search_title(
webpage)
# Following algorithm was extracted from setAVSource js function
# found in webpage
media_url = media_url.replace('rtmp', 'https')
is_video = data.get('mediaType', 'v').lower() == 'v'
ext = determine_ext(media_url)
if ext not in ('mp4', 'mp3'):
media_url += '.mp4' if is_video else '.mp3'
        # Initialize 'formats' so the direct-download loop below can append
        # to it even when neither URL pattern matches.
        formats = []
        if 'vod/mp4:' in media_url:
            formats.append({
                'url': media_url.replace('vod/mp4:', 'hls-vod/media/') + '.m3u8',
                'format_id': 'hls',
                'ext': 'mp4',
                'protocol': 'm3u8_native',
                'quality': 1,
            })
        elif 'vod/mp3:' in media_url:
            formats.append({
                'url': media_url.replace('vod/mp3:', ''),
                'vcodec': 'none',
            })
download_urls = set()
for m in re.finditer(
r'<option[^>]+value=(["\'])(?P<url>.+?)\1[^>]+data-file-download=[^>]+>\s*(?P<id>.+?)(?:(?: |\s+)\((?P<size>.+?)\))?\s*<', webpage):
format_id = m.group('id').lower()
if format_id == 'gif':
continue
download_url = m.group('url')
if download_url in download_urls:
continue
download_urls.add(download_url)
formats.append({
'url': download_url,
'format_id': format_id,
'filesize_approx': parse_filesize(m.group('size')),
})
self._sort_formats(formats)
duration = float_or_none(data.get('duration'))
view_count = int_or_none(data.get('viewCount'))
subtitles = {}
cc_url = data.get('ccUrl')
if cc_url:
subtitles.setdefault('en', []).append({
'url': cc_url,
'ext': 'ttml',
})
return {
'id': video_id,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage, default=None),
'duration': duration,
'view_count': view_count,
'formats': formats,
'subtitles': subtitles,
}
| gpl-2.0 | 2,632,613,533,775,188,500 | 31.601399 | 153 | 0.472973 | false |
nikste/tensorflow | tensorflow/python/debug/cli/tensor_format.py | 43 | 16359 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Format tensors (ndarrays) for screen display and navigation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import debugger_cli_common
_NUMPY_OMISSION = "...,"
_NUMPY_DEFAULT_EDGE_ITEMS = 3
_NUMBER_REGEX = re.compile(r"[-+]?([0-9][-+0-9eE\.]+|nan|inf)(\s|,|\])")
BEGIN_INDICES_KEY = "i0"
OMITTED_INDICES_KEY = "omitted"
DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR = "bold"
class HighlightOptions(object):
"""Options for highlighting elements of a tensor."""
def __init__(self,
criterion,
description=None,
font_attr=DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR):
"""Constructor of HighlightOptions.
Args:
criterion: (callable) A callable of the following signature:
def to_highlight(X):
# Args:
# X: The tensor to highlight elements in.
#
# Returns:
# (boolean ndarray) A boolean ndarray of the same shape as X
# indicating which elements are to be highlighted (iff True).
This callable will be used as the argument of np.argwhere() to
determine which elements of the tensor are to be highlighted.
description: (str) Description of the highlight criterion embodied by
criterion.
font_attr: (str) Font attribute to be applied to the
highlighted elements.
"""
self.criterion = criterion
self.description = description
self.font_attr = font_attr
def format_tensor(tensor,
tensor_label,
include_metadata=False,
np_printoptions=None,
highlight_options=None):
"""Generate a RichTextLines object showing a tensor in formatted style.
Args:
tensor: The tensor to be displayed, as a numpy ndarray or other
appropriate format (e.g., None representing uninitialized tensors).
tensor_label: A label for the tensor, as a string. If set to None, will
suppress the tensor name line in the return value.
include_metadata: Whether metadata such as dtype and shape are to be
included in the formatted text.
np_printoptions: A dictionary of keyword arguments that are passed to a
call of np.set_printoptions() to set the text format for display numpy
ndarrays.
highlight_options: (HighlightOptions) options for highlighting elements
of the tensor.
Returns:
A RichTextLines object. Its annotation field has line-by-line markups to
indicate which indices in the array the first element of each line
corresponds to.
"""
lines = []
font_attr_segs = {}
if tensor_label is not None:
lines.append("Tensor \"%s\":" % tensor_label)
suffix = tensor_label.split(":")[-1]
if suffix.isdigit():
# Suffix is a number. Assume it is the output slot index.
font_attr_segs[0] = [(8, 8 + len(tensor_label), "bold")]
else:
# Suffix is not a number. It is auxiliary information such as the debug
# op type. In this case, highlight the suffix with a different color.
debug_op_len = len(suffix)
proper_len = len(tensor_label) - debug_op_len - 1
font_attr_segs[0] = [
(8, 8 + proper_len, "bold"),
(8 + proper_len + 1, 8 + proper_len + 1 + debug_op_len, "yellow")
]
if tensor is None:
if lines:
lines.append("")
lines.append("Uninitialized tensor")
return debugger_cli_common.RichTextLines(lines)
elif not isinstance(tensor, np.ndarray):
# If tensor is not a np.ndarray, return simple text-line representation of
# the object without annotations.
if lines:
lines.append("")
lines.extend(repr(tensor).split("\n"))
return debugger_cli_common.RichTextLines(lines)
if include_metadata:
lines.append(" dtype: %s" % str(tensor.dtype))
lines.append(" shape: %s" % str(tensor.shape))
if lines:
lines.append("")
hlines = len(lines)
# Apply custom string formatting options for numpy ndarray.
if np_printoptions is not None:
np.set_printoptions(**np_printoptions)
array_lines = repr(tensor).split("\n")
lines.extend(array_lines)
if tensor.dtype.type is not np.string_:
# Parse array lines to get beginning indices for each line.
# TODO(cais): Currently, we do not annotate string-type tensors due to
# difficulty in escaping sequences. Address this issue.
annotations = _annotate_ndarray_lines(
array_lines, tensor, np_printoptions=np_printoptions, offset=hlines)
formatted = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs, annotations=annotations)
# Perform optional highlighting.
if highlight_options is not None:
indices_list = list(np.argwhere(highlight_options.criterion(tensor)))
total_elements = np.size(tensor)
highlight_summary = "Highlighted%s: %d of %d element(s) (%.2f%%)" % (
"(%s)" % highlight_options.description if highlight_options.description
else "", len(indices_list), total_elements,
len(indices_list) / float(total_elements) * 100.0)
formatted.lines[0] += " " + highlight_summary
if indices_list:
indices_list = [list(indices) for indices in indices_list]
are_omitted, rows, start_cols, end_cols = locate_tensor_element(
formatted, indices_list)
for is_omitted, row, start_col, end_col in zip(are_omitted, rows,
start_cols, end_cols):
if is_omitted or start_col is None or end_col is None:
continue
if row in formatted.font_attr_segs:
formatted.font_attr_segs[row].append(
(start_col, end_col, highlight_options.font_attr))
else:
formatted.font_attr_segs[row] = [(start_col, end_col,
highlight_options.font_attr)]
return formatted
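# Illustrative sketch (added note, not part of the original module): format a
# small all-zero tensor and highlight every element equal to zero.
def _example_format_tensor():
  tensor = np.zeros([3, 3])
  options = HighlightOptions(lambda x: x == 0.0, description="zeros")
  screen_output = format_tensor(tensor, "example:0", highlight_options=options)
  return screen_output.lines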
def _annotate_ndarray_lines(
array_lines, tensor, np_printoptions=None, offset=0):
"""Generate annotations for line-by-line begin indices of tensor text.
Parse the numpy-generated text representation of a numpy ndarray to
determine the indices of the first element of each text line (if any
element is present in the line).
For example, given the following multi-line ndarray text representation:
["array([[ 0. , 0.0625, 0.125 , 0.1875],",
" [ 0.25 , 0.3125, 0.375 , 0.4375],",
" [ 0.5 , 0.5625, 0.625 , 0.6875],",
" [ 0.75 , 0.8125, 0.875 , 0.9375]])"]
the generate annotation will be:
{0: {BEGIN_INDICES_KEY: [0, 0]},
1: {BEGIN_INDICES_KEY: [1, 0]},
2: {BEGIN_INDICES_KEY: [2, 0]},
3: {BEGIN_INDICES_KEY: [3, 0]}}
Args:
array_lines: Text lines representing the tensor, as a list of str.
tensor: The tensor being formatted as string.
np_printoptions: A dictionary of keyword arguments that are passed to a
call of np.set_printoptions().
offset: Line number offset applied to the line indices in the returned
annotation.
Returns:
An annotation as a dict.
"""
if np_printoptions and "edgeitems" in np_printoptions:
edge_items = np_printoptions["edgeitems"]
else:
edge_items = _NUMPY_DEFAULT_EDGE_ITEMS
annotations = {}
# Put metadata about the tensor in the annotations["tensor_metadata"].
annotations["tensor_metadata"] = {
"dtype": tensor.dtype, "shape": tensor.shape}
dims = np.shape(tensor)
ndims = len(dims)
if ndims == 0:
# No indices for a 0D tensor.
return annotations
curr_indices = [0] * len(dims)
curr_dim = 0
for i in xrange(len(array_lines)):
line = array_lines[i].strip()
if not line:
# Skip empty lines, which can appear for >= 3D arrays.
continue
if line == _NUMPY_OMISSION:
annotations[offset + i] = {OMITTED_INDICES_KEY: copy.copy(curr_indices)}
curr_indices[curr_dim - 1] = dims[curr_dim - 1] - edge_items
else:
num_lbrackets = line.count("[") # TODO(cais): String array escaping.
num_rbrackets = line.count("]")
curr_dim += num_lbrackets - num_rbrackets
annotations[offset + i] = {BEGIN_INDICES_KEY: copy.copy(curr_indices)}
if num_rbrackets == 0:
line_content = line[line.rfind("[") + 1:]
num_elements = line_content.count(",")
curr_indices[curr_dim - 1] += num_elements
else:
if curr_dim > 0:
curr_indices[curr_dim - 1] += 1
for k in xrange(curr_dim, ndims):
curr_indices[k] = 0
return annotations
def locate_tensor_element(formatted, indices):
"""Locate a tensor element in formatted text lines, given element indices.
Given a RichTextLines object representing a tensor and indices of the sought
  element, return the row number at which the element is located (if it exists).
Args:
formatted: A RichTextLines object containing formatted text lines
representing the tensor.
indices: Indices of the sought element, as a list of int or a list of list
of int. The former case is for a single set of indices to look up,
whereas the latter case is for looking up a batch of indices sets at once.
In the latter case, the indices must be in ascending order, or a
ValueError will be raised.
Returns:
1) A boolean indicating whether the element falls into an omitted line.
2) Row index.
3) Column start index, i.e., the first column in which the representation
of the specified tensor starts, if it can be determined. If it cannot
be determined (e.g., due to ellipsis), None.
4) Column end index, i.e., the column right after the last column that
       represents the specified tensor. If it cannot be determined, None.
    The return values described above are based on a single set of indices to
look up. In the case of batch mode (multiple sets of indices), the return
values will be lists of the types described above.
Raises:
AttributeError: If:
Input argument "formatted" does not have the required annotations.
ValueError: If:
1) Indices do not match the dimensions of the tensor, or
2) Indices exceed sizes of the tensor, or
3) Indices contain negative value(s).
4) If in batch mode, and if not all sets of indices are in ascending
order.
"""
if isinstance(indices[0], list):
indices_list = indices
input_batch = True
else:
indices_list = [indices]
input_batch = False
# Check that tensor_metadata is available.
if "tensor_metadata" not in formatted.annotations:
raise AttributeError("tensor_metadata is not available in annotations.")
# Sanity check on input argument.
_validate_indices_list(indices_list, formatted)
dims = formatted.annotations["tensor_metadata"]["shape"]
batch_size = len(indices_list)
lines = formatted.lines
annot = formatted.annotations
prev_r = 0
prev_line = ""
prev_indices = [0] * len(dims)
# Initialize return values
are_omitted = [None] * batch_size
row_indices = [None] * batch_size
start_columns = [None] * batch_size
end_columns = [None] * batch_size
batch_pos = 0 # Current position in the batch.
for r in xrange(len(lines)):
if r not in annot:
continue
if BEGIN_INDICES_KEY in annot[r]:
indices_key = BEGIN_INDICES_KEY
elif OMITTED_INDICES_KEY in annot[r]:
indices_key = OMITTED_INDICES_KEY
matching_indices_list = [
ind for ind in indices_list[batch_pos:]
if prev_indices <= ind < annot[r][indices_key]
]
if matching_indices_list:
num_matches = len(matching_indices_list)
match_start_columns, match_end_columns = _locate_elements_in_line(
prev_line, matching_indices_list, prev_indices)
start_columns[batch_pos:batch_pos + num_matches] = match_start_columns
end_columns[batch_pos:batch_pos + num_matches] = match_end_columns
are_omitted[batch_pos:batch_pos + num_matches] = [
OMITTED_INDICES_KEY in annot[prev_r]
] * num_matches
row_indices[batch_pos:batch_pos + num_matches] = [prev_r] * num_matches
batch_pos += num_matches
if batch_pos >= batch_size:
break
prev_r = r
prev_line = lines[r]
prev_indices = annot[r][indices_key]
if batch_pos < batch_size:
matching_indices_list = indices_list[batch_pos:]
num_matches = len(matching_indices_list)
match_start_columns, match_end_columns = _locate_elements_in_line(
prev_line, matching_indices_list, prev_indices)
start_columns[batch_pos:batch_pos + num_matches] = match_start_columns
end_columns[batch_pos:batch_pos + num_matches] = match_end_columns
are_omitted[batch_pos:batch_pos + num_matches] = [
OMITTED_INDICES_KEY in annot[prev_r]
] * num_matches
row_indices[batch_pos:batch_pos + num_matches] = [prev_r] * num_matches
if input_batch:
return are_omitted, row_indices, start_columns, end_columns
else:
return are_omitted[0], row_indices[0], start_columns[0], end_columns[0]
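# Illustrative sketch (added note, not part of the original module): locate
# element [1, 0] in the text produced by format_tensor above.
def _example_locate_element():
  formatted = format_tensor(np.zeros([3, 3]), "example:0")
  is_omitted, row, start_col, end_col = locate_tensor_element(formatted, [1, 0])
  return row, start_col, end_col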
def _validate_indices_list(indices_list, formatted):
prev_ind = None
for ind in indices_list:
# Check indices match tensor dimensions.
dims = formatted.annotations["tensor_metadata"]["shape"]
if len(ind) != len(dims):
raise ValueError("Dimensions mismatch: requested: %d; actual: %d" %
(len(ind), len(dims)))
# Check indices is within size limits.
for req_idx, siz in zip(ind, dims):
if req_idx >= siz:
raise ValueError("Indices exceed tensor dimensions.")
if req_idx < 0:
raise ValueError("Indices contain negative value(s).")
# Check indices are in ascending order.
if prev_ind and ind < prev_ind:
raise ValueError("Input indices sets are not in ascending order.")
prev_ind = ind
def _locate_elements_in_line(line, indices_list, ref_indices):
"""Determine the start and end indices of an element in a line.
Args:
line: (str) the line in which the element is to be sought.
indices_list: (list of list of int) list of indices of the element to
search for. Assumes that the indices in the batch are unique and sorted
in ascending order.
ref_indices: (list of int) reference indices, i.e., the indices of the
first element represented in the line.
Returns:
start_columns: (list of int) start column indices, if found. If not found,
None.
end_columns: (list of int) end column indices, if found. If not found,
None.
If found, the element is represented in the left-closed-right-open interval
[start_column, end_column].
"""
batch_size = len(indices_list)
offsets = [indices[-1] - ref_indices[-1] for indices in indices_list]
start_columns = [None] * batch_size
end_columns = [None] * batch_size
if _NUMPY_OMISSION in line:
ellipsis_index = line.find(_NUMPY_OMISSION)
else:
ellipsis_index = len(line)
matches_iter = re.finditer(_NUMBER_REGEX, line)
batch_pos = 0
offset_counter = 0
for match in matches_iter:
if match.start() > ellipsis_index:
# Do not attempt to search beyond ellipsis.
break
if offset_counter == offsets[batch_pos]:
start_columns[batch_pos] = match.start()
# Remove the final comma, right bracket, or whitespace.
end_columns[batch_pos] = match.end() - 1
batch_pos += 1
if batch_pos >= batch_size:
break
offset_counter += 1
return start_columns, end_columns
| apache-2.0 | -8,063,628,864,559,425,000 | 34.180645 | 80 | 0.656825 | false |
jeremiahmarks/sl4a | python/gdata/src/gdata/tlslite/integration/IMAP4_TLS.py | 319 | 5145 | """TLS Lite + imaplib."""
import socket
from imaplib import IMAP4
from gdata.tlslite.TLSConnection import TLSConnection
from gdata.tlslite.integration.ClientHelper import ClientHelper
# IMAP TLS PORT
IMAP4_TLS_PORT = 993
class IMAP4_TLS(IMAP4, ClientHelper):
"""This class extends L{imaplib.IMAP4} with TLS support."""
def __init__(self, host = '', port = IMAP4_TLS_PORT,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings=None):
"""Create a new IMAP4_TLS.
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The caller should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type host: str
@param host: Server to connect to.
@type port: int
@param port: Port to connect to.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
ClientHelper.__init__(self,
username, password, sharedKey,
certChain, privateKey,
cryptoID, protocol,
x509Fingerprint,
x509TrustList, x509CommonName,
settings)
IMAP4.__init__(self, host, port)
def open(self, host = '', port = IMAP4_TLS_PORT):
"""Setup connection to remote server on "host:port".
This connection will be used by the routines:
read, readline, send, shutdown.
"""
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
self.sock = TLSConnection(self.sock)
self.sock.closeSocket = True
ClientHelper._handshake(self, self.sock)
self.file = self.sock.makefile('rb')
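# Illustrative sketch (added note, not part of the original module; host and
# credentials are placeholders): the TLS/SRP handshake happens inside open(),
# which IMAP4.__init__ calls; the IMAP-level LOGIN is still a separate step.
#
#   imap = IMAP4_TLS('imap.example.com', username='alice', password='secret')
#   imap.login('alice', 'secret')
#   imap.select('INBOX')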
| apache-2.0 | 4,755,418,242,885,402,000 | 37.977273 | 71 | 0.651895 | false |
ajaali/django | tests/gis_tests/geo3d/models.py | 302 | 1294 | from django.utils.encoding import python_2_unicode_compatible
from ..models import models
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=30)
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
class City3D(NamedModel):
point = models.PointField(dim=3)
class Interstate2D(NamedModel):
line = models.LineStringField(srid=4269)
class Interstate3D(NamedModel):
line = models.LineStringField(dim=3, srid=4269)
class InterstateProj2D(NamedModel):
line = models.LineStringField(srid=32140)
class InterstateProj3D(NamedModel):
line = models.LineStringField(dim=3, srid=32140)
class Polygon2D(NamedModel):
poly = models.PolygonField(srid=32140)
class Polygon3D(NamedModel):
poly = models.PolygonField(dim=3, srid=32140)
class SimpleModel(models.Model):
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
class Point2D(SimpleModel):
point = models.PointField()
class Point3D(SimpleModel):
point = models.PointField(dim=3)
class MultiPoint3D(SimpleModel):
mpoint = models.MultiPointField(dim=3)
| bsd-3-clause | 311,652,091,095,572,100 | 18.606061 | 61 | 0.710201 | false |
x2nie/odoo | openerp/osv/fields.py | 14 | 69520 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Fields:
- simple
- relations (one2many, many2one, many2many)
- function
Fields Attributes:
        * _classic_read: is a classic sql field
        * _type : field type
        * _auto_join: for one2many and many2one fields, tells whether select
          queries will join the relational table instead of replacing the
          field condition by an equivalent one based on a search.
* readonly
* required
* size
"""
import base64
import datetime as DT
import functools
import logging
import pytz
import re
import xmlrpclib
from psycopg2 import Binary
import openerp
import openerp.tools as tools
from openerp.tools.translate import _
from openerp.tools import float_round, float_repr
from openerp.tools import html_sanitize
import simplejson
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
def _symbol_set(symb):
if symb is None or symb == False:
return None
elif isinstance(symb, unicode):
return symb.encode('utf-8')
return str(symb)
class _column(object):
""" Base of all fields, a database column
An instance of this object is a *description* of a database column. It will
not hold any data, but only provide the methods to manipulate data of an
ORM record or even prepare/update the database to hold such a field of data.
"""
_classic_read = True
_classic_write = True
_auto_join = False
_prefetch = True
_properties = False
_type = 'unknown'
_obj = None
_multi = False
_symbol_c = '%s'
_symbol_f = _symbol_set
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = None
_deprecated = False
copy = True # whether the field is copied by BaseModel.copy()
def __init__(self, string='unknown', required=False, readonly=False, domain=None, context=None, states=None, priority=0, change_default=False, size=None, ondelete=None, translate=False, select=False, manual=False, **args):
"""
The 'manual' keyword argument specifies if the field is a custom one.
It corresponds to the 'state' column in ir_model_fields.
"""
if domain is None:
domain = []
if context is None:
context = {}
self.states = states or {}
self.string = string
self.readonly = readonly
self.required = required
self.size = size
self.help = args.get('help', '')
self.priority = priority
self.change_default = change_default
self.ondelete = ondelete.lower() if ondelete else None # defaults to 'set null' in ORM
self.translate = translate
self._domain = domain
self._context = context
self.write = False
self.read = False
self.select = select
self.manual = manual
self.selectable = True
self.group_operator = args.get('group_operator', False)
self.groups = False # CSV list of ext IDs of groups that can access this field
self.deprecated = False # Optional deprecation warning
for a in args:
setattr(self, a, args[a])
# prefetch only if self._classic_write, not self.groups, and not
# self.deprecated
if not self._classic_write or self.deprecated:
self._prefetch = False
def to_field(self):
""" convert column `self` to a new-style field """
from openerp.fields import Field
return Field.by_type[self._type](**self.to_field_args())
def to_field_args(self):
""" return a dictionary with all the arguments to pass to the field """
items = [
('_origin', self), # field interfaces self
('copy', self.copy),
('index', self.select),
('manual', self.manual),
('string', self.string),
('help', self.help),
('readonly', self.readonly),
('required', self.required),
('states', self.states),
('groups', self.groups),
('size', self.size),
('ondelete', self.ondelete),
('translate', self.translate),
('domain', self._domain),
('context', self._context),
('change_default', self.change_default),
('deprecated', self.deprecated),
]
return dict(item for item in items if item[1])
def restart(self):
pass
def set(self, cr, obj, id, name, value, user=None, context=None):
cr.execute('update '+obj._table+' set '+name+'='+self._symbol_set[0]+' where id=%s', (self._symbol_set[1](value), id))
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
raise Exception(_('undefined get method !'))
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
ids = obj.search(cr, uid, args+self._domain+[(name, 'ilike', value)], offset, limit, context=context)
res = obj.read(cr, uid, ids, [name], context=context)
return [x[name] for x in res]
def as_display_name(self, cr, uid, obj, value, context=None):
"""Converts a field value to a suitable string representation for a record,
e.g. when this field is used as ``rec_name``.
:param obj: the ``BaseModel`` instance this column belongs to
:param value: a proper value as returned by :py:meth:`~openerp.orm.osv.BaseModel.read`
for this column
"""
# delegated to class method, so a column type A can delegate
# to a column type B.
return self._as_display_name(self, cr, uid, obj, value, context=None)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
# This needs to be a class method, in case a column type A as to delegate
# to a column type B.
return tools.ustr(value)
# ---------------------------------------------------------
# Simple fields
# ---------------------------------------------------------
class boolean(_column):
_type = 'boolean'
_symbol_c = '%s'
_symbol_f = bool
_symbol_set = (_symbol_c, _symbol_f)
def __init__(self, string='unknown', required=False, **args):
super(boolean, self).__init__(string=string, required=required, **args)
if required:
_logger.debug(
"required=True is deprecated: making a boolean field"
" `required` has no effect, as NULL values are "
"automatically turned into False. args: %r",args)
class integer(_column):
_type = 'integer'
_symbol_c = '%s'
_symbol_f = lambda x: int(x or 0)
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self,x: x or 0
def __init__(self, string='unknown', required=False, **args):
super(integer, self).__init__(string=string, required=required, **args)
class reference(_column):
_type = 'reference'
_classic_read = False # post-process to handle missing target
def __init__(self, string, selection, size=None, **args):
if callable(selection):
from openerp import api
selection = api.expected(api.cr_uid_context, selection)
_column.__init__(self, string=string, size=size, selection=selection, **args)
def to_field_args(self):
args = super(reference, self).to_field_args()
args['selection'] = self.selection
return args
def get(self, cr, obj, ids, name, uid=None, context=None, values=None):
result = {}
# copy initial values fetched previously.
for value in values:
result[value['id']] = value[name]
if value[name]:
model, res_id = value[name].split(',')
if not obj.pool[model].exists(cr, uid, [int(res_id)], context=context):
result[value['id']] = False
return result
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
if value:
# reference fields have a 'model,id'-like value, that we need to convert
# to a real name
model_name, res_id = value.split(',')
if model_name in obj.pool and res_id:
model = obj.pool[model_name]
names = model.name_get(cr, uid, [int(res_id)], context=context)
return names[0][1] if names else False
return tools.ustr(value)
# takes a string (encoded in utf8) and returns a string (encoded in utf8)
def _symbol_set_char(self, symb):
#TODO:
# * we need to remove the "symb==False" from the next line BUT
# for now too many things rely on this broken behavior
# * the symb==None test should be common to all data types
if symb is None or symb == False:
return None
# we need to convert the string to a unicode object to be able
# to evaluate its length (and possibly truncate it) reliably
u_symb = tools.ustr(symb)
return u_symb[:self.size].encode('utf8')
class char(_column):
_type = 'char'
def __init__(self, string="unknown", size=None, **args):
_column.__init__(self, string=string, size=size or None, **args)
# self._symbol_set_char defined to keep the backward compatibility
self._symbol_f = self._symbol_set_char = lambda x: _symbol_set_char(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
class text(_column):
_type = 'text'
class html(text):
_type = 'html'
_symbol_c = '%s'
def _symbol_set_html(self, value):
if value is None or value is False:
return None
if not self._sanitize:
return value
return html_sanitize(value)
def __init__(self, string='unknown', sanitize=True, **args):
super(html, self).__init__(string=string, **args)
self._sanitize = sanitize
# symbol_set redefinition because of sanitize specific behavior
self._symbol_f = self._symbol_set_html
self._symbol_set = (self._symbol_c, self._symbol_f)
def to_field_args(self):
args = super(html, self).to_field_args()
args['sanitize'] = self._sanitize
return args
import __builtin__
class float(_column):
_type = 'float'
_symbol_c = '%s'
_symbol_f = lambda x: __builtin__.float(x or 0.0)
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self,x: x or 0.0
def __init__(self, string='unknown', digits=None, digits_compute=None, required=False, **args):
_column.__init__(self, string=string, required=required, **args)
self.digits = digits
# synopsis: digits_compute(cr) -> (precision, scale)
self.digits_compute = digits_compute
def to_field_args(self):
args = super(float, self).to_field_args()
args['digits'] = self.digits_compute or self.digits
return args
def digits_change(self, cr):
if self.digits_compute:
self.digits = self.digits_compute(cr)
if self.digits:
precision, scale = self.digits
self._symbol_set = ('%s', lambda x: float_repr(float_round(__builtin__.float(x or 0.0),
precision_digits=scale),
precision_digits=scale))
class date(_column):
_type = 'date'
MONTHS = [
('01', 'January'),
('02', 'February'),
('03', 'March'),
('04', 'April'),
('05', 'May'),
('06', 'June'),
('07', 'July'),
('08', 'August'),
('09', 'September'),
('10', 'October'),
('11', 'November'),
('12', 'December')
]
@staticmethod
def today(*args):
""" Returns the current date in a format fit for being a
default value to a ``date`` field.
        This method should be provided as is to the _defaults dict; it
        should not be called.
"""
return DT.date.today().strftime(
tools.DEFAULT_SERVER_DATE_FORMAT)
@staticmethod
def context_today(model, cr, uid, context=None, timestamp=None):
"""Returns the current date as seen in the client's timezone
in a format fit for date fields.
This method may be passed as value to initialize _defaults.
:param Model model: model (osv) for which the date value is being
computed - automatically passed when used in
_defaults.
:param datetime timestamp: optional datetime value to use instead of
the current date and time (must be a
datetime, regular dates can't be converted
between timezones.)
:param dict context: the 'tz' key in the context should give the
name of the User/Client timezone (otherwise
UTC is used)
:rtype: str
"""
today = timestamp or DT.datetime.now()
context_today = None
if context and context.get('tz'):
tz_name = context['tz']
else:
user = model.pool['res.users'].browse(cr, SUPERUSER_ID, uid)
tz_name = user.tz
if tz_name:
try:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
utc_today = utc.localize(today, is_dst=False) # UTC = no DST
context_today = utc_today.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific today date, "
"using the UTC value for `today`",
exc_info=True)
return (context_today or today).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
@staticmethod
def date_to_datetime(model, cr, uid, userdate, context=None):
""" Convert date values expressed in user's timezone to
server-side UTC timestamp, assuming a default arbitrary
time of 12:00 AM - because a time is needed.
:param str userdate: date string in in user time zone
:return: UTC datetime string for server-side use
"""
user_date = DT.datetime.strptime(userdate, tools.DEFAULT_SERVER_DATE_FORMAT)
if context and context.get('tz'):
tz_name = context['tz']
else:
tz_name = model.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
if tz_name:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
user_datetime = user_date + DT.timedelta(hours=12.0)
local_timestamp = context_tz.localize(user_datetime, is_dst=False)
user_datetime = local_timestamp.astimezone(utc)
return user_datetime.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
return user_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
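    # Usage note (added, not in the original file): today and context_today
    # are written to be used as _defaults initializers, e.g.
    #
    #   _defaults = {
    #       'date_invoice': fields.date.context_today,
    #   }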
class datetime(_column):
_type = 'datetime'
MONTHS = [
('01', 'January'),
('02', 'February'),
('03', 'March'),
('04', 'April'),
('05', 'May'),
('06', 'June'),
('07', 'July'),
('08', 'August'),
('09', 'September'),
('10', 'October'),
('11', 'November'),
('12', 'December')
]
@staticmethod
def now(*args):
""" Returns the current datetime in a format fit for being a
default value to a ``datetime`` field.
        This method should be provided as is to the _defaults dict; it
        should not be called.
"""
return DT.datetime.now().strftime(
tools.DEFAULT_SERVER_DATETIME_FORMAT)
@staticmethod
def context_timestamp(cr, uid, timestamp, context=None):
"""Returns the given timestamp converted to the client's timezone.
This method is *not* meant for use as a _defaults initializer,
because datetime fields are automatically converted upon
        display on the client side. For _defaults, :meth:`fields.datetime.now`
        should be used instead.
:param datetime timestamp: naive datetime value (expressed in UTC)
to be converted to the client timezone
:param dict context: the 'tz' key in the context should give the
name of the User/Client timezone (otherwise
UTC is used)
:rtype: datetime
:return: timestamp converted to timezone-aware datetime in context
timezone
"""
assert isinstance(timestamp, DT.datetime), 'Datetime instance expected'
if context and context.get('tz'):
tz_name = context['tz']
else:
registry = openerp.modules.registry.RegistryManager.get(cr.dbname)
user = registry['res.users'].browse(cr, SUPERUSER_ID, uid)
tz_name = user.tz
utc_timestamp = pytz.utc.localize(timestamp, is_dst=False) # UTC = no DST
if tz_name:
try:
context_tz = pytz.timezone(tz_name)
return utc_timestamp.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific timestamp, "
"using the UTC value",
exc_info=True)
return utc_timestamp
class binary(_column):
_type = 'binary'
_symbol_c = '%s'
# Binary values may be byte strings (python 2.6 byte array), but
# the legacy OpenERP convention is to transfer and store binaries
# as base64-encoded strings. The base64 string may be provided as a
# unicode in some circumstances, hence the str() cast in symbol_f.
# This str coercion will only work for pure ASCII unicode strings,
# on purpose - non base64 data must be passed as a 8bit byte strings.
_symbol_f = lambda symb: symb and Binary(str(symb)) or None
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self, x: x and str(x)
_classic_read = False
_prefetch = False
def __init__(self, string='unknown', filters=None, **args):
_column.__init__(self, string=string, **args)
self.filters = filters
def get(self, cr, obj, ids, name, user=None, context=None, values=None):
if not context:
context = {}
if not values:
values = []
res = {}
for i in ids:
val = None
for v in values:
if v['id'] == i:
val = v[name]
break
# If client is requesting only the size of the field, we return it instead
# of the content. Presumably a separate request will be done to read the actual
# content if it's needed at some point.
# TODO: after 6.0 we should consider returning a dict with size and content instead of
# having an implicit convention for the value
if val and context.get('bin_size_%s' % name, context.get('bin_size')):
res[i] = tools.human_size(long(val))
else:
res[i] = val
return res
class selection(_column):
_type = 'selection'
def __init__(self, selection, string='unknown', **args):
if callable(selection):
from openerp import api
selection = api.expected(api.cr_uid_context, selection)
_column.__init__(self, string=string, **args)
self.selection = selection
def to_field_args(self):
args = super(selection, self).to_field_args()
args['selection'] = self.selection
return args
@classmethod
def reify(cls, cr, uid, model, field, context=None):
""" Munges the field's ``selection`` attribute as necessary to get
something useable out of it: calls it if it's a function, applies
translations to labels if it's not.
A callable ``selection`` is considered translated on its own.
:param orm.Model model:
:param _column field:
"""
if callable(field.selection):
return field.selection(model, cr, uid, context)
if not (context and 'lang' in context):
return field.selection
# field_to_dict isn't given a field name, only a field object, we
# need to get the name back in order to perform the translation lookup
field_name = next(
name for name, column in model._columns.iteritems()
if column == field)
translation_filter = "%s,%s" % (model._name, field_name)
translate = functools.partial(
model.pool['ir.translation']._get_source,
cr, uid, translation_filter, 'selection', context['lang'])
return [
(value, translate(label))
for value, label in field.selection
]
# ---------------------------------------------------------
# Relationals fields
# ---------------------------------------------------------
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update
# (2, ID) remove (delete)
# (3, ID) unlink one (target id or target of relation)
# (4, ID) link
# (5) unlink all (only valid for one2many)
#
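# Illustrative sketch (added note, not in the original file; model and field
# names are made up): the command tuples above as they appear in a write()
# call on a one2many field.
#
#   order_obj.write(cr, uid, [order_id], {
#       'line_ids': [
#           (0, 0, {'name': 'new line'}),  # create and link a new record
#           (1, 7, {'name': 'renamed'}),   # update linked record with id 7
#           (2, 8),                        # delete linked record with id 8
#           (4, 9),                        # link existing record with id 9
#       ],
#   }, context=context)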
class many2one(_column):
_classic_read = False
_classic_write = True
_type = 'many2one'
_symbol_c = '%s'
_symbol_f = lambda x: x or None
_symbol_set = (_symbol_c, _symbol_f)
def __init__(self, obj, string='unknown', auto_join=False, **args):
_column.__init__(self, string=string, **args)
self._obj = obj
self._auto_join = auto_join
def to_field_args(self):
args = super(many2one, self).to_field_args()
args['comodel_name'] = self._obj
args['auto_join'] = self._auto_join
return args
def set(self, cr, obj_src, id, field, values, user=None, context=None):
if not context:
context = {}
obj = obj_src.pool[self._obj]
self._table = obj._table
if type(values) == type([]):
for act in values:
if act[0] == 0:
id_new = obj.create(cr, act[2])
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (id_new, id))
elif act[0] == 1:
obj.write(cr, [act[1]], act[2], context=context)
elif act[0] == 2:
cr.execute('delete from '+self._table+' where id=%s', (act[1],))
elif act[0] == 3 or act[0] == 5:
cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
elif act[0] == 4:
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (act[1], id))
else:
if values:
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (values, id))
else:
cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
return obj.pool[self._obj].search(cr, uid, args+self._domain+[('name', 'like', value)], offset, limit, context=context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
return value[1] if isinstance(value, tuple) else tools.ustr(value)
class one2many(_column):
_classic_read = False
_classic_write = False
_prefetch = False
_type = 'one2many'
# one2many columns are not copied by default
copy = False
def __init__(self, obj, fields_id, string='unknown', limit=None, auto_join=False, **args):
_column.__init__(self, string=string, **args)
self._obj = obj
self._fields_id = fields_id
self._limit = limit
self._auto_join = auto_join
#one2many can't be used as condition for defaults
assert(self.change_default != True)
def to_field_args(self):
args = super(one2many, self).to_field_args()
args['comodel_name'] = self._obj
args['inverse_name'] = self._fields_id
args['auto_join'] = self._auto_join
args['limit'] = self._limit
return args
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
if self._context:
context = dict(context or {})
context.update(self._context)
# retrieve the records in the comodel
comodel = obj.pool[self._obj].browse(cr, user, [], context)
inverse = self._fields_id
domain = self._domain(obj) if callable(self._domain) else self._domain
domain = domain + [(inverse, 'in', ids)]
records = comodel.search(domain, limit=self._limit)
result = {id: [] for id in ids}
# read the inverse of records without prefetching other fields on them
for record in records.with_context(prefetch_fields=False):
# record[inverse] may be a record or an integer
result[int(record[inverse])].append(record.id)
return result
def set(self, cr, obj, id, field, values, user=None, context=None):
result = []
context = dict(context or {})
context.update(self._context)
context['recompute'] = False # recomputation is done by outer create/write
if not values:
return
obj = obj.pool[self._obj]
_table = obj._table
for act in values:
if act[0] == 0:
act[2][self._fields_id] = id
id_new = obj.create(cr, user, act[2], context=context)
result += obj._store_get_values(cr, user, [id_new], act[2].keys(), context)
elif act[0] == 1:
obj.write(cr, user, [act[1]], act[2], context=context)
elif act[0] == 2:
obj.unlink(cr, user, [act[1]], context=context)
elif act[0] == 3:
reverse_rel = obj._all_columns.get(self._fields_id)
assert reverse_rel, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
# if the model has on delete cascade, just delete the row
if reverse_rel.column.ondelete == "cascade":
obj.unlink(cr, user, [act[1]], context=context)
else:
cr.execute('update '+_table+' set '+self._fields_id+'=null where id=%s', (act[1],))
elif act[0] == 4:
# table of the field (parent_model in case of inherit)
field_model = self._fields_id in obj.pool[self._obj]._columns and self._obj or obj.pool[self._obj]._all_columns[self._fields_id].parent_model
field_table = obj.pool[field_model]._table
cr.execute("select 1 from {0} where id=%s and {1}=%s".format(field_table, self._fields_id), (act[1], id))
if not cr.fetchone():
# Must use write() to recompute parent_store structure if needed and check access rules
obj.write(cr, user, [act[1]], {self._fields_id:id}, context=context or {})
elif act[0] == 5:
reverse_rel = obj._all_columns.get(self._fields_id)
assert reverse_rel, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
# if the o2m has a static domain we must respect it when unlinking
domain = self._domain(obj) if callable(self._domain) else self._domain
extra_domain = domain or []
ids_to_unlink = obj.search(cr, user, [(self._fields_id,'=',id)] + extra_domain, context=context)
# If the model has cascade deletion, we delete the rows because it is the intended behavior,
# otherwise we only nullify the reverse foreign key column.
if reverse_rel.column.ondelete == "cascade":
obj.unlink(cr, user, ids_to_unlink, context=context)
else:
obj.write(cr, user, ids_to_unlink, {self._fields_id: False}, context=context)
elif act[0] == 6:
# Must use write() to recompute parent_store structure if needed
obj.write(cr, user, act[2], {self._fields_id:id}, context=context or {})
ids2 = act[2] or [0]
cr.execute('select id from '+_table+' where '+self._fields_id+'=%s and id <> ALL (%s)', (id,ids2))
ids3 = [x[0] for x in cr.fetchall()]
obj.write(cr, user, ids3, {self._fields_id:False}, context=context or {})
return result
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
domain = self._domain(obj) if callable(self._domain) else self._domain
return obj.pool[self._obj].name_search(cr, uid, value, domain, operator, context=context, limit=limit)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
raise NotImplementedError('One2Many columns should not be used as record name (_rec_name)')
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update (write fields to ID)
# (2, ID) remove (calls unlink on ID, that will also delete the relationship because of the ondelete)
# (3, ID) unlink (delete the relationship between the two objects but does not delete ID)
# (4, ID) link (add a relationship)
# (5, ID) unlink all
# (6, ?, ids) set a list of links
#
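# A minimal sketch of these command tuples in use (model and field names are
# illustrative, not from this file):
#   partner.write(cr, uid, [pid], {
#       'child_ids': [
#           (0, 0, {'name': 'New contact'}),   # create a record and link it
#           (1, 7, {'name': 'Renamed'}),       # write on the linked record 7
#           (3, 8),                            # drop the link to 8, keep the record
#       ],
#   }, context=context)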
class many2many(_column):
"""Encapsulates the logic of a many-to-many bidirectional relationship, handling the
low-level details of the intermediary relationship table transparently.
A many-to-many relationship is always symmetrical, and can be declared and accessed
from either endpoint model.
If ``rel`` (relationship table name), ``id1`` (source foreign key column name)
or ``id2`` (destination foreign key column name) are not specified, the system
provides default values. By default this allows only a single symmetrical
many-to-many relationship between the source and destination models.
For multiple many-to-many relationships between the same models, and for
relationships where the source and destination models are the same, ``rel``,
``id1`` and ``id2`` should be specified explicitly.
:param str obj: destination model
:param str rel: optional name of the intermediary relationship table. If not specified,
a canonical name will be derived based on the alphabetically-ordered
model names of the source and destination (in the form: ``amodel_bmodel_rel``).
Automatic naming is not possible when the source and destination are
the same, for obvious ambiguity reasons.
:param str id1: optional name for the column holding the foreign key to the current
model in the relationship table. If not specified, a canonical name
will be derived based on the model name (in the form: ``src_model_id``).
:param str id2: optional name for the column holding the foreign key to the destination
model in the relationship table. If not specified, a canonical name
will be derived based on the model name (in the form: ``dest_model_id``).
:param str string: field label
"""
_classic_read = False
_classic_write = False
_prefetch = False
_type = 'many2many'
def __init__(self, obj, rel=None, id1=None, id2=None, string='unknown', limit=None, **args):
"""
"""
_column.__init__(self, string=string, **args)
self._obj = obj
if rel and '.' in rel:
raise Exception(_('The second argument of the many2many field %s must be a SQL table! '
'You used %s, which is not a valid SQL table name.') % (string, rel))
self._rel = rel
self._id1 = id1
self._id2 = id2
self._limit = limit
def to_field_args(self):
args = super(many2many, self).to_field_args()
args['comodel_name'] = self._obj
args['relation'] = self._rel
args['column1'] = self._id1
args['column2'] = self._id2
args['limit'] = self._limit
return args
def _sql_names(self, source_model):
"""Return the SQL names defining the structure of the m2m relationship table
:return: (m2m_table, local_col, dest_col) where m2m_table is the table name,
local_col is the name of the column holding the current model's FK, and
dest_col is the name of the column holding the destination model's FK.
"""
tbl, col1, col2 = self._rel, self._id1, self._id2
if not all((tbl, col1, col2)):
# the default table name is based on the stable alphabetical order of tables
dest_model = source_model.pool[self._obj]
tables = tuple(sorted([source_model._table, dest_model._table]))
if not tbl:
assert tables[0] != tables[1], 'Implicit/Canonical naming of m2m relationship table '\
'is not possible when source and destination models are '\
'the same'
tbl = '%s_%s_rel' % tables
if not col1:
col1 = '%s_id' % source_model._table
if not col2:
col2 = '%s_id' % dest_model._table
return tbl, col1, col2
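# Illustrative result: with source table 'res_partner', destination table
# 'res_users' and no explicit rel/id1/id2, this returns
# ('res_partner_res_users_rel', 'res_partner_id', 'res_users_id').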
def _get_query_and_where_params(self, cr, model, ids, values, where_params):
""" Extracted from ``get`` to facilitate fine-tuning of the generated
query. """
query = 'SELECT %(rel)s.%(id2)s, %(rel)s.%(id1)s \
FROM %(rel)s, %(from_c)s \
WHERE %(rel)s.%(id1)s IN %%s \
AND %(rel)s.%(id2)s = %(tbl)s.id \
%(where_c)s \
%(order_by)s \
%(limit)s \
OFFSET %(offset)d' \
% values
return query, where_params
def get(self, cr, model, ids, name, user=None, offset=0, context=None, values=None):
if not context:
context = {}
if not values:
values = {}
res = {}
if not ids:
return res
for id in ids:
res[id] = []
if offset:
_logger.warning(
"Specifying offset at a many2many.get() is deprecated and may"
" produce unpredictable results.")
obj = model.pool[self._obj]
rel, id1, id2 = self._sql_names(model)
# static domains are lists, and are evaluated both here and on the client side,
# while string domains are supposed to be dynamic and are evaluated on the
# client side only (and are thus ignored here)
# FIXME: make this distinction explicit in the API!
domain = isinstance(self._domain, list) and self._domain or []
wquery = obj._where_calc(cr, user, domain, context=context)
obj._apply_ir_rules(cr, user, wquery, 'read', context=context)
from_c, where_c, where_params = wquery.get_sql()
if where_c:
where_c = ' AND ' + where_c
order_by = ' ORDER BY "%s".%s' % (obj._table, obj._order.split(',')[0])
limit_str = ''
if self._limit is not None:
limit_str = ' LIMIT %d' % self._limit
query, where_params = self._get_query_and_where_params(cr, model, ids, {'rel': rel,
'from_c': from_c,
'tbl': obj._table,
'id1': id1,
'id2': id2,
'where_c': where_c,
'limit': limit_str,
'order_by': order_by,
'offset': offset,
}, where_params)
cr.execute(query, [tuple(ids),] + where_params)
for r in cr.fetchall():
res[r[1]].append(r[0])
return res
def set(self, cr, model, id, name, values, user=None, context=None):
if not context:
context = {}
if not values:
return
rel, id1, id2 = self._sql_names(model)
obj = model.pool[self._obj]
for act in values:
if not isinstance(act, (list, tuple)) or not act:
continue
if act[0] == 0:
idnew = obj.create(cr, user, act[2], context=context)
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, idnew))
elif act[0] == 1:
obj.write(cr, user, [act[1]], act[2], context=context)
elif act[0] == 2:
obj.unlink(cr, user, [act[1]], context=context)
elif act[0] == 3:
cr.execute('delete from '+rel+' where ' + id1 + '=%s and '+ id2 + '=%s', (id, act[1]))
elif act[0] == 4:
# following queries are in the same transaction - so should be relatively safe
cr.execute('SELECT 1 FROM '+rel+' WHERE '+id1+' = %s and '+id2+' = %s', (id, act[1]))
if not cr.fetchone():
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, act[1]))
elif act[0] == 5:
cr.execute('delete from '+rel+' where ' + id1 + ' = %s', (id,))
elif act[0] == 6:
d1, d2, tables = obj.pool.get('ir.rule').domain_get(cr, user, obj._name, context=context)
if d1:
d1 = ' and ' + ' and '.join(d1)
else:
d1 = ''
cr.execute('delete from '+rel+' where '+id1+'=%s AND '+id2+' IN (SELECT '+rel+'.'+id2+' FROM '+rel+', '+','.join(tables)+' WHERE '+rel+'.'+id1+'=%s AND '+rel+'.'+id2+' = '+obj._table+'.id '+ d1 +')', [id, id]+d2)
for act_nbr in act[2]:
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s, %s)', (id, act_nbr))
#
# TODO: use a name_search
#
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
return obj.pool[self._obj].search(cr, uid, args+self._domain+[('name', operator, value)], offset, limit, context=context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
raise NotImplementedError('Many2Many columns should not be used as record name (_rec_name)')
def get_nice_size(value):
size = 0
if isinstance(value, (int,long)):
size = value
elif value: # this is supposed to be a string
size = len(value)
return tools.human_size(size)
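# e.g. get_nice_size(2048) and get_nice_size('x' * 2048) both report ~2 Kb
# (the exact formatting depends on tools.human_size)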
# See http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
# and http://bugs.python.org/issue10066
invalid_xml_low_bytes = re.compile(r'[\x00-\x08\x0b-\x0c\x0e-\x1f]')
def sanitize_binary_value(value):
# binary fields should be 7-bit ASCII base64-encoded data,
# but we do additional sanity checks to make sure the values
# are not something else that won't pass via XML-RPC
if isinstance(value, (xmlrpclib.Binary, tuple, list, dict)):
# these builtin types are meant to pass untouched
return value
# Handle invalid bytes values that will cause problems
# for XML-RPC. See for more info:
# - http://bugs.python.org/issue10066
# - http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
# Coercing to unicode would normally allow it to properly pass via
# XML-RPC, transparently encoded as UTF-8 by xmlrpclib.
# (this works for _any_ byte values, thanks to the fallback
# to latin-1 passthrough encoding when decoding to unicode)
value = tools.ustr(value)
# Due to Python bug #10066 this could still yield invalid XML
# bytes, specifically in the low byte range, that will crash
# the decoding side: [\x00-\x08\x0b-\x0c\x0e-\x1f]
# So check for low bytes values, and if any, perform
# base64 encoding - not very smart or useful, but this is
# our last resort to avoid crashing the request.
if invalid_xml_low_bytes.search(value):
# b64-encode after restoring the pure bytes with latin-1
# passthrough encoding
value = base64.b64encode(value.encode('latin-1'))
return value
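# Illustrative behavior (assumed inputs):
#   sanitize_binary_value('abc') -> u'abc' (plain text passes through as unicode)
#   sanitize_binary_value('a\x00b') -> 'YQBi' (low bytes trigger the base64 fallback)
#   xmlrpclib.Binary payloads, tuples, lists and dicts are returned untouched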
# ---------------------------------------------------------
# Function fields
# ---------------------------------------------------------
class function(_column):
"""
A field whose value is computed by a function (rather
than being read from the database).
:param fnct: the callable that will compute the field value.
:param arg: arbitrary value to be passed to ``fnct`` when computing the value.
:param fnct_inv: the callable that will allow writing values in that field
(if not provided, the field is read-only).
:param fnct_inv_arg: arbitrary value to be passed to ``fnct_inv`` when
writing a value.
:param str type: type of the field simulated by the function field
:param fnct_search: the callable that allows searching on the field
(if not provided, search will not return any result).
:param store: store computed value in database
(see :ref:`The *store* parameter <field-function-store>`).
:type store: True or dict specifying triggers for field computation
:param multi: name of batch for batch computation of function fields.
All fields with the same batch name will be computed by
a single function call. This changes the signature of the
``fnct`` callable.
.. _field-function-fnct:
.. rubric:: The ``fnct`` parameter
The callable implementing the function field must have the following signature:
.. function:: fnct(model, cr, uid, ids, field_name(s), arg, context)
Implements the function field.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param field_name(s): name of the field to compute, or if ``multi`` is provided,
list of field names to compute.
:type field_name(s): str | [str]
:param arg: arbitrary value passed when declaring the function field
:rtype: dict
:return: mapping of ``ids`` to computed values, or if multi is provided,
to a map of field_names to computed values
The values in the returned dictionary must be of the type specified by the type
argument in the field declaration.
Here is an example with a simple ``char`` function field::
# declarations
def compute(self, cr, uid, ids, field_name, arg, context):
result = {}
# ...
return result
_columns['my_char'] = fields.function(compute, type='char', size=50)
# when called with ``ids=[1,2,3]``, ``compute`` could return:
{
1: 'foo',
2: 'bar',
3: False # null values should be returned explicitly too
}
If ``multi`` is set, then ``field_name`` is replaced by ``field_names``: a list
of the field names that should be computed. Each value in the returned
dictionary must then be a dictionary mapping field names to values.
Here is an example where two function fields (``name`` and ``age``)
are both computed by a single function field::
# declarations
def compute_person_data(self, cr, uid, ids, field_names, arg, context):
result = {}
# ...
return result
_columns['name'] = fields.function(compute_person_data, type='char',\
size=50, multi='person_data')
_columns['age'] = fields.function(compute_person_data, type='integer',\
multi='person_data')
# when called with ``ids=[1,2,3]``, ``compute_person_data`` could return:
{
1: {'name': 'Bob', 'age': 23},
2: {'name': 'Sally', 'age': 19},
3: {'name': 'unknown', 'age': False}
}
.. _field-function-fnct-inv:
.. rubric:: The ``fnct_inv`` parameter
This callable implements the write operation for the function field
and must have the following signature:
.. function:: fnct_inv(model, cr, uid, id, field_name, field_value, fnct_inv_arg, context)
Callable that implements the ``write`` operation for the function field.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param int id: the identifier of the object to write on
:param str field_name: name of the field to set
:param fnct_inv_arg: arbitrary value passed when declaring the function field
:return: True
When writing values for a function field, the ``multi`` parameter is ignored.
.. _field-function-fnct-search:
.. rubric:: The ``fnct_search`` parameter
This callable implements the search operation for the function field
and must have the following signature:
.. function:: fnct_search(model, cr, uid, model_again, field_name, criterion, context)
Callable that implements the ``search`` operation for the function field by expanding
a search criterion based on the function field into a new domain based only on
columns that are stored in the database.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param orm model_again: same value as ``model`` (seriously! this is for backwards
compatibility)
:param str field_name: name of the field to search on
:param list criterion: domain component specifying the search criterion on the field.
:rtype: list
:return: domain to use instead of ``criterion`` when performing the search.
This new domain must be based only on columns stored in the database, as it
will be used directly without any translation.
The returned value must be a domain, that is, a list of the form [(field_name, operator, operand)].
The most generic way to implement ``fnct_search`` is to directly search for the records that
match the given ``criterion``, and return their ``ids`` wrapped in a domain, such as
``[('id','in',[1,3,5])]``.
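A minimal sketch of that generic approach (``_compute_total`` and the
``OPERATORS`` comparison map are illustrative helpers, not part of this
module)::
    def _search_total(self, cr, uid, obj, name, criterion, context=None):
        # brute force: compute the field for every record, keep the matches
        ids = self.search(cr, uid, [], context=context)
        totals = self._compute_total(cr, uid, ids, name, None, context=context)
        (_, op, operand) = criterion[0]
        matching = [id for id, total in totals.iteritems()
                    if OPERATORS[op](total, operand)]
        return [('id', 'in', matching)]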
.. _field-function-store:
.. rubric:: The ``store`` parameter
The ``store`` parameter allows caching the result of the field computation in the
database, and defining the triggers that will invalidate that cache and force a
recomputation of the function field.
When not provided, the field is computed every time its value is read.
The value of ``store`` may be either ``True`` (to recompute the field value whenever
any field in the same record is modified), or a dictionary specifying a more
flexible set of recomputation triggers.
A trigger specification is a dictionary that maps the names of the models that
will trigger the computation, to a tuple describing the trigger rule, in the
following form::
store = {
'trigger_model': (mapping_function,
['trigger_field1', 'trigger_field2'],
priority),
}
A trigger rule is defined by a 3-item tuple where:
* The ``mapping_function`` is defined as follows:
.. function:: mapping_function(trigger_model, cr, uid, trigger_ids, context)
Callable that maps record ids of a trigger model to ids of the
corresponding records in the source model (whose field values
need to be recomputed).
:param orm model: trigger_model
:param list trigger_ids: ids of the records of trigger_model that were
modified
:rtype: list
:return: list of ids of the source model whose function field values
need to be recomputed
* The second item is a list of the fields that should act as triggers for
the computation. If an empty list is given, all fields will act as triggers.
* The last item is the priority, used to order the triggers when processing them
after any write operation on a model that has function field triggers. The
default priority is 10.
In fact, setting store = True is the same as using the following trigger dict::
store = {
'model_itself': (lambda self, cr, uid, ids, context: ids,
[],
10)
}
"""
_classic_read = False
_classic_write = False
_prefetch = False
_type = 'function'
_properties = True
# function fields are not copied by default
copy = False
#
# multi: compute several fields in one call
#
def __init__(self, fnct, arg=None, fnct_inv=None, fnct_inv_arg=None, type='float', fnct_search=None, obj=None, store=False, multi=False, **args):
_column.__init__(self, **args)
self._obj = obj
self._fnct = fnct
self._fnct_inv = fnct_inv
self._arg = arg
self._multi = multi
if 'relation' in args:
self._obj = args['relation']
self.digits = args.get('digits', (16,2))
self.digits_compute = args.get('digits_compute', None)
if callable(args.get('selection')):
from openerp import api
self.selection = api.expected(api.cr_uid_context, args['selection'])
self._fnct_inv_arg = fnct_inv_arg
if not fnct_inv:
self.readonly = 1
self._type = type
self._fnct_search = fnct_search
self.store = store
if not fnct_search and not store:
self.selectable = False
if store:
if self._type != 'many2one':
# m2o fields need to return tuples with name_get, not just foreign keys
self._classic_read = True
self._classic_write = True
if type == 'binary':
self._symbol_get = lambda x: x and str(x)
else:
self._prefetch = True
if type == 'char':
self._symbol_c = char._symbol_c
self._symbol_f = lambda x: _symbol_set_char(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
else:
type_class = globals().get(type)
if type_class is not None:
self._symbol_c = type_class._symbol_c
self._symbol_f = type_class._symbol_f
self._symbol_set = type_class._symbol_set
def to_field_args(self):
args = super(function, self).to_field_args()
if self._type in ('float',):
args['digits'] = self.digits_compute or self.digits
elif self._type in ('selection', 'reference'):
args['selection'] = self.selection
elif self._type in ('many2one', 'one2many', 'many2many'):
args['comodel_name'] = self._obj
return args
def digits_change(self, cr):
if self._type == 'float':
if self.digits_compute:
self.digits = self.digits_compute(cr)
if self.digits:
precision, scale = self.digits
self._symbol_set = ('%s', lambda x: float_repr(float_round(__builtin__.float(x or 0.0),
precision_digits=scale),
precision_digits=scale))
def search(self, cr, uid, obj, name, args, context=None):
if not self._fnct_search:
#CHECKME: should raise an exception
return []
return self._fnct_search(obj, cr, uid, obj, name, args, context=context)
def postprocess(self, cr, uid, obj, field, value=None, context=None):
return self._postprocess_batch(cr, uid, obj, field, {0: value}, context=context)[0]
def _postprocess_batch(self, cr, uid, obj, field, values, context=None):
if not values:
return values
if context is None:
context = {}
field_type = obj._columns[field]._type
new_values = dict(values)
if field_type == 'binary':
if context.get('bin_size'):
# client requests only the size of binary fields
for rid, value in values.iteritems():
if value:
new_values[rid] = get_nice_size(value)
elif not context.get('bin_raw'):
for rid, value in values.iteritems():
if value:
new_values[rid] = sanitize_binary_value(value)
return new_values
def get(self, cr, obj, ids, name, uid=False, context=None, values=None):
multi = self._multi
# if we already have a value, don't recompute it.
# This happens in the case of stored many2one fields.
if values and not multi and name in values[0]:
result = dict((v['id'], v[name]) for v in values)
elif values and multi and all(n in values[0] for n in name):
result = dict((v['id'], dict((n, v[n]) for n in name)) for v in values)
else:
result = self._fnct(obj, cr, uid, ids, name, self._arg, context)
if multi:
swap = {}
for rid, values in result.iteritems():
for f, v in values.iteritems():
if f not in name:
continue
swap.setdefault(f, {})[rid] = v
for field, values in swap.iteritems():
new_values = self._postprocess_batch(cr, uid, obj, field, values, context)
for rid, value in new_values.iteritems():
result[rid][field] = value
else:
result = self._postprocess_batch(cr, uid, obj, name, result, context)
return result
def set(self, cr, obj, id, name, value, user=None, context=None):
if not context:
context = {}
if self._fnct_inv:
self._fnct_inv(obj, cr, user, id, name, value, self._fnct_inv_arg, context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
# Function fields are supposed to emulate a basic field type,
# so they can delegate to the basic type for record name rendering
return globals()[field._type]._as_display_name(field, cr, uid, obj, value, context=context)
# ---------------------------------------------------------
# Related fields
# ---------------------------------------------------------
class related(function):
"""Field that points to some data inside another field of the current record.
Example::
_columns = {
'foo_id': fields.many2one('my.foo', 'Foo'),
'bar': fields.related('foo_id', 'frol', type='char', string='Frol of Foo'),
}
"""
def _fnct_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
# assume self._arg = ('foo', 'bar', 'baz')
# domain = [(name, op, val)] => search [('foo.bar.baz', op, val)]
field = '.'.join(self._arg)
return [(field, x[1], x[2]) for x in domain]
def _fnct_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
for instance in obj.browse(cr, uid, ids, context=context):
# traverse all fields except the last one
for field in self.arg[:-1]:
instance = instance[field][:1]
if instance:
# write on the last field of the target record
instance.write({self.arg[-1]: values})
def _fnct_read(self, obj, cr, uid, ids, field_name, args, context=None):
res = {}
for record in obj.browse(cr, SUPERUSER_ID, ids, context=context):
value = record
# traverse all fields except the last one
for field in self.arg[:-1]:
value = value[field][:1]
# read the last field on the target record
res[record.id] = value[self.arg[-1]]
if self._type == 'many2one':
# res[id] is a recordset; convert it to (id, name) or False.
# Perform name_get as root, as seeing the name of a related object depends on
# access right of source document, not target, so user may not have access.
value_ids = list(set(value.id for value in res.itervalues() if value))
value_name = dict(obj.pool[self._obj].name_get(cr, SUPERUSER_ID, value_ids, context=context))
res = dict((id, bool(value) and (value.id, value_name[value.id])) for id, value in res.iteritems())
elif self._type in ('one2many', 'many2many'):
# res[id] is a recordset; convert it to a list of ids
res = dict((id, value.ids) for id, value in res.iteritems())
return res
def __init__(self, *arg, **args):
self.arg = arg
self._relations = []
super(related, self).__init__(self._fnct_read, arg, self._fnct_write, fnct_inv_arg=arg, fnct_search=self._fnct_search, **args)
if self.store is True:
# TODO: improve here to change self.store = {...} according to related objects
pass
class sparse(function):
def convert_value(self, obj, cr, uid, record, value, read_value, context=None):
"""
+ For a many2many field, a list of tuples is expected.
Here is the list of tuples that are accepted, with the corresponding semantics::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
(3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
(4, ID) link to existing record with id = ID (adds a relationship)
(5) unlink all (like using (3,ID) for all linked records)
(6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
Example:
[(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
+ For a one2many field, a list of tuples is expected.
Here is the list of tuples that are accepted, with the corresponding semantics::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
Example:
[(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
"""
if self._type == 'many2many':
assert value[0][0] == 6, 'Unsupported m2m value for sparse field: %s' % value
return value[0][2]
elif self._type == 'one2many':
if not read_value:
read_value = []
relation_obj = obj.pool[self.relation]
for vals in value:
assert vals[0] in (0,1,2), 'Unsupported o2m value for sparse field: %s' % vals
if vals[0] == 0:
read_value.append(relation_obj.create(cr, uid, vals[2], context=context))
elif vals[0] == 1:
relation_obj.write(cr, uid, vals[1], vals[2], context=context)
elif vals[0] == 2:
relation_obj.unlink(cr, uid, vals[1], context=context)
read_value.remove(vals[1])
return read_value
return value
def _fnct_write(self, obj, cr, uid, ids, field_name, value, args, context=None):
if not isinstance(ids, list):
ids = [ids]
records = obj.browse(cr, uid, ids, context=context)
for record in records:
# grab serialized value as object - already deserialized
serialized = getattr(record, self.serialization_field)
if value is None:
# simply delete the key to unset it.
serialized.pop(field_name, None)
else:
serialized[field_name] = self.convert_value(obj, cr, uid, record, value, serialized.get(field_name), context=context)
obj.write(cr, uid, ids, {self.serialization_field: serialized}, context=context)
return True
def _fnct_read(self, obj, cr, uid, ids, field_names, args, context=None):
results = {}
records = obj.browse(cr, uid, ids, context=context)
for record in records:
# grab serialized value as object - already deserialized
serialized = getattr(record, self.serialization_field)
results[record.id] = {}
for field_name in field_names:
field_type = obj._columns[field_name]._type
value = serialized.get(field_name, False)
if field_type in ('one2many','many2many'):
value = value or []
if value:
# filter out deleted records as superuser
relation_obj = obj.pool[obj._columns[field_name].relation]
value = relation_obj.exists(cr, openerp.SUPERUSER_ID, value)
if isinstance(value, (int, long)) and field_type == 'many2one':
relation_obj = obj.pool[obj._columns[field_name].relation]
# check for deleted record as superuser
if not relation_obj.exists(cr, openerp.SUPERUSER_ID, [value]):
value = False
results[record.id][field_name] = value
return results
def __init__(self, serialization_field, **kwargs):
self.serialization_field = serialization_field
super(sparse, self).__init__(self._fnct_read, fnct_inv=self._fnct_write, multi='__sparse_multi', **kwargs)
# ---------------------------------------------------------
# Dummy fields
# ---------------------------------------------------------
class dummy(function):
def _fnct_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
return []
def _fnct_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
return False
def _fnct_read(self, obj, cr, uid, ids, field_name, args, context=None):
return {}
def __init__(self, *arg, **args):
self.arg = arg
self._relations = []
super(dummy, self).__init__(self._fnct_read, arg, self._fnct_write, fnct_inv_arg=arg, fnct_search=self._fnct_search, **args)
# ---------------------------------------------------------
# Serialized fields
# ---------------------------------------------------------
class serialized(_column):
""" A field able to store an arbitrary python data structure.
Note: only plain components allowed.
"""
def _symbol_set_struct(val):
return simplejson.dumps(val)
def _symbol_get_struct(self, val):
return simplejson.loads(val or '{}')
_prefetch = False
_type = 'serialized'
_symbol_c = '%s'
_symbol_f = _symbol_set_struct
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = _symbol_get_struct
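# Example declaration (illustrative field name):
#   _columns = {
#       'metadata': fields.serialized('Metadata'),  # stored as a JSON text column
#   }
# record.metadata may then hold plain dicts/lists, e.g. {'color': 'red', 'tags': [1, 2]}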
# TODO: review this class completely for speed improvement
class property(function):
def to_field_args(self):
args = super(property, self).to_field_args()
args['company_dependent'] = True
return args
def _fnct_search(self, tobj, cr, uid, obj, name, domain, context=None):
ir_property = obj.pool['ir.property']
result = []
for field, operator, value in domain:
result += ir_property.search_multi(cr, uid, name, tobj._name, operator, value, context=context)
return result
def _fnct_write(self, obj, cr, uid, id, prop_name, value, obj_dest, context=None):
ir_property = obj.pool['ir.property']
ir_property.set_multi(cr, uid, prop_name, obj._name, {id: value}, context=context)
return True
def _fnct_read(self, obj, cr, uid, ids, prop_names, obj_dest, context=None):
ir_property = obj.pool['ir.property']
res = {id: {} for id in ids}
for prop_name in prop_names:
column = obj._all_columns[prop_name].column
values = ir_property.get_multi(cr, uid, prop_name, obj._name, ids, context=context)
if column._type == 'many2one':
# name_get the non-null values as SUPERUSER_ID
vals = sum(set(filter(None, values.itervalues())),
obj.pool[column._obj].browse(cr, uid, [], context=context))
vals_name = dict(vals.sudo().name_get()) if vals else {}
for id, value in values.iteritems():
ng = False
if value and value.id in vals_name:
ng = value.id, vals_name[value.id]
res[id][prop_name] = ng
else:
for id, value in values.iteritems():
res[id][prop_name] = value
return res
def __init__(self, **args):
if 'view_load' in args:
_logger.warning("view_load attribute is deprecated on ir.fields. Args: %r", args)
args = dict(args)
args['obj'] = args.pop('relation', '') or args.get('obj', '')
super(property, self).__init__(
fnct=self._fnct_read,
fnct_inv=self._fnct_write,
fnct_search=self._fnct_search,
multi='properties',
**args
)
class column_info(object):
""" Struct containing details about an osv column, either one local to
its model, or one inherited via _inherits.
.. attribute:: name
name of the column
.. attribute:: column
column instance, subclass of :class:`_column`
.. attribute:: parent_model
if the column is inherited, name of the model that contains it,
``None`` for local columns.
.. attribute:: parent_column
the name of the column containing the m2o relationship to the
parent model that contains this column, ``None`` for local columns.
.. attribute:: original_parent
if the column is inherited, name of the original parent model that
contains it (i.e. in case of multilevel inheritance); ``None`` for
local columns.
"""
def __init__(self, name, column, parent_model=None, parent_column=None, original_parent=None):
self.name = name
self.column = column
self.parent_model = parent_model
self.parent_column = parent_column
self.original_parent = original_parent
def __str__(self):
return '%s(%s, %s, %s, %s, %s)' % (
self.__class__.__name__, self.name, self.column,
self.parent_model, self.parent_column, self.original_parent)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,693,199,041,015,428,000 | 41.390244 | 228 | 0.564758 | false |
zofuthan/edx-platform | cms/djangoapps/contentstore/tests/test_orphan.py | 77 | 4598 | """
Test finding orphans via the view and django config
"""
import json
from contentstore.tests.utils import CourseTestCase
from student.models import CourseEnrollment
from contentstore.utils import reverse_course_url
class TestOrphanBase(CourseTestCase):
"""
Base class for Studio tests that require orphaned modules
"""
def setUp(self):
super(TestOrphanBase, self).setUp()
# create chapters and add them to course tree
chapter1 = self.store.create_child(self.user.id, self.course.location, 'chapter', "Chapter1")
self.store.publish(chapter1.location, self.user.id)
chapter2 = self.store.create_child(self.user.id, self.course.location, 'chapter', "Chapter2")
self.store.publish(chapter2.location, self.user.id)
# orphan chapter
orphan_chapter = self.store.create_item(self.user.id, self.course.id, 'chapter', "OrphanChapter")
self.store.publish(orphan_chapter.location, self.user.id)
# create vertical and add it as child to chapter1
vertical1 = self.store.create_child(self.user.id, chapter1.location, 'vertical', "Vertical1")
self.store.publish(vertical1.location, self.user.id)
# create orphan vertical
orphan_vertical = self.store.create_item(self.user.id, self.course.id, 'vertical', "OrphanVert")
self.store.publish(orphan_vertical.location, self.user.id)
# create component and add it to vertical1
html1 = self.store.create_child(self.user.id, vertical1.location, 'html', "Html1")
self.store.publish(html1.location, self.user.id)
# create component and add it as a child to vertical1 and orphan_vertical
multi_parent_html = self.store.create_child(self.user.id, vertical1.location, 'html', "multi_parent_html")
self.store.publish(multi_parent_html.location, self.user.id)
orphan_vertical.children.append(multi_parent_html.location)
self.store.update_item(orphan_vertical, self.user.id)
# create an orphaned html module
orphan_html = self.store.create_item(self.user.id, self.course.id, 'html', "OrphanHtml")
self.store.publish(orphan_html.location, self.user.id)
self.store.create_child(self.user.id, self.course.location, 'static_tab', "staticuno")
self.store.create_child(self.user.id, self.course.location, 'about', "overview")
self.store.create_child(self.user.id, self.course.location, 'course_info', "updates")
class TestOrphan(TestOrphanBase):
"""
Test finding orphans via view and django config
"""
def setUp(self):
super(TestOrphan, self).setUp()
self.orphan_url = reverse_course_url('orphan_handler', self.course.id)
def test_mongo_orphan(self):
"""
Test that old mongo finds the orphans
"""
orphans = json.loads(
self.client.get(
self.orphan_url,
HTTP_ACCEPT='application/json'
).content
)
self.assertEqual(len(orphans), 3, "Wrong # {}".format(orphans))
location = self.course.location.replace(category='chapter', name='OrphanChapter')
self.assertIn(location.to_deprecated_string(), orphans)
location = self.course.location.replace(category='vertical', name='OrphanVert')
self.assertIn(location.to_deprecated_string(), orphans)
location = self.course.location.replace(category='html', name='OrphanHtml')
self.assertIn(location.to_deprecated_string(), orphans)
def test_mongo_orphan_delete(self):
"""
Test that old mongo deletes the orphans
"""
self.client.delete(self.orphan_url)
orphans = json.loads(
self.client.get(self.orphan_url, HTTP_ACCEPT='application/json').content
)
self.assertEqual(len(orphans), 0, "Orphans not deleted {}".format(orphans))
# make sure that any children with one orphan parent and one non-orphan
# parent are not deleted
self.assertTrue(self.store.has_item(self.course.id.make_usage_key('html', "multi_parent_html")))
def test_not_permitted(self):
"""
Test that auth restricts get and delete appropriately
"""
test_user_client, test_user = self.create_non_staff_authed_user_client()
CourseEnrollment.enroll(test_user, self.course.id)
response = test_user_client.get(self.orphan_url)
self.assertEqual(response.status_code, 403)
response = test_user_client.delete(self.orphan_url)
self.assertEqual(response.status_code, 403)
| agpl-3.0 | -301,997,989,622,962,700 | 42.790476 | 114 | 0.669204 | false |
zBMNForks/graphite-web | webapp/tests/test_storage.py | 34 | 1383 | import logging
from graphite.storage import Store
from django.conf import settings
from django.test import TestCase
# Silence logging during tests
LOGGER = logging.getLogger()
# logging.NullHandler is a python 2.7ism
if hasattr(logging, "NullHandler"):
LOGGER.addHandler(logging.NullHandler())
class StorageTest(TestCase):
def test_store(self):
# Save settings
old_cluster_servers = settings.CLUSTER_SERVERS
old_remote_exclude_local = settings.REMOTE_EXCLUDE_LOCAL
# Set test cluster servers
settings.CLUSTER_SERVERS = ['127.0.0.1', '8.8.8.8']
# Test REMOTE_EXCLUDE_LOCAL = False
settings.REMOTE_EXCLUDE_LOCAL = False
test_store = Store()
remote_hosts = [remote_store.host for remote_store in test_store.remote_stores]
self.assertTrue('127.0.0.1' in remote_hosts)
self.assertTrue('8.8.8.8' in remote_hosts)
# Test REMOTE_EXCLUDE_LOCAL = True
settings.REMOTE_EXCLUDE_LOCAL = True
test_store = Store()
remote_hosts = [remote_store.host for remote_store in test_store.remote_stores]
self.assertTrue('127.0.0.1' not in remote_hosts)
self.assertTrue('8.8.8.8' in remote_hosts)
# Restore original settings
settings.CLUSTER_SERVERS = old_cluster_servers
settings.REMOTE_EXCLUDE_LOCAL = old_remote_exclude_local
| apache-2.0 | -5,297,889,125,240,194,000 | 31.928571 | 87 | 0.678236 | false |
gavinandresen/bitcoin-git | qa/rpc-tests/getchaintips.py | 101 | 2205 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the getchaintips API. We introduce a network split, work
# on chains of different lengths, and join the network together again.
# This leaves us with two tips; verify that both are reported.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def run_test (self):
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 1)
assert_equal (tips[0]['branchlen'], 0)
assert_equal (tips[0]['height'], 200)
assert_equal (tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network ()
self.nodes[0].generate(10)
self.nodes[2].generate(20)
self.sync_all ()
tips = self.nodes[1].getchaintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 210)
assert_equal (tips[0]['status'], 'active')
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 220)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
| mit | -5,874,657,404,396,146,000 | 34.564516 | 70 | 0.618594 | false |
CameronLonsdale/sec-tools | python2/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.py | 385 | 76580 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import unichr as chr
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from ._inputstream import HTMLInputStream
from ._trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
def __init__(self, stream, parser=None, **kwargs):
self.stream = HTMLInputStream(stream, **kwargs)
self.parser = parser
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
""" This is where the magic happens.
We do our usual processing through the states and when we have a token
to return we yield the token, which pauses processing until the next
token is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
If not present, self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = "\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
# Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
# Try/except needed as UCS-2 Python builds' unichar only works
# within the BMP.
char = chr(charAsInt)
except ValueError:
v = charAsInt - 0x10000
char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
# Discard the ; if present. Otherwise, put it back on the queue and
# invoke parseError on parser.
if c != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
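# Illustrative behavior (assumed stream contents, positioned at the digits):
#   stream "41;" with isHex=True  -> returns "A" (0x41)
#   stream "65;" with isHex=False -> returns "A"
#   stream "0;"                   -> returns "\uFFFD" and queues a parse error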
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = "&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or
(allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == "#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in ("x", "X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
# At this point in the process might have named entity. Entities
# are stored in the global variable "entities".
#
# Consume characters and compare them to substrings of the
# entity names in the list until the substring no longer matches.
while (charStack[-1] is not EOF):
if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
break
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
# Try to find the longest entity the string will match to take care
# of &noti for instance.
try:
entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
entityLength = len(entityName)
except KeyError:
entityName = None
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += "".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == "!":
self.state = self.markupDeclarationOpenState
elif data == "/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == ">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
self.state = self.dataState
elif data == "?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing": False}
self.state = self.tagNameState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
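    # "Double-escaped" script data covers a <script> tag nested inside an
    # escaped comment in script text, e.g. (illustrative):
    #   <script><!-- document.write('<script>x</script>'); --></script>
    # Buffering "script" after "<" switches into the double-escaped states
    # below until a matching "</script" is buffered again.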
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
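    # From here on the tokenizer is inside a tag: attributes accumulate on
    # the current token as [name, value] pairs in currentToken["data"] and
    # are only folded into a dict (dropping duplicates) when the token is
    # emitted.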
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data in ("'", '"', "=", "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == "=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == ">":
# XXX If we emit here the attributes are converted to a dict
# without being checked and when the code below runs we error
# because data is a dict not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += "\uFFFD"
leavingThisState = False
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, _ in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "=":
self.state = self.beforeAttributeValueState
elif data == ">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "\"":
self.state = self.attributeValueDoubleQuotedState
elif data == "&":
self.state = self.attributeValueUnQuotedState
self.stream.unget(data)
elif data == "'":
self.state = self.attributeValueSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
self.state = self.attributeValueUnQuotedState
elif data in ("=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute('"')
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", "&", "\u0000"))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute("'")
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", "&", "\u0000"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == "&":
self.processEntityInAttribute(">")
elif data == ">":
self.emitCurrentToken()
elif data in ('"', "'", "=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
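    # markupDeclarationOpenState dispatches the text after "<!": "--" opens
    # a comment and a case-insensitive "DOCTYPE" opens a doctype. "[CDATA["
    # opens a CDATA section only while the current node is in a foreign
    # (non-HTML) namespace, per the check below; anything else is a parse
    # error handled as a bogus comment.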
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == "-":
charStack.append(self.stream.char())
if charStack[-1] == "-":
self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
self.state = self.commentStartState
return True
elif charStack[-1] in ('d', 'D'):
matched = True
for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
('y', 'Y'), ('p', 'P'), ('e', 'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": "",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
elif (charStack[-1] == "[" and
self.parser is not None and
self.parser.tree.openElements and
self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
matched = True
for expected in ["C", "D", "A", "T", "A", "["]:
charStack.append(self.stream.char())
if charStack[-1] != expected:
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + \
self.stream.charsUntil(("-", "\u0000"))
return True
def commentEndDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == "-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += "--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == ">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if data in ("p", "P"):
matched = True
for expected in (("u", "U"), ("b", "B"), ("l", "L"),
("i", "I"), ("c", "C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif data in ("s", "S"):
matched = True
for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
("e", "E"), ("m", "M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
            # and needs to be pushed back with unget()
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
if char == EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data) # pylint:disable=redefined-variable-type
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for _ in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
# ---- simartin/servo :: tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/browsers/android_webview.py ----
from .base import require_arg
from .base import get_timeout_multiplier # noqa: F401
from .chrome import executor_kwargs as chrome_executor_kwargs
from .chrome_android import ChromeAndroidBrowserBase
from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
WebDriverRefTestExecutor) # noqa: F401
from ..executors.executorchrome import ChromeDriverWdspecExecutor # noqa: F401
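# The __wptrunner__ mapping registers this module as the "android_webview"
# product: wptrunner resolves each named hook below to the matching function
# or class defined in this file when that product is selected.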
__wptrunner__ = {"product": "android_webview",
"check_args": "check_args",
"browser": "SystemWebViewShell",
"executor": {"testharness": "WebDriverTestharnessExecutor",
"reftest": "WebDriverRefTestExecutor",
"wdspec": "ChromeDriverWdspecExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"timeout_multiplier": "get_timeout_multiplier"}
_wptserve_ports = set()
def check_args(**kwargs):
require_arg(kwargs, "webdriver_binary")
def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
return {"binary": kwargs["binary"],
"device_serial": kwargs["device_serial"],
"webdriver_binary": kwargs["webdriver_binary"],
"webdriver_args": kwargs.get("webdriver_args")}
def executor_kwargs(logger, test_type, server_config, cache_manager, run_info_data,
**kwargs):
# Use update() to modify the global list in place.
_wptserve_ports.update(set(
server_config['ports']['http'] + server_config['ports']['https'] +
server_config['ports']['ws'] + server_config['ports']['wss']
))
executor_kwargs = chrome_executor_kwargs(logger, test_type, server_config,
cache_manager, run_info_data,
**kwargs)
del executor_kwargs["capabilities"]["goog:chromeOptions"]["prefs"]
capabilities = executor_kwargs["capabilities"]
# Note that for WebView, we launch a test shell and have the test shell use WebView.
# https://chromium.googlesource.com/chromium/src/+/HEAD/android_webview/docs/webview-shell.md
capabilities["goog:chromeOptions"]["androidPackage"] = \
"org.chromium.webview_shell"
capabilities["goog:chromeOptions"]["androidActivity"] = ".WebPlatformTestsActivity"
if kwargs.get('device_serial'):
capabilities["goog:chromeOptions"]["androidDeviceSerial"] = kwargs['device_serial']
# Workaround: driver.quit() cannot quit SystemWebViewShell.
executor_kwargs["pause_after_test"] = False
# Workaround: driver.close() is not supported.
executor_kwargs["restart_after_test"] = True
executor_kwargs["close_after_done"] = False
return executor_kwargs
def env_extras(**kwargs):
return []
def env_options():
# allow the use of host-resolver-rules in lieu of modifying /etc/hosts file
return {"server_host": "127.0.0.1"}
class SystemWebViewShell(ChromeAndroidBrowserBase):
"""Chrome is backed by chromedriver, which is supplied through
``wptrunner.webdriver.ChromeDriverServer``.
"""
def __init__(self, logger, binary, webdriver_binary="chromedriver",
remote_queue=None,
device_serial=None,
webdriver_args=None):
"""Creates a new representation of Chrome. The `binary` argument gives
the browser binary to use for testing."""
super(SystemWebViewShell, self).__init__(logger,
webdriver_binary, remote_queue, device_serial, webdriver_args)
self.binary = binary
self.wptserver_ports = _wptserve_ports
# ---- Integral-Technology-Solutions/ConfigNOW :: Lib/dumbdbm.py ----
"""A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
_os = __import__('os')
import __builtin__
_open = __builtin__.open
_BLOCKSIZE = 512
error = IOError # For anydbm
class _Database:
def __init__(self, file):
if _os.sep == '.':
endsep = '/'
else:
endsep = '.'
self._dirfile = file + endsep + 'dir'
self._datfile = file + endsep + 'dat'
self._bakfile = file + endsep + 'bak'
# Mod by Jack: create data file if needed
try:
f = _open(self._datfile, 'r')
except IOError:
f = _open(self._datfile, 'w')
f.close()
self._update()
def _update(self):
self._index = {}
try:
f = _open(self._dirfile)
except IOError:
pass
else:
while 1:
line = f.readline().rstrip()
if not line: break
key, (pos, siz) = eval(line)
self._index[key] = (pos, siz)
f.close()
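    # The directory file holds one "repr(key), (pos, siz)" line per record;
    # _update() above parses those lines back with eval(), so the index is
    # trusted input. pos/siz locate the raw value inside the data file,
    # which _addval() pads out to _BLOCKSIZE boundaries.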
def _commit(self):
try: _os.unlink(self._bakfile)
except _os.error: pass
try: _os.rename(self._dirfile, self._bakfile)
except _os.error: pass
f = _open(self._dirfile, 'w')
for key, (pos, siz) in self._index.items():
f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
f.close()
def __getitem__(self, key):
pos, siz = self._index[key] # may raise KeyError
f = _open(self._datfile, 'rb')
f.seek(pos)
dat = f.read(siz)
f.close()
return dat
def _addval(self, val):
f = _open(self._datfile, 'rb+')
f.seek(0, 2)
pos = int(f.tell())
## Does not work under MW compiler
## pos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
## f.seek(pos)
npos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
f.write('\0'*(npos-pos))
pos = npos
f.write(val)
f.close()
return (pos, len(val))
def _setval(self, pos, val):
f = _open(self._datfile, 'rb+')
f.seek(pos)
f.write(val)
f.close()
return (pos, len(val))
def _addkey(self, key, (pos, siz)):
self._index[key] = (pos, siz)
f = _open(self._dirfile, 'a')
f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
f.close()
def __setitem__(self, key, val):
if not type(key) == type('') == type(val):
raise TypeError, "keys and values must be strings"
if not self._index.has_key(key):
(pos, siz) = self._addval(val)
self._addkey(key, (pos, siz))
else:
pos, siz = self._index[key]
oldblocks = (siz + _BLOCKSIZE - 1) / _BLOCKSIZE
newblocks = (len(val) + _BLOCKSIZE - 1) / _BLOCKSIZE
if newblocks <= oldblocks:
pos, siz = self._setval(pos, val)
self._index[key] = pos, siz
else:
pos, siz = self._addval(val)
self._index[key] = pos, siz
def __delitem__(self, key):
del self._index[key]
self._commit()
def keys(self):
return self._index.keys()
def has_key(self, key):
return self._index.has_key(key)
def __len__(self):
return len(self._index)
def close(self):
self._index = None
self._datfile = self._dirfile = self._bakfile = None
def open(file, flag = None, mode = None):
# flag, mode arguments are currently ignored
return _Database(file)
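# Minimal illustrative usage (hedged sketch; the 'spam' base name is
# hypothetical). This creates spam.dir and spam.dat in the current
# directory; spam.bak appears once a deletion triggers a commit.
if __name__ == '__main__':
    db = open('spam')          # module-level open(), not the builtin
    db['hello'] = 'world'
    print db['hello']          # -> world
    db.close()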
# ---- koobonil/Boss2D :: Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/keras/python/keras/utils/generic_utils.py ----
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python utilities required by Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import marshal
import sys
import time
import types as python_types
import numpy as np
import six
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
_GLOBAL_CUSTOM_OBJECTS = {}
class CustomObjectScope(object):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject`
```python
with CustomObjectScope({"MyObject":MyObject}):
layer = Dense(..., W_regularizer="MyObject")
# save, load, etc. will recognize custom object by name
```
"""
def __init__(self, *args):
self.custom_objects = args
self.backup = None
def __enter__(self):
self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
for objects in self.custom_objects:
_GLOBAL_CUSTOM_OBJECTS.update(objects)
return self
def __exit__(self, *args, **kwargs):
_GLOBAL_CUSTOM_OBJECTS.clear()
_GLOBAL_CUSTOM_OBJECTS.update(self.backup)
def custom_object_scope(*args):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Convenience wrapper for `CustomObjectScope`.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject`
```python
with custom_object_scope({"MyObject":MyObject}):
layer = Dense(..., W_regularizer="MyObject")
# save, load, etc. will recognize custom object by name
```
Arguments:
*args: Variable length list of dictionaries of name,
class pairs to add to custom objects.
Returns:
Object of type `CustomObjectScope`.
"""
return CustomObjectScope(*args)
def get_custom_objects():
"""Retrieves a live reference to the global dictionary of custom objects.
Updating and clearing custom objects using `custom_object_scope`
is preferred, but `get_custom_objects` can
be used to directly access `_GLOBAL_CUSTOM_OBJECTS`.
Example:
```python
get_custom_objects().clear()
get_custom_objects()["MyObject"] = MyObject
```
Returns:
Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
"""
return _GLOBAL_CUSTOM_OBJECTS
def serialize_keras_object(instance):
_, instance = tf_decorator.unwrap(instance)
if instance is None:
return None
if hasattr(instance, 'get_config'):
return {
'class_name': instance.__class__.__name__,
'config': instance.get_config()
}
if hasattr(instance, '__name__'):
return instance.__name__
else:
raise ValueError('Cannot serialize', instance)
def deserialize_keras_object(identifier,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
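  # Summary (added comment, not in the original source): `identifier` may be
  # a config dict carrying 'class_name' and 'config' keys, or a plain string
  # naming a function or class; anything else raises ValueError. Lookups try
  # `custom_objects`, then the global custom objects, then `module_objects`.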
if isinstance(identifier, dict):
# In this case we are dealing with a Keras config dictionary.
config = identifier
if 'class_name' not in config or 'config' not in config:
raise ValueError('Improper config format: ' + str(config))
class_name = config['class_name']
if custom_objects and class_name in custom_objects:
cls = custom_objects[class_name]
elif class_name in _GLOBAL_CUSTOM_OBJECTS:
cls = _GLOBAL_CUSTOM_OBJECTS[class_name]
else:
module_objects = module_objects or {}
cls = module_objects.get(class_name)
if cls is None:
raise ValueError('Unknown ' + printable_module_name + ': ' + class_name)
if hasattr(cls, 'from_config'):
arg_spec = tf_inspect.getargspec(cls.from_config)
if 'custom_objects' in arg_spec.args:
custom_objects = custom_objects or {}
return cls.from_config(
config['config'],
custom_objects=dict(
list(_GLOBAL_CUSTOM_OBJECTS.items()) +
list(custom_objects.items())))
return cls.from_config(config['config'])
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
return cls(**config['config'])
elif isinstance(identifier, six.string_types):
function_name = identifier
if custom_objects and function_name in custom_objects:
fn = custom_objects.get(function_name)
elif function_name in _GLOBAL_CUSTOM_OBJECTS:
fn = _GLOBAL_CUSTOM_OBJECTS[function_name]
else:
fn = module_objects.get(function_name)
if fn is None:
raise ValueError('Unknown ' + printable_module_name,
':' + function_name)
return fn
else:
raise ValueError('Could not interpret serialized ' + printable_module_name +
': ' + identifier)
def make_tuple(*args):
return args
def func_dump(func):
"""Serializes a user defined function.
Arguments:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
"""
code = marshal.dumps(func.__code__).decode('raw_unicode_escape')
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
Arguments:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
code = marshal.loads(code.encode('raw_unicode_escape'))
if globs is None:
globs = globals()
return python_types.FunctionType(
code, globs, name=code.co_name, argdefs=defaults, closure=closure)
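# Illustrative round trip (hedged sketch; assumes loading happens under the
# same interpreter version, since marshal bytecode is version-specific):
#   code, defaults, closure = func_dump(lambda x, y=2: x + y)
#   f = func_load(code, defaults, closure)
#   assert f(3) == 5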
class Progbar(object):
"""Displays a progress bar.
Arguments:
target: Total number of steps expected.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(self, target, width=30, verbose=1, interval=0.05):
self.width = width
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.last_update = 0
self.interval = interval
self.total_width = 0
self.seen_so_far = 0
self.verbose = verbose
def update(self, current, values=None, force=False):
"""Updates the progress bar.
Arguments:
current: Index of current step.
values: List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
force: Whether to force visual progress update.
"""
values = values or []
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [
v * (current - self.seen_so_far), current - self.seen_so_far
]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
self.seen_so_far = current
now = time.time()
if self.verbose == 1:
if not force and (now - self.last_update) < self.interval:
return
prev_total_width = self.total_width
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit * (self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s:' % k
if isinstance(self.sum_values[k], list):
avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self.sum_values[k]
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width - self.total_width) * ' ')
sys.stdout.write(info)
sys.stdout.flush()
if current >= self.target:
sys.stdout.write('\n')
if self.verbose == 2:
if current >= self.target:
info = '%ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s:' % k
avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
if avg > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
sys.stdout.write(info + '\n')
self.last_update = now
def add(self, n, values=None):
self.update(self.seen_so_far + n, values)
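# Minimal illustrative sketch (not part of the original module): drive a
# Progbar over ten steps with a synthetic 'loss' value.
if __name__ == '__main__':
  bar = Progbar(target=10)
  for step in range(1, 11):
    time.sleep(0.01)  # stand-in for real work per step
    bar.update(step, values=[('loss', 1.0 / step)])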
# ---- yalp/fiware-orion :: scripts/managedb/garbage-collector.py ----
#!/usr/bin/python
# -*- coding: latin-1 -*-
# Copyright 2013 Telefonica Investigacion y Desarrollo, S.A.U
#
# This file is part of Orion Context Broker.
#
# Orion Context Broker is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Orion Context Broker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Orion Context Broker. If not, see http://www.gnu.org/licenses/.
#
# For those usages not covered by this license please contact with
# iot_support at tid dot es
from pymongo import MongoClient
from time import time
from datetime import timedelta, datetime
from sys import argv
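# check_coll() walks every document in one of the registrations/csubs/casubs
# collections, prints how far each document is from its 'expiration'
# timestamp, and persists an 'expired' flag so expired entries can be
# spotted on later runs (the flag is cleared again if an update reactivates
# the registration or subscription).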
def check_coll(collection, collection_name):
n = 0
for doc in collection.find():
id = doc['_id']
if collection_name == CSUB_COLL or collection_name == CASUB_COLL:
ref = doc['reference']
prefix = '-- ID ' + str(id) + ' (' + ref + '): '
elif collection_name == REG_COLL:
# Note that registration could include several entities, but we only
# print the first one (and a '+' sign) in that case, to avoid long lines
l_c = len (doc['contextRegistration'])
l_e = len (doc['contextRegistration'][0]['entities'])
entId = doc['contextRegistration'][0]['entities'][0]['id']
if (doc['contextRegistration'][0]['entities'][0]).has_key('type'):
type = doc['contextRegistration'][0]['entities'][0]['type']
else:
type = '<no type>'
if (l_c > 1) or (l_e > 1):
prefix = '-- ID ' + str(id) + ' ' + entId + ' (' + type + ') [+] : '
else:
prefix = '-- ID ' + str(id) + ' ' + entId + ' (' + type + '): '
else:
prefix = '-- ID ' + str(id) + ': '
n += 1
try:
expiration = int(doc['expiration'])
interval = expiration - time()
if (interval < 0):
interval_str = str(timedelta(seconds=-interval))
print prefix + 'expired by ' + interval_str
doc['expired'] = 1
collection.save(doc)
else:
                # In this case we touch the document only if it already has
                # 'expired': 1; this corresponds to an expired registration or
                # subscription that has been "reactivated" by an update
                # extending its duration.
if doc.has_key('expired'):
doc.pop('expired', None)
collection.save(doc)
interval_str = str(timedelta(seconds=interval))
                print prefix + interval_str + ' left until expiration'
except ValueError:
print prefix + 'invalid expiration format!'
    print 'documents processed: ' + str(n)
DB = 'orion'
REG_COLL = 'registrations'
CSUB_COLL = 'csubs'
CASUB_COLL = 'casubs'
client = MongoClient('localhost', 27017)
db = client[DB]
now = datetime.now()
print 'Current time: ' + str(now)
# The scripts uses a list of collection as argument, so a given collection is
# processed only if its name appears there
if REG_COLL in argv:
print 'Checking collection: ' + REG_COLL
check_coll(db[REG_COLL], REG_COLL)
if CSUB_COLL in argv:
print 'Checking collection: ' + CSUB_COLL
check_coll(db[CSUB_COLL], CSUB_COLL)
if CASUB_COLL in argv:
print 'Checking collection: ' + CASUB_COLL
check_coll(db[CASUB_COLL], CASUB_COLL)
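# Hedged usage sketch (not from the original docs): run against the local
# MongoDB and name the collections to sweep; any subset of the three works.
#   python garbage-collector.py registrations csubs casubs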
| agpl-3.0 | -4,381,295,755,405,768,000 | 37.04902 | 100 | 0.604483 | false |
Anonymike/pasta-bot | plugins/_junk.py | 3 | 9285 | from util import hook, user, database
import os
import sys
import re
import json
import time
import subprocess
# @hook.command(autohelp=False, permissions=["permissions_users"], adminonly=True)
# def permissions(inp, bot=None, notice=None):
# """permissions [group] -- lists the users and their permission level who have permissions."""
# permissions = bot.config.get("permissions", [])
# groups = []
# if inp:
# for k in permissions:
# if inp == k:
# groups.append(k)
# else:
# for k in permissions:
# groups.append(k)
# if not groups:
# notice(u"{} is not a group with permissions".format(inp))
# return None
# for v in groups:
# members = ""
# for value in permissions[v]["users"]:
# members = members + value + ", "
# if members:
# notice(u"the members in the {} group are..".format(v))
# notice(members[:-2])
# else:
# notice(u"there are no members in the {} group".format(v))
# @hook.command(permissions=["permissions_users"], adminonly=True)
# def deluser(inp, bot=None, notice=None):
# """deluser [user] [group] -- removes elevated permissions from [user].
# If [group] is specified, they will only be removed from [group]."""
# permissions = bot.config.get("permissions", [])
# inp = inp.split(" ")
# groups = []
# try:
# specgroup = inp[1]
# except IndexError:
# specgroup = None
# for k in permissions:
# groups.append(k)
# else:
# for k in permissions:
# if specgroup == k:
# groups.append(k)
# if not groups:
# notice(u"{} is not a group with permissions".format(inp[1]))
# return None
# removed = 0
# for v in groups:
# users = permissions[v]["users"]
# for value in users:
# if inp[0] == value:
# users.remove(inp[0])
# removed = 1
# notice(u"{} has been removed from the group {}".format(inp[0], v))
# json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2)
# if specgroup:
# if removed == 0:
# notice(u"{} is not in the group {}".format(inp[0], specgroup))
# else:
# if removed == 0:
# notice(u"{} is not in any groups".format(inp[0]))
# @hook.command(permissions=["permissions_users"], adminonly=True)
# def adduser(inp, bot=None, notice=None):
# """adduser [user] [group] -- adds elevated permissions to [user].
# [group] must be specified."""
# permissions = bot.config.get("permissions", [])
# inp = inp.split(" ")
# try:
# user = inp[0]
# targetgroup = inp[1]
# except IndexError:
# notice(u"the group must be specified")
# return None
# if not re.search('.+!.+@.+', user):
# notice(u"the user must be in the form of \"nick!user@host\"")
# return None
# try:
# users = permissions[targetgroup]["users"]
# except KeyError:
# notice(u"no such group as {}".format(targetgroup))
# return None
# if user in users:
# notice(u"{} is already in {}".format(user, targetgroup))
# return None
# users.append(user)
# notice(u"{} has been added to the group {}".format(user, targetgroup))
# users.sort()
# json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2)
@hook.command("stfu", adminonly=True)
@hook.command("silence", adminonly=True)
@hook.command(adminonly=True)
def shutup(inp, conn=None, chan=None, notice=None):
"shutup [channel] <user> -- Shuts the user up. "
# inp = inp.split(" ")
if inp[0][0] == "#":
chan = inp.split(" ")[0]
users = inp.split(" ")[1:]
else:
users = inp.split(" ")
for user in users:
out = u"MODE %s +m-voh %s %s %s" % (chan, user, user, user)
conn.send(out)
notice(u"Shut up %s from %s..." % (user, chan))
@hook.command(adminonly=True)
def speak(inp, conn=None, chan=None, notice=None):
"speak [channel] <user> -- Shuts the user up. "
if inp[0][0] == "#":
chan = inp.split(" ")[0]
users = inp.split(" ")[1:]
else:
users = inp.split(" ")
for user in users:
out = u"MODE %s -m" % (chan)
conn.send(out)
notice(u"Shut up %s from %s..." % (user, chan))
# @hook.command(adminonly=True, autohelp=False)
# def db(inp,db=None):
# split = inp.split(' ')
# action = split[0]
# if "init" in action:
# result = db.execute("create table if not exists users(nick primary key, host, location, greeting, lastfm, fines, battlestation, desktop, horoscope, version)")
# db.commit()
# return result
# elif "addcol" in action:
# table = split[1]
# col = split[2]
# if table is not None and col is not None:
# db.execute("ALTER TABLE {} ADD COLUMN {}".format(table,col))
# db.commit
# return "Added Column"
# UPDATE usersettings SET fines=(SELECT totalfines FROM fines WHERE nick = usersettings.nick);
def compare_hostmasks(hostmask,matchmask):
hostmask = hostmask.replace('~','').replace('*',r'\S+').lower()
matchmask = matchmask.replace('*','.+').lower()
return bool(re.search(hostmask,matchmask))
@hook.command(adminonly=True)
def checkhost(inp, conn=None, chan=None, notice=None):
inp = inp.split(' ')
hostmask = inp[0]
matchmask = inp[1]
return compare_hostmasks(hostmask,matchmask)
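# Hedged worked example of the translation above (hypothetical masks): the
# first argument becomes the regex, the second the text searched, so
#   checkhost("sid*@*.irccloud.com [email protected]")
# returns True ('*' -> '\S+' on the left, '.+' on the right; note the dots
# are left unescaped, so they also match any character).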
from fnmatch import fnmatch
@hook.command(adminonly=True)
def test(inp,db=None):
#host = user.get_hostmask(inp,db)
nick = inp.strip().replace('~','').lower()
host = database.get(db,'users','mask','nick',nick)
print host
hostmask = host.lower().replace('~','') #.replace('*','\S+')
# hostmask = "*{}*".format(hostmask)
print hostmask
matchmask = "sid18764@.*uxbridge.irccloud.com infinity@.*like.lolis *@i.like.lolis [email protected] [email protected] 680i@.+studby.hig.no themadman@.+want.it.now austin@.+this.is.austin urmom@.+kills.your.gainz moss@.+like.a.hamster quinn@.+fios.verizon.net [email protected] [email protected] ichiroku@.+fios.verizon.net connor@.+nasty.skanky.slut"
#print "{} -- {}".format(matchmask,hostmask)
for pattern in matchmask.split(' '):
if fnmatch(hostmask, pattern):
print "MATCHED: {}".format(pattern)
# print fnmatch(matchmask,hostmask)
# matches = re.search(hostmask,matchmask)
#return matches.group(0)
#if bool(re.search(hostmask,matchmask)): return True
#else: return False
#Database conversion commands
#Update Uguu's default databases
@hook.command(adminonly=True)
def migrate_old_db(inp, notice=None, bot=None, db=None, config=None):
#db.execute("ALTER TABLE seen_user RENAME TO seen")
#db.execute("create table if not exists seen(name, time, quote, chan, host, primary key(name, chan))")
db.commit()
#db.execute("ALTER TABLE weather RENAME TO locations")
#db.execute("DROP TABLE seen")
#db.execute("DROP TABLE seen")
#db.execute("create table if not exists seen(name, time, quote, chan, host, "
# "primary key(name, chan))")
#db.commit()
#db.commit()
#db.execute("ALTER TABLE seen_user RENAME TO seen")
#db.execute("INSERT OR IGNORE INTO usersettings (nick, lastfm) SELECT ircname, lastfmname FROM usernames")
#notice('LastFM data was imported into usersettings')
#db.commit()
#Migrate old CloudBot DBs
#LastFM
#db.execute("create table if not exists usernames (ircname primary key, lastfmname)")
#db.execute("INSERT INTO usernames (ircname, lastfmname) SELECT nick, acc FROM lastfm")
#db.execute("DROP TABLE lastfm")
#db.commit()
#Weather
#db.execute("create table if not exists locationsCopy (ircname primary key, location)")
#db.execute("INSERT INTO locationsCopy (ircname, location) SELECT nick, loc FROM locations")
#db.execute("ALTER TABLE locations RENAME TO locationsOrig")
#db.execute("ALTER TABLE locationsCopy RENAME TO locations")
#db.execute("DROP TABLE locationsOrig")
#db.commit()
# OLD
# @hook.command
# def distance(inp):
# "distance <start> to <end> -- Calculate the distance between 2 places."
# if 'from ' in inp: inp = inp.replace('from ','')
# inp = inp.replace(', ','+')
# start = inp.split(" to ")[0].strip().replace(' ','+')
# dest = inp.split(" to ")[1].strip().replace(' ','+')
# url = "http://www.travelmath.com/flying-distance/from/%s/to/%s" % (start, dest)
# print url
# soup = http.get_soup(url)
# query = soup.find('h1', {'class': re.compile('flight-distance')})
# distance = soup.find('h3', {'class': 'space'})
# result = "%s %s" % (query, distance)
# result = http.strip_html(result)
# result = unicode(result, "utf8").replace('flight ','')
# if not distance:
# return "Could not calculate the distance from %s to %s." % (start, dest)
# return result
| gpl-3.0 | 3,346,684,342,997,001,000 | 32.763636 | 403 | 0.597307 | false |
codeforamerica/skillcamp | ENV/lib/python2.7/site-packages/setuptools/ssl_support.py | 332 | 7663 | import os
import socket
import atexit
import re
import pkg_resources
from pkg_resources import ResolutionError, ExtractionError
from setuptools.compat import urllib2
try:
import ssl
except ImportError:
ssl = None
__all__ = [
'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
'opener_for'
]
cert_paths = """
/etc/pki/tls/certs/ca-bundle.crt
/etc/ssl/certs/ca-certificates.crt
/usr/share/ssl/certs/ca-bundle.crt
/usr/local/share/certs/ca-root.crt
/etc/ssl/cert.pem
/System/Library/OpenSSL/certs/cert.pem
""".strip().split()
HTTPSHandler = HTTPSConnection = object
for what, where in (
('HTTPSHandler', ['urllib2','urllib.request']),
('HTTPSConnection', ['httplib', 'http.client']),
):
for module in where:
try:
exec("from %s import %s" % (module, what))
except ImportError:
pass
is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
try:
from ssl import CertificateError, match_hostname
except ImportError:
try:
from backports.ssl_match_hostname import CertificateError
from backports.ssl_match_hostname import match_hostname
except ImportError:
CertificateError = None
match_hostname = None
if not CertificateError:
class CertificateError(ValueError):
pass
if not match_hostname:
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
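# Hedged sanity examples derived from the rules above (hypothetical names):
#   _dnsname_match('*.example.com', 'www.example.com')   -> match
#   _dnsname_match('*.example.com', 'a.b.example.com')   -> no match (the
#       wildcard covers a single label only)
#   _dnsname_match('www.example.com', 'WWW.EXAMPLE.COM') -> match (case-insensitive)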
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
class VerifyingHTTPSHandler(HTTPSHandler):
"""Simple verifying handler: no auth, subclasses, timeouts, etc."""
def __init__(self, ca_bundle):
self.ca_bundle = ca_bundle
HTTPSHandler.__init__(self)
def https_open(self, req):
return self.do_open(
lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
)
class VerifyingHTTPSConn(HTTPSConnection):
"""Simple verifying connection: no auth, subclasses, timeouts, etc."""
def __init__(self, host, ca_bundle, **kw):
HTTPSConnection.__init__(self, host, **kw)
self.ca_bundle = ca_bundle
def connect(self):
sock = socket.create_connection(
(self.host, self.port), getattr(self, 'source_address', None)
)
# Handle the socket if a (proxy) tunnel is present
if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
self.sock = ssl.wrap_socket(
sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
)
try:
match_hostname(self.sock.getpeercert(), self.host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
def opener_for(ca_bundle=None):
"""Get a urlopen() replacement that uses ca_bundle for verification"""
return urllib2.build_opener(
VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
).open
_wincerts = None
def get_win_certfile():
global _wincerts
if _wincerts is not None:
return _wincerts.name
try:
from wincertstore import CertFile
except ImportError:
return None
class MyCertFile(CertFile):
def __init__(self, stores=(), certs=()):
CertFile.__init__(self)
for store in stores:
self.addstore(store)
self.addcerts(certs)
atexit.register(self.close)
_wincerts = MyCertFile(stores=['CA', 'ROOT'])
return _wincerts.name
def find_ca_bundle():
"""Return an existing CA bundle path, or None"""
if os.name=='nt':
return get_win_certfile()
else:
for cert_path in cert_paths:
if os.path.isfile(cert_path):
return cert_path
try:
return pkg_resources.resource_filename('certifi', 'cacert.pem')
except (ImportError, ResolutionError, ExtractionError):
return None
| mit | 7,073,828,417,901,418,000 | 31.747863 | 82 | 0.592196 | false |
phantomlinux/vaughn_aiy_raspberrypi | src/action.py | 1 | 14088 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Carry out voice commands by recognising keywords."""
import datetime
import logging
import subprocess
import vlc
import time
import requests
import re
import actionbase
# =============================================================================
#
# Hey, Makers!
#
# This file contains some examples of voice commands that are handled locally,
# right on your Raspberry Pi.
#
# Do you want to add a new voice command? Check out the instructions at:
# https://aiyprojects.withgoogle.com/voice/#makers-guide-3-3--create-a-new-voice-command-or-action
# (MagPi readers - watch out! You should switch to the instructions in the link
# above, since there's a mistake in the MagPi instructions.)
#
# In order to make a new voice command, you need to do two things. First, make a
# new action where it says:
# "Implement your own actions here"
# Secondly, add your new voice command to the actor near the bottom of the file,
# where it says:
# "Add your own voice commands here"
#
# =============================================================================
# Actions might not use the user's command. pylint: disable=unused-argument
# Example: Say a simple response
# ================================
#
# This example will respond to the user by saying something. You choose what it
# says when you add the command below - look for SpeakAction at the bottom of
# the file.
#
# There are two functions:
# __init__ is called when the voice commands are configured, and stores
# information about how the action should work:
# - self.say is a function that says some text aloud.
# - self.words are the words to use as the response.
# run is called when the voice command is used. It gets the user's exact voice
# command as a parameter.
class SpeakAction(object):
"""Says the given text via TTS."""
def __init__(self, say, words):
self.say = say
self.words = words
def run(self, voice_command):
self.say(self.words)
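# Hedged example (hypothetical keyword and response) of wiring this action
# up in make_actor() further down:
#   actor.add_keyword(_('ping'), SpeakAction(say, _('pong')))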
# Example: Tell the current time
# ==============================
#
# This example will tell the time aloud. The to_str function will turn the time
# into helpful text (for example, "It is twenty past four."). The run function
# uses to_str say it aloud.
class SpeakTime(object):
"""Says the current local time with TTS."""
def __init__(self, say):
self.say = say
def run(self, voice_command):
time_str = self.to_str(datetime.datetime.now())
self.say(time_str)
def to_str(self, dt):
"""Convert a datetime to a human-readable string."""
HRS_TEXT = ['midnight', 'one', 'two', 'three', 'four', 'five', 'six',
'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve']
MINS_TEXT = ["five", "ten", "quarter", "twenty", "twenty-five", "half"]
hour = dt.hour
minute = dt.minute
# convert to units of five minutes to the nearest hour
minute_rounded = (minute + 2) // 5
minute_is_inverted = minute_rounded > 6
if minute_is_inverted:
minute_rounded = 12 - minute_rounded
hour = (hour + 1) % 24
# convert time from 24-hour to 12-hour
if hour > 12:
hour -= 12
if minute_rounded == 0:
if hour == 0:
return 'It is midnight.'
return "It is %s o'clock." % HRS_TEXT[hour]
if minute_is_inverted:
return 'It is %s to %s.' % (MINS_TEXT[minute_rounded - 1], HRS_TEXT[hour])
return 'It is %s past %s.' % (MINS_TEXT[minute_rounded - 1], HRS_TEXT[hour])
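# Worked examples of the five-minute rounding above (derived by hand from
# the code, not tested output):
#   4:23  -> minute_rounded 5           -> "It is twenty-five past four."
#   4:43  -> rounded 9, inverted to 3   -> "It is quarter to five."
#   11:58 -> rounds to the hour, hour+1 -> "It is twelve o'clock."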
# Example: Run a shell command and say its output
# ===============================================
#
# This example will use a shell command to work out what to say. You choose the
# shell command when you add the voice command below - look for the example
# below where it says the IP address of the Raspberry Pi.
class SpeakShellCommandOutput(object):
"""Speaks out the output of a shell command."""
def __init__(self, say, shell_command, failure_text):
self.say = say
self.shell_command = shell_command
self.failure_text = failure_text
def run(self, voice_command):
output = subprocess.check_output(self.shell_command, shell=True).strip()
if output:
self.say(output)
elif self.failure_text:
self.say(self.failure_text)
# Example: Change the volume
# ==========================
#
# This example will can change the speaker volume of the Raspberry Pi. It uses
# the shell command SET_VOLUME to change the volume, and then GET_VOLUME gets
# the new volume. The example says the new volume aloud after changing the
# volume.
class VolumeControl(object):
"""Changes the volume and says the new level."""
GET_VOLUME = r'amixer get Master | grep "Front Left:" | sed "s/.*\[\([0-9]\+\)%\].*/\1/"'
SET_VOLUME = 'amixer -q set Master %d%%'
def __init__(self, say, change):
self.say = say
self.change = change
def run(self, voice_command):
res = subprocess.check_output(VolumeControl.GET_VOLUME, shell=True).strip()
try:
logging.info("volume: %s", res)
vol = int(res) + self.change
vol = max(0, min(100, vol))
subprocess.call(VolumeControl.SET_VOLUME % vol, shell=True)
self.say(_('Volume at %d %%.') % vol)
except (ValueError, subprocess.CalledProcessError):
logging.exception("Error using amixer to adjust volume.")
# Example: Repeat after me
# ========================
#
# This example will repeat what the user said. It shows how you can access what
# the user said, and change what you do or how you respond.
class RepeatAfterMe(object):
"""Repeats the user's command."""
def __init__(self, say, keyword):
self.say = say
self.keyword = keyword
def run(self, voice_command):
# The command still has the 'repeat after me' keyword, so we need to
# remove it before saying whatever is left.
to_repeat = voice_command.replace(self.keyword, '', 1)
self.say(to_repeat)
# Power: Shutdown or reboot the pi
# ================================
# Shuts down the pi or reboots with a response
#
class PowerCommand(object):
"""Shutdown or reboot the pi"""
def __init__(self, say, command):
self.say = say
self.command = command
def run(self, voice_command):
if self.command == "shutdown":
self.say("Shutting down, goodbye")
subprocess.call("sudo shutdown now", shell=True)
elif self.command == "reboot":
self.say("Rebooting")
subprocess.call("sudo shutdown -r now", shell=True)
else:
logging.error("Error identifying power command.")
self.say("Sorry I didn't identify that command")
class playRadio(object):
def __init__(self, say, keyword):
self.say = say
self.keyword = keyword
self.instance = vlc.Instance()
global player
player = self.instance.media_player_new()
self.set_state("stopped")
def set_state(self, new_state):
logging.info("setting radio state " + new_state)
global radioState
radioState = new_state
def get_state():
return radioState
def get_station(self, station_name):
# map of spoken station names to stream URLs; replace or add entries to
# suit your own stations
stations = {
'1': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_one.m3u8',
'2': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_two.m3u8',
'3': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_three.m3u8',
'4': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_fourfm.m3u8',
'5': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_five_live.m3u8',
'5 sports': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_five_live_sports_extra.m3u8',
'6': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_6music.m3u8',
'1xtra': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_1xtra.m3u8',
'4 extra': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_four_extra.m3u8',
'nottingham': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_nottingham.m3u8',
'hits fm': 'http://www.surfmusic.de/m3u/hitz-fm,11410.m3u',
'hitz fm': 'http://www.surfmusic.de/m3u/hitz-fm,11410.m3u',
'one fm': 'http://www.surfmusic.de/m3u/one-fm-88-1-fm,15944.m3u',
'fly fm': 'http://www.surfmusic.de/m3u/fly-fm-95-8,9447.m3u',
'988 fm': 'http://www.surfmusic.de/m3u/radio-98-8,5253.m3u',
}
return stations[station_name]
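# To add your own preset (hedged example; the key is whatever you say after
# the "radio" keyword, the value any stream/playlist URL that vlc can open):
#   'my station': 'http://example.com/stream.m3u',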
def run(self, voice_command):
voice_command = ((voice_command.lower()).replace(self.keyword, '', 1)).strip()
if (voice_command == "stop") or (voice_command == "off"):
logging.info("radio stopped")
player.stop()
self.set_state("stopped")
return
logging.info("starting radio: " + voice_command)
global station
try:
logging.info("searching for: " + voice_command)
station = self.get_station(voice_command)
except KeyError:
# replace this stream with the stream for your default station
self.say("Radio search not found. Playing radio 6")
station = 'http://www.surfmusic.de/m3u/hitz-fm,11410.m3u'
if station.endswith("m3u"):
logging.info("m3u reading manually")
content = requests.get(station, stream=True).text
url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', content)[0]
station = url.strip()
logging.info("stream " + station)
media = self.instance.media_new(station)
player.set_media(media)
self.set_state("playing")
def pause():
logging.info("pausing radio")
if player is not None:
player.stop()
def resume():
radioState = playRadio.get_state()
logging.info("resuming radio " + radioState)
if radioState == "playing":
player.play()
# =========================================
# Makers! Implement your own actions here.
# =========================================
def make_actor(say):
"""Create an actor to carry out the user's commands."""
actor = actionbase.Actor()
actor.add_keyword(
_('ip address'), SpeakShellCommandOutput(
say, "ip -4 route get 1 | head -1 | cut -d' ' -f8",
_('I do not have an ip address assigned to me.')))
actor.add_keyword(_('volume up'), VolumeControl(say, 10))
actor.add_keyword(_('volume down'), VolumeControl(say, -10))
actor.add_keyword(_('max volume'), VolumeControl(say, 100))
actor.add_keyword(_('repeat after me'),
RepeatAfterMe(say, _('repeat after me')))
# =========================================
# Makers! Add your own voice commands here.
# =========================================
actor.add_keyword(_('power off'), PowerCommand(say, 'shutdown'))
actor.add_keyword(_('reboot'), PowerCommand(say, 'reboot'))
actor.add_keyword(_('radio'), playRadio(say, _('radio')))
return actor
def add_commands_just_for_cloud_speech_api(actor, say):
"""Add simple commands that are only used with the Cloud Speech API."""
def simple_command(keyword, response):
actor.add_keyword(keyword, SpeakAction(say, response))
simple_command('alexa', _("We've been friends since we were both starter projects"))
simple_command(
'beatbox',
'pv zk pv pv zk pv zk kz zk pv pv pv zk pv zk zk pzk pzk pvzkpkzvpvzk kkkkkk bsch')
simple_command(_('clap'), _('clap clap'))
simple_command('google home', _('She taught me everything I know.'))
simple_command(_('hello'), _('hello to you too'))
simple_command(_('tell me a joke'),
_('What do you call an alligator in a vest? An investigator.'))
simple_command(_('three laws of robotics'),
_("""The laws of robotics are
0: A robot may not injure a human being or, through inaction, allow a human
being to come to harm.
1: A robot must obey orders given it by human beings except where such orders
would conflict with the First Law.
2: A robot must protect its own existence as long as such protection does not
conflict with the First or Second Law."""))
simple_command(_('where are you from'), _("A galaxy far, far, just kidding. I'm from Seattle."))
simple_command(_('your name'), _('A machine has no name'))
actor.add_keyword(_('time'), SpeakTime(say))
# =========================================
# Makers! Add commands to pause and resume your actions here
# =========================================
def pauseActors():
"""add your resume actions here"""
playRadio.pause()
def resumeActors():
"""add your pause actions here"""
playRadio.resume()
| apache-2.0 | -8,102,192,599,866,398,000 | 36.073684 | 146 | 0.60697 | false |
alvarolopez/nova | plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py | 54 | 7407 | #!/usr/bin/env python
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features
#
# XenAPI plugin for reading/writing information to xenstore
#
try:
import json
except ImportError:
import simplejson as json
import utils # noqa
import XenAPIPlugin # noqa
import pluginlib_nova as pluginlib # noqa
pluginlib.configure_logging("xenstore")
class XenstoreError(pluginlib.PluginError):
"""Errors that occur when calling xenstore-* through subprocesses."""
def __init__(self, cmd, return_code, stderr, stdout):
msg = "cmd: %s; returncode: %d; stderr: %s; stdout: %s"
msg = msg % (cmd, return_code, stderr, stdout)
self.cmd = cmd
self.return_code = return_code
self.stderr = stderr
self.stdout = stdout
pluginlib.PluginError.__init__(self, msg)
def jsonify(fnc):
def wrapper(*args, **kwargs):
ret = fnc(*args, **kwargs)
try:
json.loads(ret)
except ValueError:
# Value should already be JSON-encoded, but some operations
# may write raw string values; this will catch those and
# properly encode them.
ret = json.dumps(ret)
return ret
return wrapper
def record_exists(arg_dict):
"""Returns whether or not the given record exists. The record path
is determined from the given path and dom_id in the arg_dict.
"""
cmd = ["xenstore-exists", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
try:
_run_command(cmd)
return True
except XenstoreError, e: # noqa
if e.stderr == '':
# if stderr was empty, this just means the path did not exist
return False
# otherwise there was a real problem
raise
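# Hedged example of the arg_dict these xenstore helpers expect (hypothetical
# dom_id/path): record_exists({'dom_id': '7', 'path': 'vm-data/user-metadata'})
# runs xenstore-exists against /local/domain/7/vm-data/user-metadata.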
@jsonify
def read_record(self, arg_dict):
"""Returns the value stored at the given path for the given dom_id.
These must be encoded as key/value pairs in arg_dict. You can
optionally include a key 'ignore_missing_path'; if this is present
and boolean True, attempting to read a non-existent path will return
the string 'None' instead of raising an exception.
"""
cmd = ["xenstore-read", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
try:
result = _run_command(cmd)
return result.strip()
except XenstoreError, e: # noqa
if not arg_dict.get("ignore_missing_path", False):
raise
if not record_exists(arg_dict):
return "None"
# Just try again in case the agent write won the race against
# the record_exists check. If this fails again, it will likely raise
# an equally meaningful XenstoreError as the one we just caught
result = _run_command(cmd)
return result.strip()
@jsonify
def write_record(self, arg_dict):
"""Writes to xenstore at the specified path. If there is information
already stored in that location, it is overwritten. As in read_record,
the dom_id and path must be specified in the arg_dict; additionally,
you must specify a 'value' key, whose value must be a string. Typically,
you can json-ify more complex values and store the json output.
"""
cmd = ["xenstore-write",
"/local/domain/%(dom_id)s/%(path)s" % arg_dict,
arg_dict["value"]]
_run_command(cmd)
return arg_dict["value"]
@jsonify
def list_records(self, arg_dict):
"""Returns all the stored data at or below the given path for the
given dom_id. The data is returned as a json-ified dict, with the
path as the key and the stored value as the value. If the path
doesn't exist, an empty dict is returned.
"""
dirpath = "/local/domain/%(dom_id)s/%(path)s" % arg_dict
cmd = ["xenstore-ls", dirpath.rstrip("/")]
try:
recs = _run_command(cmd)
except XenstoreError, e: # noqa
if not record_exists(arg_dict):
return {}
# Just try again in case the path was created in between
# the "ls" and the existence check. If this fails again, it will
# likely raise an equally meaningful XenstoreError
recs = _run_command(cmd)
base_path = arg_dict["path"]
paths = _paths_from_ls(recs)
ret = {}
for path in paths:
if base_path:
arg_dict["path"] = "%s/%s" % (base_path, path)
else:
arg_dict["path"] = path
rec = read_record(self, arg_dict)
try:
val = json.loads(rec)
except ValueError:
val = rec
ret[path] = val
return ret
@jsonify
def delete_record(self, arg_dict):
"""Just like it sounds: it removes the record for the specified
VM and the specified path from xenstore.
"""
cmd = ["xenstore-rm", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
try:
return _run_command(cmd)
except XenstoreError, e: # noqa
if 'could not remove path' in e.stderr:
# Entry already gone. We're good to go.
return ''
raise
def _paths_from_ls(recs):
"""The xenstore-ls command returns a listing that isn't terribly
useful. This method cleans that up into a dict with each path
as the key, and the associated string as the value.
"""
last_nm = ""
level = 0
path = []
ret = []
for ln in recs.splitlines():
nm, val = ln.rstrip().split(" = ")
barename = nm.lstrip()
this_level = len(nm) - len(barename)
if this_level == 0:
ret.append(barename)
level = 0
path = []
elif this_level == level:
# child of same parent
ret.append("%s/%s" % ("/".join(path), barename))
elif this_level > level:
path.append(last_nm)
ret.append("%s/%s" % ("/".join(path), barename))
level = this_level
elif this_level < level:
path = path[:this_level]
ret.append("%s/%s" % ("/".join(path), barename))
level = this_level
last_nm = barename
return ret
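# Hedged worked example: given xenstore-ls output like
#   a = ""
#    b = ""
#     c = "1"
#    d = "2"
# the function returns ['a', 'a/b', 'a/b/c', 'a/d'].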
def _run_command(cmd):
"""Wrap utils.run_command to raise XenstoreError on failure
"""
try:
return utils.run_command(cmd)
except utils.SubprocessException, e: # noqa
raise XenstoreError(e.cmdline, e.ret, e.err, e.out)
if __name__ == "__main__":
XenAPIPlugin.dispatch(
{"read_record": read_record,
"write_record": write_record,
"list_records": list_records,
"delete_record": delete_record})
| apache-2.0 | -156,587,064,218,959,680 | 32.977064 | 78 | 0.619684 | false |
ztianjin/jna | native/libffi/generate-osx-source-and-headers.py | 100 | 5200 | #!/usr/bin/env python
import subprocess
import re
import os
import errno
import collections
import sys
class Platform(object):
pass
sdk_re = re.compile(r'.*-sdk ([a-zA-Z0-9.]*)')
def sdkinfo(sdkname):
ret = {}
for line in subprocess.Popen(['xcodebuild', '-sdk', sdkname, '-version'], stdout=subprocess.PIPE).stdout:
kv = line.strip().split(': ', 1)
if len(kv) == 2:
k,v = kv
ret[k] = v
return ret
desktop_sdk_info = sdkinfo('macosx')
def latest_sdks():
latest_desktop = None
for line in subprocess.Popen(['xcodebuild', '-showsdks'], stdout=subprocess.PIPE).stdout:
match = sdk_re.match(line)
if match:
if 'OS X' in line:
latest_desktop = match.group(1)
return latest_desktop
desktop_sdk = latest_sdks()
class desktop_platform_32(Platform):
sdk='macosx'
arch = 'i386'
name = 'mac32'
triple = 'i386-apple-darwin10'
sdkroot = desktop_sdk_info['Path']
prefix = "#if defined(__i386__) && !defined(__x86_64__)\n\n"
suffix = "\n\n#endif"
class desktop_platform_64(Platform):
sdk='macosx'
arch = 'x86_64'
name = 'mac'
triple = 'x86_64-apple-darwin10'
sdkroot = desktop_sdk_info['Path']
prefix = "#if !defined(__i386__) && defined(__x86_64__)\n\n"
suffix = "\n\n#endif"
def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''):
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
out_filename = filename
if file_suffix:
split_name = os.path.splitext(filename)
out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1])
with open(os.path.join(src_dir, filename)) as in_file:
with open(os.path.join(dst_dir, out_filename), 'w') as out_file:
if prefix:
out_file.write(prefix)
out_file.write(in_file.read())
if suffix:
out_file.write(suffix)
headers_seen = collections.defaultdict(set)
def move_source_tree(src_dir, dest_dir, dest_include_dir, arch=None, prefix=None, suffix=None):
for root, dirs, files in os.walk(src_dir, followlinks=True):
relroot = os.path.relpath(root,src_dir)
def move_dir(arch, prefix='', suffix='', files=[]):
for file in files:
file_suffix = None
if file.endswith('.h'):
if dest_include_dir:
file_suffix = arch
if arch:
headers_seen[file].add(arch)
move_file(root, dest_include_dir, file, arch, prefix=prefix, suffix=suffix)
elif dest_dir:
outroot = os.path.join(dest_dir, relroot)
move_file(root, outroot, file, prefix=prefix, suffix=suffix)
if relroot == '.':
move_dir(arch=arch,
files=files,
prefix=prefix,
suffix=suffix)
elif relroot == 'x86':
move_dir(arch='i386',
prefix="#if defined(__i386__) && !defined(__x86_64__)\n\n",
suffix="\n\n#endif",
files=files)
move_dir(arch='x86_64',
prefix="#if !defined(__i386__) && defined(__x86_64__)\n\n",
suffix="\n\n#endif",
files=files)
def build_target(platform):
def xcrun_cmd(cmd):
return subprocess.check_output(['xcrun', '-sdk', platform.sdkroot, '-find', cmd]).strip()
build_dir = 'build_' + platform.name
if not os.path.exists(build_dir):
os.makedirs(build_dir)
env = dict(CC=xcrun_cmd('clang'),
LD=xcrun_cmd('ld'),
CFLAGS='-arch %s -isysroot %s -mmacosx-version-min=10.6' % (platform.arch, platform.sdkroot))
working_dir=os.getcwd()
try:
os.chdir(build_dir)
subprocess.check_call(['../configure', '-host', platform.triple], env=env)
move_source_tree('.', None, '../osx/include',
arch=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
move_source_tree('./include', None, '../osx/include',
arch=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
finally:
os.chdir(working_dir)
def main():
move_source_tree('src', 'osx/src', 'osx/include')
move_source_tree('include', None, 'osx/include')
build_target(desktop_platform_32)
build_target(desktop_platform_64)
for header_name, archs in headers_seen.iteritems():
basename, suffix = os.path.splitext(header_name)
with open(os.path.join('osx/include', header_name), 'w') as header:
for arch in archs:
header.write('#include <%s_%s%s>\n' % (basename, arch, suffix))
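# The loop above emits (hedged sketch) one umbrella header per seen name,
# e.g. osx/include/ffi.h containing:
#   #include <ffi_i386.h>
#   #include <ffi_x86_64.h>
# so a single include serves both architectures; the per-arch #if guards
# live inside the wrapped headers themselves.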
if __name__ == '__main__':
main()
| lgpl-2.1 | -4,828,630,680,775,520,000 | 32.986928 | 112 | 0.536538 | false |
coderabhishek/scrapy | scrapy/utils/log.py | 108 | 6012 | # -*- coding: utf-8 -*-
import sys
import logging
import warnings
from logging.config import dictConfig
from twisted.python.failure import Failure
from twisted.python import log as twisted_log
import scrapy
from scrapy.settings import overridden_settings, Settings
from scrapy.exceptions import ScrapyDeprecationWarning
logger = logging.getLogger(__name__)
def failure_to_exc_info(failure):
"""Extract exc_info from Failure instances"""
if isinstance(failure, Failure):
return (failure.type, failure.value, failure.getTracebackObject())
class TopLevelFormatter(logging.Filter):
"""Keep only top level loggers's name (direct children from root) from
records.
This filter will replace Scrapy loggers' names with 'scrapy'. This mimics
the old Scrapy log behaviour and helps shorten long names.
Since it can't be set for just one logger (it won't propagate for its
children), it's going to be set in the root handler, with a parametrized
`loggers` list where it should act.
"""
def __init__(self, loggers=None):
self.loggers = loggers or []
def filter(self, record):
if any(record.name.startswith(l + '.') for l in self.loggers):
record.name = record.name.split('.', 1)[0]
return True
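# Hedged illustration: with TopLevelFormatter(['scrapy']) installed on the
# root handler, a record named 'scrapy.core.engine' is renamed to plain
# 'scrapy' before formatting; non-matching names pass through untouched.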
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'loggers': {
'scrapy': {
'level': 'DEBUG',
},
'twisted': {
'level': 'ERROR',
},
}
}
def configure_logging(settings=None, install_root_handler=True):
"""
Initialize logging defaults for Scrapy.
:param settings: settings used to create and configure a handler for the
root logger (default: None).
:type settings: dict, :class:`~scrapy.settings.Settings` object or ``None``
:param install_root_handler: whether to install root logging handler
(default: True)
:type install_root_handler: bool
This function does:
- Route warnings and twisted logging through Python standard logging
- Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively
- Route stdout to log if LOG_STDOUT setting is True
When ``install_root_handler`` is True (default), this function also
creates a handler for the root logger according to given settings
(see :ref:`topics-logging-settings`). You can override default options
using ``settings`` argument. When ``settings`` is empty or None, defaults
are used.
"""
if not sys.warnoptions:
# Route warnings through python logging
logging.captureWarnings(True)
observer = twisted_log.PythonLoggingObserver('twisted')
observer.start()
dictConfig(DEFAULT_LOGGING)
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
if settings.getbool('LOG_STDOUT'):
sys.stdout = StreamLogger(logging.getLogger('stdout'))
if install_root_handler:
logging.root.setLevel(logging.NOTSET)
handler = _get_handler(settings)
logging.root.addHandler(handler)
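# Hedged usage sketch (hypothetical settings values):
#   configure_logging({'LOG_LEVEL': 'INFO', 'LOG_FILE': '/tmp/scrapy.log'})
# routes warnings/twisted through stdlib logging and installs a file handler
# on the root logger built from those overrides.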
def _get_handler(settings):
""" Return a log handler object according to settings """
filename = settings.get('LOG_FILE')
if filename:
encoding = settings.get('LOG_ENCODING')
handler = logging.FileHandler(filename, encoding=encoding)
elif settings.getbool('LOG_ENABLED'):
handler = logging.StreamHandler()
else:
handler = logging.NullHandler()
formatter = logging.Formatter(
fmt=settings.get('LOG_FORMAT'),
datefmt=settings.get('LOG_DATEFORMAT')
)
handler.setFormatter(formatter)
handler.setLevel(settings.get('LOG_LEVEL'))
handler.addFilter(TopLevelFormatter(['scrapy']))
return handler
def log_scrapy_info(settings):
logger.info("Scrapy %(version)s started (bot: %(bot)s)",
{'version': scrapy.__version__, 'bot': settings['BOT_NAME']})
logger.info("Optional features available: %(features)s",
{'features': ", ".join(scrapy.optional_features)})
d = dict(overridden_settings(settings))
logger.info("Overridden settings: %(settings)r", {'settings': d})
class StreamLogger(object):
"""Fake file-like stream object that redirects writes to a logger instance
Taken from:
http://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
class LogCounterHandler(logging.Handler):
"""Record log levels count into a crawler stats"""
def __init__(self, crawler, *args, **kwargs):
super(LogCounterHandler, self).__init__(*args, **kwargs)
self.crawler = crawler
def emit(self, record):
sname = 'log_count/{}'.format(record.levelname)
self.crawler.stats.inc_value(sname)
def logformatter_adapter(logkws):
"""
Helper that takes the dictionary output from the methods in LogFormatter
and adapts it into a tuple of positional arguments for logger.log calls,
handling backward compatibility as well.
"""
if not {'level', 'msg', 'args'} <= set(logkws):
warnings.warn('Missing keys in LogFormatter method',
ScrapyDeprecationWarning)
if 'format' in logkws:
warnings.warn('`format` key in LogFormatter methods has been '
'deprecated, use `msg` instead',
ScrapyDeprecationWarning)
level = logkws.get('level', logging.INFO)
message = logkws.get('format', logkws.get('msg'))
# NOTE: This also handles 'args' being an empty dict, that case doesn't
# play well in logger.log calls
args = logkws if not logkws.get('args') else logkws['args']
return (level, message, args)
| bsd-3-clause | 3,704,174,574,180,508,700 | 31.497297 | 99 | 0.662841 | false |
sarvex/depot-tools | third_party/logilab/common/logging_ext.py | 93 | 6975 | # -*- coding: utf-8 -*-
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Extends the logging module from the standard library."""
__docformat__ = "restructuredtext en"
import os
import sys
import logging
from six import string_types
from logilab.common.textutils import colorize_ansi
def set_log_methods(cls, logger):
"""bind standard logger's methods as methods on the class"""
cls.__logger = logger
for attr in ('debug', 'info', 'warning', 'error', 'critical', 'exception'):
setattr(cls, attr, getattr(logger, attr))
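# Hedged usage sketch (hypothetical class and logger names):
#   class Repository(object): pass
#   set_log_methods(Repository, logging.getLogger('myapp.repo'))
#   Repository().info('cloned')  # calls the logger's own info()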
def xxx_cyan(record):
if 'XXX' in record.message:
return 'cyan'
class ColorFormatter(logging.Formatter):
"""
A color Formatter for the logging standard module.
By default, colorize CRITICAL and ERROR in red, WARNING in orange, INFO in
green and DEBUG in yellow.
self.colors is customizable via the 'color' constructor argument (dictionary).
self.colorfilters is a list of functions that get the LogRecord
and return a color name or None.
"""
def __init__(self, fmt=None, datefmt=None, colors=None):
logging.Formatter.__init__(self, fmt, datefmt)
self.colorfilters = []
self.colors = {'CRITICAL': 'red',
'ERROR': 'red',
'WARNING': 'magenta',
'INFO': 'green',
'DEBUG': 'yellow',
}
if colors is not None:
assert isinstance(colors, dict)
self.colors.update(colors)
def format(self, record):
msg = logging.Formatter.format(self, record)
if record.levelname in self.colors:
color = self.colors[record.levelname]
return colorize_ansi(msg, color)
else:
for cf in self.colorfilters:
color = cf(record)
if color:
return colorize_ansi(msg, color)
return msg
def set_color_formatter(logger=None, **kw):
"""
Install a color formatter on the 'logger'. If not given, it will
default to the root logger.
Any additional keyword will be passed as-is to the ColorFormatter
constructor.
"""
if logger is None:
logger = logging.getLogger()
if not logger.handlers:
logging.basicConfig()
format_msg = logger.handlers[0].formatter._fmt
fmt = ColorFormatter(format_msg, **kw)
fmt.colorfilters.append(xxx_cyan)
logger.handlers[0].setFormatter(fmt)
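# Hedged usage sketch: recolour INFO on the default logger (assumes 'blue'
# is a colour name colorize_ansi accepts):
#   set_color_formatter(colors={'INFO': 'blue'})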
LOG_FORMAT = '%(asctime)s - (%(name)s) %(levelname)s: %(message)s'
LOG_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
def get_handler(debug=False, syslog=False, logfile=None, rotation_parameters=None):
"""get an apropriate handler according to given parameters"""
if os.environ.get('APYCOT_ROOT'):
handler = logging.StreamHandler(sys.stdout)
if debug:
handler = logging.StreamHandler()
elif logfile is None:
if syslog:
from logging import handlers
handler = handlers.SysLogHandler()
else:
handler = logging.StreamHandler()
else:
try:
if rotation_parameters is None:
if os.name == 'posix' and sys.version_info >= (2, 6):
from logging.handlers import WatchedFileHandler
handler = WatchedFileHandler(logfile)
else:
handler = logging.FileHandler(logfile)
else:
from logging.handlers import TimedRotatingFileHandler
handler = TimedRotatingFileHandler(
logfile, **rotation_parameters)
except IOError:
handler = logging.StreamHandler()
return handler
def get_threshold(debug=False, logthreshold=None):
if logthreshold is None:
if debug:
logthreshold = logging.DEBUG
else:
logthreshold = logging.ERROR
elif isinstance(logthreshold, string_types):
logthreshold = getattr(logging, THRESHOLD_MAP.get(logthreshold,
logthreshold))
return logthreshold
def _colorable_terminal():
isatty = hasattr(sys.__stdout__, 'isatty') and sys.__stdout__.isatty()
if not isatty:
return False
if os.name == 'nt':
try:
from colorama import init as init_win32_colors
except ImportError:
return False
init_win32_colors()
return True
def get_formatter(logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT):
if _colorable_terminal():
fmt = ColorFormatter(logformat, logdateformat)
def col_fact(record):
if 'XXX' in record.message:
return 'cyan'
if 'kick' in record.message:
return 'red'
fmt.colorfilters.append(col_fact)
else:
fmt = logging.Formatter(logformat, logdateformat)
return fmt
def init_log(debug=False, syslog=False, logthreshold=None, logfile=None,
logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT, fmt=None,
rotation_parameters=None, handler=None):
"""init the log service"""
logger = logging.getLogger()
if handler is None:
handler = get_handler(debug, syslog, logfile, rotation_parameters)
# only addHandler and removeHandler method while I would like a setHandler
# method, so do it this way :$
logger.handlers = [handler]
logthreshold = get_threshold(debug, logthreshold)
logger.setLevel(logthreshold)
if fmt is None:
if debug:
fmt = get_formatter(logformat=logformat, logdateformat=logdateformat)
else:
fmt = logging.Formatter(logformat, logdateformat)
handler.setFormatter(fmt)
return handler
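# Hedged usage sketch (hypothetical path and rotation values):
#   init_log(logfile='/tmp/myapp.log', logthreshold='LOG_DEBUG',
#            rotation_parameters={'when': 'midnight', 'backupCount': 7})
# picks a TimedRotatingFileHandler and maps 'LOG_DEBUG' to logging.DEBUG.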
# map logilab.common.logger thresholds to logging thresholds
THRESHOLD_MAP = {'LOG_DEBUG': 'DEBUG',
'LOG_INFO': 'INFO',
'LOG_NOTICE': 'INFO',
'LOG_WARN': 'WARNING',
'LOG_WARNING': 'WARNING',
'LOG_ERR': 'ERROR',
'LOG_ERROR': 'ERROR',
'LOG_CRIT': 'CRITICAL',
}
| bsd-3-clause | 8,370,586,293,973,365,000 | 34.769231 | 83 | 0.616631 | false |
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/ThirdParty/Twisted/twisted/tap/ftp.py | 67 | 2002 | # -*- test-case-name: twisted.test.test_ftp_options -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I am the support module for making an FTP server with twistd.
"""
from twisted.application import internet
from twisted.cred import portal, checkers, strcred
from twisted.protocols import ftp
from twisted.python import usage, deprecate, versions
import warnings
class Options(usage.Options, strcred.AuthOptionMixin):
synopsis = """[options].
WARNING: This FTP server is probably INSECURE; do not use it.
"""
optParameters = [
["port", "p", "2121", "set the port number"],
["root", "r", "/usr/local/ftp", "define the root of the ftp-site."],
["userAnonymous", "", "anonymous", "Name of the anonymous user."]
]
compData = usage.Completions(
optActions={"root": usage.CompleteDirs(descr="root of the ftp site")}
)
longdesc = ''
def __init__(self, *a, **kw):
usage.Options.__init__(self, *a, **kw)
self.addChecker(checkers.AllowAnonymousAccess())
def opt_password_file(self, filename):
"""
Specify a file containing username:password login info for
authenticated connections. (DEPRECATED; see --help-auth instead)
"""
self['password-file'] = filename
msg = deprecate.getDeprecationWarningString(
self.opt_password_file, versions.Version('Twisted', 11, 1, 0))
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
self.addChecker(checkers.FilePasswordDB(filename, cache=True))
def makeService(config):
f = ftp.FTPFactory()
r = ftp.FTPRealm(config['root'])
p = portal.Portal(r, config.get('credCheckers', []))
f.tld = config['root']
f.userAnonymous = config['userAnonymous']
f.portal = p
f.protocol = ftp.FTP
try:
portno = int(config['port'])
except KeyError:
portno = 2121
return internet.TCPServer(portno, f)
| gpl-3.0 | 2,855,521,591,296,898,600 | 28.014493 | 79 | 0.640859 | false |
rossjones/ScraperWikiX | web/codewiki/viewsrpc.py | 1 | 14719 | from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseNotFound, HttpResponseForbidden
from django.template.loader import render_to_string
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from django.core.mail import send_mail, mail_admins
from django.conf import settings
from codewiki.models.code import MAGIC_RUN_INTERVAL
from codewiki import runsockettotwister
from codewiki import models
import base64
import ConfigParser
import datetime
import logging
import re
import smtplib
import sys
logger = logging
try: import json
except ImportError: import simplejson as json
config = ConfigParser.ConfigParser()
config.readfp(open(settings.CONFIGFILE))
def scraperwikitag(scraper, html, panepresent):
mswpane = re.search('(?i)<div[^>]*?id="scraperwikipane"[^>/]*(?:/\s*>|>.*?</div>)', html)
if mswpane:
startend = (mswpane.start(0), mswpane.end(0))
mclass = re.search('class="([^"]*)"', mswpane.group(0))
if mclass:
paneversion = mclass.group(1)
else:
paneversion = "version-2"
if panepresent != None:
panepresent["scraperwikipane"].append(mswpane)
elif panepresent == None: # case where no div#scraperwikipane is found and it's all there (we're not streaming the html out using php)
# have to insert the pane -- favour doing it after the body tag if it exists
mbody = re.search("(?i)<body.*?>", html)
if mbody:
startend = (mbody.end(0), mbody.end(0))
else:
startend = (0, 0) # (0,0)
paneversion = "version-2"
else:
if len(panepresent["firstfivelines"]) < 5 and re.search("\S", html):
panepresent["firstfivelines"].append(html)
return html
urlbase = settings.MAIN_URL
urlscraperoverview = urlbase + reverse('code_overview', args=[scraper.wiki_type, scraper.short_name])
urlscraperedit = urlbase + reverse('editor_edit', args=[scraper.wiki_type, scraper.short_name])
urlpoweredlogo = settings.MEDIA_URL + "images/powered.png";
swdivstyle = "border:thin #aaf solid; display:block; position:fixed; top:0px; right:0px; background:#eef; margin: 0em; padding: 6pt; font-size: 10pt; z-index: 8675309; "
swlinkstyle = "width:167px; height:17px; margin:0; padding: 0; border-style: none; "
if paneversion == "version-1":
swpane = [ '<div id="scraperwikipane" style="%s;">' % swdivstyle ]
swpane.append('<a href="%s" id="scraperwikipane" style="%s"><img style="border-style: none" src="%s" alt="Powered by ScraperWiki"></a>' % (urlbase, swlinkstyle, urlpoweredlogo))
swpane.append('<br><a href="%s" title="Go to overview page">%s</a>' % (urlscraperoverview, scraper.title))
swpane.append(' (<a href="%s" title="Edit source code for this view">edit</a>)' % (urlscraperedit))
swpane.append('</div>')
else:
swpane = [ '<div id="scraperwikipane" style="%s;">' % swdivstyle ]
swpane.append('<a href="%s" id="scraperwikipane" style="%s"><img style="border-style: none" src="%s" alt="Powered by ScraperWiki"></a>' % (urlscraperoverview, swlinkstyle, urlpoweredlogo))
swpane.append('</div>')
return "%s%s%s" % (html[:startend[0]], "".join(swpane), html[startend[1]:])
def rpcexecute(request, short_name, revision=None):
apikey = request.GET.get('apikey', None)
try:
scraper = models.Code.objects.get(short_name=short_name)
except models.Code.DoesNotExist:
return HttpResponseNotFound(render_to_string('404.html', {'heading':'Not found', 'body':"Sorry, this view does not exist"}, context_instance=RequestContext(request)))
if scraper.wiki_type == 'scraper':
if not scraper.actionauthorized(request.user, "rpcexecute"):
return HttpResponseForbidden(render_to_string('404.html', scraper.authorizationfailedmessage(request.user, "rpcexecute"), context_instance=RequestContext(request)))
if not scraper.api_actionauthorized(apikey):
return HttpResponseForbidden(render_to_string('404.html',
{'heading': 'Not authorized', 'body': 'API key required to access this view'}, context_instance=RequestContext(request)))
if revision:
try:
revision = int(revision)
except ValueError:
revision = None
# quick case where we have PHP with no PHP code in it (it's all pure HTML)
if scraper.language in ['html', 'php', 'javascript']:
code = scraper.saved_code(revision)
if scraper.language == 'php' and not re.search('<\?', code):
return HttpResponse(scraperwikitag(scraper, code, None))
if scraper.language == 'html':
return HttpResponse(scraperwikitag(scraper, code, None))
if scraper.language == 'javascript':
return HttpResponse(code, mimetype='application/javascript')
if revision == None:
revision = -1
# run it via the socket method for staff members who can handle being broken
runnerstream = runsockettotwister.RunnerSocket()
runnerstream.runview(request.user, scraper, revision, request.META["QUERY_STRING"])
# We build the response on the fly in case we get an HTTP
# Content-Type header (or similar) before anything is streamed.
response = None
panepresent = {"scraperwikipane":[], "firstfivelines":[]}
for line in runnerstream:
if line == "":
continue
try:
message = json.loads(line)
except:
# not valid JSON; skip this line rather than fall through with a
# stale (or undefined) message
continue
# We need to log the message here in debug mode so we can track down the
# 'no output for some unknown reason' issue. Console messages from the
# lxc/uml appear to go missing, and this has been happening for a while.
if message['message_type'] == "console":
if not response:
response = HttpResponse()
if message.get('encoding') == 'base64':
response.write(base64.decodestring(message["content"]))
else:
response.write(scraperwikitag(scraper, message["content"], panepresent))
elif message['message_type'] == 'exception':
# :todo: can we use "import cgitb" here?
if not response:
response = HttpResponse()
response.write("<h3>%s</h3>\n" % str(message.get("exceptiondescription")).replace("<", "<"))
for stackentry in message["stackdump"]:
response.write("<h3>%s</h3>\n" % str(stackentry).replace("<", "<"))
# These messages are typically generated by
# scraperwiki.utils.httpresponseheader.
elif message['message_type'] == "httpresponseheader":
# Parameter values have been borrowed from
# http://php.net/manual/en/function.header.php (and hence more
# or less follow the HTTP spec).
if message['headerkey'] == 'Content-Type':
if not response:
response = HttpResponse(mimetype=message['headervalue'])
else:
response.write("<h3>Error: httpresponseheader('%s', '%s') called after start of stream</h3>" % (message['headerkey'], message['headervalue']))
elif message['headerkey'] == 'Content-Disposition':
if not response:
response = HttpResponse()
response['Content-Disposition'] = message['headervalue']
elif message['headerkey'] == 'Location':
if not response:
response = HttpResponseRedirect(message['headervalue'])
else:
response.write("<h3>Error: httpresponseheader('%s', '%s') called after start of stream</h3>" % (message['headerkey'], message['headervalue']))
else:
if not response:
response = HttpResponse()
response.write("<h3>Error: httpresponseheader(headerkey='%s', '%s'); headerkey can only have values 'Content-Type' or 'Content-Disposition'</h3>" % (message['headerkey'], message['headervalue']))
# These messages are typically generated by
# scraperwiki.utils.httpstatuscode.
elif message['message_type'] == 'httpstatuscode':
if not response:
response = HttpResponse(status=message['statuscode'])
else:
response.write(
"<h3>Error:"
" it's too late to try setting HTTP Status Code.</h3>")
if not response:
response = HttpResponse('No output received from view.')
    # now decide whether to insert the "powered by ScraperWiki" panel
    # (avoid doing it on non-HTML output such as json); the default
    # Content-Type is DEFAULT_CONTENT_TYPE, i.e. 'text/html; charset=utf-8'
if 'Content-Type' in response and 'text/html' in response['Content-Type']:
response.write(scraperwikitag(scraper, '<div id="scraperwikipane" class="version-2"/>', panepresent))
return response
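# A sketch of the runner line protocol consumed above, with illustrative
# values; each line from the runner stream is one JSON object:
#
#     {"message_type": "console", "content": "<p>hello</p>"}
#     {"message_type": "console", "encoding": "base64", "content": "aGk="}
#     {"message_type": "httpresponseheader",
#      "headerkey": "Content-Type", "headervalue": "text/csv"}
#     {"message_type": "httpstatuscode", "statuscode": 404}
#     {"message_type": "exception", "exceptiondescription": "...",
#      "stackdump": ["..."]}
#
# Header and status messages are only honoured before the first byte of
# body output, which is why the HttpResponse is created lazily.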
# this form is protected by the django key known to twister, so does not need to be obstructed by the csrf machinery
@csrf_exempt
def twistermakesrunevent(request):
try:
return Dtwistermakesrunevent(request)
except Exception, e:
logger.error("twistermakesruneventerror: %s" % (str(e)))
mail_admins(subject="twistermakesruneventerror: %s" % (str(e)[:30]), message=(str(e)))
return HttpResponse("no done %s" % str(e))
def Dtwistermakesrunevent(request):
if request.POST.get("django_key") != config.get('twister', 'djangokey'):
logger.error("twister wrong djangokey")
return HttpResponse("no access")
run_id = request.POST.get("run_id")
revision = request.POST.get('revision')
if not run_id:
logger.error("twisterbad run_id")
return HttpResponse("bad run_id - %s" % (request.POST,) )
matchingevents = models.ScraperRunEvent.objects.filter(run_id=run_id)
if not matchingevents:
event = models.ScraperRunEvent()
event.scraper = models.Scraper.objects.get(short_name=request.POST.get("scrapername"))
# Would be used to kill it.
clientnumber = request.POST.get("clientnumber")
#event.pid = "client# "+ request.POST.get("clientnumber") # only applies when this runner is active
# only applies when this runner is active
event.pid = (100000000+int(clientnumber))
# Set by execution status.
event.run_id = run_id
# Reset by execution status.
event.run_started = datetime.datetime.now()
# Set the last_run field so we don't select this one again
# for the overdue scrapers.
        # This field shouldn't exist because we should use the
# runobjects instead, where we can work from a far richer
# report on what has been happening.
event.scraper.last_run = datetime.datetime.now()
event.scraper.save()
else:
event = matchingevents[0]
# standard updates
event.output = request.POST.get("output")
event.records_produced = int(request.POST.get("records_produced"))
event.pages_scraped = int(request.POST.get("pages_scraped"))
event.first_url_scraped = request.POST.get("first_url_scraped", "")
event.exception_message = request.POST.get("exception_message", "")
event.run_ended = datetime.datetime.now() # last update time
# run finished case
if request.POST.get("exitstatus"):
event.pid = -1 # disable the running state of the event
if event.scraper.run_interval == MAGIC_RUN_INTERVAL:
event.scraper.run_interval = -1
        event.scraper.status = "sick" if request.POST.get("exitstatus") == "exceptionmessage" else "ok"
event.scraper.last_run = datetime.datetime.now()
# Enable if views ever have metadata that needs updating
# each refresh.
event.scraper.update_meta()
event.scraper.save()
event.save()
# Event needs to be saved first as it is used in the following DomainScraper
if request.POST.get("exitstatus"):
# report the pages that were scraped
jdomainscrapes = request.POST.get("domainscrapes")
domainscrapes = json.loads(jdomainscrapes)
for netloc, vals in domainscrapes.items():
domainscrape = models.DomainScrape(scraper_run_event=event, domain=netloc)
domainscrape.pages_scraped = vals["pages_scraped"]
domainscrape.bytes_scraped = vals["bytes_scraped"]
domainscrape.save()
#####
# We should remove this block below and do alert emails a different way.
#####
# Send email if this is an email scraper
if request.POST.get("exitstatus"):
logger.info('Checking if this is an email scraper')
emailers = event.scraper.users.filter(usercoderole__role='email')
logger.info('There are %d email users' % emailers.count())
if emailers.count() > 0:
subject, message = getemailtext(event)
logger.info("Retrieved subject %s and message %s" % (subject,message,))
if event.scraper.status == 'ok':
logger.info("Status OK")
if message: # no email if blank
logger.info("Have message")
for user in emailers:
try:
send_mail(subject=subject, message=message, from_email=settings.EMAIL_FROM, recipient_list=[user.email], fail_silently=False)
except smtplib.SMTPException, e:
logger.error("emailer failed %s %s" % (str(user), str(e)))
mail_admins(subject="email failed to send: %s" % (str(user)), message=str(e))
else:
logger.info("No message")
else:
#mail_admins(subject="SICK EMAILER: %s" % subject, message=message)
logger.info('SICK EMAILER: %s' % subject)
else:
logger.info('Not a mail scraper ...')
return HttpResponse("done")
# maybe detect the subject title here
def getemailtext(event):
message = event.output
message = re.sub("(?:^|\n)EXECUTIONSTATUS:.*", "", message).strip()
msubject = re.search("(?:^|\n)EMAILSUBJECT:(.*)", message)
if msubject:
subject = msubject.group(1) # snip out the subject
message = "%s%s" % (message[:msubject.start(0)], message[msubject.end(0):])
else:
subject = 'Your ScraperWiki Email - %s' % event.scraper.short_name
return subject, message
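# For illustration, with a hypothetical event.output of
#
#     EXECUTIONSTATUS: ok
#     EMAILSUBJECT: Weekly summary
#     3 new records found
#
# the EXECUTIONSTATUS line is stripped, the text after 'EMAILSUBJECT:'
# becomes the subject, and the remaining lines form the message body.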
| agpl-3.0 | 2,163,062,842,310,524,000 | 42.806548 | 211 | 0.630274 | false |
LinkCareServices/cairotft | docs/conf.py | 1 | 8584 | # -*- coding: utf-8 -*-
#
# Fitch documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 10 18:37:45 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
"""Config file for sphinx documentation."""
import sys
import os
sys.path.insert(1, os.path.abspath("../"))
from build_scripts.version import get_git_version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autosummary', 'sphinx.ext.autodoc',
'sphinx.ext.doctest', 'sphinx.ext.viewcode',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.graphviz', 'sphinx.ext.inheritance_diagram', ]
todo_include_todos = True
intersphinx_mapping = {
'python': ('http://docs.python.org/3.4', None),
}
if tags.has('maintenance'):
autodoc_default_flags = []
else:
autodoc_default_flags = ['members', 'undoc-members',
'private-members']
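# The 'maintenance' tag is expected to be set from the command line, e.g.
# `sphinx-build -t maintenance ...`, to build the docs without the full
# autodoc member listings.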
autoclass_content = 'both'
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cairotft'
copyright = u'2015 - Thomas Chiroux - Link Care Services'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_git_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'armstrong' # 'linfiniti-sphinx-theme' # 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
html_use_index = True
html_use_modindex = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cairotft'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
('index', 'cairotft.tex', u'cairotft Documentation',
u'', 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cairotft', u'cairotft Documentation',
[u''], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cairotft', u'cairotft Documentation',
u'', 'cairotft', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| bsd-3-clause | -4,289,442,062,764,914,700 | 31.515152 | 79 | 0.693267 | false |
KanoComputing/kano-profile | kano_profile/tracker/__init__.py | 1 | 9447 | #
# __init__.py
#
# Copyright (C) 2014 - 2018 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Kano-tracker module
#
# A small module for tracking various metrics the users do in Kano OS
#
__author__ = 'Kano Computing Ltd.'
__email__ = '[email protected]'
import time
import atexit
import datetime
import json
import os
import subprocess
import shlex
from uuid import uuid1, uuid5
from kano.utils.file_operations import read_file_contents, chown_path, \
ensure_dir
from kano.utils.hardware import get_cpu_id
from kano.utils.misc import is_number
from kano.logging import logger
from kano_profile.apps import get_app_state_file, load_app_state_variable, \
save_app_state_variable
from kano_profile.paths import tracker_dir, tracker_events_file, \
tracker_token_file
from kano_profile.tracker.tracker_token import TOKEN, generate_tracker_token, \
load_token
from kano_profile.tracker.tracking_utils import open_locked, \
get_nearest_previous_monday, get_utc_offset
from kano_profile.tracker.tracking_session import TrackingSession
from kano_profile.tracker.tracking_sessions import session_start, session_end, \
list_sessions, get_open_sessions, get_session_file_path, session_log, \
get_session_unique_id, get_session_event, CPU_ID, LANGUAGE, OS_VERSION
# Public imports
from kano_profile.tracker.tracker import Tracker
from kano_profile.tracker.tracking_sessions import session_start, session_end, \
pause_tracking_sessions, unpause_tracking_sessions
def track_data(name, data):
""" Track arbitrary data.
Calling this function will generate a data tracking event.
:param name: The identifier of the data.
:type name: str
:param data: Arbitrary data, must be compatible with JSON.
:type data: dict, list, str, int, float, None
"""
try:
af = open_locked(tracker_events_file, 'a')
except IOError as e:
logger.error("Error opening tracker events file {}".format(e))
else:
with af:
event = get_data_event(name, data)
af.write(json.dumps(event) + "\n")
if 'SUDO_USER' in os.environ:
chown_path(tracker_events_file)
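# Minimal usage sketch (event name and payload are illustrative):
#
#     track_data('settings-changed', {'setting': 'audio', 'value': 'hdmi'})
#
# which appends one JSON-encoded line to tracker_events_file in the shape
# produced by get_data_event() below.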
def track_action(name):
""" Trigger an action tracking event.
:param name: The identifier of the action.
:type name: str
"""
try:
af = open_locked(tracker_events_file, 'a')
except IOError as e:
logger.error("Error opening tracker events file {}".format(e))
else:
with af:
event = get_action_event(name)
af.write(json.dumps(event) + "\n")
if 'SUDO_USER' in os.environ:
chown_path(tracker_events_file)
def track_subprocess(name, cmd):
""" Launch and track the session of a process.
:param name: Name of the session.
:type name: str
:param cmd: The command line (env vars are not supported).
:type cmd: str
"""
cmd_args = shlex.split(cmd)
p = subprocess.Popen(cmd_args)
pid = p.pid
session_start(name, pid)
p.wait()
session_end(get_session_file_path(name, pid))
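# Usage sketch (the command is illustrative):
#
#     track_subprocess('video-player', 'omxplayer intro.mp4')
#
# which wraps the child process in a tracking session named after the first
# argument and keyed on the child's pid.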
def get_data_event(name, data):
"""TODO"""
return {
'type': 'data',
'time': int(time.time()),
'timezone_offset': get_utc_offset(),
'os_version': OS_VERSION,
'cpu_id': CPU_ID,
'token': TOKEN,
'language': LANGUAGE,
'name': str(name),
'data': data
}
def get_action_event(name):
"""TODO"""
return {
'type': 'action',
'time': int(time.time()),
'timezone_offset': get_utc_offset(),
'os_version': OS_VERSION,
'cpu_id': CPU_ID,
'token': TOKEN,
'language': LANGUAGE,
'name': name
}
def add_runtime_to_app(app, runtime):
""" Saves the tracking data for a given application.
Appends a time period to a given app's runtime stats and raises
starts by one. Apart from the total values, it also updates the
weekly stats.
This function uses advisory file locks (see flock(2)) to avoid
races between different applications saving their tracking data
at the same time.
:param app: The name of the application.
:type app: str
:param runtime: For how long was the app running.
:type runtime: number
"""
if not app or app == 'kano-tracker':
return
if not is_number(runtime):
return
runtime = float(runtime)
app = app.replace('.', '_')
# Make sure no one else is accessing this file
app_state_file = get_app_state_file('kano-tracker')
try:
tracker_store = open_locked(app_state_file, 'r')
except IOError as e:
logger.error("Error opening app state file {}".format(e))
else:
app_stats = load_app_state_variable('kano-tracker', 'app_stats')
if not app_stats:
app_stats = dict()
try:
app_stats[app]['starts'] += 1
app_stats[app]['runtime'] += runtime
except Exception:
app_stats[app] = {
'starts': 1,
'runtime': runtime,
}
# Record usage data on per-week basis
if 'weekly' not in app_stats[app]:
app_stats[app]['weekly'] = {}
week = str(get_nearest_previous_monday())
if week not in app_stats[app]['weekly']:
app_stats[app]['weekly'][week] = {
'starts': 0,
'runtime': 0
}
app_stats[app]['weekly'][week]['starts'] += 1
app_stats[app]['weekly'][week]['runtime'] += runtime
save_app_state_variable('kano-tracker', 'app_stats', app_stats)
# Close the lock
tracker_store.close()
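# After a few calls, the 'app_stats' variable stored for 'kano-tracker'
# looks roughly like this (values illustrative):
#
#     {'make-art': {
#         'starts': 3,
#         'runtime': 542.0,
#         'weekly': {'2015-01-05': {'starts': 3, 'runtime': 542.0}}}}
#
# where each weekly key is the date of the nearest previous Monday.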
def save_hardware_info():
"""Saves hardware information related to the Raspberry Pi / Kano Kit"""
from kano.logging import logger
from kano.utils.hardware import get_cpu_id, get_mac_address, \
detect_kano_keyboard
logger.info('save_hardware_info')
state = {
'cpu_id': get_cpu_id(),
'mac_address': get_mac_address(),
'kano_keyboard': detect_kano_keyboard(),
}
save_app_state_variable('kano-tracker', 'hardware_info', state)
def save_kano_version():
"""Saves a dict of os-version: time values,
to keep track of the users update process"""
updates = load_app_state_variable('kano-tracker', 'versions')
if not updates:
updates = dict()
version_now = read_file_contents('/etc/kanux_version')
if not version_now:
return
version_now = version_now.replace('.', '_')
time_now = datetime.datetime.utcnow().isoformat()
updates[version_now] = time_now
save_app_state_variable('kano-tracker', 'versions', updates)
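# The stored 'versions' variable ends up as a mapping of OS version (dots
# replaced by underscores) to the UTC time it was recorded, e.g.
# (illustrative): {'1_3_3': '2015-01-07T10:21:14.000000'}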
def get_tracker_events(old_only=False):
""" Read the events log and return a dictionary with all of them.
:param old_only: Don't return events from the current boot.
:type old_only: boolean
:returns: A dictionary suitable to be sent to the tracker endpoint.
:rtype: dict
"""
data = {'events': []}
try:
rf = open_locked(tracker_events_file, 'r')
except IOError as e:
logger.error("Error opening the tracker events file {}".format(e))
else:
with rf:
for event_line in rf.readlines():
                try:
                    event = json.loads(event_line)
                except ValueError:
                    logger.warn("Found a corrupted event, skipping.")
                    continue
                if _validate_event(event) and event['token'] != TOKEN:
data['events'].append(event)
return data
def _validate_event(event):
""" Check whether the event is correct so the API won't reject it.
:param event: The event data.
:type event: dict
:returns: True/False
:rtype: Boolean
"""
if 'type' not in event:
return False
if 'time' not in event or type(event['time']) != int:
return False
if 'timezone_offset' not in event or type(event['timezone_offset']) != int:
return False
if 'os_version' not in event:
return False
if 'cpu_id' not in event:
return False
if 'token' not in event:
return False
if event['timezone_offset'] < -24*60*60 or \
event['timezone_offset'] > 24*60*60:
return False
return True
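# A minimal event that passes _validate_event(), with illustrative values:
#
#     {'type': 'action', 'time': 1419340800, 'timezone_offset': 0,
#      'os_version': '1.3.3', 'cpu_id': '000000001234abcd',
#      'token': 'some-token'}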
def clear_tracker_events(old_only=True):
""" Truncate the events file, removing all the cached data.
:param old_only: Don't remove data from the current boot.
:type old_only: boolean
"""
try:
rf = open_locked(tracker_events_file, 'r')
except IOError as e:
logger.error("Error opening tracking events file {}".format(e))
else:
with rf:
events = []
for event_line in rf.readlines():
try:
event = json.loads(event_line)
if 'token' in event and event['token'] == TOKEN:
events.append(event_line)
                except ValueError:
logger.warn("Found a corrupted event, skipping.")
with open(tracker_events_file, 'w') as wf:
for event_line in events:
wf.write(event_line)
if 'SUDO_USER' in os.environ:
chown_path(tracker_events_file)
| gpl-2.0 | -717,632,270,318,076,000 | 26.949704 | 80 | 0.596803 | false |
miguelgrinberg/heat | heat/tests/api/openstack_v1/test_stacks.py | 2 | 93167 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from oslo_config import cfg
import six
import webob.exc
import heat.api.middleware.fault as fault
import heat.api.openstack.v1.stacks as stacks
from heat.common import exception as heat_exc
from heat.common import identifier
from heat.common import policy
from heat.common import template_format
from heat.common import urlfetch
from heat.rpc import api as rpc_api
from heat.rpc import client as rpc_client
from heat.tests.api.openstack_v1 import tools
from heat.tests import common
class InstantiationDataTest(common.HeatTestCase):
def test_parse_error_success(self):
with stacks.InstantiationData.parse_error_check('Garbage'):
pass
def test_parse_error(self):
def generate_error():
with stacks.InstantiationData.parse_error_check('Garbage'):
raise ValueError
self.assertRaises(webob.exc.HTTPBadRequest, generate_error)
def test_parse_error_message(self):
# make sure the parser error gets through to the caller.
bad_temp = '''
heat_template_version: '2013-05-23'
parameters:
KeyName:
type: string
description: bla
'''
def generate_error():
with stacks.InstantiationData.parse_error_check('foo'):
template_format.parse(bad_temp)
parse_ex = self.assertRaises(webob.exc.HTTPBadRequest, generate_error)
self.assertIn('foo', six.text_type(parse_ex))
def test_stack_name(self):
body = {'stack_name': 'wibble'}
data = stacks.InstantiationData(body)
self.assertEqual('wibble', data.stack_name())
def test_stack_name_missing(self):
body = {'not the stack_name': 'wibble'}
data = stacks.InstantiationData(body)
self.assertRaises(webob.exc.HTTPBadRequest, data.stack_name)
def test_template_inline(self):
template = {'foo': 'bar', 'blarg': 'wibble'}
body = {'template': template}
data = stacks.InstantiationData(body)
self.assertEqual(template, data.template())
def test_template_string_json(self):
template = ('{"heat_template_version": "2013-05-23",'
'"foo": "bar", "blarg": "wibble"}')
body = {'template': template}
data = stacks.InstantiationData(body)
self.assertEqual(json.loads(template), data.template())
def test_template_string_yaml(self):
template = '''HeatTemplateFormatVersion: 2012-12-12
foo: bar
blarg: wibble
'''
parsed = {u'HeatTemplateFormatVersion': u'2012-12-12',
u'blarg': u'wibble',
u'foo': u'bar'}
body = {'template': template}
data = stacks.InstantiationData(body)
self.assertEqual(parsed, data.template())
def test_template_url(self):
template = {'heat_template_version': '2013-05-23',
'foo': 'bar',
'blarg': 'wibble'}
url = 'http://example.com/template'
body = {'template_url': url}
data = stacks.InstantiationData(body)
self.m.StubOutWithMock(urlfetch, 'get')
urlfetch.get(url).AndReturn(json.dumps(template))
self.m.ReplayAll()
self.assertEqual(template, data.template())
self.m.VerifyAll()
def test_template_priority(self):
template = {'foo': 'bar', 'blarg': 'wibble'}
url = 'http://example.com/template'
body = {'template': template, 'template_url': url}
data = stacks.InstantiationData(body)
self.m.StubOutWithMock(urlfetch, 'get')
self.m.ReplayAll()
self.assertEqual(template, data.template())
self.m.VerifyAll()
def test_template_missing(self):
template = {'foo': 'bar', 'blarg': 'wibble'}
body = {'not the template': template}
data = stacks.InstantiationData(body)
self.assertRaises(webob.exc.HTTPBadRequest, data.template)
def test_parameters(self):
params = {'foo': 'bar', 'blarg': 'wibble'}
body = {'parameters': params,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}}
data = stacks.InstantiationData(body)
self.assertEqual(body, data.environment())
def test_environment_only_params(self):
env = {'parameters': {'foo': 'bar', 'blarg': 'wibble'}}
body = {'environment': env}
data = stacks.InstantiationData(body)
self.assertEqual(env, data.environment())
def test_environment_and_parameters(self):
body = {'parameters': {'foo': 'bar'},
'environment': {'parameters': {'blarg': 'wibble'}}}
expect = {'parameters': {'blarg': 'wibble',
'foo': 'bar'},
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}}
data = stacks.InstantiationData(body)
self.assertEqual(expect, data.environment())
def test_parameters_override_environment(self):
# This tests that the cli parameters will override
# any parameters in the environment.
body = {'parameters': {'foo': 'bar',
'tester': 'Yes'},
'environment': {'parameters': {'blarg': 'wibble',
'tester': 'fail'}}}
expect = {'parameters': {'blarg': 'wibble',
'foo': 'bar',
'tester': 'Yes'},
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}}
data = stacks.InstantiationData(body)
self.assertEqual(expect, data.environment())
def test_environment_bad_format(self):
env = {'somethingnotsupported': {'blarg': 'wibble'}}
body = {'environment': json.dumps(env)}
data = stacks.InstantiationData(body)
self.assertRaises(webob.exc.HTTPBadRequest, data.environment)
def test_environment_missing(self):
env = {'foo': 'bar', 'blarg': 'wibble'}
body = {'not the environment': env}
data = stacks.InstantiationData(body)
self.assertEqual({'parameters': {}, 'encrypted_param_names': [],
'parameter_defaults': {}, 'resource_registry': {}},
data.environment())
def test_args(self):
body = {
'parameters': {},
'environment': {},
'stack_name': 'foo',
'template': {},
'template_url': 'http://example.com/',
'timeout_mins': 60,
}
data = stacks.InstantiationData(body)
self.assertEqual({'timeout_mins': 60}, data.args())
@mock.patch.object(policy.Enforcer, 'enforce')
class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'''
    Tests the API class which acts as the WSGI controller,
    i.e. the endpoint that processes API requests after they are routed.
'''
def setUp(self):
super(StackControllerTest, self).setUp()
# Create WSGI controller instance
class DummyConfig(object):
bind_port = 8004
cfgopts = DummyConfig()
self.controller = stacks.StackController(options=cfgopts)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/stacks')
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
engine_resp = [
{
u'stack_identity': dict(identity),
u'updated_time': u'2012-07-09T09:13:11Z',
u'template_description': u'blah',
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE',
u'parameters': {},
u'outputs': [],
u'notification_topics': [],
u'capabilities': [],
u'disable_rollback': True,
u'timeout_mins': 60,
}
]
mock_call.return_value = engine_resp
result = self.controller.index(req, tenant_id=identity.tenant)
expected = {
'stacks': [
{
'links': [{"href": self._url(identity),
"rel": "self"}],
'id': '1',
u'updated_time': u'2012-07-09T09:13:11Z',
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': u'wordpress',
u'stack_status': u'CREATE_COMPLETE'
}
]
}
self.assertEqual(expected, result)
default_args = {'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'filters': None, 'tenant_safe': True,
'show_deleted': False, 'show_nested': False,
'show_hidden': False, 'tags': None,
'tags_any': None, 'not_tags': None,
'not_tags_any': None}
mock_call.assert_called_once_with(
req.context, ('list_stacks', default_args), version='1.8')
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_whitelists_pagination_params(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'limit': 10,
'sort_keys': 'fake sort keys',
'marker': 'fake marker',
'sort_dir': 'fake sort dir',
'balrog': 'you shall not pass!'
}
req = self._get('/stacks', params=params)
mock_call.return_value = []
self.controller.index(req, tenant_id=self.tenant)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertEqual(13, len(engine_args))
self.assertIn('limit', engine_args)
self.assertIn('sort_keys', engine_args)
self.assertIn('marker', engine_args)
self.assertIn('sort_dir', engine_args)
self.assertIn('filters', engine_args)
self.assertIn('tenant_safe', engine_args)
self.assertNotIn('balrog', engine_args)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_limit_not_int(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'limit': 'not-an-int'}
req = self._get('/stacks', params=params)
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req,
tenant_id=self.tenant)
self.assertEqual("Only integer is acceptable by 'limit'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_whitelist_filter_params(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'id': 'fake id',
'status': 'fake status',
'name': 'fake name',
'action': 'fake action',
'username': 'fake username',
'tenant': 'fake tenant',
'owner_id': 'fake owner-id',
'balrog': 'you shall not pass!'
}
req = self._get('/stacks', params=params)
mock_call.return_value = []
self.controller.index(req, tenant_id=self.tenant)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertIn('filters', engine_args)
filters = engine_args['filters']
self.assertEqual(7, len(filters))
self.assertIn('id', filters)
self.assertIn('status', filters)
self.assertIn('name', filters)
self.assertIn('action', filters)
self.assertIn('username', filters)
self.assertIn('tenant', filters)
self.assertIn('owner_id', filters)
self.assertNotIn('balrog', filters)
def test_index_returns_stack_count_if_with_count_is_true(
self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'True'}
req = self._get('/stacks', params=params)
engine = self.controller.rpc_client
engine.list_stacks = mock.Mock(return_value=[])
engine.count_stacks = mock.Mock(return_value=0)
result = self.controller.index(req, tenant_id=self.tenant)
self.assertEqual(0, result['count'])
def test_index_doesnt_return_stack_count_if_with_count_is_false(
self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'false'}
req = self._get('/stacks', params=params)
engine = self.controller.rpc_client
engine.list_stacks = mock.Mock(return_value=[])
engine.count_stacks = mock.Mock()
result = self.controller.index(req, tenant_id=self.tenant)
self.assertNotIn('count', result)
assert not engine.count_stacks.called
def test_index_with_count_is_invalid(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'invalid_value'}
req = self._get('/stacks', params=params)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req, tenant_id=self.tenant)
        expected = ('Unrecognized value "invalid_value" for "with_count", '
                    'acceptable values are: true, false')
        self.assertIn(expected, six.text_type(exc))
@mock.patch.object(rpc_client.EngineClient, 'count_stacks')
def test_index_doesnt_break_with_old_engine(self, mock_count_stacks,
mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'True'}
req = self._get('/stacks', params=params)
engine = self.controller.rpc_client
engine.list_stacks = mock.Mock(return_value=[])
mock_count_stacks.side_effect = AttributeError("Should not exist")
result = self.controller.index(req, tenant_id=self.tenant)
self.assertNotIn('count', result)
def test_index_enforces_global_index_if_global_tenant(self, mock_enforce):
params = {'global_tenant': 'True'}
req = self._get('/stacks', params=params)
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
self.controller.index(req, tenant_id=self.tenant)
mock_enforce.assert_called_with(action='global_index',
scope=self.controller.REQUEST_SCOPE,
context=self.context)
def test_global_index_sets_tenant_safe_to_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'global_tenant': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=False)
def test_global_index_show_deleted_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_deleted': 'False'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=False)
def test_global_index_show_deleted_true(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_deleted': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=True)
def test_global_index_show_nested_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_nested': 'False'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_nested=False)
def test_global_index_show_nested_true(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_nested': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_nested=True)
def test_index_show_deleted_True_with_count_True(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock(return_value=0)
params = {'show_deleted': 'True',
'with_count': 'True'}
req = self._get('/stacks', params=params)
result = self.controller.index(req, tenant_id=self.tenant)
self.assertEqual(0, result['count'])
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=True)
rpc_client.count_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=True,
show_nested=False,
show_hidden=False,
tags=None,
tags_any=None,
not_tags=None,
not_tags_any=None)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_detail(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'detail', True)
req = self._get('/stacks/detail')
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
engine_resp = [
{
u'stack_identity': dict(identity),
u'updated_time': u'2012-07-09T09:13:11Z',
u'template_description': u'blah',
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE',
u'parameters': {'foo': 'bar'},
u'outputs': ['key', 'value'],
u'notification_topics': [],
u'capabilities': [],
u'disable_rollback': True,
u'timeout_mins': 60,
}
]
mock_call.return_value = engine_resp
result = self.controller.detail(req, tenant_id=identity.tenant)
expected = {
'stacks': [
{
'links': [{"href": self._url(identity),
"rel": "self"}],
'id': '1',
u'updated_time': u'2012-07-09T09:13:11Z',
u'template_description': u'blah',
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'stack_status': u'CREATE_COMPLETE',
u'parameters': {'foo': 'bar'},
u'outputs': ['key', 'value'],
u'notification_topics': [],
u'capabilities': [],
u'disable_rollback': True,
u'timeout_mins': 60,
}
]
}
self.assertEqual(expected, result)
default_args = {'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'filters': None, 'tenant_safe': True,
'show_deleted': False, 'show_nested': False,
'show_hidden': False, 'tags': None,
'tags_any': None, 'not_tags': None,
'not_tags_any': None}
mock_call.assert_called_once_with(
req.context, ('list_stacks', default_args), version='1.8')
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_rmt_aterr(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/stacks')
mock_call.side_effect = tools.to_remote_error(AttributeError())
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(400, resp.json['code'])
self.assertEqual('AttributeError', resp.json['error']['type'])
mock_call.assert_called_once_with(
req.context, ('list_stacks', mock.ANY), version='1.8')
def test_index_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', False)
req = self._get('/stacks')
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_rmt_interr(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/stacks')
mock_call.side_effect = tools.to_remote_error(Exception())
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(500, resp.json['code'])
self.assertEqual('Exception', resp.json['error']['type'])
mock_call.assert_called_once_with(
req.context, ('list_stacks', mock.ANY), version='1.8')
def test_create(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': identity.stack_name,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': identity.stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndReturn(dict(identity))
self.m.ReplayAll()
response = self.controller.create(req,
tenant_id=identity.tenant,
body=body)
expected = {'stack':
{'id': '1',
'links': [{'href': self._url(identity), 'rel': 'self'}]}}
self.assertEqual(expected, response)
self.m.VerifyAll()
def test_create_with_tags(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': identity.stack_name,
'parameters': parameters,
'tags': 'tag1,tag2',
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': identity.stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndReturn(dict(identity))
self.m.ReplayAll()
response = self.controller.create(req,
tenant_id=identity.tenant,
body=body)
expected = {'stack':
{'id': '1',
'links': [{'href': self._url(identity), 'rel': 'self'}]}}
self.assertEqual(expected, response)
self.m.VerifyAll()
def test_adopt(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
template = {
"heat_template_version": "2013-05-23",
"parameters": {"app_dbx": {"type": "string"}},
"resources": {"res1": {"type": "GenericResourceType"}}}
parameters = {"app_dbx": "test"}
adopt_data = {
"status": "COMPLETE",
"name": "rtrove1",
"parameters": parameters,
"template": template,
"action": "CREATE",
"id": "8532f0d3-ea84-444e-b2bb-2543bb1496a4",
"resources": {"res1": {
"status": "COMPLETE",
"name": "database_password",
"resource_id": "yBpuUROjfGQ2gKOD",
"action": "CREATE",
"type": "GenericResourceType",
"metadata": {}}}}
body = {'template': None,
'stack_name': identity.stack_name,
'parameters': parameters,
'timeout_mins': 30,
'adopt_stack_data': str(adopt_data)}
req = self._post('/stacks', json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': identity.stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30,
'adopt_stack_data': str(adopt_data)},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndReturn(dict(identity))
self.m.ReplayAll()
response = self.controller.create(req,
tenant_id=identity.tenant,
body=body)
expected = {'stack':
{'id': '1',
'links': [{'href': self._url(identity), 'rel': 'self'}]}}
self.assertEqual(expected, response)
self.m.VerifyAll()
def test_adopt_timeout_not_int(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
body = {'template': None,
'stack_name': identity.stack_name,
'parameters': {},
'timeout_mins': 'not-an-int',
'adopt_stack_data': 'does not matter'}
req = self._post('/stacks', json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req,
tenant_id=self.tenant, body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_adopt_error(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
parameters = {"app_dbx": "test"}
adopt_data = ["Test"]
body = {'template': None,
'stack_name': identity.stack_name,
'parameters': parameters,
'timeout_mins': 30,
'adopt_stack_data': str(adopt_data)}
req = self._post('/stacks', json.dumps(body))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(400, resp.status_code)
self.assertEqual('400 Bad Request', resp.status)
self.assertIn('Invalid adopt data', resp.text)
self.m.VerifyAll()
def test_create_with_files(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': identity.stack_name,
'parameters': parameters,
'files': {'my.yaml': 'This is the file contents.'},
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': identity.stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {'my.yaml': 'This is the file contents.'},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndReturn(dict(identity))
self.m.ReplayAll()
result = self.controller.create(req,
tenant_id=identity.tenant,
body=body)
expected = {'stack':
{'id': '1',
'links': [{'href': self._url(identity), 'rel': 'self'}]}}
self.assertEqual(expected, result)
self.m.VerifyAll()
def test_create_err_rpcerr(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True, 3)
stack_name = "wordpress"
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': stack_name,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
unknown_parameter = heat_exc.UnknownUserParameter(key='a')
missing_parameter = heat_exc.UserParameterMissing(key='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndRaise(tools.to_remote_error(AttributeError()))
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndRaise(tools.to_remote_error(unknown_parameter))
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndRaise(tools.to_remote_error(missing_parameter))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(400, resp.json['code'])
self.assertEqual('AttributeError', resp.json['error']['type'])
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(400, resp.json['code'])
self.assertEqual('UnknownUserParameter', resp.json['error']['type'])
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(400, resp.json['code'])
self.assertEqual('UserParameterMissing', resp.json['error']['type'])
self.m.VerifyAll()
def test_create_err_existing(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
stack_name = "wordpress"
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': stack_name,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
error = heat_exc.StackExists(stack_name='s')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(409, resp.json['code'])
self.assertEqual('StackExists', resp.json['error']['type'])
self.m.VerifyAll()
def test_create_timeout_not_int(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
stack_name = "wordpress"
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': stack_name,
'parameters': parameters,
'timeout_mins': 'not-an-int'}
req = self._post('/stacks', json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req,
tenant_id=self.tenant, body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_create_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', False)
stack_name = "wordpress"
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': stack_name,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_create_err_engine(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
stack_name = "wordpress"
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': stack_name,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
error = heat_exc.StackValidationFailed(message='')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(400, resp.json['code'])
self.assertEqual('StackValidationFailed', resp.json['error']['type'])
self.m.VerifyAll()
    def test_create_err_stack_bad_request(self, mock_enforce):
cfg.CONF.set_override('debug', True)
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
error = heat_exc.HTTPExceptionDisguise(webob.exc.HTTPBadRequest())
self.controller.create = mock.MagicMock(side_effect=error)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create, req, body)
# When HTTP disguised exceptions reach the fault app, they are
# converted into regular responses, just like non-HTTP exceptions
self.assertEqual(400, resp.json['code'])
self.assertEqual('HTTPBadRequest', resp.json['error']['type'])
self.assertIsNotNone(resp.json['error']['traceback'])
@mock.patch.object(rpc_client.EngineClient, 'call')
@mock.patch.object(stacks.stacks_view, 'format_stack')
def test_preview_stack(self, mock_format, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'preview', True)
body = {'stack_name': 'foo', 'template': {}}
req = self._get('/stacks/preview', params={})
mock_call.return_value = {}
mock_format.return_value = 'formatted_stack'
result = self.controller.preview(req, tenant_id=self.tenant, body=body)
self.assertEqual({'stack': 'formatted_stack'}, result)
def test_lookup(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
req = self._get('/stacks/%(stack_name)s' % identity)
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('identify_stack', {'stack_name': identity.stack_name})
).AndReturn(identity)
self.m.ReplayAll()
found = self.assertRaises(
webob.exc.HTTPFound, self.controller.lookup, req,
tenant_id=identity.tenant, stack_name=identity.stack_name)
self.assertEqual(self._url(identity), found.location)
self.m.VerifyAll()
def test_lookup_arn(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
req = self._get('/stacks%s' % identity.arn_url_path())
self.m.ReplayAll()
found = self.assertRaises(
webob.exc.HTTPFound, self.controller.lookup,
req, tenant_id=identity.tenant, stack_name=identity.arn())
self.assertEqual(self._url(identity), found.location)
self.m.VerifyAll()
def test_lookup_nonexistent(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
stack_name = 'wibble'
req = self._get('/stacks/%(stack_name)s' % {
'stack_name': stack_name})
error = heat_exc.StackNotFound(stack_name='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('identify_stack', {'stack_name': stack_name})
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.lookup,
req, tenant_id=self.tenant,
stack_name=stack_name)
self.assertEqual(404, resp.json['code'])
self.assertEqual('StackNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_lookup_err_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', False)
stack_name = 'wibble'
req = self._get('/stacks/%(stack_name)s' % {
'stack_name': stack_name})
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.lookup,
req, tenant_id=self.tenant,
stack_name=stack_name)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_lookup_resource(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
req = self._get('/stacks/%(stack_name)s/resources' % identity)
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('identify_stack', {'stack_name': identity.stack_name})
).AndReturn(identity)
self.m.ReplayAll()
found = self.assertRaises(
webob.exc.HTTPFound, self.controller.lookup, req,
tenant_id=identity.tenant, stack_name=identity.stack_name,
path='resources')
self.assertEqual(self._url(identity) + '/resources',
found.location)
self.m.VerifyAll()
def test_lookup_resource_nonexistent(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
stack_name = 'wibble'
req = self._get('/stacks/%(stack_name)s/resources' % {
'stack_name': stack_name})
error = heat_exc.StackNotFound(stack_name='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('identify_stack', {'stack_name': stack_name})
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.lookup,
req, tenant_id=self.tenant,
stack_name=stack_name,
path='resources')
self.assertEqual(404, resp.json['code'])
self.assertEqual('StackNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_lookup_resource_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', False)
stack_name = 'wibble'
req = self._get('/stacks/%(stack_name)s/resources' % {
'stack_name': stack_name})
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.lookup,
req, tenant_id=self.tenant,
stack_name=stack_name,
path='resources')
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_show(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
parameters = {u'DBUsername': u'admin',
u'LinuxDistribution': u'F17',
u'InstanceType': u'm1.large',
u'DBRootPassword': u'admin',
u'DBPassword': u'admin',
u'DBName': u'wordpress'}
outputs = [{u'output_key': u'WebsiteURL',
u'description': u'URL for Wordpress wiki',
u'output_value': u'http://10.0.0.8/wordpress'}]
engine_resp = [
{
u'stack_identity': dict(identity),
u'updated_time': u'2012-07-09T09:13:11Z',
u'parameters': parameters,
u'outputs': outputs,
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'notification_topics': [],
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE',
u'description': u'blah',
u'disable_rollback': True,
                u'timeout_mins': 60,
u'capabilities': [],
}
]
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('show_stack', {'stack_identity': dict(identity)})
).AndReturn(engine_resp)
self.m.ReplayAll()
response = self.controller.show(req,
tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
expected = {
'stack': {
'links': [{"href": self._url(identity),
"rel": "self"}],
'id': '6',
u'updated_time': u'2012-07-09T09:13:11Z',
u'parameters': parameters,
u'outputs': outputs,
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'stack_status': u'CREATE_COMPLETE',
u'capabilities': [],
u'notification_topics': [],
u'disable_rollback': True,
u'timeout_mins': 60,
}
}
self.assertEqual(expected, response)
self.m.VerifyAll()
def test_show_notfound(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', True)
identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
error = heat_exc.StackNotFound(stack_name='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('show_stack', {'stack_identity': dict(identity)})
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.show,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(404, resp.json['code'])
self.assertEqual('StackNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_show_invalidtenant(self, mock_enforce):
identity = identifier.HeatIdentifier('wibble', 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.show,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
self.m.VerifyAll()
def test_show_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.show,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_get_template(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'template', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
template = {u'Foo': u'bar'}
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('get_template', {'stack_identity': dict(identity)})
).AndReturn(template)
self.m.ReplayAll()
response = self.controller.template(req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(template, response)
self.m.VerifyAll()
def test_get_template_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'template', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s/template'
% identity)
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.template,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
self.m.VerifyAll()
def test_get_template_err_notfound(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'template', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
error = heat_exc.StackNotFound(stack_name='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('get_template', {'stack_identity': dict(identity)})
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.template,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(404, resp.json['code'])
self.assertEqual('StackNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_update(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 30}
req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30}})
).AndReturn(dict(identity))
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPAccepted,
self.controller.update,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.m.VerifyAll()
def test_update_with_tags(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'tags': 'tag1,tag2',
'timeout_mins': 30}
req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']}})
).AndReturn(dict(identity))
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPAccepted,
self.controller.update,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.m.VerifyAll()
def test_update_bad_name(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 30}
req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
error = heat_exc.StackNotFound(stack_name='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {u'parameters': parameters,
u'encrypted_param_names': [],
u'parameter_defaults': {},
u'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30}})
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.update,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.assertEqual(404, resp.json['code'])
self.assertEqual('StackNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_update_timeout_not_int(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 'not-int'}
req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req,
tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_update_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', False)
identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 30}
req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.update,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_update_with_existing_parameters(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
body = {'template': template,
'parameters': {},
'files': {},
'timeout_mins': 30}
req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {'parameters': {},
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {rpc_api.PARAM_EXISTING: True,
'timeout_mins': 30}})
).AndReturn(dict(identity))
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPAccepted,
self.controller.update_patch,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.m.VerifyAll()
def test_update_with_existing_parameters_with_tags(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
body = {'template': template,
'parameters': {},
'files': {},
'tags': 'tag1,tag2',
'timeout_mins': 30}
req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {'parameters': {},
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {rpc_api.PARAM_EXISTING: True,
'timeout_mins': 30,
'tags': ['tag1', 'tag2']}})
).AndReturn(dict(identity))
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPAccepted,
self.controller.update_patch,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.m.VerifyAll()
def test_update_with_patched_existing_parameters(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 30}
req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {rpc_api.PARAM_EXISTING: True,
'timeout_mins': 30}})
).AndReturn(dict(identity))
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPAccepted,
self.controller.update_patch,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.m.VerifyAll()
def test_update_with_patch_timeout_not_int(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 'not-int'}
req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_patch, req,
tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_update_with_existing_and_default_parameters(
self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
clear_params = [u'DBUsername', u'DBPassword', u'LinuxDistribution']
body = {'template': template,
'parameters': {},
'clear_parameters': clear_params,
'files': {},
'timeout_mins': 30}
req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {'parameters': {},
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {rpc_api.PARAM_EXISTING: True,
'clear_parameters': clear_params,
'timeout_mins': 30}})
).AndReturn(dict(identity))
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPAccepted,
self.controller.update_patch,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.m.VerifyAll()
def test_update_with_patched_and_default_parameters(
self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
clear_params = [u'DBUsername', u'DBPassword', u'LinuxDistribution']
body = {'template': template,
'parameters': parameters,
'clear_parameters': clear_params,
'files': {},
'timeout_mins': 30}
req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {rpc_api.PARAM_EXISTING: True,
'clear_parameters': clear_params,
'timeout_mins': 30}})
).AndReturn(dict(identity))
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPAccepted,
self.controller.update_patch,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.m.VerifyAll()
def test_delete(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'delete', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
# Engine returns None when delete successful
rpc_client.EngineClient.call(
req.context,
('delete_stack', {'stack_identity': dict(identity)})
).AndReturn(None)
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPNoContent,
self.controller.delete,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.m.VerifyAll()
def test_delete_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'delete', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.delete,
req, tenant_id=self.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_abandon(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'abandon', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._abandon('/stacks/%(stack_name)s/%(stack_id)s' % identity)
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
# Engine returns json data on abandon completion
expected = {"name": "test", "id": "123"}
rpc_client.EngineClient.call(
req.context,
('abandon_stack', {'stack_identity': dict(identity)})
).AndReturn(expected)
self.m.ReplayAll()
ret = self.controller.abandon(req,
tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(expected, ret)
self.m.VerifyAll()
def test_abandon_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'abandon', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._abandon('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.abandon,
req, tenant_id=self.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_delete_bad_name(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'delete', True)
identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
error = heat_exc.StackNotFound(stack_name='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
# Engine returns None when delete successful
rpc_client.EngineClient.call(
req.context,
('delete_stack', {'stack_identity': dict(identity)})
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.delete,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(404, resp.json['code'])
self.assertEqual('StackNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_validate_template(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'validate_template', True)
template = {u'Foo': u'bar'}
body = {'template': template}
req = self._post('/validate', json.dumps(body))
engine_response = {
u'Description': u'blah',
u'Parameters': [
{
u'NoEcho': u'false',
u'ParameterKey': u'InstanceType',
u'Description': u'Instance type'
}
]
}
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('validate_template',
{'template': template,
'params': {'parameters': {},
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}}})
).AndReturn(engine_response)
self.m.ReplayAll()
response = self.controller.validate_template(req,
tenant_id=self.tenant,
body=body)
self.assertEqual(engine_response, response)
self.m.VerifyAll()
def test_validate_template_error(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'validate_template', True)
template = {u'Foo': u'bar'}
body = {'template': template}
req = self._post('/validate', json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('validate_template',
{'template': template,
'params': {'parameters': {},
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}}})
).AndReturn({'Error': 'fubar'})
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.validate_template,
req, tenant_id=self.tenant, body=body)
self.m.VerifyAll()
def test_validate_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'validate_template', False)
template = {u'Foo': u'bar'}
body = {'template': template}
req = self._post('/validate', json.dumps(body))
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.validate_template,
req, tenant_id=self.tenant, body=body)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_list_resource_types(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_resource_types', True)
req = self._get('/resource_types')
engine_response = ['AWS::EC2::Instance',
'AWS::EC2::EIP',
'AWS::EC2::EIPAssociation']
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context, ('list_resource_types', {'support_status': None}),
version="1.1"
).AndReturn(engine_response)
self.m.ReplayAll()
response = self.controller.list_resource_types(req,
tenant_id=self.tenant)
self.assertEqual({'resource_types': engine_response}, response)
self.m.VerifyAll()
def test_list_resource_types_error(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_resource_types', True)
req = self._get('/resource_types')
error = heat_exc.ResourceTypeNotFound(type_name='')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('list_resource_types',
{'support_status': None},
), version="1.1"
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.list_resource_types,
req, tenant_id=self.tenant)
self.assertEqual(404, resp.json['code'])
self.assertEqual('ResourceTypeNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_list_resource_types_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_resource_types', False)
req = self._get('/resource_types')
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.list_resource_types,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_list_template_versions(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_template_versions', True)
req = self._get('/template_versions')
engine_response = [
{'version': 'heat_template_version.2013-05-23', 'type': 'hot'},
{'version': 'AWSTemplateFormatVersion.2010-09-09', 'type': 'cfn'}]
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context, ('list_template_versions', {}),
version="1.11"
).AndReturn(engine_response)
self.m.ReplayAll()
response = self.controller.list_template_versions(
req, tenant_id=self.tenant)
self.assertEqual({'template_versions': engine_response}, response)
self.m.VerifyAll()
def test_list_template_functions(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_template_functions', True)
req = self._get('/template_versions/t1/functions')
engine_response = [
{'functions': 'func1', 'description': 'desc1'},
]
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context, (
'list_template_functions', {'template_version': 't1'}),
version="1.13"
).AndReturn(engine_response)
self.m.ReplayAll()
response = self.controller.list_template_functions(
req, tenant_id=self.tenant, template_version='t1')
self.assertEqual({'template_functions': engine_response}, response)
self.m.VerifyAll()
def test_resource_schema(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'resource_schema', True)
req = self._get('/resource_types/ResourceWithProps')
type_name = 'ResourceWithProps'
engine_response = {
'resource_type': type_name,
'properties': {
'Foo': {'type': 'string', 'required': False},
},
'attributes': {
'foo': {'description': 'A generic attribute'},
'Foo': {'description': 'Another generic attribute'},
},
'support_status': {
'status': 'SUPPORTED',
'version': None,
'message': None,
},
}
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('resource_schema', {'type_name': type_name})
).AndReturn(engine_response)
self.m.ReplayAll()
response = self.controller.resource_schema(req,
tenant_id=self.tenant,
type_name=type_name)
self.assertEqual(engine_response, response)
self.m.VerifyAll()
def test_resource_schema_nonexist(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'resource_schema', True)
req = self._get('/resource_types/BogusResourceType')
type_name = 'BogusResourceType'
error = heat_exc.ResourceTypeNotFound(type_name='BogusResourceType')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('resource_schema', {'type_name': type_name})
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.resource_schema,
req, tenant_id=self.tenant,
type_name=type_name)
self.assertEqual(404, resp.json['code'])
self.assertEqual('ResourceTypeNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_resource_schema_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'resource_schema', False)
req = self._get('/resource_types/BogusResourceType')
type_name = 'BogusResourceType'
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.resource_schema,
req, tenant_id=self.tenant,
type_name=type_name)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_generate_template(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'generate_template', True)
req = self._get('/resource_types/TEST_TYPE/template')
engine_response = {'Type': 'TEST_TYPE'}
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('generate_template', {'type_name': 'TEST_TYPE',
'template_type': 'cfn'}),
version='1.9'
).AndReturn(engine_response)
self.m.ReplayAll()
self.controller.generate_template(req, tenant_id=self.tenant,
type_name='TEST_TYPE')
self.m.VerifyAll()
def test_generate_template_invalid_template_type(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'generate_template', True)
params = {'template_type': 'invalid'}
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
req = self._get('/resource_types/TEST_TYPE/template',
params=params)
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.generate_template,
req, tenant_id=self.tenant,
type_name='TEST_TYPE')
self.assertIn('Template type is not supported: Invalid template '
'type "invalid", valid types are: cfn, hot.',
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_generate_template_not_found(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'generate_template', True)
req = self._get('/resource_types/NOT_FOUND/template')
error = heat_exc.ResourceTypeNotFound(type_name='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('generate_template', {'type_name': 'NOT_FOUND',
'template_type': 'cfn'}),
version='1.9'
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.generate_template,
req, tenant_id=self.tenant,
type_name='NOT_FOUND')
self.assertEqual(404, resp.json['code'])
self.assertEqual('ResourceTypeNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_generate_template_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'generate_template', False)
req = self._get('/resource_types/NOT_FOUND/template')
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.generate_template,
req, tenant_id=self.tenant,
type_name='blah')
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
class StackSerializerTest(common.HeatTestCase):
def setUp(self):
super(StackSerializerTest, self).setUp()
self.serializer = stacks.StackSerializer()
def test_serialize_create(self):
result = {'stack':
{'id': '1',
'links': [{'href': 'location', "rel": "self"}]}}
response = webob.Response()
response = self.serializer.create(response, result)
self.assertEqual(201, response.status_int)
self.assertEqual('location', response.headers['Location'])
self.assertEqual('application/json', response.headers['Content-Type'])
| apache-2.0 | -1,535,811,039,123,686,100 | 41.061851 | 79 | 0.520989 | false |
thequbit/yellr-server | yellr-serv/yellrserv/ep_client_assignment.py | 1 | 2188 | from pyramid.view import view_config
import client_utils
import utils
@view_config(route_name='get_assignments.json')
def get_assignments(request):
result = {'success': False}
status_code = 200
    # Default these so the logging call below stays safe even if client
    # registration raises before they are assigned.
    success = False
    client = None
    lat = None
    lng = None
    try:
        success, error_text, language_code, lat, lng, \
            client = client_utils.register_client(request)
        if success == False:
            raise Exception(error_text)
        assignments = client_utils.get_assignments(
            client_id = client.client_id,
            language_code = language_code,
            lat = lat,
            lng = lng,
        )
        result['assignments'] = assignments
        result['success'] = True
    except Exception, e:
        status_code = 400
        result['error_text'] = str(e)
client_utils.log_client_action(
client = client,
url = 'get_assignments.json',
lat = lat,
lng = lng,
request = request,
result = result,
success = success,
)
return utils.make_response(result, status_code)
'''
@view_config(route_name='get_poll_results.json')
def get_poll_results(request):
result = {'success': False}
status_code = 200
try:
success, error_text, language_code, lat, lng, \
client = client_utils.register_client(request)
if success == False:
raise Exception(error_text)
#assignments = client_utils.get_assignments(
# client_id = client.client_id,
# language_code = language_code,
# lat = lat,
# lng = lng,
#)
assignment_id = request.GET['assignment_id']
results = client_utils.get_poll_results(
assignment_id = assignment_id,
)
result['results'] = results
result['success'] = True
except Exception, e:
status_code = 400
result['error_text'] = str(e)
client_utils.log_client_action(
client = client,
url = 'get_poll_results.json',
lat = lat,
lng = lng,
request = request,
result = result,
success = success,
)
return utils.make_response(result, status_code)
'''
| agpl-3.0 | -8,832,418,995,809,838,000 | 23.043956 | 58 | 0.5617 | false |
jhsenjaliya/incubator-airflow | airflow/migrations/versions/64de9cddf6c9_add_task_fails_journal_table.py | 59 | 1452 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add task fails journal table
Revision ID: 64de9cddf6c9
Revises: 211e584da130
Create Date: 2016-08-03 14:02:59.203021
"""
# revision identifiers, used by Alembic.
revision = '64de9cddf6c9'
down_revision = '211e584da130'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'task_fail',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('task_id', sa.String(length=250), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('execution_date', sa.DateTime(), nullable=False),
sa.Column('start_date', sa.DateTime(), nullable=True),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
)
def downgrade():
op.drop_table('task_fail')
| apache-2.0 | -7,374,129,793,547,358,000 | 30.565217 | 74 | 0.700413 | false |
GeoffreyFrogeye/syncthing-gtk | syncthing_gtk/timermanager.py | 2 | 1640 | #!/usr/bin/env python2
"""
Syncthing-GTK - Timer manager
Simple abstract class for named, cancelable timers
"""
from __future__ import unicode_literals
from gi.repository import GLib
class TimerManager(object):
def __init__(self):
self._timers = {}
def timer(self, name, delay, callback, *data, **kwdata):
"""
Runs callback after specified number of seconds. Uses
GLib.timeout_add_seconds with small wrapping to allow named
timers to be canceled by reset() call
"""
method = GLib.timeout_add_seconds
		if 0 < delay < 1:
			method = GLib.timeout_add
			# GLib.timeout_add() takes its interval in integer milliseconds
			delay = int(delay * 1000)
if name is None:
# No wrapping is needed, call GLib directly
method(delay, callback, *data, **kwdata)
else:
if name in self._timers:
# Cancel old timer
GLib.source_remove(self._timers[name])
# Create new one
self._timers[name] = method(delay, self._callback, name, callback, *data, **kwdata)
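	# A minimal usage sketch (hypothetical subclass and names, for
	# illustration only):
	#
	#   class Poller(TimerManager):
	#       def start(self):
	#           # re-arming "poll" cancels any pending "poll" timer first
	#           self.timer("poll", 5, self.on_poll)
	#       def stop(self):
	#           self.cancel_timer("poll")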
def timer_active(self, name):
""" Returns True if named timer is active """
return (name in self._timers)
def cancel_timer(self, name):
"""
Cancels named timer. Returns True on success, False if there is no such timer.
"""
if name in self._timers:
GLib.source_remove(self._timers[name])
del self._timers[name]
return True
return False
def cancel_all(self):
""" Cancels all active timers """
for x in self._timers:
GLib.source_remove(self._timers[x])
self._timers = {}
def _callback(self, name, callback, *data, **kwdata):
"""
Removes name from list of active timers and calls real callback.
"""
del self._timers[name]
callback(*data, **kwdata)
return False
| gpl-2.0 | 8,937,110,810,102,361,000 | 25.451613 | 86 | 0.676829 | false |
scigghia/account-payment | __unported__/purchase_payment/__openerp__.py | 4 | 2019 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010 Pexego S.L. (http://www.pexego.es) All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Purchase Payment Type and Terms",
"version" : "1.0",
"author" : "Pexego",
"website": "www.pexego.es",
"license" : "GPL-3",
"category" : 'Generic Modules/Sales & Purchases',
"description": """Adds payment info to the purchase process.
Adds payment type, terms, and bank account to the purchase orders.
Allows to set different default payment terms for purchases (the partners
will have payment terms and supplier payment terms).
The payment terms, payment type and bank account default values for the
purchase will be taken from the partner.
Invoices created from purchase orders, or from pickings related to purchase
orders, will inherit this payment info from the payment order.
""",
"depends" : [
"account_payment",
"account_payment_extension",
"purchase",
"stock",
],
"init_xml" : [],
"demo_xml" : [],
"update_xml" : [
"purchase_payment_view.xml",
],
"active": False,
"installable": False,
}
| agpl-3.0 | -1,617,378,283,571,020,800 | 36.388889 | 79 | 0.616147 | false |
raboof/supybot | src/utils/iter.py | 14 | 5180 | ###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from __future__ import division
import sys
import new
import random
from itertools import *
def len(iterable):
"""Returns the length of an iterator."""
i = 0
for _ in iterable:
i += 1
return i
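# Unlike itertools.cycle, trueCycle() re-evaluates its iterable on every
# pass (so it should be re-iterable, e.g. a list) and stops as soon as a
# full pass yields nothing.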
def trueCycle(iterable):
while 1:
yielded = False
for x in iterable:
yield x
yielded = True
if not yielded:
raise StopIteration
if sys.version_info < (2, 4, 0):
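    # Pre-2.4 compatibility shim.  Note that it is not a drop-in replacement
    # for itertools.groupby: the argument order is (key, iterable) and it
    # yields bare group lists rather than (key, group) pairs.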
def groupby(key, iterable):
if key is None:
key = lambda x: x
it = iter(iterable)
value = it.next() # If there are no items, this takes an early exit
oldkey = key(value)
group = [value]
for value in it:
newkey = key(value)
if newkey != oldkey:
yield group
group = []
oldkey = newkey
group.append(value)
yield group
def partition(p, iterable):
"""Partitions an iterable based on a predicate p.
Returns a (yes,no) tuple"""
no = []
yes = []
for elt in iterable:
if p(elt):
yes.append(elt)
else:
no.append(elt)
return (yes, no)
def any(p, iterable):
"""Returns true if any element in iterable satisfies predicate p."""
for elt in ifilter(p, iterable):
return True
else:
return False
def all(p, iterable):
"""Returns true if all elements in iterable satisfy predicate p."""
for elt in ifilterfalse(p, iterable):
return False
else:
return True
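# For arbitrary (non-sequence) iterables, choice() falls back to reservoir
# sampling: element n replaces the current pick with probability 1/n, which
# leaves every element equally likely after a single pass.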
def choice(iterable):
if isinstance(iterable, (list, tuple)):
return random.choice(iterable)
else:
n = 1
m = new.module('') # Guaranteed unique value.
ret = m
for x in iterable:
if random.random() < 1/n:
ret = x
n += 1
if ret is m:
raise IndexError
return ret
def flatten(iterable, strings=False):
"""Flattens a list of lists into a single list. See the test for examples.
"""
for elt in iterable:
if not strings and isinstance(elt, basestring):
yield elt
else:
try:
for x in flatten(elt):
yield x
except TypeError:
yield elt
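# e.g. list(flatten([1, [2, [3, 4]], 'ab'])) -> [1, 2, 3, 4, 'ab'];
# with strings=True, top-level strings are broken into characters instead.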
def split(isSeparator, iterable, maxsplit=-1, yieldEmpty=False):
"""split(isSeparator, iterable, maxsplit=-1, yieldEmpty=False)
Splits an iterator based on a predicate isSeparator."""
if isinstance(isSeparator, basestring):
f = lambda s: s == isSeparator
else:
f = isSeparator
acc = []
for element in iterable:
if maxsplit == 0 or not f(element):
acc.append(element)
else:
maxsplit -= 1
if acc or yieldEmpty:
yield acc
acc = []
if acc or yieldEmpty:
yield acc
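# e.g. list(split(lambda x: x == 0, [1, 2, 0, 3, 0, 4])) -> [[1, 2], [3], [4]];
# a string isSeparator is compared with == against each element instead.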
def ilen(iterable):
i = 0
for _ in iterable:
i += 1
return i
def startswith(long, short):
    longI = iter(long)
    shortI = iter(short)
    for shortElt in shortI:
        try:
            longElt = longI.next()
        except StopIteration:
            # long ran out before short did, so long cannot start with short.
            return False
        if shortElt != longElt:
            return False
    return True
def limited(iterable, limit):
i = limit
iterable = iter(iterable)
try:
while i:
yield iterable.next()
i -= 1
except StopIteration:
raise ValueError, 'Expected %s elements in iterable (%r), got %s.' % \
(limit, iterable, limit-i)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause | -5,058,259,824,459,064,000 | 29.116279 | 79 | 0.6139 | false |
boedy1996/SPARC | geonode/maps/urls.py | 1 | 4070 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
js_info_dict = {
'packages': ('geonode.maps',),
}
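# js_info_dict is the conventional kwargs dict for Django's i18n
# javascript_catalog view; the URL pattern consuming it is presumably
# registered elsewhere in the project.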
urlpatterns = patterns('geonode.maps.views',
url(r'^$',
TemplateView.as_view(
template_name='maps/map_list.html'),
name='maps_browse'),
url(r'^new$', 'new_map', name="new_map"),
url(r'^new/data$', 'new_map_json', name='new_map_json'),
url(r'^checkurl/?$', 'ajax_url_lookup'),
url(r'^snapshot/create/?$', 'snapshot_create'),
url(r'^(?P<mapid>[^/]+)$',
'map_detail',
name='map_detail'),
url(r'^(?P<mapid>[^/]+)/view$',
'map_view',
name='map_view'),
url(r'^(?P<mapid>[^/]+)/data$',
'map_json',
name='map_json'),
url(r'^(?P<mapid>[^/]+)/download$',
'map_download',
name='map_download'),
url(r'^(?P<mapid>[^/]+)/wmc$',
'map_wmc',
name='map_wmc'),
url(r'^(?P<mapid>[^/]+)/wms$',
'map_wms',
name='map_wms'),
url(r'^(?P<mapid>[^/]+)/remove$',
'map_remove',
name='map_remove'),
url(r'^(?P<mapid>[^/]+)/metadata$',
'map_metadata',
name='map_metadata'),
url(r'^(?P<mapid>[^/]+)/embed$',
'map_embed',
name='map_embed'),
url(r'^(?P<mapid>[^/]+)/history$',
'ajax_snapshot_history'),
url(
r'^(?P<mapid>[^/]+)/(?P<snapshot>[A-Za-z0-9_\-]+)/view$',
'map_view'),
url(
r'^(?P<mapid>[^/]+)/(?P<snapshot>[A-Za-z0-9_\-]+)/info$',
'map_detail'),
url(
r'^(?P<mapid>[^/]+)/(?P<snapshot>[A-Za-z0-9_\-]+)/embed/?$',
'map_embed'),
url(
r'^(?P<mapid>[^/]+)/(?P<snapshot>[A-Za-z0-9_\-]+)/data$',
'map_json',
name='map_json'),
url(r'^check/$',
'map_download_check',
name='map_download_check'),
url(r'^embed/$', 'map_embed', name='map_embed'),
url(r'^(?P<layername>[^/]*)/attributes',
'maplayer_attributes',
name='maplayer_attributes'),
# url(r'^change-poc/(?P<ids>\w+)$', 'change_poc', name='maps_change_poc'),
)
| gpl-3.0 | -3,159,580,514,517,918,700 | 45.781609 | 97 | 0.37543 | false |
Endika/odoomrp-wip | quality_control_sale_stock/__openerp__.py | 19 | 1562 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c)
# 2015 Serv. Tec. Avanzados - Pedro M. Baeza (http://www.serviciosbaeza.com)
# 2015 AvanzOsc (http://www.avanzosc.es)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Quality control - Sale stock",
"version": "8.0.1.0.0",
"author": "OdooMRP team, "
"AvanzOSC, "
"Serv. Tecnol. Avanzados - Pedro M. Baeza",
"website": "http://www.odoomrp.com",
"contributors": [
"Pedro M. Baeza <[email protected]",
],
"category": "Quality control",
"depends": [
'quality_control_stock',
'sale_stock',
],
"data": [
'security/ir.model.access.csv',
],
"installable": True,
"auto_install": True,
}
| agpl-3.0 | -860,217,897,303,152,100 | 35.325581 | 79 | 0.571703 | false |
PeterWangPo/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/feeders.py | 121 | 4477 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.common.config.committervalidator import CommitterValidator
from webkitpy.tool.grammar import pluralize
_log = logging.getLogger(__name__)
class AbstractFeeder(object):
def __init__(self, tool):
self._tool = tool
def feed(self):
raise NotImplementedError("subclasses must implement")
class CommitQueueFeeder(AbstractFeeder):
queue_name = "commit-queue"
def __init__(self, tool):
AbstractFeeder.__init__(self, tool)
self.committer_validator = CommitterValidator(self._tool)
def _update_work_items(self, item_ids):
# FIXME: This is the last use of update_work_items, the commit-queue
# should move to feeding patches one at a time like the EWS does.
self._tool.status_server.update_work_items(self.queue_name, item_ids)
_log.info("Feeding %s items %s" % (self.queue_name, item_ids))
def feed(self):
patches = self._validate_patches()
patches = self._patches_with_acceptable_review_flag(patches)
patches = sorted(patches, self._patch_cmp)
patch_ids = [patch.id() for patch in patches]
self._update_work_items(patch_ids)
def _patches_for_bug(self, bug_id):
return self._tool.bugs.fetch_bug(bug_id).commit_queued_patches(include_invalid=True)
# Filters out patches with r? or r-, only r+ or no review are OK to land.
def _patches_with_acceptable_review_flag(self, patches):
return [patch for patch in patches if patch.review() in [None, '+']]
def _validate_patches(self):
# Not using BugzillaQueries.fetch_patches_from_commit_queue() so we can reject patches with invalid committers/reviewers.
bug_ids = self._tool.bugs.queries.fetch_bug_ids_from_commit_queue()
all_patches = sum([self._patches_for_bug(bug_id) for bug_id in bug_ids], [])
return self.committer_validator.patches_after_rejecting_invalid_commiters_and_reviewers(all_patches)
def _patch_cmp(self, a, b):
# Sort first by is_rollout, then by attach_date.
# Reversing the order so that is_rollout is first.
rollout_cmp = cmp(b.is_rollout(), a.is_rollout())
if rollout_cmp != 0:
return rollout_cmp
return cmp(a.attach_date(), b.attach_date())
class EWSFeeder(AbstractFeeder):
def __init__(self, tool):
self._ids_sent_to_server = set()
AbstractFeeder.__init__(self, tool)
def feed(self):
ids_needing_review = set(self._tool.bugs.queries.fetch_attachment_ids_from_review_queue())
new_ids = ids_needing_review.difference(self._ids_sent_to_server)
_log.info("Feeding EWS (%s, %s new)" % (pluralize("r? patch", len(ids_needing_review)), len(new_ids)))
for attachment_id in new_ids: # Order doesn't really matter for the EWS.
self._tool.status_server.submit_to_ews(attachment_id)
self._ids_sent_to_server.add(attachment_id)
| bsd-3-clause | -3,685,260,596,144,805,400 | 44.683673 | 129 | 0.703373 | false |
wisdark/Empire | lib/stagers/osx/dylib.py | 11 | 3528 | from lib.common import helpers
import os
class Stager:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'dylib',
'Author': ['@xorrior'],
'Description': ('Generates a dylib.'),
'Comments': [
''
]
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Listener' : {
'Description' : 'Listener to generate stager for.',
'Required' : True,
'Value' : ''
},
'Language' : {
'Description' : 'Language of the stager to generate.',
'Required' : True,
'Value' : 'python'
},
'Architecture' : {
'Description' : 'Architecture: x86/x64',
'Required' : True,
'Value' : 'x86'
},
'SafeChecks' : {
'Description' : 'Switch. Checks for LittleSnitch or a SandBox, exit the staging process if true. Defaults to True.',
'Required' : True,
'Value' : 'True'
},
'Hijacker' : {
'Description' : 'Generate dylib to be used in a Dylib Hijack. This provides a dylib with the LC_REEXPORT_DYLIB load command. The path will serve as a placeholder.',
'Required' : True,
'Value' : 'False'
},
'OutFile' : {
'Description' : 'File to write the dylib.',
'Required' : True,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# extract all of our options
language = self.options['Language']['Value']
listenerName = self.options['Listener']['Value']
userAgent = self.options['UserAgent']['Value']
arch = self.options['Architecture']['Value']
hijacker = self.options['Hijacker']['Value']
safeChecks = self.options['SafeChecks']['Value']
if arch == "":
print helpers.color("[!] Please select a valid architecture")
return ""
# generate the launcher code
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language=language, userAgent=userAgent, safeChecks=safeChecks)
if launcher == "":
print helpers.color("[!] Error in launcher command generation.")
return ""
else:
            # str.strip() removes a *set of characters*, not a substring, so
            # chained strips only unwrap the assumed
            # 'echo "<payload>" | python &' launcher format by accident;
            # remove the wrapper pieces explicitly instead.
            launcher = launcher.replace('echo ', '', 1).replace(' | python &', '').strip().strip("\"")
dylib = self.mainMenu.stagers.generate_dylib(launcherCode=launcher, arch=arch, hijacker=hijacker)
return dylib
| bsd-3-clause | 3,498,324,674,440,841,700 | 36.136842 | 184 | 0.486111 | false |
apple/llvm-project | lldb/test/API/lang/c/set_values/TestSetValues.py | 8 | 5017 | """Test settings and readings of program variables."""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SetValuesTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers to break inside main().
self.line1 = line_number('main.c', '// Set break point #1.')
self.line2 = line_number('main.c', '// Set break point #2.')
self.line3 = line_number('main.c', '// Set break point #3.')
self.line4 = line_number('main.c', '// Set break point #4.')
self.line5 = line_number('main.c', '// Set break point #5.')
def test(self):
"""Test settings and readings of program variables."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Set breakpoints on several places to set program variables.
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line1, num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line2, num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line3, num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line4, num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line5, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# main.c:15
# Check that 'frame variable --show-types' displays the correct data
# type and value.
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(char) i = 'a'")
# Now set variable 'i' and check that it is correctly displayed.
self.runCmd("expression i = 'b'")
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(char) i = 'b'")
self.runCmd("continue")
# main.c:36
# Check that 'frame variable --show-types' displays the correct data
# type and value.
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
patterns=["\((short unsigned int|unsigned short)\) i = 33"])
# Now set variable 'i' and check that it is correctly displayed.
self.runCmd("expression i = 333")
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
patterns=["\((short unsigned int|unsigned short)\) i = 333"])
self.runCmd("continue")
# main.c:57
# Check that 'frame variable --show-types' displays the correct data
# type and value.
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(long) i = 33")
# Now set variable 'i' and check that it is correctly displayed.
self.runCmd("expression i = 33333")
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(long) i = 33333")
self.runCmd("continue")
# main.c:78
# Check that 'frame variable --show-types' displays the correct data
# type and value.
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(double) i = 2.25")
# Now set variable 'i' and check that it is correctly displayed.
self.runCmd("expression i = 1.5")
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(double) i = 1.5")
self.runCmd("continue")
# main.c:85
# Check that 'frame variable --show-types' displays the correct data
# type and value.
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(long double) i = 2.25")
# Now set variable 'i' and check that it is correctly displayed.
self.runCmd("expression i = 1.5")
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(long double) i = 1.5")
| apache-2.0 | 4,376,658,706,758,031,400 | 35.093525 | 81 | 0.580626 | false |
duramato/CouchPotatoServer | libs/dateutil/parser.py | 103 | 33736 | # -*- coding:iso-8859-1 -*-
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard Python
datetime module.
"""
from __future__ import unicode_literals
__license__ = "Simplified BSD"
import datetime
import string
import time
import collections
try:
    from io import StringIO
except ImportError:
    # Python 2 fallback
    from StringIO import StringIO
from six import text_type, binary_type, integer_types
from . import relativedelta
from . import tz
__all__ = ["parse", "parserinfo"]
# Some pointers:
#
# http://www.cl.cam.ac.uk/~mgk25/iso-time.html
# http://www.iso.ch/iso/en/prods-services/popstds/datesandtime.html
# http://www.w3.org/TR/NOTE-datetime
# http://ringmaster.arc.nasa.gov/tools/time_formats.html
# http://search.cpan.org/author/MUIR/Time-modules-2003.0211/lib/Time/ParseDate.pm
# http://stein.cshl.org/jade/distrib/docs/java.text.SimpleDateFormat.html
class _timelex(object):
def __init__(self, instream):
if isinstance(instream, text_type):
instream = StringIO(instream)
self.instream = instream
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'
'ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.numchars = '0123456789'
self.whitespace = ' \t\r\n'
self.charstack = []
self.tokenstack = []
self.eof = False
def get_token(self):
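        # State machine over characters: state None at token start, 'a' while
        # inside an alphabetic word, '0' inside a number, and 'a.'/'0.' for
        # dotted words/numbers (e.g. "a.m.", "1.5"); ambiguous dotted tokens
        # are split back apart on '.' once the token ends.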
if self.tokenstack:
return self.tokenstack.pop(0)
seenletters = False
token = None
state = None
wordchars = self.wordchars
numchars = self.numchars
whitespace = self.whitespace
while not self.eof:
if self.charstack:
nextchar = self.charstack.pop(0)
else:
nextchar = self.instream.read(1)
while nextchar == '\x00':
nextchar = self.instream.read(1)
if not nextchar:
self.eof = True
break
elif not state:
token = nextchar
if nextchar in wordchars:
state = 'a'
elif nextchar in numchars:
state = '0'
elif nextchar in whitespace:
token = ' '
break # emit token
else:
break # emit token
elif state == 'a':
seenletters = True
if nextchar in wordchars:
token += nextchar
elif nextchar == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0':
if nextchar in numchars:
token += nextchar
elif nextchar == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == 'a.':
seenletters = True
if nextchar == '.' or nextchar in wordchars:
token += nextchar
elif nextchar in numchars and token[-1] == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0.':
if nextchar == '.' or nextchar in numchars:
token += nextchar
elif nextchar in wordchars and token[-1] == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
if (state in ('a.', '0.') and
(seenletters or token.count('.') > 1 or token[-1] == '.')):
l = token.split('.')
token = l[0]
for tok in l[1:]:
self.tokenstack.append('.')
if tok:
self.tokenstack.append(tok)
return token
def __iter__(self):
return self
def __next__(self):
token = self.get_token()
if token is None:
raise StopIteration
return token
def next(self):
return self.__next__() # Python 2.x support
def split(cls, s):
return list(cls(s))
split = classmethod(split)
class _resultbase(object):
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def _repr(self, classname):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (classname, ", ".join(l))
def __repr__(self):
return self._repr(self.__class__.__name__)
class parserinfo(object):
# m from a.m/p.m, t from ISO T separator
JUMP = [" ", ".", ",", ";", "-", "/", "'",
"at", "on", "and", "ad", "m", "t", "of",
"st", "nd", "rd", "th"]
WEEKDAYS = [("Mon", "Monday"),
("Tue", "Tuesday"),
("Wed", "Wednesday"),
("Thu", "Thursday"),
("Fri", "Friday"),
("Sat", "Saturday"),
("Sun", "Sunday")]
MONTHS = [("Jan", "January"),
("Feb", "February"),
("Mar", "March"),
("Apr", "April"),
("May", "May"),
("Jun", "June"),
("Jul", "July"),
("Aug", "August"),
("Sep", "Sept", "September"),
("Oct", "October"),
("Nov", "November"),
("Dec", "December")]
HMS = [("h", "hour", "hours"),
("m", "minute", "minutes"),
("s", "second", "seconds")]
AMPM = [("am", "a"),
("pm", "p")]
UTCZONE = ["UTC", "GMT", "Z"]
PERTAIN = ["of"]
TZOFFSET = {}
def __init__(self, dayfirst = False, yearfirst = False):
self._jump = self._convert(self.JUMP)
self._weekdays = self._convert(self.WEEKDAYS)
self._months = self._convert(self.MONTHS)
self._hms = self._convert(self.HMS)
self._ampm = self._convert(self.AMPM)
self._utczone = self._convert(self.UTCZONE)
self._pertain = self._convert(self.PERTAIN)
self.dayfirst = dayfirst
self.yearfirst = yearfirst
self._year = time.localtime().tm_year
self._century = self._year // 100 * 100
def _convert(self, lst):
dct = {}
for i in range(len(lst)):
v = lst[i]
if isinstance(v, tuple):
for v in v:
dct[v.lower()] = i
else:
dct[v.lower()] = i
return dct
def jump(self, name):
return name.lower() in self._jump
def weekday(self, name):
if len(name) >= 3:
try:
return self._weekdays[name.lower()]
except KeyError:
pass
return None
def month(self, name):
if len(name) >= 3:
try:
return self._months[name.lower()] + 1
except KeyError:
pass
return None
def hms(self, name):
try:
return self._hms[name.lower()]
except KeyError:
return None
def ampm(self, name):
try:
return self._ampm[name.lower()]
except KeyError:
return None
def pertain(self, name):
return name.lower() in self._pertain
def utczone(self, name):
return name.lower() in self._utczone
def tzoffset(self, name):
if name in self._utczone:
return 0
return self.TZOFFSET.get(name)
def convertyear(self, year):
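        # Two-digit years are pivoted into the 100-year window around the
        # current year; e.g. if the current year is 2013, 4 -> 2004 and
        # 99 -> 1999.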
if year < 100:
year += self._century
if abs(year - self._year) >= 50:
if year < self._year:
year += 100
else:
year -= 100
return year
def validate(self, res):
# move to info
if res.year is not None:
res.year = self.convertyear(res.year)
        if (res.tzoffset == 0 and not res.tzname) or res.tzname == 'Z':
res.tzname = "UTC"
res.tzoffset = 0
elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
res.tzoffset = 0
return True
class parser(object):
def __init__(self, info = None):
self.info = info or parserinfo()
def parse(self, timestr, default = None,
ignoretz = False, tzinfos = None,
**kwargs):
if not default:
default = datetime.datetime.now().replace(hour = 0, minute = 0,
second = 0, microsecond = 0)
res = self._parse(timestr, **kwargs)
if res is None:
raise ValueError("unknown string format")
repl = {}
for attr in ["year", "month", "day", "hour",
"minute", "second", "microsecond"]:
value = getattr(res, attr)
if value is not None:
repl[attr] = value
ret = default.replace(**repl)
if res.weekday is not None and not res.day:
ret = ret + relativedelta.relativedelta(weekday = res.weekday)
if not ignoretz:
            if isinstance(tzinfos, collections.Callable) or (tzinfos and res.tzname in tzinfos):
if isinstance(tzinfos, collections.Callable):
tzdata = tzinfos(res.tzname, res.tzoffset)
else:
tzdata = tzinfos.get(res.tzname)
if isinstance(tzdata, datetime.tzinfo):
tzinfo = tzdata
elif isinstance(tzdata, text_type):
tzinfo = tz.tzstr(tzdata)
elif isinstance(tzdata, integer_types):
tzinfo = tz.tzoffset(res.tzname, tzdata)
else:
raise ValueError("offset must be tzinfo subclass, " \
"tz string, or int offset")
ret = ret.replace(tzinfo = tzinfo)
elif res.tzname and res.tzname in time.tzname:
ret = ret.replace(tzinfo = tz.tzlocal())
elif res.tzoffset == 0:
ret = ret.replace(tzinfo = tz.tzutc())
elif res.tzoffset:
ret = ret.replace(tzinfo = tz.tzoffset(res.tzname, res.tzoffset))
return ret
class _result(_resultbase):
__slots__ = ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond",
"tzname", "tzoffset"]
def _parse(self, timestr, dayfirst = None, yearfirst = None, fuzzy = False):
info = self.info
if dayfirst is None:
dayfirst = info.dayfirst
if yearfirst is None:
yearfirst = info.yearfirst
res = self._result()
l = _timelex.split(timestr)
try:
# year/month/day list
ymd = []
# Index of the month string in ymd
mstridx = -1
len_l = len(l)
i = 0
while i < len_l:
# Check if it's a number
try:
value_repr = l[i]
value = float(value_repr)
except ValueError:
value = None
if value is not None:
# Token is a number
len_li = len(l[i])
i += 1
if (len(ymd) == 3 and len_li in (2, 4)
and (i >= len_l or (l[i] != ':' and
info.hms(l[i]) is None))):
# 19990101T23[59]
s = l[i - 1]
res.hour = int(s[:2])
if len_li == 4:
res.minute = int(s[2:])
elif len_li == 6 or (len_li > 6 and l[i - 1].find('.') == 6):
# YYMMDD or HHMMSS[.ss]
s = l[i - 1]
if not ymd and l[i - 1].find('.') == -1:
ymd.append(info.convertyear(int(s[:2])))
ymd.append(int(s[2:4]))
ymd.append(int(s[4:]))
else:
# 19990101T235959[.59]
res.hour = int(s[:2])
res.minute = int(s[2:4])
res.second, res.microsecond = _parsems(s[4:])
elif len_li == 8:
# YYYYMMDD
s = l[i - 1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:]))
elif len_li in (12, 14):
# YYYYMMDDhhmm[ss]
s = l[i - 1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:8]))
res.hour = int(s[8:10])
res.minute = int(s[10:12])
if len_li == 14:
res.second = int(s[12:])
elif ((i < len_l and info.hms(l[i]) is not None) or
(i + 1 < len_l and l[i] == ' ' and
info.hms(l[i + 1]) is not None)):
# HH[ ]h or MM[ ]m or SS[.ss][ ]s
if l[i] == ' ':
i += 1
idx = info.hms(l[i])
while True:
if idx == 0:
res.hour = int(value)
if value % 1:
res.minute = int(60 * (value % 1))
elif idx == 1:
res.minute = int(value)
if value % 1:
res.second = int(60 * (value % 1))
elif idx == 2:
res.second, res.microsecond = \
_parsems(value_repr)
i += 1
if i >= len_l or idx == 2:
break
# 12h00
try:
value_repr = l[i]
value = float(value_repr)
except ValueError:
break
else:
i += 1
idx += 1
if i < len_l:
newidx = info.hms(l[i])
if newidx is not None:
idx = newidx
elif i == len_l and l[i - 2] == ' ' and info.hms(l[i - 3]) is not None:
# X h MM or X m SS
idx = info.hms(l[i - 3]) + 1
if idx == 1:
res.minute = int(value)
if value % 1:
res.second = int(60 * (value % 1))
elif idx == 2:
res.second, res.microsecond = \
_parsems(value_repr)
i += 1
elif i + 1 < len_l and l[i] == ':':
# HH:MM[:SS[.ss]]
res.hour = int(value)
i += 1
value = float(l[i])
res.minute = int(value)
if value % 1:
res.second = int(60 * (value % 1))
i += 1
if i < len_l and l[i] == ':':
res.second, res.microsecond = _parsems(l[i + 1])
i += 2
elif i < len_l and l[i] in ('-', '/', '.'):
sep = l[i]
ymd.append(int(value))
i += 1
if i < len_l and not info.jump(l[i]):
try:
# 01-01[-01]
ymd.append(int(l[i]))
except ValueError:
# 01-Jan[-01]
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd) - 1
else:
return None
i += 1
if i < len_l and l[i] == sep:
# We have three members
i += 1
value = info.month(l[i])
if value is not None:
ymd.append(value)
                                    assert mstridx == -1
                                    mstridx = len(ymd) - 1
else:
ymd.append(int(l[i]))
i += 1
elif i >= len_l or info.jump(l[i]):
if i + 1 < len_l and info.ampm(l[i + 1]) is not None:
# 12 am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i + 1]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i + 1]) == 0:
res.hour = 0
i += 1
else:
# Year, month or day
ymd.append(int(value))
i += 1
elif info.ampm(l[i]) is not None:
# 12am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i]) == 0:
res.hour = 0
i += 1
elif not fuzzy:
return None
else:
i += 1
continue
# Check weekday
value = info.weekday(l[i])
if value is not None:
res.weekday = value
i += 1
continue
# Check month name
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd) - 1
i += 1
if i < len_l:
if l[i] in ('-', '/'):
# Jan-01[-99]
sep = l[i]
i += 1
ymd.append(int(l[i]))
i += 1
if i < len_l and l[i] == sep:
# Jan-01-99
i += 1
ymd.append(int(l[i]))
i += 1
elif (i + 3 < len_l and l[i] == l[i + 2] == ' '
and info.pertain(l[i + 1])):
# Jan of 01
# In this case, 01 is clearly year
try:
value = int(l[i + 3])
except ValueError:
# Wrong guess
pass
else:
# Convert it here to become unambiguous
ymd.append(info.convertyear(value))
i += 4
continue
# Check am/pm
value = info.ampm(l[i])
if value is not None:
if value == 1 and res.hour < 12:
res.hour += 12
elif value == 0 and res.hour == 12:
res.hour = 0
i += 1
continue
# Check for a timezone name
if (res.hour is not None and len(l[i]) <= 5 and
res.tzname is None and res.tzoffset is None and
not [x for x in l[i] if x not in string.ascii_uppercase]):
res.tzname = l[i]
res.tzoffset = info.tzoffset(res.tzname)
i += 1
# Check for something like GMT+3, or BRST+3. Notice
# that it doesn't mean "I am 3 hours after GMT", but
# "my time +3 is GMT". If found, we reverse the
# logic so that timezone parsing code will get it
# right.
if i < len_l and l[i] in ('+', '-'):
l[i] = ('+', '-')[l[i] == '+']
res.tzoffset = None
if info.utczone(res.tzname):
# With something like GMT+3, the timezone
# is *not* GMT.
res.tzname = None
continue
# Check for a numbered timezone
if res.hour is not None and l[i] in ('+', '-'):
signal = (-1, 1)[l[i] == '+']
i += 1
len_li = len(l[i])
if len_li == 4:
# -0300
res.tzoffset = int(l[i][:2]) * 3600 + int(l[i][2:]) * 60
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
res.tzoffset = int(l[i]) * 3600 + int(l[i + 2]) * 60
i += 2
elif len_li <= 2:
# -[0]3
res.tzoffset = int(l[i][:2]) * 3600
else:
return None
i += 1
res.tzoffset *= signal
# Look for a timezone name between parenthesis
if (i + 3 < len_l and
info.jump(l[i]) and l[i + 1] == '(' and l[i + 3] == ')' and
3 <= len(l[i + 2]) <= 5 and
not [x for x in l[i + 2]
if x not in string.ascii_uppercase]):
# -0300 (BRST)
res.tzname = l[i + 2]
i += 4
continue
# Check jumps
if not (info.jump(l[i]) or fuzzy):
return None
i += 1
# Process year/month/day
len_ymd = len(ymd)
if len_ymd > 3:
# More than three members!?
return None
elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2):
# One member, or two members with a month string
if mstridx != -1:
res.month = ymd[mstridx]
del ymd[mstridx]
if len_ymd > 1 or mstridx == -1:
if ymd[0] > 31:
res.year = ymd[0]
else:
res.day = ymd[0]
elif len_ymd == 2:
# Two members with numbers
if ymd[0] > 31:
# 99-01
res.year, res.month = ymd
elif ymd[1] > 31:
# 01-99
res.month, res.year = ymd
elif dayfirst and ymd[1] <= 12:
# 13-01
res.day, res.month = ymd
else:
# 01-13
res.month, res.day = ymd
if len_ymd == 3:
# Three members
if mstridx == 0:
res.month, res.day, res.year = ymd
elif mstridx == 1:
if ymd[0] > 31 or (yearfirst and ymd[2] <= 31):
# 99-Jan-01
res.year, res.month, res.day = ymd
else:
# 01-Jan-01
                        # Give precedence to day-first, since
                        # two-digit years are usually hand-written.
res.day, res.month, res.year = ymd
elif mstridx == 2:
# WTF!?
if ymd[1] > 31:
# 01-99-Jan
res.day, res.year, res.month = ymd
else:
# 99-01-Jan
res.year, res.day, res.month = ymd
else:
if ymd[0] > 31 or \
(yearfirst and ymd[1] <= 12 and ymd[2] <= 31):
# 99-01-01
res.year, res.month, res.day = ymd
elif ymd[0] > 12 or (dayfirst and ymd[1] <= 12):
# 13-01-01
res.day, res.month, res.year = ymd
else:
# 01-13-01
res.month, res.day, res.year = ymd
except (IndexError, ValueError, AssertionError):
return None
if not info.validate(res):
return None
return res
DEFAULTPARSER = parser()
def parse(timestr, parserinfo = None, **kwargs):
# Python 2.x support: datetimes return their string presentation as
# bytes in 2.x and unicode in 3.x, so it's reasonable to expect that
# the parser will get both kinds. Internally we use unicode only.
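    # Illustrative example (a full date+time string, so the default is unused):
    #     >>> parse("Thu Sep 25 10:36:28 2003")
    #     datetime.datetime(2003, 9, 25, 10, 36, 28)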
if isinstance(timestr, binary_type):
timestr = timestr.decode()
if parserinfo:
return parser(parserinfo).parse(timestr, **kwargs)
else:
return DEFAULTPARSER.parse(timestr, **kwargs)
class _tzparser(object):
class _result(_resultbase):
__slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
"start", "end"]
class _attr(_resultbase):
__slots__ = ["month", "week", "weekday",
"yday", "jyday", "day", "time"]
def __repr__(self):
return self._repr("")
def __init__(self):
_resultbase.__init__(self)
self.start = self._attr()
self.end = self._attr()
def parse(self, tzstr):
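        # Accepts TZ-environment-variable style strings, e.g.
        # "EST5EDT,M3.2.0/2:00,M11.1.0/2:00" (POSIX M rules) or the
        # comma-separated numeric form noted below.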
res = self._result()
l = _timelex.split(tzstr)
try:
len_l = len(l)
i = 0
while i < len_l:
# BRST+3[BRDT[+2]]
j = i
while j < len_l and not [x for x in l[j]
if x in "0123456789:,-+"]:
j += 1
if j != i:
if not res.stdabbr:
offattr = "stdoffset"
res.stdabbr = "".join(l[i:j])
else:
offattr = "dstoffset"
res.dstabbr = "".join(l[i:j])
i = j
if (i < len_l and
(l[i] in ('+', '-') or l[i][0] in "0123456789")):
if l[i] in ('+', '-'):
# Yes, that's right. See the TZ variable
# documentation.
signal = (1, -1)[l[i] == '+']
i += 1
else:
signal = -1
len_li = len(l[i])
if len_li == 4:
# -0300
setattr(res, offattr,
(int(l[i][:2]) * 3600 + int(l[i][2:]) * 60) * signal)
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
setattr(res, offattr,
(int(l[i]) * 3600 + int(l[i + 2]) * 60) * signal)
i += 2
elif len_li <= 2:
# -[0]3
setattr(res, offattr,
int(l[i][:2]) * 3600 * signal)
else:
return None
i += 1
if res.dstabbr:
break
else:
break
if i < len_l:
for j in range(i, len_l):
if l[j] == ';': l[j] = ','
assert l[i] == ','
i += 1
if i >= len_l:
pass
elif (8 <= l.count(',') <= 9 and
not [y for x in l[i:] if x != ','
for y in x if y not in "0123456789"]):
# GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
for x in (res.start, res.end):
x.month = int(l[i])
i += 2
if l[i] == '-':
value = int(l[i + 1]) * -1
i += 1
else:
value = int(l[i])
i += 2
if value:
x.week = value
x.weekday = (int(l[i]) - 1) % 7
else:
x.day = int(l[i])
i += 2
x.time = int(l[i])
i += 2
if i < len_l:
if l[i] in ('-', '+'):
signal = (-1, 1)[l[i] == "+"]
i += 1
else:
signal = 1
res.dstoffset = (res.stdoffset + int(l[i])) * signal
elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
'.', '-', ':')
for y in x if y not in "0123456789"]):
for x in (res.start, res.end):
if l[i] == 'J':
# non-leap year day (1 based)
i += 1
x.jyday = int(l[i])
elif l[i] == 'M':
# month[-.]week[-.]weekday
i += 1
x.month = int(l[i])
i += 1
assert l[i] in ('-', '.')
i += 1
x.week = int(l[i])
if x.week == 5:
x.week = -1
i += 1
assert l[i] in ('-', '.')
i += 1
x.weekday = (int(l[i]) - 1) % 7
else:
# year day (zero based)
x.yday = int(l[i]) + 1
i += 1
if i < len_l and l[i] == '/':
i += 1
# start time
len_li = len(l[i])
if len_li == 4:
# -0300
x.time = (int(l[i][:2]) * 3600 + int(l[i][2:]) * 60)
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60
i += 2
if i + 1 < len_l and l[i + 1] == ':':
i += 2
x.time += int(l[i])
elif len_li <= 2:
# -[0]3
x.time = (int(l[i][:2]) * 3600)
else:
return None
i += 1
assert i == len_l or l[i] == ','
i += 1
assert i >= len_l
except (IndexError, ValueError, AssertionError):
return None
return res
DEFAULTTZPARSER = _tzparser()
def _parsetz(tzstr):
return DEFAULTTZPARSER.parse(tzstr)
def _parsems(value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
return int(i), int(f.ljust(6, "0")[:6])
# vim:ts=4:sw=4:et
| gpl-3.0 | 4,849,568,721,540,897,000 | 36.195149 | 94 | 0.358312 | false |
rishigb/bro | remindMe/venv/lib/python2.7/site-packages/wheel/bdist_wheel.py | 232 | 17441 | """
Create a wheel (.whl) distribution.
A wheel is a built archive format.
"""
import csv
import hashlib
import os
import subprocess
import warnings
import shutil
import json
import wheel
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import pkg_resources
safe_name = pkg_resources.safe_name
safe_version = pkg_resources.safe_version
from shutil import rmtree
from email.generator import Generator
from distutils.util import get_platform
from distutils.core import Command
from distutils.sysconfig import get_python_version
from distutils import log as logger
from .pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
from .util import native, open_for_csv
from .archive import archive_wheelfile
from .pkginfo import read_pkg_info, write_pkg_info
from .metadata import pkginfo_to_dict
from . import pep425tags, metadata
def safer_name(name):
return safe_name(name).replace('-', '_')
def safer_version(version):
return safe_version(version).replace('-', '_')
class bdist_wheel(Command):
description = 'create a wheel distribution'
user_options = [('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths"
"(default: false)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
('universal', None,
"make a universal wheel"
" (default: false)"),
('python-tag=', None,
"Python implementation compatibility tag"
" (default: py%s)" % get_impl_ver()[0]),
]
boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal']
def initialize_options(self):
self.bdist_dir = None
self.data_dir = None
self.plat_name = None
self.plat_tag = None
self.format = 'zip'
self.keep_temp = False
self.dist_dir = None
self.distinfo_dir = None
self.egginfo_dir = None
self.root_is_pure = None
self.skip_build = None
self.relative = False
self.owner = None
self.group = None
self.universal = False
self.python_tag = 'py' + get_impl_ver()[0]
self.plat_name_supplied = False
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'wheel')
self.data_dir = self.wheel_dist_name + '.data'
self.plat_name_supplied = self.plat_name is not None
need_options = ('dist_dir', 'plat_name', 'skip_build')
self.set_undefined_options('bdist',
*zip(need_options, need_options))
self.root_is_pure = not (self.distribution.has_ext_modules()
or self.distribution.has_c_libraries())
# Support legacy [wheel] section for setting universal
wheel = self.distribution.get_option_dict('wheel')
if 'universal' in wheel:
# please don't define this in your global configs
val = wheel['universal'][1].strip()
if val.lower() in ('1', 'true', 'yes'):
self.universal = True
@property
def wheel_dist_name(self):
"""Return distribution full name with - replaced with _"""
return '-'.join((safer_name(self.distribution.get_name()),
safer_version(self.distribution.get_version())))
def get_tag(self):
        # bdist sets self.plat_name if unset; only use it for pure-Python
        # wheels when the user supplied it explicitly.
if self.plat_name_supplied:
plat_name = self.plat_name
elif self.root_is_pure:
plat_name = 'any'
else:
plat_name = self.plat_name or get_platform()
plat_name = plat_name.replace('-', '_').replace('.', '_')
if self.root_is_pure:
if self.universal:
impl = 'py2.py3'
else:
impl = self.python_tag
tag = (impl, 'none', plat_name)
else:
impl_name = get_abbr_impl()
impl_ver = get_impl_ver()
# PEP 3149
abi_tag = str(get_abi_tag()).lower()
tag = (impl_name + impl_ver, abi_tag, plat_name)
supported_tags = pep425tags.get_supported(
supplied_platform=plat_name if self.plat_name_supplied else None)
# XXX switch to this alternate implementation for non-pure:
assert tag == supported_tags[0]
return tag
def get_archive_basename(self):
"""Return archive name without extension"""
impl_tag, abi_tag, plat_tag = self.get_tag()
archive_basename = "%s-%s-%s-%s" % (
self.wheel_dist_name,
impl_tag,
abi_tag,
plat_tag)
return archive_basename
def run(self):
build_scripts = self.reinitialize_command('build_scripts')
build_scripts.executable = 'python'
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install',
reinit_subcommands=True)
install.root = self.bdist_dir
install.compile = False
install.skip_build = self.skip_build
install.warn_dir = False
# A wheel without setuptools scripts is more cross-platform.
# Use the (undocumented) `no_ep` option to setuptools'
# install_scripts command to avoid creating entry point scripts.
install_scripts = self.reinitialize_command('install_scripts')
install_scripts.no_ep = True
# Use a custom scheme for the archive, because we have to decide
# at installation time which scheme to use.
for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'):
setattr(install,
'install_' + key,
os.path.join(self.data_dir, key))
basedir_observed = ''
if os.name == 'nt':
# win32 barfs if any of these are ''; could be '.'?
# (distutils.command.install:change_roots bug)
basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..'))
self.install_libbase = self.install_lib = basedir_observed
setattr(install,
'install_purelib' if self.root_is_pure else 'install_platlib',
basedir_observed)
logger.info("installing to %s", self.bdist_dir)
self.run_command('install')
archive_basename = self.get_archive_basename()
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
archive_root = os.path.join(
self.bdist_dir,
self._ensure_relative(install.install_base))
self.set_undefined_options(
'install_egg_info', ('target', 'egginfo_dir'))
self.distinfo_dir = os.path.join(self.bdist_dir,
'%s.dist-info' % self.wheel_dist_name)
self.egg2dist(self.egginfo_dir,
self.distinfo_dir)
self.write_wheelfile(self.distinfo_dir)
self.write_record(self.bdist_dir, self.distinfo_dir)
# Make the archive
if not os.path.exists(self.dist_dir):
os.makedirs(self.dist_dir)
wheel_name = archive_wheelfile(pseudoinstall_root, archive_root)
# Sign the archive
if 'WHEEL_TOOL' in os.environ:
subprocess.call([os.environ['WHEEL_TOOL'], 'sign', wheel_name])
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution, 'dist_files', []).append(
('bdist_wheel', get_python_version(), wheel_name))
if not self.keep_temp:
if self.dry_run:
logger.info('removing %s', self.bdist_dir)
else:
rmtree(self.bdist_dir)
def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel.__version__ + ')'):
from email.message import Message
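        # Emits the WHEEL metadata file; illustrative contents (the version
        # shown is made up):
        #   Wheel-Version: 1.0
        #   Generator: bdist_wheel (0.24.0)
        #   Root-Is-Purelib: true
        #   Tag: py2.py3-none-any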
msg = Message()
msg['Wheel-Version'] = '1.0' # of the spec
msg['Generator'] = generator
msg['Root-Is-Purelib'] = str(self.root_is_pure).lower()
# Doesn't work for bdist_wininst
impl_tag, abi_tag, plat_tag = self.get_tag()
for impl in impl_tag.split('.'):
for abi in abi_tag.split('.'):
for plat in plat_tag.split('.'):
msg['Tag'] = '-'.join((impl, abi, plat))
wheelfile_path = os.path.join(wheelfile_base, 'WHEEL')
logger.info('creating %s', wheelfile_path)
with open(wheelfile_path, 'w') as f:
Generator(f, maxheaderlen=0).flatten(msg)
def _ensure_relative(self, path):
# copied from dir_util, deleted
drive, path = os.path.splitdrive(path)
if path[0:1] == os.sep:
path = drive + path[1:]
return path
def _pkginfo_to_metadata(self, egg_info_path, pkginfo_path):
return metadata.pkginfo_to_metadata(egg_info_path, pkginfo_path)
def license_file(self):
"""Return license filename from a license-file key in setup.cfg, or None."""
metadata = self.distribution.get_option_dict('metadata')
        if 'license_file' not in metadata:
return None
return metadata['license_file'][1]
def setupcfg_requirements(self):
"""Generate requirements from setup.cfg as
('Requires-Dist', 'requirement; qualifier') tuples. From a metadata
section in setup.cfg:
[metadata]
provides-extra = extra1
extra2
requires-dist = requirement; qualifier
another; qualifier2
unqualified
Yields
('Provides-Extra', 'extra1'),
('Provides-Extra', 'extra2'),
('Requires-Dist', 'requirement; qualifier'),
('Requires-Dist', 'another; qualifier2'),
('Requires-Dist', 'unqualified')
"""
metadata = self.distribution.get_option_dict('metadata')
# our .ini parser folds - to _ in key names:
for key, title in (('provides_extra', 'Provides-Extra'),
('requires_dist', 'Requires-Dist')):
            if key not in metadata:
continue
field = metadata[key]
for line in field[1].splitlines():
line = line.strip()
if not line:
continue
yield (title, line)
def add_requirements(self, metadata_path):
"""Add additional requirements from setup.cfg to file metadata_path"""
additional = list(self.setupcfg_requirements())
if not additional: return
pkg_info = read_pkg_info(metadata_path)
if 'Provides-Extra' in pkg_info or 'Requires-Dist' in pkg_info:
warnings.warn('setup.cfg requirements overwrite values from setup.py')
del pkg_info['Provides-Extra']
del pkg_info['Requires-Dist']
for k, v in additional:
pkg_info[k] = v
write_pkg_info(metadata_path, pkg_info)
def egg2dist(self, egginfo_path, distinfo_path):
"""Convert an .egg-info directory into a .dist-info directory"""
def adios(p):
"""Appropriately delete directory, file or link."""
if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
shutil.rmtree(p)
elif os.path.exists(p):
os.unlink(p)
adios(distinfo_path)
if not os.path.exists(egginfo_path):
            # There is no egg-info. This is probably because the egg-info
            # file/directory is not named to match the distribution name used
            # to name the archive file. Check for this case and report
            # accordingly.
import glob
pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
possible = glob.glob(pat)
err = "Egg metadata expected at %s but not found" % (egginfo_path,)
if possible:
alt = os.path.basename(possible[0])
err += " (%s found - possible misnamed archive file?)" % (alt,)
raise ValueError(err)
if os.path.isfile(egginfo_path):
# .egg-info is a single file
pkginfo_path = egginfo_path
pkg_info = self._pkginfo_to_metadata(egginfo_path, egginfo_path)
os.mkdir(distinfo_path)
else:
# .egg-info is a directory
pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
pkg_info = self._pkginfo_to_metadata(egginfo_path, pkginfo_path)
# ignore common egg metadata that is useless to wheel
shutil.copytree(egginfo_path, distinfo_path,
ignore=lambda x, y: set(('PKG-INFO',
'requires.txt',
'SOURCES.txt',
'not-zip-safe',)))
# delete dependency_links if it is only whitespace
dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
with open(dependency_links_path, 'r') as dependency_links_file:
dependency_links = dependency_links_file.read().strip()
if not dependency_links:
adios(dependency_links_path)
write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)
# XXX deprecated. Still useful for current distribute/setuptools.
metadata_path = os.path.join(distinfo_path, 'METADATA')
self.add_requirements(metadata_path)
# XXX intentionally a different path than the PEP.
metadata_json_path = os.path.join(distinfo_path, 'metadata.json')
pymeta = pkginfo_to_dict(metadata_path,
distribution=self.distribution)
if 'description' in pymeta:
description_filename = 'DESCRIPTION.rst'
description_text = pymeta.pop('description')
description_path = os.path.join(distinfo_path,
description_filename)
with open(description_path, "wb") as description_file:
description_file.write(description_text.encode('utf-8'))
pymeta['extensions']['python.details']['document_names']['description'] = description_filename
# XXX heuristically copy any LICENSE/LICENSE.txt?
license = self.license_file()
if license:
license_filename = 'LICENSE.txt'
shutil.copy(license, os.path.join(self.distinfo_dir, license_filename))
pymeta['extensions']['python.details']['document_names']['license'] = license_filename
with open(metadata_json_path, "w") as metadata_json:
json.dump(pymeta, metadata_json, sort_keys=True)
adios(egginfo_path)
def write_record(self, bdist_dir, distinfo_dir):
from wheel.util import urlsafe_b64encode
record_path = os.path.join(distinfo_dir, 'RECORD')
record_relpath = os.path.relpath(record_path, bdist_dir)
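        # Each RECORD row is "relative/path,sha256=<urlsafe b64 digest>,<size>";
        # the RECORD file itself is listed with empty hash and size fields
        # (see skip() below).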
def walk():
for dir, dirs, files in os.walk(bdist_dir):
dirs.sort()
for f in sorted(files):
yield os.path.join(dir, f)
def skip(path):
"""Wheel hashes every possible file."""
return (path == record_relpath)
with open_for_csv(record_path, 'w+') as record_file:
writer = csv.writer(record_file)
for path in walk():
relpath = os.path.relpath(path, bdist_dir)
if skip(relpath):
hash = ''
size = ''
else:
with open(path, 'rb') as f:
data = f.read()
digest = hashlib.sha256(data).digest()
hash = 'sha256=' + native(urlsafe_b64encode(digest))
size = len(data)
record_path = os.path.relpath(
path, bdist_dir).replace(os.path.sep, '/')
writer.writerow((record_path, hash, size))
| mit | -786,342,773,173,166,800 | 37.501104 | 106 | 0.559601 | false |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/test/test_copy_reg.py | 129 | 4256 | import copy_reg
import unittest
from test import test_support
from test.pickletester import ExtensionSaver
class C:
pass
class WithoutSlots(object):
pass
class WithWeakref(object):
__slots__ = ('__weakref__',)
class WithPrivate(object):
__slots__ = ('__spam',)
class WithSingleString(object):
__slots__ = 'spam'
class WithInherited(WithSingleString):
__slots__ = ('eggs',)
class CopyRegTestCase(unittest.TestCase):
def test_class(self):
self.assertRaises(TypeError, copy_reg.pickle,
C, None, None)
def test_noncallable_reduce(self):
self.assertRaises(TypeError, copy_reg.pickle,
type(1), "not a callable")
def test_noncallable_constructor(self):
self.assertRaises(TypeError, copy_reg.pickle,
type(1), int, "not a callable")
def test_bool(self):
import copy
self.assertEqual(True, copy.copy(True))
def test_extension_registry(self):
mod, func, code = 'junk1 ', ' junk2', 0xabcd
e = ExtensionSaver(code)
try:
# Shouldn't be in registry now.
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func, code)
copy_reg.add_extension(mod, func, code)
# Should be in the registry.
self.assertTrue(copy_reg._extension_registry[mod, func] == code)
self.assertTrue(copy_reg._inverted_registry[code] == (mod, func))
# Shouldn't be in the cache.
self.assertNotIn(code, copy_reg._extension_cache)
# Redundant registration should be OK.
copy_reg.add_extension(mod, func, code) # shouldn't blow up
# Conflicting code.
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func, code + 1)
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func, code + 1)
# Conflicting module name.
self.assertRaises(ValueError, copy_reg.add_extension,
mod[1:], func, code )
self.assertRaises(ValueError, copy_reg.remove_extension,
mod[1:], func, code )
# Conflicting function name.
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func[1:], code)
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func[1:], code)
# Can't remove one that isn't registered at all.
if code + 1 not in copy_reg._inverted_registry:
self.assertRaises(ValueError, copy_reg.remove_extension,
mod[1:], func[1:], code + 1)
finally:
e.restore()
# Shouldn't be there anymore.
self.assertNotIn((mod, func), copy_reg._extension_registry)
# The code *may* be in copy_reg._extension_registry, though, if
# we happened to pick on a registered code. So don't check for
# that.
# Check valid codes at the limits.
for code in 1, 0x7fffffff:
e = ExtensionSaver(code)
try:
copy_reg.add_extension(mod, func, code)
copy_reg.remove_extension(mod, func, code)
finally:
e.restore()
# Ensure invalid codes blow up.
for code in -1, 0, 0x80000000L:
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func, code)
def test_slotnames(self):
self.assertEqual(copy_reg._slotnames(WithoutSlots), [])
self.assertEqual(copy_reg._slotnames(WithWeakref), [])
expected = ['_WithPrivate__spam']
self.assertEqual(copy_reg._slotnames(WithPrivate), expected)
self.assertEqual(copy_reg._slotnames(WithSingleString), ['spam'])
expected = ['eggs', 'spam']
expected.sort()
result = copy_reg._slotnames(WithInherited)
result.sort()
self.assertEqual(result, expected)
def test_main():
test_support.run_unittest(CopyRegTestCase)
if __name__ == "__main__":
test_main()
| gpl-2.0 | 5,836,294,233,127,825,000 | 34.173554 | 77 | 0.566729 | false |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/Twisted/doc/words/examples/xmpp_client.py | 1 | 2125 | # Copyright (c) 2001-2006 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys
from twisted.internet import reactor
from twisted.names.srvconnect import SRVConnector
from twisted.words.xish import domish
from twisted.words.protocols.jabber import xmlstream, client, jid
class XMPPClientConnector(SRVConnector):
def __init__(self, reactor, domain, factory):
SRVConnector.__init__(self, reactor, 'xmpp-client', domain, factory)
def pickServer(self):
host, port = SRVConnector.pickServer(self)
if not self.servers and not self.orderedServers:
            # no SRV record, fall back to the standard XMPP client port
port = 5222
return host, port
class Client(object):
def __init__(self, client_jid, secret):
f = client.XMPPClientFactory(client_jid, secret)
f.addBootstrap(xmlstream.STREAM_CONNECTED_EVENT, self.connected)
f.addBootstrap(xmlstream.STREAM_END_EVENT, self.disconnected)
f.addBootstrap(xmlstream.STREAM_AUTHD_EVENT, self.authenticated)
f.addBootstrap(xmlstream.INIT_FAILED_EVENT, self.init_failed)
connector = XMPPClientConnector(reactor, client_jid.host, f)
connector.connect()
def rawDataIn(self, buf):
print "RECV: %s" % unicode(buf, 'utf-8').encode('ascii', 'replace')
def rawDataOut(self, buf):
print "SEND: %s" % unicode(buf, 'utf-8').encode('ascii', 'replace')
def connected(self, xs):
print 'Connected.'
self.xmlstream = xs
# Log all traffic
xs.rawDataInFn = self.rawDataIn
xs.rawDataOutFn = self.rawDataOut
def disconnected(self, xs):
print 'Disconnected.'
reactor.stop()
def authenticated(self, xs):
print "Authenticated."
presence = domish.Element((None, 'presence'))
xs.send(presence)
reactor.callLater(5, xs.sendFooter)
def init_failed(self, failure):
print "Initialization failed."
print failure
self.xmlstream.sendFooter()
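# Illustrative usage: python xmpp_client.py user@example.org secret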
client_jid = jid.JID(sys.argv[1])
secret = sys.argv[2]
c = Client(client_jid, secret)
reactor.run()
| apache-2.0 | -1,757,602,001,811,589,400 | 24.914634 | 76 | 0.658353 | false |
mattclay/ansible | test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py | 13 | 14512 | """Sanity test to check integration test aliases."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import textwrap
import os
from ... import types as t
from . import (
SanityVersionNeutral,
SanityMessage,
SanityFailure,
SanitySuccess,
SanityTargets,
SANITY_ROOT,
)
from ...config import (
SanityConfig,
)
from ...target import (
filter_targets,
walk_posix_integration_targets,
walk_windows_integration_targets,
walk_integration_targets,
walk_module_targets,
)
from ..integration.cloud import (
get_cloud_platforms,
)
from ...io import (
read_text_file,
)
from ...util import (
display,
find_python,
raw_command,
)
from ...util_common import (
write_json_test_results,
ResultType,
)
class IntegrationAliasesTest(SanityVersionNeutral):
"""Sanity test to evaluate integration test aliases."""
CI_YML = '.azure-pipelines/azure-pipelines.yml'
TEST_ALIAS_PREFIX = 'shippable' # this will be changed at some point in the future
DISABLED = 'disabled/'
UNSTABLE = 'unstable/'
UNSUPPORTED = 'unsupported/'
EXPLAIN_URL = 'https://docs.ansible.com/ansible/devel/dev_guide/testing/sanity/integration-aliases.html'
TEMPLATE_DISABLED = """
The following integration tests are **disabled** [[explain]({explain_url}#disabled)]:
{tests}
Consider fixing the integration tests before or alongside changes.
"""
TEMPLATE_UNSTABLE = """
The following integration tests are **unstable** [[explain]({explain_url}#unstable)]:
{tests}
Tests may need to be restarted due to failures unrelated to changes.
"""
TEMPLATE_UNSUPPORTED = """
The following integration tests are **unsupported** [[explain]({explain_url}#unsupported)]:
{tests}
Consider running the tests manually or extending test infrastructure to add support.
"""
TEMPLATE_UNTESTED = """
The following modules have **no integration tests** [[explain]({explain_url}#untested)]:
{tests}
Consider adding integration tests before or alongside changes.
"""
ansible_only = True
def __init__(self):
super(IntegrationAliasesTest, self).__init__()
self._ci_config = {} # type: t.Dict[str, t.Any]
self._ci_test_groups = {} # type: t.Dict[str, t.List[int]]
@property
def can_ignore(self): # type: () -> bool
"""True if the test supports ignore entries."""
return False
@property
def no_targets(self): # type: () -> bool
"""True if the test does not use test targets. Mutually exclusive with all_targets."""
return True
def load_ci_config(self, args): # type: (SanityConfig) -> t.Dict[str, t.Any]
"""Load and return the CI YAML configuration."""
if not self._ci_config:
self._ci_config = self.load_yaml(args, self.CI_YML)
return self._ci_config
@property
def ci_test_groups(self): # type: () -> t.Dict[str, t.List[int]]
"""Return a dictionary of CI test names and their group(s)."""
if not self._ci_test_groups:
test_groups = {}
for stage in self._ci_config['stages']:
for job in stage['jobs']:
if job.get('template') != 'templates/matrix.yml':
continue
parameters = job['parameters']
groups = parameters.get('groups', [])
test_format = parameters.get('testFormat', '{0}')
test_group_format = parameters.get('groupFormat', '{0}/{{1}}')
for target in parameters['targets']:
test = target.get('test') or target.get('name')
if groups:
tests_formatted = [test_group_format.format(test_format).format(test, group) for group in groups]
else:
tests_formatted = [test_format.format(test)]
for test_formatted in tests_formatted:
parts = test_formatted.split('/')
key = parts[0]
if key in ('sanity', 'units'):
continue
try:
group = int(parts[-1])
except ValueError:
continue
if group < 1 or group > 99:
continue
group_set = test_groups.setdefault(key, set())
group_set.add(group)
self._ci_test_groups = dict((key, sorted(value)) for key, value in test_groups.items())
return self._ci_test_groups
def format_test_group_alias(self, name, fallback=''):
"""
:type name: str
:type fallback: str
:rtype: str
"""
group_numbers = self.ci_test_groups.get(name, None)
if group_numbers:
if min(group_numbers) != 1:
display.warning('Min test group "%s" in %s is %d instead of 1.' % (name, self.CI_YML, min(group_numbers)), unique=True)
if max(group_numbers) != len(group_numbers):
display.warning('Max test group "%s" in %s is %d instead of %d.' % (name, self.CI_YML, max(group_numbers), len(group_numbers)), unique=True)
if max(group_numbers) > 9:
alias = '%s/%s/group(%s)/' % (self.TEST_ALIAS_PREFIX, name, '|'.join(str(i) for i in range(min(group_numbers), max(group_numbers) + 1)))
elif len(group_numbers) > 1:
alias = '%s/%s/group[%d-%d]/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers), max(group_numbers))
else:
alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers))
elif fallback:
alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, fallback, 1)
else:
raise Exception('cannot find test group "%s" in %s' % (name, self.CI_YML))
return alias
def load_yaml(self, args, path): # type: (SanityConfig, str) -> t.Dict[str, t.Any]
"""Load the specified YAML file and return the contents."""
yaml_to_json_path = os.path.join(SANITY_ROOT, self.name, 'yaml_to_json.py')
python = find_python(args.python_version)
return json.loads(raw_command([python, yaml_to_json_path], data=read_text_file(path), capture=True)[0])
def test(self, args, targets):
"""
:type args: SanityConfig
:type targets: SanityTargets
:rtype: TestResult
"""
if args.explain:
return SanitySuccess(self.name)
if not os.path.isfile(self.CI_YML):
return SanityFailure(self.name, messages=[SanityMessage(
message='file missing',
path=self.CI_YML,
)])
results = dict(
comments=[],
labels={},
)
self.load_ci_config(args)
self.check_changes(args, results)
write_json_test_results(ResultType.BOT, 'data-sanity-ci.json', results)
messages = []
messages += self.check_posix_targets(args)
messages += self.check_windows_targets()
if messages:
return SanityFailure(self.name, messages=messages)
return SanitySuccess(self.name)
def check_posix_targets(self, args):
"""
:type args: SanityConfig
:rtype: list[SanityMessage]
"""
posix_targets = tuple(walk_posix_integration_targets())
clouds = get_cloud_platforms(args, posix_targets)
cloud_targets = ['cloud/%s/' % cloud for cloud in clouds]
all_cloud_targets = tuple(filter_targets(posix_targets, ['cloud/'], include=True, directories=False, errors=False))
invalid_cloud_targets = tuple(filter_targets(all_cloud_targets, cloud_targets, include=False, directories=False, errors=False))
messages = []
for target in invalid_cloud_targets:
for alias in target.aliases:
if alias.startswith('cloud/') and alias != 'cloud/':
if any(alias.startswith(cloud_target) for cloud_target in cloud_targets):
continue
messages.append(SanityMessage('invalid alias `%s`' % alias, '%s/aliases' % target.path))
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['cloud/', '%s/generic/' % self.TEST_ALIAS_PREFIX], include=False,
directories=False, errors=False)),
find=self.format_test_group_alias('linux').replace('linux', 'posix'),
find_incidental=['%s/posix/incidental/' % self.TEST_ALIAS_PREFIX],
)
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['%s/generic/' % self.TEST_ALIAS_PREFIX], include=True, directories=False,
errors=False)),
find=self.format_test_group_alias('generic'),
)
for cloud in clouds:
if cloud == 'httptester':
find = self.format_test_group_alias('linux').replace('linux', 'posix')
find_incidental = ['%s/posix/incidental/' % self.TEST_ALIAS_PREFIX]
else:
find = self.format_test_group_alias(cloud, 'generic')
find_incidental = ['%s/%s/incidental/' % (self.TEST_ALIAS_PREFIX, cloud), '%s/cloud/incidental/' % self.TEST_ALIAS_PREFIX]
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['cloud/%s/' % cloud], include=True, directories=False, errors=False)),
find=find,
find_incidental=find_incidental,
)
return messages
def check_windows_targets(self):
"""
:rtype: list[SanityMessage]
"""
windows_targets = tuple(walk_windows_integration_targets())
messages = []
messages += self.check_ci_group(
targets=windows_targets,
find=self.format_test_group_alias('windows'),
find_incidental=['%s/windows/incidental/' % self.TEST_ALIAS_PREFIX],
)
return messages
def check_ci_group(self, targets, find, find_incidental=None):
"""
:type targets: tuple[CompletionTarget]
:type find: str
:type find_incidental: list[str] | None
:rtype: list[SanityMessage]
"""
all_paths = set(target.path for target in targets)
supported_paths = set(target.path for target in filter_targets(targets, [find], include=True, directories=False, errors=False))
unsupported_paths = set(target.path for target in filter_targets(targets, [self.UNSUPPORTED], include=True, directories=False, errors=False))
if find_incidental:
incidental_paths = set(target.path for target in filter_targets(targets, find_incidental, include=True, directories=False, errors=False))
else:
incidental_paths = set()
unassigned_paths = all_paths - supported_paths - unsupported_paths - incidental_paths
conflicting_paths = supported_paths & unsupported_paths
unassigned_message = 'missing alias `%s` or `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
conflicting_message = 'conflicting alias `%s` and `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
messages = []
for path in unassigned_paths:
messages.append(SanityMessage(unassigned_message, '%s/aliases' % path))
for path in conflicting_paths:
messages.append(SanityMessage(conflicting_message, '%s/aliases' % path))
return messages
def check_changes(self, args, results):
"""
:type args: SanityConfig
:type results: dict[str, any]
"""
integration_targets = list(walk_integration_targets())
module_targets = list(walk_module_targets())
integration_targets_by_name = dict((target.name, target) for target in integration_targets)
module_names_by_path = dict((target.path, target.module) for target in module_targets)
disabled_targets = []
unstable_targets = []
unsupported_targets = []
for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]:
for target in args.metadata.change_description.focused_command_targets[command]:
if self.DISABLED in integration_targets_by_name[target].aliases:
disabled_targets.append(target)
elif self.UNSTABLE in integration_targets_by_name[target].aliases:
unstable_targets.append(target)
elif self.UNSUPPORTED in integration_targets_by_name[target].aliases:
unsupported_targets.append(target)
untested_modules = []
for path in args.metadata.change_description.no_integration_paths:
module = module_names_by_path.get(path)
if module:
untested_modules.append(module)
comments = [
self.format_comment(self.TEMPLATE_DISABLED, disabled_targets),
self.format_comment(self.TEMPLATE_UNSTABLE, unstable_targets),
self.format_comment(self.TEMPLATE_UNSUPPORTED, unsupported_targets),
self.format_comment(self.TEMPLATE_UNTESTED, untested_modules),
]
comments = [comment for comment in comments if comment]
labels = dict(
needs_tests=bool(untested_modules),
disabled_tests=bool(disabled_targets),
unstable_tests=bool(unstable_targets),
unsupported_tests=bool(unsupported_targets),
)
results['comments'] += comments
results['labels'].update(labels)
def format_comment(self, template, targets):
"""
:type template: str
:type targets: list[str]
:rtype: str | None
"""
if not targets:
return None
tests = '\n'.join('- %s' % target for target in targets)
data = dict(
explain_url=self.EXPLAIN_URL,
tests=tests,
)
message = textwrap.dedent(template).strip().format(**data)
return message
| gpl-3.0 | 3,838,539,741,865,526,000 | 34.743842 | 156 | 0.582552 | false |
nevillehay/gbi-nevillehay | src/error.py | 2 | 1775 | #!/usr/local/bin/python
# Copyright (c) 2011 GhostBSD
#
# See COPYING for licence terms.
import gtk
from subprocess import Popen
lyrics = """
Please report to
, and be sure
to provide /tmp/.pc-sysinstall/pc-sysinstall.log.
"""
class PyApp:
def on_reboot(self, widget):
Popen('sudo reboot', shell=True)
gtk.main_quit()
def on_close(self, widget):
gtk.main_quit()
def __init__(self):
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.set_position(gtk.WIN_POS_CENTER)
window.set_border_width(8)
window.connect("destroy", gtk.main_quit)
window.set_title("Installation Error")
# window.set_icon_from_file("/usr/local/lib/gbi/logo.png")
box1 = gtk.VBox(False, 0)
window.add(box1)
box1.show()
box2 = gtk.VBox(False, 10)
box2.set_border_width(10)
box1.pack_start(box2, True, True, 0)
box2.show()
title = gtk.Label()
title.set_use_markup(True)
title.set_markup('<b><span size="larger">Installation has failed!</span></b>')
label = gtk.Label(lyrics)
label.set_use_markup(True)
label.set_markup("Please report the issue to <a href='http://issues.ghostbsd.org/my_view_page.php'>GhostBSD issue system</a>,\nand be sure to provide tmp/.pc-sysinstall/pc-sysinstall.log.")
box2.pack_start(title)
box2.pack_start(label)
box2 = gtk.HBox(False, 10)
box2.set_border_width(5)
box1.pack_start(box2, False, True, 0)
box2.show()
table = gtk.Table(1, 2, True)
ok = gtk.Button("Ok")
ok.connect("clicked", self.on_close)
table.attach(ok, 0, 2, 0, 1)
box2.pack_start(table)
window.show_all()
PyApp()
gtk.main()
| bsd-3-clause | -6,885,649,683,964,739,000 | 28.583333 | 197 | 0.604507 | false |
DirtyPiece/dancestudio | Build/Tools/Python27/Lib/sgmllib.py | 306 | 17884 | """A parser for SGML, using the derived class as a static DTD."""
# XXX This only supports those SGML features used by HTML.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special). RCDATA is
# not supported at all.
from warnings import warnpy3k
warnpy3k("the sgmllib module has been removed in Python 3.0",
stacklevel=2)
del warnpy3k
import markupbase
import re
__all__ = ["SGMLParser", "SGMLParseError"]
# Regular expressions used for parsing
interesting = re.compile('[&<]')
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
'<([a-zA-Z][^<>]*|'
'/([a-zA-Z][^<>]*)?|'
'![^<>]*)?')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#([0-9]+)[^0-9]')
starttagopen = re.compile('<[>a-zA-Z]')
shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
piclose = re.compile('>')
endbracket = re.compile('[<>]')
tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?')
class SGMLParseError(RuntimeError):
"""Exception raised for all parse errors."""
pass
# SGML parser base class -- find tags and call handler functions.
# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods
# with special names to handle tags: start_foo and end_foo to handle
# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
# (Tags are converted to lower case for this purpose.) The data
# between tags is passed to the parser by calling self.handle_data()
# with some data as argument (the data may be split up in arbitrary
# chunks). Entity references are passed by calling
# self.handle_entityref() with the entity reference as argument.
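# A minimal illustrative subclass (hypothetical; it follows the handler-name
# convention described above):
#
#     class TitleParser(SGMLParser):
#         def reset(self):
#             SGMLParser.reset(self)
#             self.in_title = 0
#             self.title = ''
#         def start_title(self, attrs):
#             self.in_title = 1
#         def end_title(self):
#             self.in_title = 0
#         def handle_data(self, data):
#             if self.in_title:
#                 self.title = self.title + data
#
#     p = TitleParser()
#     p.feed('<title>Hello</title>')
#     p.close()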
class SGMLParser(markupbase.ParserBase):
# Definition of entities -- derived classes may override
entity_or_charref = re.compile('&(?:'
'([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)'
')(;?)')
def __init__(self, verbose=0):
"""Initialize and reset this instance."""
self.verbose = verbose
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.__starttag_text = None
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
markupbase.ParserBase.reset(self)
def setnomoretags(self):
"""Enter literal mode (CDATA) till EOF.
Intended for derived classes only.
"""
self.nomoretags = self.literal = 1
def setliteral(self, *args):
"""Enter literal mode (CDATA).
Intended for derived classes only.
"""
self.literal = 1
def feed(self, data):
"""Feed some data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n'). (This just saves the text,
all the processing is done by goahead().)
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle the remaining data."""
self.goahead(1)
def error(self, message):
raise SGMLParseError(message)
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.nomoretags:
self.handle_data(rawdata[i:n])
i = n
break
match = interesting.search(rawdata, i)
if match: j = match.start()
else: j = n
if i < j:
self.handle_data(rawdata[i:j])
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
i = k
continue
if rawdata.startswith("</", i):
k = self.parse_endtag(i)
if k < 0: break
i = k
self.literal = 0
continue
if self.literal:
if n > (i + 1):
self.handle_data("<")
i = i+1
else:
# incomplete
break
continue
if rawdata.startswith("<!--", i):
# Strictly speaking, a comment is --.*--
# within a declaration tag <!...>.
# This should be removed,
# and comments handled only in parse_declaration.
k = self.parse_comment(i)
if k < 0: break
i = k
continue
if rawdata.startswith("<?", i):
k = self.parse_pi(i)
if k < 0: break
i = i+k
continue
if rawdata.startswith("<!", i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
k = self.parse_declaration(i)
if k < 0: break
i = k
continue
elif rawdata[i] == '&':
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
match = charref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_charref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
else:
self.error('neither < nor & ??')
# We get here only if incomplete matches but
# nothing else
match = incomplete.match(rawdata, i)
if not match:
self.handle_data(rawdata[i])
i = i+1
continue
j = match.end(0)
if j == n:
break # Really incomplete
self.handle_data(rawdata[i:j])
i = j
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = n
self.rawdata = rawdata[i:]
# XXX if end: check for empty stack
# Extensions for the DOCTYPE scanner:
_decl_otherchars = '='
# Internal -- parse processing instr, return length or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
if rawdata[i:i+2] != '<?':
self.error('unexpected call to parse_pi()')
match = piclose.search(rawdata, i+2)
if not match:
return -1
j = match.start(0)
self.handle_pi(rawdata[i+2: j])
j = match.end(0)
return j-i
def get_starttag_text(self):
return self.__starttag_text
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
start_pos = i
rawdata = self.rawdata
if shorttagopen.match(rawdata, i):
# SGML shorthand: <tag/data/ == <tag>data</tag>
# XXX Can data contain &... (entity or char refs)?
# XXX Can data contain < or > (tag characters)?
# XXX Can there be whitespace before the first /?
match = shorttag.match(rawdata, i)
if not match:
return -1
tag, data = match.group(1, 2)
self.__starttag_text = '<%s/' % tag
tag = tag.lower()
k = match.end(0)
self.finish_shorttag(tag, data)
self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
return k
# XXX The following should skip matching quotes (' or ")
# As a shortcut way to exit, this isn't so bad, but shouldn't
# be used to locate the actual end of the start tag since the
# < or > characters may be embedded in an attribute value.
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
if rawdata[i:i+2] == '<>':
# SGML shorthand: <> == <last open tag seen>
k = j
tag = self.lasttag
else:
match = tagfind.match(rawdata, i+1)
if not match:
self.error('unexpected call to parse_starttag')
k = match.end(0)
tag = rawdata[i+1:k].lower()
self.lasttag = tag
while k < j:
match = attrfind.match(rawdata, k)
if not match: break
attrname, rest, attrvalue = match.group(1, 2, 3)
if not rest:
attrvalue = attrname
else:
if (attrvalue[:1] == "'" == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
# strip quotes
attrvalue = attrvalue[1:-1]
attrvalue = self.entity_or_charref.sub(
self._convert_ref, attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = match.end(0)
if rawdata[j] == '>':
j = j+1
self.__starttag_text = rawdata[start_pos:j]
self.finish_starttag(tag, attrs)
return j
# Internal -- convert entity or character reference
def _convert_ref(self, match):
if match.group(2):
return self.convert_charref(match.group(2)) or \
'&#%s%s' % match.groups()[1:]
elif match.group(3):
return self.convert_entityref(match.group(1)) or \
'&%s;' % match.group(1)
else:
return '&%s' % match.group(1)
# Internal -- parse endtag
def parse_endtag(self, i):
rawdata = self.rawdata
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
tag = rawdata[i+2:j].strip().lower()
if rawdata[j] == '>':
j = j+1
self.finish_endtag(tag)
return j
# Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
def finish_shorttag(self, tag, data):
self.finish_starttag(tag, [])
self.handle_data(data)
self.finish_endtag(tag)
# Internal -- finish processing of start tag
# Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
def finish_starttag(self, tag, attrs):
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
self.unknown_starttag(tag, attrs)
return -1
else:
self.handle_starttag(tag, method, attrs)
return 0
else:
self.stack.append(tag)
self.handle_starttag(tag, method, attrs)
return 1
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
if not tag:
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
if tag not in self.stack:
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
self.unknown_endtag(tag)
else:
self.report_unbalanced(tag)
return
found = len(self.stack)
for i in range(found):
if self.stack[i] == tag: found = i
while len(self.stack) > found:
tag = self.stack[-1]
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
method = None
if method:
self.handle_endtag(tag, method)
else:
self.unknown_endtag(tag)
del self.stack[-1]
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- report an unbalanced </...> tag.
def report_unbalanced(self, tag):
if self.verbose:
print '*** Unbalanced </' + tag + '>'
print '*** Stack:', self.stack
def convert_charref(self, name):
"""Convert character reference, may be overridden."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127:
return
return self.convert_codepoint(n)
def convert_codepoint(self, codepoint):
return chr(codepoint)
def handle_charref(self, name):
"""Handle character reference, no need to override."""
replacement = self.convert_charref(name)
if replacement is None:
self.unknown_charref(name)
else:
self.handle_data(replacement)
# Definition of entities -- derived classes may override
entitydefs = \
{'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
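    # Editorial sketch (not part of the original module): rather than
    # overriding convert_entityref(), a subclass can extend this mapping:
    #   class MyParser(SGMLParser):
    #       entitydefs = dict(SGMLParser.entitydefs, nbsp=' ', copy='(c)')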
def convert_entityref(self, name):
"""Convert entity references.
        As an alternative to overriding this method, one can tailor the
results by setting up the self.entitydefs mapping appropriately.
"""
table = self.entitydefs
if name in table:
return table[name]
else:
return
def handle_entityref(self, name):
"""Handle entity references, no need to override."""
replacement = self.convert_entityref(name)
if replacement is None:
self.unknown_entityref(name)
else:
self.handle_data(replacement)
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle declaration, could be overridden
def handle_decl(self, decl):
pass
# Example -- handle processing instruction, could be overridden
def handle_pi(self, data):
pass
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, ref): pass
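# Editorial example (not part of the original module): a minimal subclass
# illustrating the start_/end_/do_ handler dispatch described above. The tag
# names and sample markup are chosen for illustration only.
class _ExampleLinkParser(SGMLParser):
    """Collects <a href=...> targets and the text inside <title>."""
    def reset(self):
        SGMLParser.reset(self)
        self.links = []
        self.title = ''
        self._in_title = 0
    def start_a(self, attrs):
        # called for <a ...>; attrs is a list of (name, value) pairs
        for name, value in attrs:
            if name == 'href':
                self.links.append(value)
    def end_a(self):
        # called for </a>; required so <a> is treated as a balanced tag
        pass
    def start_title(self, attrs):
        self._in_title = 1
    def end_title(self):
        self._in_title = 0
    def handle_data(self, data):
        if self._in_title:
            self.title = self.title + data
# Usage sketch:
#   p = _ExampleLinkParser()
#   p.feed('<title>Demo</title><a href="x.html">x</a>')
#   p.close()
#   # p.links == ['x.html'], p.title == 'Demo'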
class TestSGMLParser(SGMLParser):
def __init__(self, verbose=0):
self.testdata = ""
SGMLParser.__init__(self, verbose)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(repr(self.testdata)) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', repr(data)
def handle_comment(self, data):
self.flush()
r = repr(data)
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs:
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def unknown_decl(self, data):
self.flush()
print '*** unknown decl: [' + data + ']'
def close(self):
SGMLParser.close(self)
self.flush()
def test(args = None):
import sys
if args is None:
args = sys.argv[1:]
if args and args[0] == '-s':
args = args[1:]
klass = SGMLParser
else:
klass = TestSGMLParser
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
for c in data:
x.feed(c)
x.close()
if __name__ == '__main__':
test()
| mit | -6,998,580,800,550,058,000 | 31.339964 | 79 | 0.503019 | false |
SickGear/SickGear | lib/js2py/constructors/jsfunction.py | 9 | 1399 | from ..base import *
try:
from ..translators.translator import translate_js
except:
pass
@Js
def Function():
# convert arguments to python list of strings
a = [e.to_string().value for e in arguments.to_list()]
body = ';'
args = ()
if len(a):
body = '%s;' % a[-1]
args = a[:-1]
# translate this function to js inline function
js_func = '(function (%s) {%s})' % (','.join(args), body)
# now translate js inline to python function
py_func = translate_js(js_func, '')
# add set func scope to global scope
# a but messy solution but works :)
globals()['var'] = PyJs.GlobalObject
# define py function and return it
temp = executor(py_func, globals())
temp.source = '{%s}' % body
temp.func_name = 'anonymous'
return temp
def executor(f, glob):
exec (f, globals())
return globals()['PyJs_anonymous_0_']
#new statement simply calls Function
Function.create = Function
#set constructor property inside FunctionPrototype
fill_in_props(FunctionPrototype, {'constructor': Function}, default_attrs)
#attach prototype to Function constructor
Function.define_own_property(
'prototype', {
'value': FunctionPrototype,
'enumerable': False,
'writable': False,
'configurable': False
})
#Fix Function length (its 0 and should be 1)
Function.own['length']['value'] = Js(1)
| gpl-3.0 | -547,553,859,711,474,100 | 25.903846 | 74 | 0.637598 | false |
agry/NGECore2 | scripts/mobiles/dynamicgroups/dantooine_kunga.py | 2 | 1024 | # Spawn Group file created with PSWG Planetary Spawn Tool
import sys
from java.util import Vector
from services.spawn import DynamicSpawnGroup
from services.spawn import MobileTemplate
def addDynamicGroup(core):
dynamicGroup = DynamicSpawnGroup()
mobileTemplates = Vector()
mobileTemplates.add('kunga_clan_leader')
mobileTemplates.add('kunga_clan_primalist')
mobileTemplates.add('kunga_harvester')
mobileTemplates.add('kunga_herbalist')
mobileTemplates.add('kunga_hunter')
mobileTemplates.add('kunga_loreweaver')
mobileTemplates.add('kunga_rockshaper')
mobileTemplates.add('kunga_scout')
mobileTemplates.add('kunga_shaman')
mobileTemplates.add('kunga_soothsayer')
mobileTemplates.add('kunga_tribesman')
mobileTemplates.add('kunga_warrior')
dynamicGroup.setMobiles(mobileTemplates)
dynamicGroup.setGroupMembersNumber(-3)
dynamicGroup.setName('dantooine_kunga')
dynamicGroup.setMaxSpawns(-1)
dynamicGroup.setMinSpawnDistance(150)
core.spawnService.addDynamicGroup('dantooine_kunga', dynamicGroup)
return
| lgpl-3.0 | -4,403,630,866,965,822,500 | 35.571429 | 67 | 0.81543 | false |
phobson/pycvc | setup.py | 2 | 1510 | # Setup script for the pycvc package
#
# Usage: python setup.py install
#
import os
from setuptools import setup, find_packages
DESCRIPTION = "pycvc: Analyze CVC stormwater data"
LONG_DESCRIPTION = DESCRIPTION
NAME = "pycvc"
VERSION = "0.3.0"
AUTHOR = "Paul Hobson (Geosyntec Consultants)"
AUTHOR_EMAIL = "[email protected]"
URL = ""
DOWNLOAD_URL = ""
LICENSE = "BSD 3-clause"
PACKAGES = find_packages(exclude=[])
PLATFORMS = "Python 3.4 and later."
CLASSIFIERS = [
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Intended Audience :: Science/Research",
"Topic :: Formats and Protocols :: Data Formats",
"Topic :: Scientific/Engineering :: Earth Sciences",
"Topic :: Software Development :: Libraries :: Python Modules",
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
]
INSTALL_REQUIRES = ['wqio', 'pybmpdb', 'pynsqd']
PACKAGE_DATA = {
'pycvc.tex': ['*.tex'],
'pycvc.tests.testdata': ['*.csv', '*.accdb'],
'pycvc.tests.baseline_images.viz_tests': ['*.png'],
}
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
download_url=DOWNLOAD_URL,
license=LICENSE,
packages=PACKAGES,
package_data=PACKAGE_DATA,
platforms=PLATFORMS,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
zip_safe=False
)
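# Editorial note (not part of the original file): with this script the
# package can typically be installed for development via
#   pip install -e .
# which pulls INSTALL_REQUIRES ('wqio', 'pybmpdb', 'pynsqd') from PyPI.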
| bsd-3-clause | -4,860,293,613,332,010,000 | 27.490566 | 67 | 0.675497 | false |
xen0l/ansible | lib/ansible/modules/network/f5/bigip_hostname.py | 14 | 7419 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_hostname
short_description: Manage the hostname of a BIG-IP
description:
- Manage the hostname of a BIG-IP.
version_added: 2.3
options:
hostname:
description:
- Hostname of the BIG-IP host.
required: True
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Matthew Lam (@mryanlam)
'''
EXAMPLES = r'''
- name: Set the hostname of the BIG-IP
bigip_hostname:
hostname: bigip.localhost.localdomain
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
hostname:
description: The new hostname of the device
returned: changed
type: string
sample: big-ip01.internal
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_attributes = ['hostname']
updatables = ['hostname']
returnables = ['hostname']
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
@property
def hostname(self):
if self._values['hostname'] is None:
return None
return str(self._values['hostname'])
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
pass
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = ApiParameters()
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
result = dict()
try:
changed = self.update()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def read_current_from_device(self):
resource = self.client.api.tm.sys.global_settings.load()
result = resource.attrs
collection = self.client.api.tm.cm.devices.get_collection()
self_device = next((x.name for x in collection if x.selfDevice == "true"), None)
result['self_device'] = self_device
return ApiParameters(params=result)
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.sys.global_settings.load()
resource.modify(**params)
if self.have.self_device:
self.client.api.tm.cm.devices.exec_cmd(
'mv', name=self.have.self_device, target=self.want.hostname
)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
hostname=dict(
required=True
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as e:
cleanup_tokens(client)
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 | -4,969,176,442,007,283,000 | 27.425287 | 91 | 0.628656 | false |
Dioptas/Dioptas | dioptas/controller/integration/ImageController.py | 1 | 49794 | # -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher ([email protected])
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from functools import partial
import numpy as np
from PIL import Image
from qtpy import QtWidgets, QtCore
from ...widgets.UtilityWidgets import open_file_dialog, open_files_dialog, save_file_dialog
# imports for type hinting in PyCharm -- DO NOT DELETE
from ...widgets.integration import IntegrationWidget
from ...model.DioptasModel import DioptasModel
from ...model.util.Pattern import Pattern
from ...model.util.HelperModule import get_partial_index, get_partial_value
from .EpicsController import EpicsController
class ImageController(object):
"""
The ImageController manages the Image actions in the Integration Window. It connects the file actions, as
well as interaction with the image_view.
"""
def __init__(self, widget, dioptas_model):
"""
:param widget: Reference to IntegrationView
:param dioptas_model: Reference to DioptasModel object
:type widget: IntegrationWidget
:type dioptas_model: DioptasModel
"""
self.widget = widget
self.model = dioptas_model
self.epics_controller = EpicsController(self.widget, self.model)
self.img_docked = True
self.view_mode = 'normal' # modes available: normal, alternative
self.roi_active = False
self.clicked_tth = None
self.clicked_azi = None
self.vertical_splitter_alternative_state = None
self.vertical_splitter_normal_state = None
self.horizontal_splitter_alternative_state = None
self.horizontal_splitter_normal_state = None
self.initialize()
self.create_signals()
self.create_mouse_behavior()
def initialize(self):
self.update_img_control_widget()
self.plot_img()
self.plot_mask()
self.widget.img_widget.auto_level()
def plot_img(self, auto_scale=None):
"""
Plots the current image loaded in self.img_data.
:param auto_scale:
            Determines if intensities should be auto-scaled. If value is None, it will use the parameter saved in
            the object (self._auto_scale)
"""
if auto_scale is None:
auto_scale = self.widget.img_autoscale_btn.isChecked()
if self.widget.integration_image_widget.show_background_subtracted_img_btn.isChecked():
self.widget.img_widget.plot_image(self.model.img_model.img_data, False)
else:
self.widget.img_widget.plot_image(self.model.img_model.raw_img_data, False)
if auto_scale:
self.widget.img_widget.auto_level()
def plot_cake(self, auto_scale=None):
"""
Plots the cake saved in the calibration data
:param auto_scale:
Determines if the intensity should be auto-scaled. If value is None it will use the parameter saved in the
object (self._auto_scale)
"""
if auto_scale is None:
auto_scale = self.widget.img_autoscale_btn.isChecked()
shift_amount = self.widget.cake_shift_azimuth_sl.value()
self.widget.cake_widget.plot_image(np.roll(self.model.cake_data, shift_amount, axis=0))
self.plot_cake_integral()
self.update_cake_axes_range()
if auto_scale:
self.widget.cake_widget.auto_level()
def plot_cake_integral(self, tth=None):
if not self.widget.cake_widget.cake_integral_plot.isVisible() or self.clicked_tth is None:
return
if tth is None:
tth = self.clicked_tth
x, y = self.model.calibration_model.cake_integral(
tth,
self.widget.integration_control_widget.integration_options_widget.cake_integral_width_sb.value()
)
shift_amount = self.widget.cake_shift_azimuth_sl.value()
self.widget.cake_widget.plot_cake_integral(x, np.roll(y, shift_amount))
def save_cake_integral(self):
img_filename, _ = os.path.splitext(os.path.basename(self.model.img_model.filename))
filename = save_file_dialog(
self.widget, "Save Cake Integral Data.",
os.path.join(self.model.working_directories['pattern'],
img_filename + '.xy'))
if filename != '':
integral_pattern = Pattern(*self.widget.cake_widget.cake_integral_item.getData())
integral_pattern.save(filename)
def plot_mask(self):
"""
Plots the mask data.
"""
if self.model.use_mask and self.widget.img_mode == 'Image':
self.widget.img_widget.plot_mask(self.model.mask_model.get_img())
self.widget.img_mask_btn.setChecked(True)
else:
self.widget.img_widget.plot_mask(np.zeros(self.model.mask_model.get_img().shape))
self.widget.img_mask_btn.setChecked(False)
def update_mask_transparency(self):
"""
Changes the colormap of the mask according to the transparency option selection in the GUI. Resulting Mask will
be either transparent or solid.
"""
self.model.transparent_mask = self.widget.mask_transparent_cb.isChecked()
if self.model.transparent_mask:
self.widget.img_widget.set_mask_color([255, 0, 0, 100])
else:
self.widget.img_widget.set_mask_color([255, 0, 0, 255])
def create_signals(self):
        """
        Creates all the connections of the GUI elements.
        """
        self.model.configuration_selected.connect(self.update_gui_from_configuration)
        self.model.img_changed.connect(self.update_img_control_widget)
        self.model.img_changed.connect(self.plot_img)
        self.model.img_changed.connect(self.plot_mask)
self.widget.img_step_file_widget.next_btn.clicked.connect(self.load_next_img)
self.widget.img_step_file_widget.previous_btn.clicked.connect(self.load_previous_img)
self.widget.load_img_btn.clicked.connect(self.load_file)
self.widget.img_filename_txt.editingFinished.connect(self.filename_txt_changed)
self.widget.img_directory_txt.editingFinished.connect(self.directory_txt_changed)
self.widget.img_directory_btn.clicked.connect(self.img_directory_btn_click)
self.widget.img_step_series_widget.next_btn.clicked.connect(self.load_next_series_img)
self.widget.img_step_series_widget.previous_btn.clicked.connect(self.load_prev_series_img)
self.widget.img_step_series_widget.pos_txt.editingFinished.connect(self.load_series_img)
self.widget.file_info_btn.clicked.connect(self.show_file_info)
self.widget.integration_control_widget.img_control_widget.batch_btn.clicked.connect(self.show_batch_frame)
self.widget.img_step_file_widget.browse_by_name_rb.clicked.connect(self.set_iteration_mode_number)
self.widget.img_step_file_widget.browse_by_time_rb.clicked.connect(self.set_iteration_mode_time)
self.widget.image_control_widget.sources_cb.currentTextChanged.connect(self.select_source)
###
# Image widget image specific controls
self.widget.img_roi_btn.clicked.connect(self.click_roi_btn)
self.widget.img_mask_btn.clicked.connect(self.change_mask_mode)
self.widget.mask_transparent_cb.clicked.connect(self.update_mask_transparency)
###
# Image Widget cake specific controls
self.widget.img_phases_btn.clicked.connect(self.toggle_show_phases)
self.widget.cake_shift_azimuth_sl.valueChanged.connect(partial(self.plot_cake, None))
self.widget.cake_shift_azimuth_sl.valueChanged.connect(self._update_cake_mouse_click_pos)
self.widget.cake_shift_azimuth_sl.valueChanged.connect(self.update_cake_azimuth_axis)
self.widget.cake_shift_azimuth_sl.valueChanged.connect(partial(self.plot_cake_integral, None))
self.widget.integration_image_widget.cake_view.img_view_box.sigRangeChanged.connect(self.update_cake_axes_range)
self.widget.pattern_q_btn.clicked.connect(partial(self.set_cake_axis_unit, 'q_A^-1'))
self.widget.pattern_tth_btn.clicked.connect(partial(self.set_cake_axis_unit, '2th_deg'))
self.widget.integration_control_widget.integration_options_widget.cake_integral_width_sb.valueChanged. \
connect(partial(self.plot_cake_integral, None))
self.widget.integration_control_widget.integration_options_widget.cake_save_integral_btn.clicked. \
connect(self.save_cake_integral)
###
# General Image Widget controls
self.widget.img_dock_btn.clicked.connect(self.img_dock_btn_clicked)
self.widget.img_autoscale_btn.clicked.connect(self.img_autoscale_btn_clicked)
self.widget.img_mode_btn.clicked.connect(self.change_view_mode)
self.widget.integration_image_widget.show_background_subtracted_img_btn.clicked.connect(
self.show_background_subtracted_img_btn_clicked)
self.widget.qa_save_img_btn.clicked.connect(self.save_img)
self.widget.load_calibration_btn.clicked.connect(self.load_calibration)
# signals
self.widget.change_view_btn.clicked.connect(self.change_view_btn_clicked)
self.widget.autoprocess_cb.toggled.connect(self.auto_process_cb_click)
def create_mouse_behavior(self):
"""
Creates the signal connections of mouse interactions
"""
self.widget.img_widget.mouse_left_clicked.connect(self.img_mouse_click)
self.widget.img_widget.mouse_moved.connect(self.show_img_mouse_position)
self.widget.cake_widget.mouse_left_clicked.connect(self.img_mouse_click)
self.widget.cake_widget.mouse_moved.connect(self.show_img_mouse_position)
self.widget.pattern_widget.mouse_left_clicked.connect(self.pattern_mouse_click)
def load_file(self, *args, **kwargs):
filename = kwargs.get('filename', None)
if filename is None:
filenames = open_files_dialog(self.widget, "Load image data file(s)",
self.model.working_directories['image'])
else:
filenames = [filename]
if filenames is not None and len(filenames) != 0:
self.model.working_directories['image'] = os.path.dirname(str(filenames[0]))
if len(filenames) == 1:
self.model.img_model.load(str(filenames[0]))
else:
if self.widget.img_batch_mode_add_rb.isChecked():
self.model.img_model.blockSignals(True)
self.model.img_model.load(str(filenames[0]))
for ind in range(1, len(filenames)):
self.model.img_model.add(filenames[ind])
self.model.img_model.blockSignals(False)
self.model.img_model.img_changed.emit()
elif self.widget.img_batch_mode_integrate_rb.isChecked():
self._load_multiple_files(filenames)
elif self.widget.img_batch_mode_image_save_rb.isChecked():
self._save_multiple_image_files(filenames)
def _load_multiple_files(self, filenames):
if not self.model.calibration_model.is_calibrated:
self.widget.show_error_msg("Can not integrate multiple images without calibration.")
return
working_directory = self._get_pattern_working_directory()
if working_directory == '':
return # abort file processing if no directory was selected
progress_dialog = self.widget.get_progress_dialog("Integrating multiple files.", "Abort Integration",
len(filenames))
self._set_up_batch_processing()
for ind in range(len(filenames)):
filename = str(filenames[ind])
base_filename = os.path.basename(filename)
progress_dialog.setValue(ind)
progress_dialog.setLabelText("Integrating: " + base_filename)
self.model.img_model.blockSignals(True)
self.model.img_model.load(filename)
self.model.img_model.blockSignals(False)
x, y = self.integrate_pattern()
self._save_pattern(base_filename, working_directory, x, y)
QtWidgets.QApplication.processEvents()
if progress_dialog.wasCanceled():
break
progress_dialog.close()
self._tear_down_batch_processing()
def _get_pattern_working_directory(self):
if self.widget.pattern_autocreate_cb.isChecked():
working_directory = self.model.working_directories['pattern']
else:
# if there is no working directory selected A file dialog opens up to choose a directory...
working_directory = str(QtWidgets.QFileDialog.getExistingDirectory(
self.widget, "Please choose the output directory for the integrated Patterns.",
self.model.working_directories['pattern']))
return working_directory
def _set_up_batch_processing(self):
self.model.blockSignals(True)
def _tear_down_batch_processing(self):
self.model.blockSignals(False)
self.model.img_changed.emit()
self.model.pattern_changed.emit()
def _save_multiple_image_files(self, filenames):
working_directory = str(QtWidgets.QFileDialog.getExistingDirectory(
self.widget, "Please choose the output directory for the Images.",
self.model.working_directories['image']))
if working_directory == '':
return
self._set_up_batch_processing()
progress_dialog = self.widget.get_progress_dialog("Saving multiple image files.", "Abort",
len(filenames))
QtWidgets.QApplication.processEvents()
self.model.current_configuration.auto_integrate_pattern = False
for ind, filename in enumerate(filenames):
base_filename = os.path.basename(filename)
progress_dialog.setValue(ind)
progress_dialog.setLabelText("Saving: " + base_filename)
self.model.img_model.load(str(filename))
self.save_img(os.path.join(working_directory, 'batch_' + base_filename))
QtWidgets.QApplication.processEvents()
if progress_dialog.wasCanceled():
break
self.model.current_configuration.auto_integrate_pattern = True
progress_dialog.close()
self._tear_down_batch_processing()
def _save_pattern(self, base_filename, working_directory, x, y):
file_endings = self._get_pattern_file_endings()
for file_ending in file_endings:
filename = os.path.join(working_directory, os.path.splitext(base_filename)[0] + file_ending)
self.model.pattern_model.set_pattern(x, y, filename, unit=self.get_integration_unit())
if file_ending == '.xy':
self.model.pattern_model.save_pattern(filename, header=self._create_pattern_header())
else:
self.model.pattern_model.save_pattern(filename)
# save the background subtracted filename
if self.model.pattern.has_background():
directory = os.path.join(working_directory, 'bkg_subtracted')
if not os.path.exists(directory):
os.mkdir(directory)
filename = os.path.join(directory, self.model.pattern.name + file_ending)
if file_ending == '.xy':
self.model.pattern_model.save_pattern(filename, header=self._create_pattern_header(),
subtract_background=True)
else:
self.model.pattern_model.save_pattern(filename, subtract_background=True)
def _create_pattern_header(self):
header = self.model.calibration_model.create_file_header()
header = header.replace('\r\n', '\n')
header += '\n#\n# ' + self.model.pattern_model.unit + '\t I'
return header
def _get_pattern_file_endings(self):
res = []
if self.widget.pattern_header_xy_cb.isChecked():
res.append('.xy')
if self.widget.pattern_header_chi_cb.isChecked():
res.append('.chi')
if self.widget.pattern_header_dat_cb.isChecked():
res.append('.dat')
return res
def show_batch_frame(self):
self.widget.batch_widget.raise_widget()
def show_file_info(self):
self.widget.file_info_widget.raise_widget()
def get_integration_unit(self):
if self.widget.pattern_tth_btn.isChecked():
return '2th_deg'
elif self.widget.pattern_q_btn.isChecked():
return 'q_A^-1'
elif self.widget.pattern_d_btn.isChecked():
return 'd_A'
def integrate_pattern(self):
if self.widget.img_mask_btn.isChecked():
mask = self.model.mask_model.get_mask()
else:
mask = None
if self.widget.img_roi_btn.isChecked():
roi_mask = self.widget.img_widget.roi.getRoiMask(self.model.img_data.shape)
else:
roi_mask = None
        # combine the detector mask and the ROI mask (either may be None)
        if mask is None:
            mask = roi_mask
        elif roi_mask is not None:
            mask = np.logical_or(mask, roi_mask)
if self.widget.pattern_tth_btn.isChecked():
integration_unit = '2th_deg'
elif self.widget.pattern_q_btn.isChecked():
integration_unit = 'q_A^-1'
elif self.widget.pattern_d_btn.isChecked():
integration_unit = 'd_A'
else:
# in case something weird happened
print('No correct integration unit selected')
return
if not self.widget.automatic_binning_cb.isChecked():
num_points = int(str(self.widget.bin_count_txt.text()))
else:
num_points = None
return self.model.calibration_model.integrate_1d(mask=mask, unit=integration_unit, num_points=num_points)
def change_mask_mode(self):
self.model.use_mask = self.widget.integration_image_widget.mask_btn.isChecked()
self.widget.mask_transparent_cb.setVisible(self.model.use_mask)
self.plot_mask()
self.model.img_model.img_changed.emit()
def update_mask_mode(self):
self.widget.integration_image_widget.mask_btn.setChecked(bool(self.model.use_mask))
self.widget.mask_transparent_cb.setVisible(bool(self.model.use_mask))
self.widget.mask_transparent_cb.setChecked(bool(self.model.transparent_mask))
def update_img_mode(self):
self.widget.img_mode_btn.click()
def load_series_img(self):
pos = int(str(self.widget.img_step_series_widget.pos_txt.text()))
self.model.img_model.load_series_img(pos)
def load_prev_series_img(self):
step = int(str(self.widget.img_step_series_widget.step_txt.text()))
pos = int(str(self.widget.img_step_series_widget.pos_txt.text()))
self.model.img_model.load_series_img(pos - step)
def load_next_series_img(self):
step = int(str(self.widget.img_step_series_widget.step_txt.text()))
pos = int(str(self.widget.img_step_series_widget.pos_txt.text()))
self.model.img_model.load_series_img(pos + step)
def load_next_img(self):
step = int(str(self.widget.img_step_file_widget.step_txt.text()))
self.model.img_model.load_next_file(step=step)
def load_previous_img(self):
step = int(str(self.widget.img_step_file_widget.step_txt.text()))
self.model.img_model.load_previous_file(step=step)
def filename_txt_changed(self):
current_filename = os.path.basename(self.model.img_model.filename)
current_directory = str(self.widget.img_directory_txt.text())
new_filename = str(self.widget.img_filename_txt.text())
if os.path.exists(os.path.join(current_directory, new_filename)):
try:
self.load_file(filename=os.path.join(current_directory, new_filename))
except TypeError:
self.widget.img_filename_txt.setText(current_filename)
else:
self.widget.img_filename_txt.setText(current_filename)
def directory_txt_changed(self):
new_directory = str(self.widget.img_directory_txt.text())
if os.path.exists(new_directory) and new_directory != self.model.working_directories['image']:
if self.model.img_model.autoprocess:
self._files_now = dict([(f, None) for f in os.listdir(self.model.working_directories['image'])])
self.model.working_directories['image'] = os.path.abspath(new_directory)
old_filename = str(self.widget.img_filename_txt.text())
self.widget.img_filename_txt.setText(old_filename + '*')
else:
self.widget.img_directory_txt.setText(self.model.working_directories['image'])
def img_directory_btn_click(self):
directory = str(QtWidgets.QFileDialog.getExistingDirectory(
self.widget,
"Please choose the image working directory.",
self.model.working_directories['image']))
if directory != '':
if self.model.img_model.autoprocess:
self._files_now = dict([(f, None) for f in os.listdir(self.model.working_directories['image'])])
self.model.working_directories['image'] = directory
self.widget.img_directory_txt.setText(directory)
def update_img_control_widget(self):
self.widget.img_step_series_widget.setVisible(int(self.model.img_model.series_max > 1))
self.widget.img_step_series_widget.pos_validator.setTop(self.model.img_model.series_max)
self.widget.img_step_series_widget.pos_txt.setText(str(self.model.img_model.series_pos))
self.widget.file_info_btn.setVisible(self.model.img_model.file_info != "")
self.widget.move_btn.setVisible(len(self.model.img_model.motors_info) > 0)
self.widget.img_filename_txt.setText(os.path.basename(self.model.img_model.filename))
self.widget.img_directory_txt.setText(os.path.dirname(self.model.img_model.filename))
self.widget.file_info_widget.text_lbl.setText(self.model.img_model.file_info)
self.widget.image_control_widget.sources_widget.setVisible(not (self.model.img_model.sources is None))
if self.model.img_model.sources is not None:
sources_cb = self.widget.image_control_widget.sources_cb
sources_cb.blockSignals(True)
# remove all previous items:
for _ in range(sources_cb.count()):
sources_cb.removeItem(0)
sources_cb.addItems(self.model.img_model.sources)
sources_cb.setCurrentText(self.model.img_model.selected_source)
sources_cb.blockSignals(False)
self.widget.cbn_plot_btn.setText('Plot')
self.widget.oiadac_plot_btn.setText('Plot')
# update the window due to some errors on mac when using macports
self._get_master_parent().update()
def _get_master_parent(self):
master_widget_parent = self.widget
while master_widget_parent.parent():
master_widget_parent = master_widget_parent.parent()
return master_widget_parent
def click_roi_btn(self):
if self.model.current_configuration.roi is None:
self.model.current_configuration.roi = self.widget.img_widget.roi.getRoiLimits()
else:
self.model.current_configuration.roi = None
self.update_roi_in_gui()
def update_roi_in_gui(self):
roi = self.model.mask_model.roi
if roi is None:
self.widget.img_widget.deactivate_roi()
self.widget.img_roi_btn.setChecked(False)
if self.roi_active:
self.widget.img_widget.roi.sigRegionChangeFinished.disconnect(self.update_roi_in_model)
self.roi_active = False
return
if not self.model.current_configuration.auto_integrate_cake:
self.widget.img_roi_btn.setChecked(True)
self.widget.img_widget.activate_roi()
self.widget.img_widget.update_roi_shade_limits(self.model.img_data.shape)
pos = QtCore.QPoint(int(roi[2]), int(roi[0]))
size = QtCore.QPoint(int(roi[3] - roi[2]), int(roi[1] - roi[0]))
self.widget.img_widget.roi.setRoiLimits(pos, size)
if not self.roi_active:
self.widget.img_widget.roi.sigRegionChangeFinished.connect(self.update_roi_in_model)
self.roi_active = True
def update_roi_in_model(self):
self.model.current_configuration.roi = self.widget.img_widget.roi.getRoiLimits()
def change_view_mode(self):
if str(self.widget.img_mode_btn.text()) == 'Cake':
self.activate_cake_mode()
elif str(self.widget.img_mode_btn.text()) == 'Image':
self.activate_image_mode()
def toggle_show_phases(self):
if str(self.widget.img_phases_btn.text()) == 'Show Phases':
self.widget.integration_image_widget.cake_view.show_all_visible_cake_phases(
self.widget.phase_widget.phase_show_cbs)
self.widget.img_phases_btn.setText('Hide Phases')
self.model.enabled_phases_in_cake.emit()
elif str(self.widget.img_phases_btn.text()) == 'Hide Phases':
self.widget.integration_image_widget.cake_view.hide_all_cake_phases()
self.widget.img_phases_btn.setText('Show Phases')
def activate_cake_mode(self):
if not self.model.current_configuration.auto_integrate_cake:
self.model.current_configuration.auto_integrate_cake = True
self.model.current_configuration.integrate_image_2d()
self._update_cake_line_pos()
self._update_cake_mouse_click_pos()
self.widget.img_mode_btn.setText('Image')
self.widget.img_mode = str("Cake")
self.model.img_changed.disconnect(self.plot_img)
self.model.img_changed.disconnect(self.plot_mask)
self.model.cake_changed.connect(self.plot_cake)
self.plot_cake()
self.widget.cake_shift_azimuth_sl.setVisible(True)
self.widget.cake_shift_azimuth_sl.setMinimum(int(-len(self.model.cake_azi) / 2))
self.widget.cake_shift_azimuth_sl.setMaximum(int(len(self.model.cake_azi) / 2))
self.widget.cake_shift_azimuth_sl.setSingleStep(1)
self.widget.img_phases_btn.setVisible(True)
self.widget.integration_image_widget.img_pg_layout.hide()
self.widget.integration_image_widget.cake_pg_layout.show()
def activate_image_mode(self):
if self.model.current_configuration.auto_integrate_cake:
self.model.current_configuration.auto_integrate_cake = False
self.widget.cake_shift_azimuth_sl.setVisible(False)
self.widget.img_phases_btn.setVisible(False)
self._update_image_line_pos()
self._update_image_mouse_click_pos()
self.widget.img_mode_btn.setText('Cake')
self.widget.img_mode = str("Image")
self.model.img_changed.connect(self.plot_img)
self.model.img_changed.connect(self.plot_mask)
self.model.cake_changed.disconnect(self.plot_cake)
self.plot_img()
self.plot_mask()
self.widget.integration_image_widget.img_pg_layout.show()
self.widget.integration_image_widget.cake_pg_layout.hide()
def img_autoscale_btn_clicked(self):
if self.widget.img_autoscale_btn.isChecked():
self.widget.img_widget.auto_level()
def img_dock_btn_clicked(self):
self.img_docked = not self.img_docked
self.widget.dock_img(self.img_docked)
def show_background_subtracted_img_btn_clicked(self):
if self.widget.img_mode_btn.text() == 'Cake':
self.plot_img()
else:
self.widget.integration_image_widget.show_background_subtracted_img_btn.setChecked(False)
def _update_cake_line_pos(self):
cur_tth = self.get_current_pattern_tth()
if self.model.cake_tth is None or cur_tth < np.min(self.model.cake_tth) or cur_tth > np.max(
self.model.cake_tth):
self.widget.cake_widget.deactivate_vertical_line()
else:
new_pos = get_partial_index(self.model.cake_tth, cur_tth) + 0.5
self.widget.cake_widget.set_vertical_line_pos(new_pos, 0)
self.widget.cake_widget.activate_vertical_line()
def _update_cake_mouse_click_pos(self):
if self.clicked_tth is None or not self.model.calibration_model.is_calibrated:
return
tth = self.clicked_tth
azi = self.clicked_azi
cake_tth = self.model.cake_tth
x_pos = get_partial_index(cake_tth, tth) + 0.5
shift_amount = self.widget.cake_shift_azimuth_sl.value()
y_pos = (get_partial_index(self.model.cake_azi, azi) + 0.5 + shift_amount) % len(self.model.cake_azi)
self.widget.cake_widget.set_mouse_click_position(x_pos, y_pos)
def _update_image_line_pos(self):
if not self.model.calibration_model.is_calibrated:
return
cur_tth = self.get_current_pattern_tth()
self.widget.img_widget.set_circle_line(
self.model.calibration_model.get_two_theta_array(), cur_tth / 180 * np.pi)
def _update_image_mouse_click_pos(self):
if self.clicked_tth is None or not self.model.calibration_model.is_calibrated:
return
tth = np.deg2rad(self.clicked_tth)
azi = np.deg2rad(self.clicked_azi)
new_pos = self.model.calibration_model.get_pixel_ind(tth, azi)
if len(new_pos) == 0:
self.widget.img_widget.mouse_click_item.hide()
else:
x_ind, y_ind = new_pos
self.widget.img_widget.set_mouse_click_position(y_ind + 0.5, x_ind + 0.5)
self.widget.img_widget.mouse_click_item.show()
def get_current_pattern_tth(self):
cur_pos = self.widget.pattern_widget.pos_line.getPos()[0]
if self.widget.pattern_q_btn.isChecked():
cur_tth = self.convert_x_value(cur_pos, 'q_A^-1', '2th_deg')
elif self.widget.pattern_tth_btn.isChecked():
cur_tth = cur_pos
elif self.widget.pattern_d_btn.isChecked():
cur_tth = self.convert_x_value(cur_pos, 'd_A', '2th_deg')
else:
cur_tth = None
return cur_tth
def update_cake_axes_range(self):
if self.model.current_configuration.auto_integrate_cake:
self.update_cake_azimuth_axis()
self.update_cake_x_axis()
def update_cake_azimuth_axis(self):
data_img_item = self.widget.integration_image_widget.cake_view.data_img_item
shift_amount = self.widget.cake_shift_azimuth_sl.value()
cake_azi = self.model.cake_azi - shift_amount * np.mean(np.diff(self.model.cake_azi))
height = data_img_item.viewRect().height()
bottom = data_img_item.viewRect().top()
v_scale = (cake_azi[-1] - cake_azi[0]) / data_img_item.boundingRect().height()
v_shift = np.min(cake_azi[0])
min_azi = v_scale * bottom + v_shift
max_azi = v_scale * (bottom + height) + v_shift
self.widget.integration_image_widget.cake_view.left_axis_cake.setRange(min_azi, max_azi)
def update_cake_x_axis(self):
if self.model.cake_tth is None:
return
data_img_item = self.widget.integration_image_widget.cake_view.data_img_item
cake_tth = self.model.cake_tth
width = data_img_item.viewRect().width()
left = data_img_item.viewRect().left()
h_scale = (np.max(cake_tth) - np.min(cake_tth)) / data_img_item.boundingRect().width()
h_shift = np.min(cake_tth)
min_tth = h_scale * left + h_shift
max_tth = h_scale * (left + width) + h_shift
if self.model.current_configuration.integration_unit == '2th_deg':
self.widget.integration_image_widget.cake_view.bottom_axis_cake.setRange(min_tth, max_tth)
elif self.model.current_configuration.integration_unit == 'q_A^-1':
self.widget.integration_image_widget.cake_view.bottom_axis_cake.setRange(
self.convert_x_value(min_tth, '2th_deg', 'q_A^-1'),
self.convert_x_value(max_tth, '2th_deg', 'q_A^-1'))
def set_cake_axis_unit(self, unit='2th_deg'):
if unit == '2th_deg':
self.widget.integration_image_widget.cake_view.bottom_axis_cake.setLabel(u'2θ', u'°')
elif unit == 'q_A^-1':
self.widget.integration_image_widget.cake_view.bottom_axis_cake.setLabel('Q', 'A<sup>-1</sup>')
self.update_cake_x_axis()
def show_img_mouse_position(self, x, y):
if self.widget.img_mode == 'Cake':
img_data = self.widget.cake_widget.img_data
else:
img_data = self.widget.img_widget.img_data
img_shape = img_data.shape
if 0 < x < img_shape[1] - 1 and 0 < y < img_shape[0] - 1:
self.update_mouse_position_labels(x, y, img_data[int(np.floor(y)), int(np.floor(x))])
if self.model.calibration_model.is_calibrated:
x_temp = x
x = np.array([y])
y = np.array([x_temp])
if self.widget.img_mode == 'Cake':
tth = get_partial_value(self.model.cake_tth, y - 0.5)
shift_amount = self.widget.cake_shift_azimuth_sl.value()
cake_azi = self.model.cake_azi - shift_amount * np.mean(np.diff(self.model.cake_azi))
azi = get_partial_value(cake_azi, x - 0.5)
q_value = self.convert_x_value(tth, '2th_deg', 'q_A^-1')
else:
tth = self.model.calibration_model.get_two_theta_img(x, y)
tth = tth / np.pi * 180.0
q_value = self.convert_x_value(tth, '2th_deg', 'q_A^-1')
azi = self.model.calibration_model.get_azi_img(x, y) / np.pi * 180
d = self.convert_x_value(tth, '2th_deg', 'd_A')
tth_str = u"2θ:%9.3f " % tth
self.widget.mouse_tth_lbl.setText(tth_str)
self.widget.mouse_d_lbl.setText('d:%9.3f ' % d)
self.widget.mouse_q_lbl.setText('Q:%9.3f ' % q_value)
self.widget.mouse_azi_lbl.setText('X:%9.3f ' % azi)
self.widget.img_widget_mouse_tth_lbl.setText(tth_str)
self.widget.img_widget_mouse_d_lbl.setText('d:%9.3f ' % d)
self.widget.img_widget_mouse_q_lbl.setText('Q:%9.3f ' % q_value)
self.widget.img_widget_mouse_azi_lbl.setText('X:%9.3f ' % azi)
else:
self.widget.mouse_tth_lbl.setText(u'2θ: -')
self.widget.mouse_d_lbl.setText('d: -')
self.widget.mouse_q_lbl.setText('Q: -')
self.widget.mouse_azi_lbl.setText('X: -')
self.widget.img_widget_mouse_tth_lbl.setText(u'2θ: -')
self.widget.img_widget_mouse_d_lbl.setText('d: -')
self.widget.img_widget_mouse_q_lbl.setText('Q: -')
self.widget.img_widget_mouse_azi_lbl.setText('X: -')
else:
self.update_mouse_position_labels(x, y, None)
def img_mouse_click(self, x, y):
if self.widget.img_mode == 'Cake':
img_data = self.widget.cake_widget.img_data
else:
img_data = self.widget.img_widget.img_data
if 0 < x < img_data.shape[1] - 1 and 0 < y < img_data.shape[0] - 1:
intensity = img_data[int(np.floor(y)), int(np.floor(x))]
else:
intensity = None
self.update_mouse_click_position_labels(x, y, intensity)
if self.model.calibration_model.is_calibrated:
x, y = y, x # the indices are reversed for the img_array
if self.widget.img_mode == 'Cake': # cake mode
# get clicked tth and azimuth
cake_shape = self.model.cake_data.shape
if x < 0 or y < 0 or x > cake_shape[0] - 1 or y > cake_shape[1] - 1:
return
x = np.array([x])
y = np.array([y])
tth = get_partial_value(self.model.cake_tth, y - 0.5)
shift_amount = self.widget.cake_shift_azimuth_sl.value()
azi = get_partial_value(np.roll(self.model.cake_azi, shift_amount), x - 0.5)
self.widget.cake_widget.activate_vertical_line()
elif self.widget.img_mode == 'Image': # image mode
img_shape = self.model.img_data.shape
if x < 0 or y < 0 or x > img_shape[0] - 1 or y > img_shape[1] - 1:
return
x = np.array([x])
y = np.array([y])
tth = np.rad2deg(self.model.calibration_model.get_two_theta_img(x, y))
azi = np.rad2deg(self.model.calibration_model.get_azi_img(x, y))
self.widget.img_widget.set_circle_line(self.model.calibration_model.get_two_theta_array(),
np.deg2rad(tth))
            else:  # fallback in case neither image nor cake mode is active
tth = 0
azi = 0
self.clicked_tth = tth # in degree
self.clicked_azi = azi # in degree
if self.widget.img_mode == 'Cake':
self.plot_cake_integral()
# calculate right unit for the position line the pattern widget
if self.widget.pattern_q_btn.isChecked():
pos = 4 * np.pi * np.sin(np.deg2rad(tth) / 2) / self.model.calibration_model.wavelength / 1e10
elif self.widget.pattern_tth_btn.isChecked():
pos = tth
elif self.widget.pattern_d_btn.isChecked():
pos = self.model.calibration_model.wavelength / (2 * np.sin(np.deg2rad(tth) / 2)) * 1e10
else:
pos = 0
self.widget.pattern_widget.set_pos_line(pos)
self.widget.click_tth_lbl.setText(self.widget.mouse_tth_lbl.text())
self.widget.click_d_lbl.setText(self.widget.mouse_d_lbl.text())
self.widget.click_q_lbl.setText(self.widget.mouse_q_lbl.text())
self.widget.click_azi_lbl.setText(self.widget.mouse_azi_lbl.text())
self.widget.img_widget_click_tth_lbl.setText(self.widget.mouse_tth_lbl.text())
self.widget.img_widget_click_d_lbl.setText(self.widget.mouse_d_lbl.text())
self.widget.img_widget_click_q_lbl.setText(self.widget.mouse_q_lbl.text())
self.widget.img_widget_click_azi_lbl.setText(self.widget.mouse_azi_lbl.text())
def update_mouse_position_labels(self, x, y, intensity):
x_pos_string = 'X: %4d' % x
y_pos_string = 'Y: %4d' % y
if intensity is None:
int_string = 'I:'
else:
int_string = 'I: %5d' % intensity
self.widget.mouse_x_lbl.setText(x_pos_string)
self.widget.mouse_y_lbl.setText(y_pos_string)
self.widget.mouse_int_lbl.setText(int_string)
def update_mouse_click_position_labels(self, x, y, intensity):
self.update_mouse_position_labels(x, y, intensity)
self.widget.click_x_lbl.setText(self.widget.mouse_x_lbl.text())
self.widget.click_y_lbl.setText(self.widget.mouse_y_lbl.text())
self.widget.click_int_lbl.setText(self.widget.mouse_int_lbl.text())
def pattern_mouse_click(self, x, y):
if self.model.calibration_model.is_calibrated:
if self.widget.img_mode == 'Cake':
self.set_cake_line_position(x)
elif self.widget.img_mode == 'Image':
self.set_image_line_position(x)
def set_cake_line_position(self, x):
x = self._convert_to_tth(x)
upper_ind = np.where(self.model.cake_tth > x)[0]
lower_ind = np.where(self.model.cake_tth < x)[0]
if len(upper_ind) == 0 or len(lower_ind) == 0:
self.widget.cake_widget.plot_cake_integral(np.array([]), np.array([]))
self.widget.cake_widget.deactivate_vertical_line()
return
spacing = self.model.cake_tth[upper_ind[0]] - self.model.cake_tth[lower_ind[-1]]
new_pos = lower_ind[-1] + (x - self.model.cake_tth[lower_ind[-1]]) / spacing + 0.5
self.widget.cake_widget.vertical_line.setValue(new_pos)
self.widget.cake_widget.activate_vertical_line()
self.plot_cake_integral(x)
def set_image_line_position(self, x):
x = self._convert_to_tth(x)
self.widget.img_widget.set_circle_line(
self.model.calibration_model.get_two_theta_array(), np.deg2rad(x))
def _convert_to_tth(self, x):
if self.model.integration_unit == 'q_A^-1':
return self.convert_x_value(x, 'q_A^-1', '2th_deg')
elif self.model.integration_unit == 'd_A':
return self.convert_x_value(x, 'd_A', '2th_deg')
return x
def set_iteration_mode_number(self):
self.model.img_model.set_file_iteration_mode('number')
def set_iteration_mode_time(self):
self.model.img_model.set_file_iteration_mode('time')
def select_source(self, source):
self.model.img_model.select_source(source)
def convert_x_value(self, value, previous_unit, new_unit):
wavelength = self.model.calibration_model.wavelength
if previous_unit == '2th_deg':
tth = value
elif previous_unit == 'q_A^-1':
tth = np.arcsin(
value * 1e10 * wavelength / (4 * np.pi)) * 360 / np.pi
elif previous_unit == 'd_A':
tth = 2 * np.arcsin(wavelength / (2 * value * 1e-10)) * 180 / np.pi
else:
tth = 0
if new_unit == '2th_deg':
res = tth
elif new_unit == 'q_A^-1':
res = 4 * np.pi * \
np.sin(tth / 360 * np.pi) / \
wavelength / 1e10
elif new_unit == 'd_A':
res = wavelength / (2 * np.sin(tth / 360 * np.pi)) * 1e10
else:
res = 0
return res
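    # Editorial note (not part of the original code): convert_x_value() above
    # applies Bragg's law with the wavelength stored in meters (hence the
    # 1e10/1e-10 factors): q = 4*pi*sin(tth/2)/lambda and
    # d = lambda/(2*sin(tth/2)). For example, with lambda = 0.4 A and
    # tth = 10 deg this gives q of roughly 2.74 A^-1 and d of roughly 2.29 A.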
def load_calibration(self):
filename = open_file_dialog(
self.widget, "Load calibration...",
self.model.working_directories['calibration'],
'*.poni')
if filename != '':
self.model.working_directories['calibration'] = os.path.dirname(filename)
self.model.calibration_model.load(filename)
self.widget.calibration_lbl.setText(
self.model.calibration_model.calibration_name)
self.model.img_model.img_changed.emit()
def auto_process_cb_click(self):
self.model.img_model.autoprocess = self.widget.autoprocess_cb.isChecked()
def save_img(self, filename=None):
if not filename:
img_filename = os.path.splitext(os.path.basename(self.model.img_model.filename))[0]
filename = save_file_dialog(self.widget, "Save Image.",
os.path.join(self.model.working_directories['image'],
img_filename + '.png'),
('Image (*.png);;Data (*.tiff);;Text (*.txt)'))
if filename != '':
if filename.endswith('.png'):
if self.widget.img_mode == 'Cake':
self.widget.cake_widget.deactivate_vertical_line()
self.widget.cake_widget.deactivate_mouse_click_item()
QtWidgets.QApplication.processEvents()
self.widget.cake_widget.save_img(filename)
self.widget.cake_widget.activate_vertical_line()
self.widget.cake_widget.activate_mouse_click_item()
elif self.widget.img_mode == 'Image':
self.widget.img_widget.deactivate_circle_scatter()
self.widget.img_widget.deactivate_roi()
QtWidgets.QApplication.processEvents()
self.widget.img_widget.save_img(filename)
self.widget.img_widget.activate_circle_scatter()
if self.roi_active:
self.widget.img_widget.activate_roi()
elif filename.endswith('.tiff') or filename.endswith('.tif'):
if self.widget.img_mode == 'Image':
im_array = np.int32(self.model.img_data)
elif self.widget.img_mode == 'Cake':
im_array = np.int32(self.model.cake_data)
im_array = np.flipud(im_array)
im = Image.fromarray(im_array)
im.save(filename)
elif filename.endswith('.txt') or filename.endswith('.csv'):
if self.widget.img_mode == 'Image':
return
            elif self.widget.img_mode == 'Cake':
                # Save the cake data as a text file for export. The rows are
                # written one by one (slow, but simple) because the header
                # line holds floats while the data itself is integer.
                with open(filename, 'w') as out_file:
cake_tth = np.insert(self.model.cake_tth, 0, 0)
np.savetxt(out_file, cake_tth[None], fmt='%6.3f')
for azi, row in zip(self.model.cake_azi, self.model.cake_data):
row_str = " ".join(["{:6.0f}".format(el) for el in row])
out_file.write("{:6.2f}".format(azi) + row_str + '\n')
def update_gui_from_configuration(self):
        self.widget.img_mask_btn.setChecked(bool(self.model.use_mask))
self.widget.mask_transparent_cb.setChecked(bool(self.model.transparent_mask))
self.widget.autoprocess_cb.setChecked(bool(self.model.img_model.autoprocess))
self.widget.calibration_lbl.setText(self.model.calibration_model.calibration_name)
self.update_img_control_widget()
self.update_mask_mode()
self.update_roi_in_gui()
if self.model.current_configuration.auto_integrate_cake and self.widget.img_mode == 'Image':
self.activate_cake_mode()
elif not self.model.current_configuration.auto_integrate_cake and self.widget.img_mode == 'Cake':
self.activate_image_mode()
elif self.model.current_configuration.auto_integrate_cake and self.widget.img_mode == 'Cake':
self._update_cake_line_pos()
self._update_cake_mouse_click_pos()
elif not self.model.current_configuration.auto_integrate_cake and self.widget.img_mode == 'Image':
self._update_image_line_pos()
self._update_image_mouse_click_pos()
def change_view_btn_clicked(self):
if self.view_mode == 'alternative':
self.change_view_to_normal()
elif self.view_mode == 'normal':
self.change_view_to_alternative()
def change_view_to_normal(self):
if self.view_mode == 'normal':
return
self.vertical_splitter_alternative_state = self.widget.vertical_splitter.saveState()
self.horizontal_splitter_alternative_state = self.widget.horizontal_splitter.saveState()
self.widget.vertical_splitter.addWidget(self.widget.integration_pattern_widget)
self.widget.integration_control_widget.setOrientation(QtCore.Qt.Horizontal)
if self.vertical_splitter_normal_state:
self.widget.vertical_splitter.restoreState(self.vertical_splitter_normal_state)
if self.horizontal_splitter_normal_state:
self.widget.horizontal_splitter.restoreState(self.horizontal_splitter_normal_state)
self.widget.img_widget.set_orientation("horizontal")
self.view_mode = 'normal'
def change_view_to_alternative(self):
if self.view_mode == 'alternative':
return
self.vertical_splitter_normal_state = self.widget.vertical_splitter.saveState()
self.horizontal_splitter_normal_state = self.widget.horizontal_splitter.saveState()
self.widget.vertical_splitter_left.insertWidget(0, self.widget.integration_pattern_widget)
self.widget.integration_control_widget.setOrientation(QtCore.Qt.Vertical)
if self.vertical_splitter_alternative_state:
self.widget.vertical_splitter.restoreState(self.vertical_splitter_alternative_state)
if self.horizontal_splitter_alternative_state:
self.widget.horizontal_splitter.restoreState(self.horizontal_splitter_alternative_state)
self.widget.img_widget.set_orientation("vertical")
self.view_mode = 'alternative'
| gpl-3.0 | 661,299,400,228,834,300 | 44.973223 | 120 | 0.619655 | false |
DJMuggs/ansible-modules-extras | notification/campfire.py | 8 | 4899 | #!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
---
module: campfire
version_added: "1.2"
short_description: Send a message to Campfire
description:
- Send a message to Campfire.
- Messages with newlines will result in a "Paste" message being sent.
options:
subscription:
description:
- The subscription name to use.
required: true
token:
description:
- API token.
required: true
room:
description:
- Room number to which the message should be sent.
required: true
msg:
description:
- The message body.
required: true
notify:
description:
- Send a notification sound before the message.
required: false
choices: ["56k", "bell", "bezos", "bueller", "clowntown",
"cottoneyejoe", "crickets", "dadgummit", "dangerzone",
"danielsan", "deeper", "drama", "greatjob", "greyjoy",
"guarantee", "heygirl", "horn", "horror",
"inconceivable", "live", "loggins", "makeitso", "noooo",
"nyan", "ohmy", "ohyeah", "pushit", "rimshot",
"rollout", "rumble", "sax", "secret", "sexyback",
"story", "tada", "tmyk", "trololo", "trombone", "unix",
"vuvuzela", "what", "whoomp", "yeah", "yodel"]
# informational: requirements for nodes
requirements: [ urllib2, cgi ]
author: '"Adam Garside (@fabulops)" <[email protected]>'
'''
EXAMPLES = '''
- campfire: subscription=foo token=12345 room=123 msg="Task completed."
- campfire: subscription=foo token=12345 room=123 notify=loggins
msg="Task completed ... with feeling."
'''
def main():
    # Defer failure reporting until AnsibleModule exists -- calling
    # module.fail_json() before `module` is created would raise a NameError.
    try:
        import urllib2
        HAS_URLLIB2 = True
    except ImportError:
        HAS_URLLIB2 = False
    try:
        import cgi
        HAS_CGI = True
    except ImportError:
        HAS_CGI = False
module = AnsibleModule(
argument_spec=dict(
subscription=dict(required=True),
token=dict(required=True),
room=dict(required=True),
msg=dict(required=True),
notify=dict(required=False,
choices=["56k", "bell", "bezos", "bueller",
"clowntown", "cottoneyejoe",
"crickets", "dadgummit", "dangerzone",
"danielsan", "deeper", "drama",
"greatjob", "greyjoy", "guarantee",
"heygirl", "horn", "horror",
"inconceivable", "live", "loggins",
"makeitso", "noooo", "nyan", "ohmy",
"ohyeah", "pushit", "rimshot",
"rollout", "rumble", "sax", "secret",
"sexyback", "story", "tada", "tmyk",
"trololo", "trombone", "unix",
"vuvuzela", "what", "whoomp", "yeah",
"yodel"]),
),
supports_check_mode=False
)
    if not HAS_URLLIB2:
        module.fail_json(msg="urllib2 is required")
    if not HAS_CGI:
        module.fail_json(msg="cgi is required")
    subscription = module.params["subscription"]
token = module.params["token"]
room = module.params["room"]
msg = module.params["msg"]
notify = module.params["notify"]
URI = "https://%s.campfirenow.com" % subscription
NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
MSTR = "<message><body>%s</body></message>"
AGENT = "Ansible/1.2"
try:
# Setup basic auth using token as the username
pm = urllib2.HTTPPasswordMgrWithDefaultRealm()
pm.add_password(None, URI, token, 'X')
# Setup Handler and define the opener for the request
handler = urllib2.HTTPBasicAuthHandler(pm)
opener = urllib2.build_opener(handler)
target_url = '%s/room/%s/speak.xml' % (URI, room)
# Send some audible notification if requested
if notify:
req = urllib2.Request(target_url, NSTR % cgi.escape(notify))
req.add_header('Content-Type', 'application/xml')
req.add_header('User-agent', AGENT)
response = opener.open(req)
# Send the message
req = urllib2.Request(target_url, MSTR % cgi.escape(msg))
req.add_header('Content-Type', 'application/xml')
req.add_header('User-agent', AGENT)
response = opener.open(req)
except urllib2.HTTPError, e:
if not (200 <= e.code < 300):
module.fail_json(msg="unable to send msg: '%s', campfire api"
" returned error code: '%s'" %
(msg, e.code))
except Exception, e:
module.fail_json(msg="unable to send msg: %s" % msg)
module.exit_json(changed=True, room=room, msg=msg, notify=notify)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 1,198,253,725,349,094,700 | 33.258741 | 73 | 0.545213 | false |
markwal/OctoPrint-ScreenSquish | setup.py | 1 | 1331 | # coding=utf-8
########################################################################################################################
plugin_identifier = "ScreenSquish"
plugin_package = "octoprint_%s" % plugin_identifier
plugin_name = "OctoPrint-ScreenSquish"
plugin_version = "0.4"
plugin_description = "Scalable UI that does some old fashioned (2.3) bootstrap responsive and some collapse etc."
plugin_author = "Mark Walker"
plugin_author_email = "[email protected]"
plugin_url = "https://github.com/markwal/OctoPrint-ScreenSquish"
plugin_license = "AGPLv3"
plugin_requires = []
plugin_additional_data = []
########################################################################################################################
from setuptools import setup
try:
import octoprint_setuptools
except:
print("Could not import OctoPrint's setuptools, are you sure you are running that under "
"the same python installation that OctoPrint is installed under?")
import sys
sys.exit(-1)
setup(**octoprint_setuptools.create_plugin_setup_parameters(
identifier=plugin_identifier,
name=plugin_name,
version=plugin_version,
description=plugin_description,
author=plugin_author,
mail=plugin_author_email,
url=plugin_url,
license=plugin_license,
requires=plugin_requires,
additional_data=plugin_additional_data
))
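# Typical install (a sketch, not taken from the plugin's docs): run
# `pip install .` from the same Python environment that OctoPrint runs in,
# then restart OctoPrint so the plugin is discovered.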
| agpl-3.0 | -7,360,801,327,034,253,000 | 34.026316 | 120 | 0.631104 | false |
djenniex/CouchPotatoServer | libs/pyasn1/type/univ.py | 74 | 44619 | # ASN.1 "universal" data types
import operator, sys, math
from pyasn1.type import base, tag, constraint, namedtype, namedval, tagmap
from pyasn1.codec.ber import eoo
from pyasn1.compat import octets
from pyasn1 import error
# "Simple" ASN.1 types (yet incomplete)
class Integer(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
)
namedValues = namedval.NamedValues()
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if namedValues is None:
self.__namedValues = self.namedValues
else:
self.__namedValues = namedValues
base.AbstractSimpleAsn1Item.__init__(
self, value, tagSet, subtypeSpec
)
def __repr__(self):
if self.__namedValues is not self.namedValues:
return '%s, %r)' % (base.AbstractSimpleAsn1Item.__repr__(self)[:-1], self.__namedValues)
else:
return base.AbstractSimpleAsn1Item.__repr__(self)
def __and__(self, value): return self.clone(self._value & value)
def __rand__(self, value): return self.clone(value & self._value)
def __or__(self, value): return self.clone(self._value | value)
def __ror__(self, value): return self.clone(value | self._value)
def __xor__(self, value): return self.clone(self._value ^ value)
def __rxor__(self, value): return self.clone(value ^ self._value)
def __lshift__(self, value): return self.clone(self._value << value)
def __rshift__(self, value): return self.clone(self._value >> value)
def __add__(self, value): return self.clone(self._value + value)
def __radd__(self, value): return self.clone(value + self._value)
def __sub__(self, value): return self.clone(self._value - value)
def __rsub__(self, value): return self.clone(value - self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self.clone(value * self._value)
def __mod__(self, value): return self.clone(self._value % value)
def __rmod__(self, value): return self.clone(value % self._value)
def __pow__(self, value, modulo=None): return self.clone(pow(self._value, value, modulo))
def __rpow__(self, value): return self.clone(pow(value, self._value))
if sys.version_info[0] <= 2:
def __div__(self, value): return self.clone(self._value // value)
def __rdiv__(self, value): return self.clone(value // self._value)
else:
def __truediv__(self, value): return self.clone(self._value / value)
def __rtruediv__(self, value): return self.clone(value / self._value)
def __divmod__(self, value): return self.clone(self._value // value)
def __rdivmod__(self, value): return self.clone(value // self._value)
__hash__ = base.AbstractSimpleAsn1Item.__hash__
def __int__(self): return int(self._value)
if sys.version_info[0] <= 2:
def __long__(self): return long(self._value)
def __float__(self): return float(self._value)
def __abs__(self): return self.clone(abs(self._value))
def __index__(self): return int(self._value)
def __pos__(self): return self.clone(+self._value)
def __neg__(self): return self.clone(-self._value)
def __invert__(self): return self.clone(~self._value)
def __round__(self, n=0):
r = round(self._value, n)
if n:
return self.clone(r)
else:
return r
def __floor__(self): return math.floor(self._value)
def __ceil__(self): return math.ceil(self._value)
if sys.version_info[0:2] > (2, 5):
def __trunc__(self): return self.clone(math.trunc(self._value))
def __lt__(self, value): return self._value < value
def __le__(self, value): return self._value <= value
def __eq__(self, value): return self._value == value
def __ne__(self, value): return self._value != value
def __gt__(self, value): return self._value > value
def __ge__(self, value): return self._value >= value
def prettyIn(self, value):
if not isinstance(value, str):
try:
return int(value)
except:
raise error.PyAsn1Error(
'Can\'t coerce %r into integer: %s' % (value, sys.exc_info()[1])
)
r = self.__namedValues.getValue(value)
if r is not None:
return r
try:
return int(value)
except:
raise error.PyAsn1Error(
'Can\'t coerce %r into integer: %s' % (value, sys.exc_info()[1])
)
def prettyOut(self, value):
r = self.__namedValues.getName(value)
return r is None and str(value) or repr(r)
def getNamedValues(self): return self.__namedValues
def clone(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if value is None and tagSet is None and subtypeSpec is None \
and namedValues is None:
return self
if value is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def subtype(self, value=None, implicitTag=None, explicitTag=None,
subtypeSpec=None, namedValues=None):
if value is None:
value = self._value
if implicitTag is not None:
tagSet = self._tagSet.tagImplicitly(implicitTag)
elif explicitTag is not None:
tagSet = self._tagSet.tagExplicitly(explicitTag)
else:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
else:
subtypeSpec = subtypeSpec + self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
else:
namedValues = namedValues + self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
class Boolean(Integer):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01),
)
subtypeSpec = Integer.subtypeSpec+constraint.SingleValueConstraint(0,1)
namedValues = Integer.namedValues.clone(('False', 0), ('True', 1))
class BitString(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
)
namedValues = namedval.NamedValues()
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if namedValues is None:
self.__namedValues = self.namedValues
else:
self.__namedValues = namedValues
base.AbstractSimpleAsn1Item.__init__(
self, value, tagSet, subtypeSpec
)
def clone(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if value is None and tagSet is None and subtypeSpec is None \
and namedValues is None:
return self
if value is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def subtype(self, value=None, implicitTag=None, explicitTag=None,
subtypeSpec=None, namedValues=None):
if value is None:
value = self._value
if implicitTag is not None:
tagSet = self._tagSet.tagImplicitly(implicitTag)
elif explicitTag is not None:
tagSet = self._tagSet.tagExplicitly(explicitTag)
else:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
else:
subtypeSpec = subtypeSpec + self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
else:
namedValues = namedValues + self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def __str__(self): return str(tuple(self))
# Immutable sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(operator.getitem(self._value, i))
else:
return self._value[i]
def __add__(self, value): return self.clone(self._value + value)
def __radd__(self, value): return self.clone(value + self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self * value
def prettyIn(self, value):
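        """Accept "'0101'B" binary or "'A9'H" hex string notation, a
        comma-separated list of named bits, a 0/1 sequence, or another
        BitString; normalize the value to a tuple of bits."""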
r = []
if not value:
return ()
elif isinstance(value, str):
if value[0] == '\'':
if value[-2:] == '\'B':
for v in value[1:-2]:
if v == '0':
r.append(0)
elif v == '1':
r.append(1)
else:
raise error.PyAsn1Error(
'Non-binary BIT STRING initializer %s' % (v,)
)
return tuple(r)
elif value[-2:] == '\'H':
for v in value[1:-2]:
i = 4
v = int(v, 16)
while i:
i = i - 1
r.append((v>>i)&0x01)
return tuple(r)
else:
raise error.PyAsn1Error(
'Bad BIT STRING value notation %s' % (value,)
)
else:
for i in value.split(','):
j = self.__namedValues.getValue(i)
if j is None:
raise error.PyAsn1Error(
'Unknown bit identifier \'%s\'' % (i,)
)
if j >= len(r):
r.extend([0]*(j-len(r)+1))
r[j] = 1
return tuple(r)
elif isinstance(value, (tuple, list)):
r = tuple(value)
for b in r:
if b and b != 1:
raise error.PyAsn1Error(
'Non-binary BitString initializer \'%s\'' % (r,)
)
return r
elif isinstance(value, BitString):
return tuple(value)
else:
raise error.PyAsn1Error(
'Bad BitString initializer type \'%s\'' % (value,)
)
def prettyOut(self, value):
return '\"\'%s\'B\"' % ''.join([str(x) for x in value])
try:
all
except NameError: # Python 2.4
def all(iterable):
for element in iterable:
if not element:
return False
return True
class OctetString(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
)
defaultBinValue = defaultHexValue = base.noValue
encoding = 'us-ascii'
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
encoding=None, binValue=None, hexValue=None):
if encoding is None:
self._encoding = self.encoding
else:
self._encoding = encoding
if binValue is not None:
value = self.fromBinaryString(binValue)
if hexValue is not None:
value = self.fromHexString(hexValue)
if value is None or value is base.noValue:
value = self.defaultHexValue
if value is None or value is base.noValue:
value = self.defaultBinValue
self.__asNumbersCache = None
base.AbstractSimpleAsn1Item.__init__(self, value, tagSet, subtypeSpec)
def clone(self, value=None, tagSet=None, subtypeSpec=None,
encoding=None, binValue=None, hexValue=None):
if value is None and tagSet is None and subtypeSpec is None and \
encoding is None and binValue is None and hexValue is None:
return self
if value is None and binValue is None and hexValue is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if encoding is None:
encoding = self._encoding
return self.__class__(
value, tagSet, subtypeSpec, encoding, binValue, hexValue
)
if sys.version_info[0] <= 2:
def prettyIn(self, value):
if isinstance(value, str):
return value
elif isinstance(value, unicode):
try:
return value.encode(self._encoding)
except (LookupError, UnicodeEncodeError):
raise error.PyAsn1Error(
'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
)
elif isinstance(value, (tuple, list)):
try:
return ''.join([ chr(x) for x in value ])
except ValueError:
raise error.PyAsn1Error(
'Bad OctetString initializer \'%s\'' % (value,)
)
else:
return str(value)
else:
def prettyIn(self, value):
if isinstance(value, bytes):
return value
elif isinstance(value, str):
try:
return value.encode(self._encoding)
except UnicodeEncodeError:
raise error.PyAsn1Error(
'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
)
elif isinstance(value, OctetString):
return value.asOctets()
elif isinstance(value, (tuple, list, map)):
try:
return bytes(value)
except ValueError:
raise error.PyAsn1Error(
'Bad OctetString initializer \'%s\'' % (value,)
)
else:
try:
return str(value).encode(self._encoding)
except UnicodeEncodeError:
raise error.PyAsn1Error(
'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
)
def fromBinaryString(self, value):
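        """Pack a '0101...' character string into octets, most significant
        bit first."""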
bitNo = 8; byte = 0; r = ()
for v in value:
if bitNo:
bitNo = bitNo - 1
else:
bitNo = 7
r = r + (byte,)
byte = 0
if v == '0':
v = 0
elif v == '1':
v = 1
else:
raise error.PyAsn1Error(
'Non-binary OCTET STRING initializer %s' % (v,)
)
byte = byte | (v << bitNo)
return octets.ints2octs(r + (byte,))
def fromHexString(self, value):
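        """Pair up hex digits into octets; an odd trailing digit is padded
        with a zero nibble."""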
r = p = ()
for v in value:
if p:
r = r + (int(p+v, 16),)
p = ()
else:
p = v
if p:
r = r + (int(p+'0', 16),)
return octets.ints2octs(r)
def prettyOut(self, value):
if sys.version_info[0] <= 2:
numbers = tuple(( ord(x) for x in value ))
else:
numbers = tuple(value)
if all(x >= 32 and x <= 126 for x in numbers):
return str(value)
else:
return '0x' + ''.join(( '%.2x' % x for x in numbers ))
def __repr__(self):
r = []
doHex = False
if self._value is not self.defaultValue:
for x in self.asNumbers():
if x < 32 or x > 126:
doHex = True
break
if not doHex:
r.append('%r' % (self._value,))
if self._tagSet is not self.tagSet:
r.append('tagSet=%r' % (self._tagSet,))
if self._subtypeSpec is not self.subtypeSpec:
r.append('subtypeSpec=%r' % (self._subtypeSpec,))
if self.encoding is not self._encoding:
r.append('encoding=%r' % (self._encoding,))
if doHex:
r.append('hexValue=%r' % ''.join([ '%.2x' % x for x in self.asNumbers() ]))
return '%s(%s)' % (self.__class__.__name__, ', '.join(r))
if sys.version_info[0] <= 2:
def __str__(self): return str(self._value)
def __unicode__(self):
return self._value.decode(self._encoding, 'ignore')
def asOctets(self): return self._value
def asNumbers(self):
if self.__asNumbersCache is None:
self.__asNumbersCache = tuple([ ord(x) for x in self._value ])
return self.__asNumbersCache
else:
def __str__(self): return self._value.decode(self._encoding, 'ignore')
def __bytes__(self): return self._value
def asOctets(self): return self._value
def asNumbers(self):
if self.__asNumbersCache is None:
self.__asNumbersCache = tuple(self._value)
return self.__asNumbersCache
# Immutable sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(operator.getitem(self._value, i))
else:
return self._value[i]
def __add__(self, value): return self.clone(self._value + self.prettyIn(value))
def __radd__(self, value): return self.clone(self.prettyIn(value) + self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self * value
def __int__(self): return int(self._value)
def __float__(self): return float(self._value)
class Null(OctetString):
defaultValue = ''.encode() # This is tightly constrained
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
)
subtypeSpec = OctetString.subtypeSpec+constraint.SingleValueConstraint(''.encode())
if sys.version_info[0] <= 2:
intTypes = (int, long)
else:
intTypes = (int,)
numericTypes = intTypes + (float,)
class ObjectIdentifier(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
)
def __add__(self, other): return self.clone(self._value + other)
def __radd__(self, other): return self.clone(other + self._value)
def asTuple(self): return self._value
# Sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(
operator.getitem(self._value, i)
)
else:
return self._value[i]
def __str__(self): return self.prettyPrint()
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.prettyPrint())
def index(self, suboid): return self._value.index(suboid)
def isPrefixOf(self, value):
"""Returns true if argument OID resides deeper in the OID tree"""
l = len(self)
if l <= len(value):
if self._value[:l] == value[:l]:
return 1
return 0
def prettyIn(self, value):
"""Dotted -> tuple of numerics OID converter"""
if isinstance(value, tuple):
pass
elif isinstance(value, ObjectIdentifier):
return tuple(value)
elif isinstance(value, str):
r = []
for element in [ x for x in value.split('.') if x != '' ]:
try:
r.append(int(element, 0))
except ValueError:
raise error.PyAsn1Error(
'Malformed Object ID %s at %s: %s' %
(str(value), self.__class__.__name__, sys.exc_info()[1])
)
value = tuple(r)
else:
try:
value = tuple(value)
except TypeError:
raise error.PyAsn1Error(
'Malformed Object ID %s at %s: %s' %
(str(value), self.__class__.__name__,sys.exc_info()[1])
)
for x in value:
if not isinstance(x, intTypes) or x < 0:
raise error.PyAsn1Error(
'Invalid sub-ID in %s at %s' % (value, self.__class__.__name__)
)
return value
def prettyOut(self, value): return '.'.join([ str(x) for x in value ])
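# Example (sketch): ObjectIdentifier('1.3.6.1.2.1').isPrefixOf((1, 3, 6, 1, 2, 1, 1))
# is true, while the reverse is not -- the argument resides deeper in the tree.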
class Real(base.AbstractSimpleAsn1Item):
binEncBase = None # binEncBase = 16 is recommended for large numbers
try:
_plusInf = float('inf')
_minusInf = float('-inf')
_inf = (_plusInf, _minusInf)
except ValueError:
# Infinity support is platform and Python dependent
_plusInf = _minusInf = None
_inf = ()
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
)
def __normalizeBase10(self, value):
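        """Move trailing decimal zeros from the mantissa into the exponent,
        e.g. (1200, 10, 0) -> (12, 10, 2)."""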
m, b, e = value
while m and m % 10 == 0:
            m = m // 10  # floor division keeps integer mantissas intact on Python 3
e = e + 1
return m, b, e
def prettyIn(self, value):
if isinstance(value, tuple) and len(value) == 3:
if not isinstance(value[0], numericTypes) or \
not isinstance(value[1], intTypes) or \
not isinstance(value[2], intTypes):
raise error.PyAsn1Error('Lame Real value syntax: %s' % (value,))
if isinstance(value[0], float) and \
self._inf and value[0] in self._inf:
return value[0]
if value[1] not in (2, 10):
raise error.PyAsn1Error(
'Prohibited base for Real value: %s' % (value[1],)
)
if value[1] == 10:
value = self.__normalizeBase10(value)
return value
elif isinstance(value, intTypes):
return self.__normalizeBase10((value, 10, 0))
elif isinstance(value, (str, float)):
if isinstance(value, str):
try:
value = float(value)
except ValueError:
raise error.PyAsn1Error(
'Bad real value syntax: %s' % (value,)
)
if self._inf and value in self._inf:
return value
else:
e = 0
while int(value) != value:
value = value * 10
e = e - 1
return self.__normalizeBase10((int(value), 10, e))
elif isinstance(value, Real):
return tuple(value)
raise error.PyAsn1Error(
'Bad real value syntax: %s' % (value,)
)
def prettyOut(self, value):
if value in self._inf:
return '\'%s\'' % value
else:
return str(value)
def prettyPrint(self, scope=0):
if self.isInfinity():
return self.prettyOut(self._value)
else:
return str(float(self))
def isPlusInfinity(self): return self._value == self._plusInf
def isMinusInfinity(self): return self._value == self._minusInf
def isInfinity(self): return self._value in self._inf
def __str__(self): return str(float(self))
def __add__(self, value): return self.clone(float(self) + value)
def __radd__(self, value): return self + value
def __mul__(self, value): return self.clone(float(self) * value)
def __rmul__(self, value): return self * value
def __sub__(self, value): return self.clone(float(self) - value)
def __rsub__(self, value): return self.clone(value - float(self))
def __mod__(self, value): return self.clone(float(self) % value)
def __rmod__(self, value): return self.clone(value % float(self))
def __pow__(self, value, modulo=None): return self.clone(pow(float(self), value, modulo))
def __rpow__(self, value): return self.clone(pow(value, float(self)))
if sys.version_info[0] <= 2:
def __div__(self, value): return self.clone(float(self) / value)
def __rdiv__(self, value): return self.clone(value / float(self))
else:
def __truediv__(self, value): return self.clone(float(self) / value)
def __rtruediv__(self, value): return self.clone(value / float(self))
def __divmod__(self, value): return self.clone(float(self) // value)
def __rdivmod__(self, value): return self.clone(value // float(self))
def __int__(self): return int(float(self))
if sys.version_info[0] <= 2:
def __long__(self): return long(float(self))
def __float__(self):
if self._value in self._inf:
return self._value
else:
return float(
self._value[0] * pow(self._value[1], self._value[2])
)
def __abs__(self): return self.clone(abs(float(self)))
def __pos__(self): return self.clone(+float(self))
def __neg__(self): return self.clone(-float(self))
def __round__(self, n=0):
r = round(float(self), n)
if n:
return self.clone(r)
else:
return r
def __floor__(self): return self.clone(math.floor(float(self)))
def __ceil__(self): return self.clone(math.ceil(float(self)))
if sys.version_info[0:2] > (2, 5):
def __trunc__(self): return self.clone(math.trunc(float(self)))
def __lt__(self, value): return float(self) < value
def __le__(self, value): return float(self) <= value
def __eq__(self, value): return float(self) == value
def __ne__(self, value): return float(self) != value
def __gt__(self, value): return float(self) > value
def __ge__(self, value): return float(self) >= value
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(float(self))
else:
def __bool__(self): return bool(float(self))
__hash__ = base.AbstractSimpleAsn1Item.__hash__
def __getitem__(self, idx):
if self._value in self._inf:
raise error.PyAsn1Error('Invalid infinite value operation')
else:
return self._value[idx]
class Enumerated(Integer):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x0A)
)
# "Structured" ASN.1 types
class SetOf(base.AbstractConstructedAsn1Item):
componentType = None
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
)
typeId = 1
strictConstraints = False
def _cloneComponentValues(self, myClone, cloneValueFlag):
idx = 0; l = len(self._componentValues)
while idx < l:
c = self._componentValues[idx]
if c is not None:
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByPosition(
idx, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByPosition(idx, c.clone())
idx = idx + 1
def _verifyComponent(self, idx, value):
t = self._componentType
if t is None:
return
if not t.isSameTypeWith(value,matchConstraints=self.strictConstraints):
raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, t))
if self.strictConstraints and \
not t.isSuperTypeOf(value, matchTags=False):
raise error.PyAsn1Error('Component value is constraints-incompatible: %r vs %r' % (value, t))
def getComponentByPosition(self, idx): return self._componentValues[idx]
def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if value is None:
if self._componentValues[idx] is None:
if self._componentType is None:
raise error.PyAsn1Error('Component type not defined')
self._componentValues[idx] = self._componentType.clone()
self._componentValuesSet = self._componentValuesSet + 1
return self
elif not isinstance(value, base.Asn1Item):
if self._componentType is None:
raise error.PyAsn1Error('Component type not defined')
if isinstance(self._componentType, base.AbstractSimpleAsn1Item):
value = self._componentType.clone(value=value)
else:
raise error.PyAsn1Error('Instance value required')
if verifyConstraints:
if self._componentType is not None:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
if self._componentValues[idx] is None:
self._componentValuesSet = self._componentValuesSet + 1
self._componentValues[idx] = value
return self
def getComponentTagMap(self):
if self._componentType is not None:
return self._componentType.getTagMap()
def prettyPrint(self, scope=0):
scope = scope + 1
r = self.__class__.__name__ + ':\n'
for idx in range(len(self._componentValues)):
r = r + ' '*scope
if self._componentValues[idx] is None:
r = r + '<empty>'
else:
r = r + self._componentValues[idx].prettyPrint(scope)
return r
def prettyPrintType(self, scope=0):
scope = scope + 1
r = '%s -> %s {\n' % (self.getTagSet(), self.__class__.__name__)
if self._componentType is not None:
r = r + ' '*scope
r = r + self._componentType.prettyPrintType(scope)
return r + '\n' + ' '*(scope-1) + '}'
class SequenceOf(SetOf):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
)
typeId = 2
class SequenceAndSetBase(base.AbstractConstructedAsn1Item):
componentType = namedtype.NamedTypes()
strictConstraints = False
def __init__(self, componentType=None, tagSet=None,
subtypeSpec=None, sizeSpec=None):
if componentType is None:
componentType = self.componentType
base.AbstractConstructedAsn1Item.__init__(
self, componentType.clone(), tagSet, subtypeSpec, sizeSpec
)
self._componentTypeLen = len(self._componentType)
def __getitem__(self, idx):
if isinstance(idx, str):
return self.getComponentByName(idx)
else:
return base.AbstractConstructedAsn1Item.__getitem__(self, idx)
def __setitem__(self, idx, value):
if isinstance(idx, str):
self.setComponentByName(idx, value)
else:
base.AbstractConstructedAsn1Item.__setitem__(self, idx, value)
def _cloneComponentValues(self, myClone, cloneValueFlag):
idx = 0; l = len(self._componentValues)
while idx < l:
c = self._componentValues[idx]
if c is not None:
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByPosition(
idx, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByPosition(idx, c.clone())
idx = idx + 1
def _verifyComponent(self, idx, value):
if idx >= self._componentTypeLen:
raise error.PyAsn1Error(
'Component type error out of range'
)
t = self._componentType[idx].getType()
if not t.isSameTypeWith(value,matchConstraints=self.strictConstraints):
raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, t))
if self.strictConstraints and \
not t.isSuperTypeOf(value, matchTags=False):
raise error.PyAsn1Error('Component value is constraints-incompatible: %r vs %r' % (value, t))
def getComponentByName(self, name):
return self.getComponentByPosition(
self._componentType.getPositionByName(name)
)
def setComponentByName(self, name, value=None, verifyConstraints=True):
return self.setComponentByPosition(
self._componentType.getPositionByName(name),value,verifyConstraints
)
def getComponentByPosition(self, idx):
try:
return self._componentValues[idx]
except IndexError:
if idx < self._componentTypeLen:
return
raise
def setComponentByPosition(self, idx, value=None,
verifyConstraints=True,
exactTypes=False,
matchTags=True,
matchConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if value is None:
if self._componentValues[idx] is None:
self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
self._componentValuesSet = self._componentValuesSet + 1
return self
elif not isinstance(value, base.Asn1Item):
t = self._componentType.getTypeByPosition(idx)
if isinstance(t, base.AbstractSimpleAsn1Item):
value = t.clone(value=value)
else:
raise error.PyAsn1Error('Instance value required')
if verifyConstraints:
if self._componentTypeLen:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
if self._componentValues[idx] is None:
self._componentValuesSet = self._componentValuesSet + 1
self._componentValues[idx] = value
return self
def getNameByPosition(self, idx):
if self._componentTypeLen:
return self._componentType.getNameByPosition(idx)
def getDefaultComponentByPosition(self, idx):
if self._componentTypeLen and self._componentType[idx].isDefaulted:
return self._componentType[idx].getType()
def getComponentType(self):
if self._componentTypeLen:
return self._componentType
def setDefaultComponents(self):
if self._componentTypeLen == self._componentValuesSet:
return
idx = self._componentTypeLen
while idx:
idx = idx - 1
if self._componentType[idx].isDefaulted:
if self.getComponentByPosition(idx) is None:
self.setComponentByPosition(idx)
elif not self._componentType[idx].isOptional:
if self.getComponentByPosition(idx) is None:
raise error.PyAsn1Error(
'Uninitialized component #%s at %r' % (idx, self)
)
def prettyPrint(self, scope=0):
scope = scope + 1
r = self.__class__.__name__ + ':\n'
for idx in range(len(self._componentValues)):
if self._componentValues[idx] is not None:
r = r + ' '*scope
componentType = self.getComponentType()
if componentType is None:
r = r + '<no-name>'
else:
r = r + componentType.getNameByPosition(idx)
r = '%s=%s\n' % (
r, self._componentValues[idx].prettyPrint(scope)
)
return r
def prettyPrintType(self, scope=0):
scope = scope + 1
r = '%s -> %s {\n' % (self.getTagSet(), self.__class__.__name__)
for idx in range(len(self.componentType)):
r = r + ' '*scope
r = r + '"%s"' % self.componentType.getNameByPosition(idx)
r = '%s = %s\n' % (
r, self._componentType.getTypeByPosition(idx).prettyPrintType(scope)
)
return r + '\n' + ' '*(scope-1) + '}'
class Sequence(SequenceAndSetBase):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
)
typeId = 3
def getComponentTagMapNearPosition(self, idx):
if self._componentType:
return self._componentType.getTagMapNearPosition(idx)
def getComponentPositionNearType(self, tagSet, idx):
if self._componentType:
return self._componentType.getPositionNearType(tagSet, idx)
else:
return idx
class Set(SequenceAndSetBase):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
)
typeId = 4
def getComponent(self, innerFlag=0): return self
def getComponentByType(self, tagSet, innerFlag=0):
c = self.getComponentByPosition(
self._componentType.getPositionByType(tagSet)
)
if innerFlag and isinstance(c, Set):
# get inner component by inner tagSet
return c.getComponent(1)
else:
# get outer component by inner tagSet
return c
def setComponentByType(self, tagSet, value=None, innerFlag=0,
verifyConstraints=True):
idx = self._componentType.getPositionByType(tagSet)
t = self._componentType.getTypeByPosition(idx)
if innerFlag: # set inner component by inner tagSet
if t.getTagSet():
return self.setComponentByPosition(
idx, value, verifyConstraints
)
else:
t = self.setComponentByPosition(idx).getComponentByPosition(idx)
return t.setComponentByType(
tagSet, value, innerFlag, verifyConstraints
)
else: # set outer component by inner tagSet
return self.setComponentByPosition(
idx, value, verifyConstraints
)
def getComponentTagMap(self):
if self._componentType:
return self._componentType.getTagMap(True)
def getComponentPositionByType(self, tagSet):
if self._componentType:
return self._componentType.getPositionByType(tagSet)
class Choice(Set):
tagSet = baseTagSet = tag.TagSet() # untagged
sizeSpec = constraint.ConstraintsIntersection(
constraint.ValueSizeConstraint(1, 1)
)
typeId = 5
_currentIdx = None
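    # A CHOICE holds exactly one alternative at a time (sizeSpec above pins
    # the component count to 1); _currentIdx tracks which one is chosen.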
def __eq__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] == other
return NotImplemented
def __ne__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] != other
return NotImplemented
def __lt__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] < other
return NotImplemented
def __le__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] <= other
return NotImplemented
def __gt__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] > other
return NotImplemented
def __ge__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] >= other
return NotImplemented
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._componentValues)
else:
def __bool__(self): return bool(self._componentValues)
def __len__(self): return self._currentIdx is not None and 1 or 0
def verifySizeSpec(self):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
self._sizeSpec(' ')
def _cloneComponentValues(self, myClone, cloneValueFlag):
try:
c = self.getComponent()
except error.PyAsn1Error:
pass
else:
if isinstance(c, Choice):
tagSet = c.getEffectiveTagSet()
else:
tagSet = c.getTagSet()
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByType(
tagSet, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByType(tagSet, c.clone())
def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if self._currentIdx is not None:
self._componentValues[self._currentIdx] = None
if value is None:
if self._componentValues[idx] is None:
self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
self._componentValuesSet = 1
self._currentIdx = idx
return self
elif not isinstance(value, base.Asn1Item):
value = self._componentType.getTypeByPosition(idx).clone(
value=value
)
if verifyConstraints:
if self._componentTypeLen:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
self._componentValues[idx] = value
self._currentIdx = idx
self._componentValuesSet = 1
return self
def getMinTagSet(self):
if self._tagSet:
return self._tagSet
else:
return self._componentType.genMinTagSet()
def getEffectiveTagSet(self):
if self._tagSet:
return self._tagSet
else:
c = self.getComponent()
if isinstance(c, Choice):
return c.getEffectiveTagSet()
else:
return c.getTagSet()
def getTagMap(self):
if self._tagSet:
return Set.getTagMap(self)
else:
return Set.getComponentTagMap(self)
def getComponent(self, innerFlag=0):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
c = self._componentValues[self._currentIdx]
if innerFlag and isinstance(c, Choice):
return c.getComponent(innerFlag)
else:
return c
def getName(self, innerFlag=0):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
if innerFlag:
c = self._componentValues[self._currentIdx]
if isinstance(c, Choice):
return c.getName(innerFlag)
return self._componentType.getNameByPosition(self._currentIdx)
def setDefaultComponents(self): pass
class Any(OctetString):
tagSet = baseTagSet = tag.TagSet() # untagged
typeId = 6
def getTagMap(self):
return tagmap.TagMap(
{ self.getTagSet(): self },
{ eoo.endOfOctets.getTagSet(): eoo.endOfOctets },
self
)
# XXX
# coercion rules?
| gpl-3.0 | 6,276,555,602,748,372,000 | 37.597751 | 105 | 0.548937 | false |
unreal666/outwiker | src/outwiker/pages/text/textpage.py | 3 | 1215 | # -*- coding: utf-8 -*-
"""
Необходимые классы для создания страниц с текстом
"""
from outwiker.core.tree import WikiPage
from outwiker.pages.text.textpanel import TextPanel
from outwiker.core.factory import PageFactory
class TextWikiPage (WikiPage):
"""
Класс текстовых страниц
"""
def __init__(self, path, title, parent, readonly=False):
WikiPage.__init__(self, path, title, parent, readonly)
@staticmethod
def getTypeString():
return u"text"
class TextPageFactory (PageFactory):
"""
Фабрика для создания текстовой страницы и ее представления
"""
def getPageType(self):
return TextWikiPage
@property
def title(self):
"""
Название страницы, показываемое пользователю
"""
return _(u"Text Page")
def getPageView(self, parent, application):
"""
Вернуть контрол, который будет отображать и редактировать страницу
"""
return TextPanel(parent, application)
| gpl-3.0 | -1,954,552,624,407,208,000 | 22.302326 | 74 | 0.651697 | false |
filippog/pysnmp | pysnmp/proto/secmod/rfc3414/auth/hmacmd5.py | 4 | 3369 | #
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2016, Ilya Etingof <[email protected]>
# License: http://pysnmp.sf.net/license.html
#
try:
from hashlib import md5
except ImportError:
import md5
md5 = md5.new
from pyasn1.type import univ
from pysnmp.proto.secmod.rfc3414.auth import base
from pysnmp.proto.secmod.rfc3414 import localkey
from pysnmp.proto import errind, error
_twelveZeros = univ.OctetString((0,)*12).asOctets()
_fortyEightZeros = (0,)*48
# rfc3414: 6.2.4
class HmacMd5(base.AbstractAuthenticationService):
serviceID = (1, 3, 6, 1, 6, 3, 10, 1, 1, 2) # usmHMACMD5AuthProtocol
__ipad = [0x36]*64
__opad = [0x5C]*64
def hashPassphrase(self, authKey):
return localkey.hashPassphraseMD5(authKey)
def localizeKey(self, authKey, snmpEngineID):
return localkey.localizeKeyMD5(authKey, snmpEngineID)
# 6.3.1
def authenticateOutgoingMsg(self, authKey, wholeMsg):
# Here we expect calling secmod to indicate where the digest
# should be in the substrate. Also, it pre-sets digest placeholder
# so we hash wholeMsg out of the box.
# Yes, that's ugly but that's rfc...
l = wholeMsg.find(_twelveZeros)
if l == -1:
raise error.ProtocolError('Cant locate digest placeholder')
wholeHead = wholeMsg[:l]
wholeTail = wholeMsg[l+12:]
# 6.3.1.1
# 6.3.1.2a
extendedAuthKey = authKey.asNumbers() + _fortyEightZeros
# 6.3.1.2b --> noop
# 6.3.1.2c
k1 = univ.OctetString(
map(lambda x, y: x^y, extendedAuthKey, self.__ipad)
)
# 6.3.1.2d --> noop
# 6.3.1.2e
k2 = univ.OctetString(
map(lambda x, y: x^y, extendedAuthKey, self.__opad)
)
# 6.3.1.3
d1 = md5(k1.asOctets()+wholeMsg).digest()
# 6.3.1.4
d2 = md5(k2.asOctets()+d1).digest()
mac = d2[:12]
# 6.3.1.5 & 6
return wholeHead + mac + wholeTail
# 6.3.2
def authenticateIncomingMsg(self, authKey, authParameters, wholeMsg):
# 6.3.2.1 & 2
if len(authParameters) != 12:
raise error.StatusInformation(
errorIndication=errind.authenticationError
)
# 6.3.2.3
l = wholeMsg.find(authParameters.asOctets())
if l == -1:
            raise error.ProtocolError('Can\'t locate digest in wholeMsg')
wholeHead = wholeMsg[:l]
wholeTail = wholeMsg[l+12:]
authenticatedWholeMsg = wholeHead + _twelveZeros + wholeTail
# 6.3.2.4a
extendedAuthKey = authKey.asNumbers() + _fortyEightZeros
# 6.3.2.4b --> noop
# 6.3.2.4c
k1 = univ.OctetString(
map(lambda x, y: x^y, extendedAuthKey, self.__ipad)
)
# 6.3.2.4d --> noop
# 6.3.2.4e
k2 = univ.OctetString(
map(lambda x, y: x^y, extendedAuthKey, self.__opad)
)
# 6.3.2.5a
d1 = md5(k1.asOctets()+authenticatedWholeMsg).digest()
# 6.3.2.5b
d2 = md5(k2.asOctets()+d1).digest()
# 6.3.2.5c
mac = d2[:12]
# 6.3.2.6
if mac != authParameters:
raise error.StatusInformation(
errorIndication=errind.authenticationFailure
)
return authenticatedWholeMsg
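# Cross-check (a sketch, not part of pysnmp's API): the construction above is
# standard RFC 2104 HMAC-MD5 truncated to 12 octets, so for a 16-octet
# localized key it should agree with the stdlib, given a message whose digest
# field is zero-filled as rfc3414 requires:
#
#   import hmac, hashlib
#   mac = hmac.new(authKey.asOctets(), wholeMsg, hashlib.md5).digest()[:12]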
| bsd-3-clause | 194,906,958,389,056,930 | 26.614754 | 74 | 0.5794 | false |
mongkok/defocus | defocus/users/migrations/0001_initial.py | 1 | 2373 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-16 21:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('first_name', models.CharField(blank=True, max_length=128, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=128, verbose_name='last name')),
('language', models.CharField(default='es', max_length=2)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'ordering': ('-id',),
},
),
]
| mit | 2,583,022,840,345,795,600 | 56.878049 | 266 | 0.636747 | false |
jjingrong/PONUS-1.2 | venv/build/django/django/contrib/auth/management/commands/changepassword.py | 76 | 2071 | from __future__ import unicode_literals
import getpass
from optparse import make_option
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.encoding import force_str
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Specifies the database to use. Default is "default".'),
)
help = "Change a user's password for django.contrib.auth."
requires_model_validation = False
def _get_pass(self, prompt="Password: "):
p = getpass.getpass(prompt=force_str(prompt))
if not p:
raise CommandError("aborted")
return p
def handle(self, *args, **options):
if len(args) > 1:
raise CommandError("need exactly one or zero arguments for username")
if args:
username, = args
else:
username = getpass.getuser()
UserModel = get_user_model()
try:
u = UserModel._default_manager.using(options.get('database')).get(**{
UserModel.USERNAME_FIELD: username
})
except UserModel.DoesNotExist:
raise CommandError("user '%s' does not exist" % username)
self.stdout.write("Changing password for user '%s'\n" % u)
MAX_TRIES = 3
count = 0
p1, p2 = 1, 2 # To make them initially mismatch.
while p1 != p2 and count < MAX_TRIES:
p1 = self._get_pass()
p2 = self._get_pass("Password (again): ")
if p1 != p2:
self.stdout.write("Passwords do not match. Please try again.\n")
count = count + 1
if count == MAX_TRIES:
raise CommandError("Aborting password change for user '%s' after %s attempts" % (u, count))
u.set_password(p1)
u.save()
return "Password changed successfully for user '%s'" % u
| mit | -1,031,634,272,322,229,500 | 31.873016 | 103 | 0.598745 | false |
MjAbuz/watchdog | import/parse/voteview.py | 3 | 1969 | """
parse voteview partisanship data
"""
HOUSE_DAT = "../data/crawl/voteview/HL01110C21_PRES_BSSE.DAT"
SENATE_DAT = "../data/crawl/voteview/SL01110C21_BSSE.dat"
state_map = { #@@ import to state json as icpsr
41: 'AL', 81: 'AK', 61: 'AZ', 42: 'AR', 71: 'CA', 62: 'CO', 1: 'CT',
11: 'DE', 43: 'FL', 44: 'GA', 82: 'HI', 63: 'ID', 21: 'IL', 22: 'IN',
31: 'IA', 32: 'KS', 51: 'KY', 45: 'LA', 2: 'ME', 52: 'MD', 3: 'MA',
23: 'MI', 33: 'MN', 46: 'MS', 34: 'MO', 64: 'MT', 35: 'NE', 65: 'NV',
4: 'NH', 12: 'NJ', 66: 'NM', 13: 'NY', 47: 'NC', 36: 'ND', 24: 'OH',
53: 'OK', 72: 'OR', 14: 'PA', 5: 'RI', 48: 'SC', 37: 'SD', 54: 'TN',
49: 'TX', 67: 'UT', 6: 'VT', 40: 'VA', 73: 'WA', 56: 'WV', 25: 'WI',
68: 'WY', 55: 'DC'
}
import web
import tools
def parse():
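    """Parse the fixed-width VoteView member records, one per line, into
    web.storage objects; the slice offsets below follow the DW-NOMINATE
    .DAT column layout."""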
for fn in [HOUSE_DAT, SENATE_DAT]:
for line in file(fn):
out = web.storage()
out.congress = int(line[0:4])
out.icpsr_id = int(line[4:10])
out.icpsr_state = int(line[10:13])
out.district = int(line[13:15])
out.state_name = line[15:23].strip()
out.party_code = int(line[23:28])
out.last_name = line[28:41].strip()
out.dim1 = float(line[41:47])
out.dim2 = float(line[47:54])
out.std1 = float(line[54:61])
out.std2 = float(line[61:68])
out.corr = float(line[68:75])
out.loglike = float(line[75:87])
out.n_votes = int(line[87:92])
out.n_errs = int(line[92:97])
out.n_geomeanprob = float(line[97:104])
if out.icpsr_state in state_map:
out.state_code = state_map[out.icpsr_state]
if out.district:
out.district_id = out.state_code + '-' + str(out.district).zfill(2)
else:
out.district_id = out.state_code
yield out
if __name__ == "__main__":
tools.export(parse())
| agpl-3.0 | -7,038,240,073,142,130,000 | 36.150943 | 86 | 0.485526 | false |
twiest/openshift-tools | ansible/roles/lib_zabbix/library/zbx_user.py | 13 | 7263 | #!/usr/bin/env python
'''
ansible module for zabbix users
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Zabbix user ansible module
#
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is in place because each module looks similar to each other.
# These need duplicate code as their behavior is very similar
# but different for each zabbix class.
# pylint: disable=duplicate-code
# pylint: disable=import-error
from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
def exists(content, key='result'):
''' Check if key exists in content or the size of content[key] > 0
'''
if not content.has_key(key):
return False
if not content[key]:
return False
return True
def get_usergroups(zapi, usergroups):
''' Get usergroups
'''
ugroups = []
for ugr in usergroups:
content = zapi.get_content('usergroup',
'get',
{'search': {'name': ugr},
#'selectUsers': 'userid',
#'getRights': 'extend'
})
if content['result']:
ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']})
return ugroups or None
def get_passwd(passwd):
'''Determine if password is set, if not, return 'zabbix'
'''
if passwd:
return passwd
return 'zabbix'
def get_usertype(user_type):
'''
Determine zabbix user account type
'''
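    # Zabbix API account types: 1 = Zabbix user, 2 = Zabbix admin,
    # 3 = Zabbix super admin; the default here is a plain user.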
if not user_type:
return None
utype = 1
if 'super' in user_type:
utype = 3
elif 'admin' in user_type or user_type == 'admin':
utype = 2
return utype
def main():
'''
ansible zabbix module for users
'''
module = AnsibleModule(
argument_spec=dict(
zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
zbx_debug=dict(default=False, type='bool'),
login=dict(default=None, type='str'),
first_name=dict(default=None, type='str'),
last_name=dict(default=None, type='str'),
user_type=dict(default=None, type='str'),
password=dict(default=None, type='str'),
refresh=dict(default=None, type='int'),
autologout=dict(default=None, type='int'),
update_password=dict(default=False, type='bool'),
user_groups=dict(default=[], type='list'),
state=dict(default='present', type='str'),
),
#supports_check_mode=True
)
zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
module.params['zbx_user'],
module.params['zbx_password'],
module.params['zbx_debug']))
## before we can create a user media and users with media types we need media
zbx_class_name = 'user'
idname = "userid"
state = module.params['state']
content = zapi.get_content(zbx_class_name,
'get',
{'output': 'extend',
'search': {'alias': module.params['login']},
"selectUsrgrps": 'usergrpid',
})
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
if state == 'absent':
if not exists(content) or len(content['result']) == 0:
module.exit_json(changed=False, state="absent")
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
module.exit_json(changed=True, results=content['result'], state="absent")
if state == 'present':
params = {'alias': module.params['login'],
'passwd': get_passwd(module.params['password']),
'usrgrps': get_usergroups(zapi, module.params['user_groups']),
'name': module.params['first_name'],
'surname': module.params['last_name'],
'refresh': module.params['refresh'],
'autologout': module.params['autologout'],
'type': get_usertype(module.params['user_type']),
}
# Remove any None valued params
_ = [params.pop(key, None) for key in params.keys() if params[key] is None]
if not exists(content):
# if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'create', params)
if content.has_key('Error'):
module.exit_json(failed=True, changed=False, results=content, state='present')
module.exit_json(changed=True, results=content['result'], state='present')
# already exists, we need to update it
# let's compare properties
differences = {}
# Update password
if not module.params['update_password']:
params.pop('passwd', None)
zab_results = content['result'][0]
for key, value in params.items():
if key == 'usrgrps':
                # Straight equality fails here because both sides are lists
                # of dicts whose order may differ, so flag a difference only
                # if either group list contains an entry the other lacks.
if not all([_ in value for _ in zab_results[key]]) \
or not all([_ in zab_results[key] for _ in value]):
differences[key] = value
elif zab_results[key] != value and zab_results[key] != str(value):
differences[key] = value
if not differences:
module.exit_json(changed=False, results=zab_results, state="present")
# We have differences and need to update
differences[idname] = zab_results[idname]
content = zapi.get_content(zbx_class_name, 'update', differences)
module.exit_json(changed=True, results=content['result'], state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required.
from ansible.module_utils.basic import *
main()
| apache-2.0 | -3,983,789,163,029,495,300 | 35.497487 | 94 | 0.571527 | false |
mubix/pth-toolkit | lib/python2.7/site-packages/samba/tests/samba_tool/ntacl.py | 42 | 6329 | # Unix SMB/CIFS implementation.
# Copyright (C) Andrew Bartlett 2012
#
# Based on user.py:
# Copyright (C) Sean Dague <[email protected]> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import time
import ldb
from samba.tests.samba_tool.base import SambaToolCmdTest
import random
class NtACLCmdSysvolTestCase(SambaToolCmdTest):
"""Tests for samba-tool ntacl sysvol* subcommands"""
def test_ntvfs(self):
(result, out, err) = self.runsubcmd("ntacl", "sysvolreset",
"--use-ntvfs")
self.assertCmdSuccess(result)
self.assertEquals(out,"","Shouldn't be any output messages")
self.assertIn("Please note that POSIX permissions have NOT been changed, only the stored NT ACL", err)
def test_s3fs(self):
(result, out, err) = self.runsubcmd("ntacl", "sysvolreset",
"--use-s3fs")
self.assertCmdSuccess(result)
self.assertEquals(err,"","Shouldn't be any error messages")
self.assertEquals(out,"","Shouldn't be any output messages")
def test_ntvfs_check(self):
(result, out, err) = self.runsubcmd("ntacl", "sysvolreset",
"--use-ntvfs")
self.assertCmdSuccess(result)
self.assertEquals(out,"","Shouldn't be any output messages")
self.assertIn("Please note that POSIX permissions have NOT been changed, only the stored NT ACL", err)
# Now check they were set correctly
(result, out, err) = self.runsubcmd("ntacl", "sysvolcheck")
self.assertCmdSuccess(result)
self.assertEquals(err,"","Shouldn't be any error messages")
self.assertEquals(out,"","Shouldn't be any output messages")
def test_s3fs_check(self):
(result, out, err) = self.runsubcmd("ntacl", "sysvolreset",
"--use-s3fs")
self.assertCmdSuccess(result)
self.assertEquals(err,"","Shouldn't be any error messages")
self.assertEquals(out,"","Shouldn't be any output messages")
# Now check they were set correctly
(result, out, err) = self.runsubcmd("ntacl", "sysvolcheck")
self.assertCmdSuccess(result)
self.assertEquals(err,"","Shouldn't be any error messages")
self.assertEquals(out,"","Shouldn't be any output messages")
class NtACLCmdGetSetTestCase(SambaToolCmdTest):
"""Tests for samba-tool ntacl get/set subcommands"""
acl = "O:DAG:DUD:P(A;OICI;0x001f01ff;;;DA)(A;OICI;0x001f01ff;;;EA)(A;OICIIO;0x001f01ff;;;CO)(A;OICI;0x001f01ff;;;DA)(A;OICI;0x001f01ff;;;SY)(A;OICI;0x001200a9;;;AU)(A;OICI;0x001200a9;;;ED)S:AI(OU;CIIDSA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)(OU;CIIDSA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
def test_ntvfs(self):
path = os.environ['SELFTEST_PREFIX']
tempf = os.path.join(path,"pytests"+str(int(100000*random.random())))
        with open(tempf, 'w') as f:
            f.write("empty")
(result, out, err) = self.runsubcmd("ntacl", "set", self.acl, tempf,
"--use-ntvfs")
self.assertCmdSuccess(result)
self.assertEquals(out,"","Shouldn't be any output messages")
self.assertIn("Please note that POSIX permissions have NOT been changed, only the stored NT ACL", err)
def test_s3fs(self):
path = os.environ['SELFTEST_PREFIX']
tempf = os.path.join(path,"pytests"+str(int(100000*random.random())))
        with open(tempf, 'w') as f:
            f.write("empty")
(result, out, err) = self.runsubcmd("ntacl", "set", self.acl, tempf,
"--use-s3fs")
self.assertCmdSuccess(result)
self.assertEquals(err,"","Shouldn't be any error messages")
self.assertEquals(out,"","Shouldn't be any output messages")
def test_ntvfs_check(self):
path = os.environ['SELFTEST_PREFIX']
tempf = os.path.join(path,"pytests"+str(int(100000*random.random())))
        with open(tempf, 'w') as f:
            f.write("empty")
(result, out, err) = self.runsubcmd("ntacl", "set", self.acl, tempf,
"--use-ntvfs")
self.assertCmdSuccess(result)
self.assertEquals(out,"","Shouldn't be any output messages")
self.assertIn("Please note that POSIX permissions have NOT been changed, only the stored NT ACL", err)
# Now check they were set correctly
(result, out, err) = self.runsubcmd("ntacl", "get", tempf,
"--use-ntvfs", "--as-sddl")
self.assertCmdSuccess(result)
self.assertEquals(err,"","Shouldn't be any error messages")
self.assertEquals(self.acl+"\n", out, "Output should be the ACL")
def test_s3fs_check(self):
path = os.environ['SELFTEST_PREFIX']
tempf = os.path.join(path,"pytests"+str(int(100000*random.random())))
        with open(tempf, 'w') as f:
            f.write("empty")
(result, out, err) = self.runsubcmd("ntacl", "set", self.acl, tempf,
"--use-s3fs")
self.assertCmdSuccess(result)
self.assertEquals(out,"","Shouldn't be any output messages")
self.assertEquals(err,"","Shouldn't be any error messages")
# Now check they were set correctly
(result, out, err) = self.runsubcmd("ntacl", "get", tempf,
"--use-s3fs", "--as-sddl")
self.assertCmdSuccess(result)
self.assertEquals(err,"","Shouldn't be any error messages")
self.assertEquals(self.acl+"\n", out,"Output should be the ACL")
| bsd-2-clause | 5,131,043,824,513,855,000 | 45.881481 | 379 | 0.619055 | false |
chrislit/abydos | tests/distance/test_distance_baroni_urbani_buser_ii.py | 1 | 7009 | # Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.distance.test_distance_baroni_urbani_buser_ii.
This module contains unit tests for abydos.distance.BaroniUrbaniBuserII.
"""
import unittest
from abydos.distance import BaroniUrbaniBuserII
class BaroniUrbaniBuserIITestCases(unittest.TestCase):
"""Test BaroniUrbaniBuserII functions.
abydos.distance.BaroniUrbaniBuserII
"""
cmp = BaroniUrbaniBuserII()
cmp_no_d = BaroniUrbaniBuserII(alphabet=0)
def test_baroni_urbani_buser_ii_sim(self):
"""Test abydos.distance.BaroniUrbaniBuserII.sim."""
# Base cases
self.assertEqual(self.cmp.sim('', ''), 1.0)
self.assertEqual(self.cmp.sim('a', ''), 0.0)
self.assertEqual(self.cmp.sim('', 'a'), 0.0)
self.assertEqual(self.cmp.sim('abc', ''), 0.0)
self.assertEqual(self.cmp.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp.sim('abc', 'abc'), 1.0)
self.assertEqual(self.cmp.sim('abcd', 'efgh'), 0.0)
self.assertAlmostEqual(self.cmp.sim('Nigel', 'Niall'), 0.8951383588)
self.assertAlmostEqual(self.cmp.sim('Niall', 'Nigel'), 0.8951383588)
self.assertAlmostEqual(self.cmp.sim('Colin', 'Coiln'), 0.8951383588)
self.assertAlmostEqual(self.cmp.sim('Coiln', 'Colin'), 0.8951383588)
self.assertAlmostEqual(
self.cmp.sim('ATCAACGAGT', 'AACGATTAG'), 0.9199236936
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.sim('', ''), 1.0)
self.assertEqual(self.cmp_no_d.sim('a', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('', 'a'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abc', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abc', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.sim('abcd', 'efgh'), 0.0)
self.assertAlmostEqual(
self.cmp_no_d.sim('Nigel', 'Niall'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.sim('Niall', 'Nigel'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.sim('Colin', 'Coiln'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.sim('Coiln', 'Colin'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.sim('ATCAACGAGT', 'AACGATTAG'), 0.5
)
def test_baroni_urbani_buser_ii_dist(self):
"""Test abydos.distance.BaroniUrbaniBuserII.dist."""
# Base cases
self.assertEqual(self.cmp.dist('', ''), 0.0)
self.assertEqual(self.cmp.dist('a', ''), 1.0)
self.assertEqual(self.cmp.dist('', 'a'), 1.0)
self.assertEqual(self.cmp.dist('abc', ''), 1.0)
self.assertEqual(self.cmp.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp.dist('abc', 'abc'), 0.0)
self.assertEqual(self.cmp.dist('abcd', 'efgh'), 1.0)
self.assertAlmostEqual(self.cmp.dist('Nigel', 'Niall'), 0.1048616412)
self.assertAlmostEqual(self.cmp.dist('Niall', 'Nigel'), 0.1048616412)
self.assertAlmostEqual(self.cmp.dist('Colin', 'Coiln'), 0.1048616412)
self.assertAlmostEqual(self.cmp.dist('Coiln', 'Colin'), 0.1048616412)
self.assertAlmostEqual(
self.cmp.dist('ATCAACGAGT', 'AACGATTAG'), 0.0800763064
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.dist('', ''), 0.0)
self.assertEqual(self.cmp_no_d.dist('a', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('', 'a'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abc', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abc', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.dist('abcd', 'efgh'), 1.0)
self.assertAlmostEqual(
self.cmp_no_d.dist('Nigel', 'Niall'), 0.6666666667
)
self.assertAlmostEqual(
self.cmp_no_d.dist('Niall', 'Nigel'), 0.6666666667
)
self.assertAlmostEqual(
self.cmp_no_d.dist('Colin', 'Coiln'), 0.6666666667
)
self.assertAlmostEqual(
self.cmp_no_d.dist('Coiln', 'Colin'), 0.6666666667
)
self.assertAlmostEqual(
self.cmp_no_d.dist('ATCAACGAGT', 'AACGATTAG'), 0.5
)
def test_baroni_urbani_buser_ii_corr(self):
"""Test abydos.distance.BaroniUrbaniBuserII.corr."""
# Base cases
self.assertEqual(self.cmp.corr('', ''), 1.0)
self.assertEqual(self.cmp.corr('a', ''), -1.0)
self.assertEqual(self.cmp.corr('', 'a'), -1.0)
self.assertEqual(self.cmp.corr('abc', ''), -1.0)
self.assertEqual(self.cmp.corr('', 'abc'), -1.0)
self.assertEqual(self.cmp.corr('abc', 'abc'), 1.0)
self.assertEqual(self.cmp.corr('abcd', 'efgh'), -1.0)
self.assertAlmostEqual(self.cmp.corr('Nigel', 'Niall'), 0.7902767176)
self.assertAlmostEqual(self.cmp.corr('Niall', 'Nigel'), 0.7902767176)
self.assertAlmostEqual(self.cmp.corr('Colin', 'Coiln'), 0.7902767176)
self.assertAlmostEqual(self.cmp.corr('Coiln', 'Colin'), 0.7902767176)
self.assertAlmostEqual(
self.cmp.corr('ATCAACGAGT', 'AACGATTAG'), 0.8398473871
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.corr('', ''), 1.0)
self.assertEqual(self.cmp_no_d.corr('a', ''), -1.0)
self.assertEqual(self.cmp_no_d.corr('', 'a'), -1.0)
self.assertEqual(self.cmp_no_d.corr('abc', ''), -1.0)
self.assertEqual(self.cmp_no_d.corr('', 'abc'), -1.0)
self.assertEqual(self.cmp_no_d.corr('abc', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.corr('abcd', 'efgh'), -1.0)
self.assertAlmostEqual(
self.cmp_no_d.corr('Nigel', 'Niall'), -0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.corr('Niall', 'Nigel'), -0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.corr('Colin', 'Coiln'), -0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.corr('Coiln', 'Colin'), -0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.corr('ATCAACGAGT', 'AACGATTAG'), 0.0
)
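def _fixture_relationship_demo():
    """Illustrative only (not part of the original suite): for this measure
    the expected values above satisfy sim = (corr + 1) / 2 and
    dist = 1 - sim, e.g. (0.7902767176 + 1) / 2 == 0.8951383588.
    """
    cmp = BaroniUrbaniBuserII()
    corr = cmp.corr('Nigel', 'Niall')
    assert abs(cmp.sim('Nigel', 'Niall') - (corr + 1) / 2) < 1e-10
    assert abs(cmp.dist('Nigel', 'Niall') - (1 - (corr + 1) / 2)) < 1e-10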
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -7,501,122,559,139,735,000 | 40.229412 | 77 | 0.603795 | false |
h3biomed/ansible | test/units/plugins/lookup/test_manifold.py | 38 | 27127 | # (c) 2018, Arigato Machine Inc.
# (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.compat.mock import patch, call
from ansible.errors import AnsibleError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils import six
from ansible.plugins.lookup.manifold import ManifoldApiClient, LookupModule, ApiError
import json
API_FIXTURES = {
'https://api.marketplace.manifold.co/v1/resources':
[
{
"body": {
"label": "resource-1",
"name": "Resource 1"
},
"id": "rid-1"
},
{
"body": {
"label": "resource-2",
"name": "Resource 2"
},
"id": "rid-2"
}
],
'https://api.marketplace.manifold.co/v1/resources?label=resource-1':
[
{
"body": {
"label": "resource-1",
"name": "Resource 1"
},
"id": "rid-1"
}
],
'https://api.marketplace.manifold.co/v1/resources?label=resource-2':
[
{
"body": {
"label": "resource-2",
"name": "Resource 2"
},
"id": "rid-2"
}
],
'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1':
[
{
"body": {
"label": "resource-1",
"name": "Resource 1"
},
"id": "rid-1"
}
],
'https://api.marketplace.manifold.co/v1/resources?project_id=pid-1':
[
{
"body": {
"label": "resource-2",
"name": "Resource 2"
},
"id": "rid-2"
}
],
'https://api.marketplace.manifold.co/v1/resources?project_id=pid-2':
[
{
"body": {
"label": "resource-1",
"name": "Resource 1"
},
"id": "rid-1"
},
{
"body": {
"label": "resource-3",
"name": "Resource 3"
},
"id": "rid-3"
}
],
'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1':
[
{
"body": {
"label": "resource-1",
"name": "Resource 1"
},
"id": "rid-1"
}
],
'https://api.marketplace.manifold.co/v1/projects':
[
{
"body": {
"label": "project-1",
"name": "Project 1",
},
"id": "pid-1",
},
{
"body": {
"label": "project-2",
"name": "Project 2",
},
"id": "pid-2",
}
],
'https://api.marketplace.manifold.co/v1/projects?label=project-2':
[
{
"body": {
"label": "project-2",
"name": "Project 2",
},
"id": "pid-2",
}
],
'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-1':
[
{
"body": {
"resource_id": "rid-1",
"values": {
"RESOURCE_TOKEN_1": "token-1",
"RESOURCE_TOKEN_2": "token-2"
}
},
"id": "cid-1",
}
],
'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-2':
[
{
"body": {
"resource_id": "rid-2",
"values": {
"RESOURCE_TOKEN_3": "token-3",
"RESOURCE_TOKEN_4": "token-4"
}
},
"id": "cid-2",
}
],
'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-3':
[
{
"body": {
"resource_id": "rid-3",
"values": {
"RESOURCE_TOKEN_1": "token-5",
"RESOURCE_TOKEN_2": "token-6"
}
},
"id": "cid-3",
}
],
'https://api.identity.manifold.co/v1/teams':
[
{
"id": "tid-1",
"body": {
"name": "Team 1",
"label": "team-1"
}
},
{
"id": "tid-2",
"body": {
"name": "Team 2",
"label": "team-2"
}
}
]
}
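# Note (illustrative, not from the original source): the fixture keys above
# are the exact URLs - querystring included - that ManifoldApiClient builds,
# so each test can assert on the URL that was actually requested.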
def mock_fixture(open_url_mock, fixture=None, data=None, headers=None):
if not headers:
headers = {}
if fixture:
data = json.dumps(API_FIXTURES[fixture])
if 'content-type' not in headers:
headers['content-type'] = 'application/json'
open_url_mock.return_value.read.return_value = data
open_url_mock.return_value.headers = headers
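def _mock_fixture_demo():
    """Illustrative only (not part of the original suite): mock_fixture()
    wires a mocked open_url so client code sees a canned JSON body plus
    headers. MagicMock is assumed to be importable from the same compat
    layer that provides patch/call.
    """
    from units.compat.mock import MagicMock
    open_url_mock = MagicMock()
    mock_fixture(open_url_mock,
                 fixture='https://api.marketplace.manifold.co/v1/projects')
    assert open_url_mock.return_value.headers['content-type'] == 'application/json'
    assert json.loads(open_url_mock.return_value.read.return_value)[0]['id'] == 'pid-1'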
class TestManifoldApiClient(unittest.TestCase):
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_sends_default_headers(self, open_url_mock):
mock_fixture(open_url_mock, data='hello')
client = ManifoldApiClient('token-123')
client.request('test', 'endpoint')
open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_decodes_json(self, open_url_mock):
mock_fixture(open_url_mock, fixture='https://api.marketplace.manifold.co/v1/resources')
client = ManifoldApiClient('token-123')
self.assertIsInstance(client.request('marketplace', 'resources'), list)
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_streams_text(self, open_url_mock):
mock_fixture(open_url_mock, data='hello', headers={'content-type': "text/plain"})
client = ManifoldApiClient('token-123')
self.assertEqual('hello', client.request('test', 'endpoint'))
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_processes_parameterized_headers(self, open_url_mock):
mock_fixture(open_url_mock, data='hello')
client = ManifoldApiClient('token-123')
client.request('test', 'endpoint', headers={'X-HEADER': 'MANIFOLD'})
open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123',
'X-HEADER': 'MANIFOLD'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_passes_arbitrary_parameters(self, open_url_mock):
mock_fixture(open_url_mock, data='hello')
client = ManifoldApiClient('token-123')
client.request('test', 'endpoint', use_proxy=False, timeout=5)
open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0',
use_proxy=False, timeout=5)
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_raises_on_incorrect_json(self, open_url_mock):
mock_fixture(open_url_mock, data='noJson', headers={'content-type': "application/json"})
client = ManifoldApiClient('token-123')
with self.assertRaises(ApiError) as context:
client.request('test', 'endpoint')
self.assertEqual('JSON response can\'t be parsed while requesting https://api.test.manifold.co/v1/endpoint:\n'
'noJson',
str(context.exception))
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_raises_on_status_500(self, open_url_mock):
open_url_mock.side_effect = HTTPError('https://api.test.manifold.co/v1/endpoint',
500, 'Server error', {}, six.StringIO('ERROR'))
client = ManifoldApiClient('token-123')
with self.assertRaises(ApiError) as context:
client.request('test', 'endpoint')
self.assertEqual('Server returned: HTTP Error 500: Server error while requesting '
'https://api.test.manifold.co/v1/endpoint:\nERROR',
str(context.exception))
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_raises_on_bad_url(self, open_url_mock):
open_url_mock.side_effect = URLError('URL is invalid')
client = ManifoldApiClient('token-123')
with self.assertRaises(ApiError) as context:
client.request('test', 'endpoint')
self.assertEqual('Failed lookup url for https://api.test.manifold.co/v1/endpoint : <url'
'open error URL is invalid>',
str(context.exception))
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_raises_on_ssl_error(self, open_url_mock):
open_url_mock.side_effect = SSLValidationError('SSL Error')
client = ManifoldApiClient('token-123')
with self.assertRaises(ApiError) as context:
client.request('test', 'endpoint')
self.assertEqual('Error validating the server\'s certificate for https://api.test.manifold.co/v1/endpoint: '
'SSL Error',
str(context.exception))
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_raises_on_connection_error(self, open_url_mock):
open_url_mock.side_effect = ConnectionError('Unknown connection error')
client = ManifoldApiClient('token-123')
with self.assertRaises(ApiError) as context:
client.request('test', 'endpoint')
self.assertEqual('Error connecting to https://api.test.manifold.co/v1/endpoint: Unknown connection error',
str(context.exception))
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_resources_get_all(self, open_url_mock):
url = 'https://api.marketplace.manifold.co/v1/resources'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url], client.get_resources())
open_url_mock.assert_called_with(url,
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_resources_filter_label(self, open_url_mock):
url = 'https://api.marketplace.manifold.co/v1/resources?label=resource-1'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url], client.get_resources(label='resource-1'))
open_url_mock.assert_called_with(url,
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_resources_filter_team_and_project(self, open_url_mock):
url = 'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url], client.get_resources(team_id='tid-1', project_id='pid-1'))
args, kwargs = open_url_mock.call_args
url_called = args[0]
# Dict order is not guaranteed, so an url may have querystring parameters order randomized
self.assertIn('team_id=tid-1', url_called)
self.assertIn('project_id=pid-1', url_called)
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_teams_get_all(self, open_url_mock):
url = 'https://api.identity.manifold.co/v1/teams'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url], client.get_teams())
open_url_mock.assert_called_with(url,
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_teams_filter_label(self, open_url_mock):
url = 'https://api.identity.manifold.co/v1/teams'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url][1:2], client.get_teams(label='team-2'))
open_url_mock.assert_called_with(url,
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_projects_get_all(self, open_url_mock):
url = 'https://api.marketplace.manifold.co/v1/projects'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url], client.get_projects())
open_url_mock.assert_called_with(url,
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_projects_filter_label(self, open_url_mock):
url = 'https://api.marketplace.manifold.co/v1/projects?label=project-2'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url], client.get_projects(label='project-2'))
open_url_mock.assert_called_with(url,
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_credentials(self, open_url_mock):
url = 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-1'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url], client.get_credentials(resource_id='rid-1'))
open_url_mock.assert_called_with(url,
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
class TestLookupModule(unittest.TestCase):
def setUp(self):
self.lookup = LookupModule()
self.lookup._load_name = "manifold"
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_get_all(self, client_mock):
expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
'RESOURCE_TOKEN_2': 'token-2',
'RESOURCE_TOKEN_3': 'token-3',
'RESOURCE_TOKEN_4': 'token-4'
}]
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
'credentials?resource_id={0}'.format(x)]
self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123'))
client_mock.assert_called_with('token-123')
client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None)
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_get_one_resource(self, client_mock):
expected_result = [{'RESOURCE_TOKEN_3': 'token-3',
'RESOURCE_TOKEN_4': 'token-4'
}]
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?label=resource-2']
client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
'credentials?resource_id={0}'.format(x)]
self.assertListEqual(expected_result, self.lookup.run(['resource-2'], api_token='token-123'))
client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None, label='resource-2')
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_get_two_resources(self, client_mock):
expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
'RESOURCE_TOKEN_2': 'token-2',
'RESOURCE_TOKEN_3': 'token-3',
'RESOURCE_TOKEN_4': 'token-4'
}]
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
'credentials?resource_id={0}'.format(x)]
self.assertListEqual(expected_result, self.lookup.run(['resource-1', 'resource-2'], api_token='token-123'))
client_mock.assert_called_with('token-123')
client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None)
@patch('ansible.plugins.lookup.manifold.display')
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_get_resources_with_same_credential_names(self, client_mock, display_mock):
expected_result = [{'RESOURCE_TOKEN_1': 'token-5',
'RESOURCE_TOKEN_2': 'token-6'
}]
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?project_id=pid-2']
client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects?label=project-2']
client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
'credentials?resource_id={0}'.format(x)]
self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-2'))
client_mock.assert_called_with('token-123')
display_mock.warning.assert_has_calls([
call("'RESOURCE_TOKEN_1' with label 'resource-1' was replaced by resource data with label 'resource-3'"),
call("'RESOURCE_TOKEN_2' with label 'resource-1' was replaced by resource data with label 'resource-3'")],
any_order=True
)
client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-2')
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_filter_by_team(self, client_mock):
expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
'RESOURCE_TOKEN_2': 'token-2'
}]
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?team_id=tid-1']
client_mock.return_value.get_teams.return_value = API_FIXTURES['https://api.identity.manifold.co/v1/teams'][0:1]
client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
'credentials?resource_id={0}'.format(x)]
self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', team='team-1'))
client_mock.assert_called_with('token-123')
client_mock.return_value.get_resources.assert_called_with(team_id='tid-1', project_id=None)
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_filter_by_project(self, client_mock):
expected_result = [{'RESOURCE_TOKEN_3': 'token-3',
'RESOURCE_TOKEN_4': 'token-4'
}]
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?project_id=pid-1']
client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects'][0:1]
client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
'credentials?resource_id={0}'.format(x)]
self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-1'))
client_mock.assert_called_with('token-123')
client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-1')
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_filter_by_team_and_project(self, client_mock):
expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
'RESOURCE_TOKEN_2': 'token-2'
}]
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1']
client_mock.return_value.get_teams.return_value = API_FIXTURES['https://api.identity.manifold.co/v1/teams'][0:1]
client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects'][0:1]
client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
'credentials?resource_id={0}'.format(x)]
self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-1'))
client_mock.assert_called_with('token-123')
client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-1')
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_raise_team_doesnt_exist(self, client_mock):
client_mock.return_value.get_teams.return_value = []
with self.assertRaises(AnsibleError) as context:
self.lookup.run([], api_token='token-123', team='no-team')
self.assertEqual("Team 'no-team' does not exist",
str(context.exception))
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_raise_project_doesnt_exist(self, client_mock):
client_mock.return_value.get_projects.return_value = []
with self.assertRaises(AnsibleError) as context:
self.lookup.run([], api_token='token-123', project='no-project')
self.assertEqual("Project 'no-project' does not exist",
str(context.exception))
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_raise_resource_doesnt_exist(self, client_mock):
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
with self.assertRaises(AnsibleError) as context:
self.lookup.run(['resource-1', 'no-resource-1', 'no-resource-2'], api_token='token-123')
self.assertEqual("Resource(s) no-resource-1, no-resource-2 do not exist",
str(context.exception))
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_catch_api_error(self, client_mock):
client_mock.side_effect = ApiError('Generic error')
with self.assertRaises(AnsibleError) as context:
self.lookup.run([], api_token='token-123')
self.assertEqual("API Error: Generic error",
str(context.exception))
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_catch_unhandled_exception(self, client_mock):
client_mock.side_effect = Exception('Unknown error')
with self.assertRaises(AnsibleError) as context:
self.lookup.run([], api_token='token-123')
self.assertTrue('Exception: Unknown error' in str(context.exception))
@patch('ansible.plugins.lookup.manifold.os.getenv')
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_falls_back_to_env_var(self, client_mock, getenv_mock):
getenv_mock.return_value = 'token-321'
client_mock.return_value.get_resources.return_value = []
client_mock.return_value.get_credentials.return_value = []
self.lookup.run([])
getenv_mock.assert_called_with('MANIFOLD_API_TOKEN')
client_mock.assert_called_with('token-321')
@patch('ansible.plugins.lookup.manifold.os.getenv')
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_falls_raises_on_no_token(self, client_mock, getenv_mock):
getenv_mock.return_value = None
client_mock.return_value.get_resources.return_value = []
client_mock.return_value.get_credentials.return_value = []
with self.assertRaises(AnsibleError) as context:
self.lookup.run([])
self.assertEqual('API token is required. Please set api_token parameter or MANIFOLD_API_TOKEN env var',
str(context.exception))
| gpl-3.0 | 6,783,216,871,838,835,000 | 49.610075 | 157 | 0.56265 | false |
schreiberx/sweet | benchmarks_sphere/galewsky_varying_varying_dt_and_tsm/benchmark_create_jobs.py | 1 | 6312 | #! /usr/bin/env python3
import os
import sys
import math
import numpy as np
from itertools import product
from mule_local.JobGeneration import *
from mule.JobParallelization import *
from mule.JobParallelizationDimOptions import *
p = JobGeneration()
verbose = False
#verbose = True
##################################################
##################################################
p.compile.mode = 'release'
#p.compile.sweet_mpi = 'disable'
p.runtime.space_res_spectral = 128
p.parallelization.core_oversubscription = False
p.parallelization.core_affinity = 'compact'
p.compile.threading = 'omp'
p.compile.rexi_thread_parallel_sum = 'disable'
gen_reference_solution = True
p.runtime.benchmark_name = "galewsky"
p.runtime.max_simulation_time = 60*60*24*8 # 8 days
p.runtime.output_timestep_size = 60*60*6 # Generate output every 6 hours
p.runtime.output_file_mode = 'bin'
params_timestep_sizes_explicit_ = [15*(2**i) for i in range(0, 4)]
params_timestep_sizes_implicit_ = [15*(2**i) for i in range(2, 6)]
params_timestep_sizes_sl_ = [15*(2**i) for i in range(2, 6)]
params_timestep_size_reference = 30.0
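# For reference, the comprehensions above expand to (in seconds):
#   explicit timesteppers:        [15, 30, 60, 120]
#   implicit timesteppers:        [60, 120, 240, 480]
#   semi-Lagrangian timesteppers: [60, 120, 240, 480]
# while the reference run uses a fixed 30 s step.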
# Parallelization
params_pspace_num_cores_per_rank = [p.platform_resources.num_cores_per_socket]
params_pspace_num_threads_per_rank = [p.platform_resources.num_cores_per_socket]
unique_id_filter = []
unique_id_filter.append('compile')
#unique_id_filter.append('runtime.galewsky_params')
unique_id_filter.append('runtime.rexi')
unique_id_filter.append('runtime.benchmark')
unique_id_filter.append('runtime.max_simulation_time')
p.unique_id_filter = unique_id_filter
p.runtime.output_timestep_size = p.runtime.max_simulation_time  # write output only at the end of the run
##########################################################################
##########################################################################
##########################################################################
def estimateWallclockTime(p):
    return 12*60*60  # fixed 12-hour wallclock estimate
p.compile.lapack = 'enable'
p.compile.mkl = 'disable'
p.compilecommand_in_jobscript = False
#
# Run simulation on plane or sphere
#
p.compile.program = 'swe_sphere'
p.compile.plane_spectral_space = 'disable'
p.compile.plane_spectral_dealiasing = 'disable'
p.compile.sphere_spectral_space = 'enable'
p.compile.sphere_spectral_dealiasing = 'enable'
p.compile.benchmark_timings = 'enable'
p.compile.quadmath = 'disable'
#
# Activate Fortran source
#
p.compile.fortran_source = 'enable'
# Verbosity mode
p.runtime.verbosity = 0
#
# Mode and Physical resolution
#
p.runtime.space_res_spectral = 128
p.runtime.space_res_physical = -1
#
# Compute error
#
p.runtime.compute_error = 0
#
# Preallocate the REXI matrices
#
#p.runtime.rexi_sphere_preallocation = 1
# Leave instability checks activated
p.runtime.instability_checks = 1
# Don't activate them for wallclock time studies since they are pretty costly!!!
#p.runtime.instability_checks = 0
p.runtime.viscosity = 0.0
p.runtime.sphere_extended_modes = 0
#
# allow including this file
#
if __name__ == "__main__":
ts_methods = [
['ln_erk', 4, 4, 0], # reference solution
###########
# Runge-Kutta
###########
['ln_erk', 2, 2, 0],
###########
# CN
###########
['lg_irk_lc_n_erk_ver0', 2, 2, 0],
['lg_irk_lc_n_erk_ver1', 2, 2, 0],
['l_irk_na_sl_nd_settls_ver1', 2, 2, 0],
['l_irk_na_sl_nd_settls_ver2', 2, 2, 0],
['lg_irk_na_sl_lc_nd_settls_ver1', 2, 2, 0],
['lg_irk_na_sl_lc_nd_settls_ver2', 2, 2, 0],
]
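    # Each ts_methods entry is [timestepping_method, order, order2, unused];
    # an optional 5th element (a dict of runtime overrides) is honored
    # further below, and the first entry doubles as the reference scheme.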
#
# Reference solution
#
p.reference_job_unique_id = None
if gen_reference_solution:
tsm = ts_methods[0]
p.runtime.timestep_size = params_timestep_size_reference
p.runtime.timestepping_method = tsm[0]
p.runtime.timestepping_order = tsm[1]
p.runtime.timestepping_order2 = tsm[2]
pspace = JobParallelizationDimOptions('space')
pspace.num_cores_per_rank = 1
pspace.num_threads_per_rank = params_pspace_num_cores_per_rank[-1]
pspace.num_ranks = 1
# Setup parallelization
p.setup_parallelization([pspace])
if verbose:
pspace.print()
p.parallelization.print()
p.parallelization.max_wallclock_seconds = estimateWallclockTime(p)
p.gen_jobscript_directory('job_benchref_'+p.getUniqueID())
# Use this as a reference job
p.reference_job_unique_id = p.job_unique_id
for tsm in ts_methods[1:]:
p.runtime.timestepping_method = tsm[0]
p.runtime.timestepping_order = tsm[1]
p.runtime.timestepping_order2 = tsm[2]
if len(tsm) > 4:
s = tsm[4]
p.runtime.load_from_dict(tsm[4])
tsm_name = tsm[0]
if 'ln_erk' in tsm_name:
params_timestep_sizes = params_timestep_sizes_explicit_
elif 'l_erk' in tsm_name or 'lg_erk' in tsm_name:
params_timestep_sizes = params_timestep_sizes_explicit_
elif 'l_irk' in tsm_name or 'lg_irk' in tsm_name:
params_timestep_sizes = params_timestep_sizes_implicit_
elif '_sl' in tsm_name:
params_timestep_sizes = params_timestep_sizes_sl_
else:
print("Unable to identify time stepping method "+tsm_name)
sys.exit(1)
for (
pspace_num_cores_per_rank,
pspace_num_threads_per_rank,
p.runtime.timestep_size
) in product(
params_pspace_num_cores_per_rank,
params_pspace_num_threads_per_rank,
params_timestep_sizes
):
pspace = JobParallelizationDimOptions('space')
pspace.num_cores_per_rank = pspace_num_cores_per_rank
pspace.num_threads_per_rank = pspace_num_threads_per_rank
pspace.num_ranks = 1
pspace.setup()
p.setup_parallelization([pspace])
if verbose:
pspace.print()
p.parallelization.print()
p.parallelization.max_wallclock_seconds = estimateWallclockTime(p)
p.gen_jobscript_directory('job_bench_'+p.getUniqueID())
p.write_compilecommands()
| mit | 3,381,570,057,641,261,000 | 25.3 | 80 | 0.59379 | false |
lokirius/python-for-android | python-build/python-libs/gdata/src/gdata/youtube/service.py | 141 | 57914 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YouTubeService extends GDataService to streamline YouTube operations.
YouTubeService: Provides methods to perform CRUD operations on YouTube feeds.
Extends GDataService.
"""
__author__ = ('[email protected] (Stephanie Liu), '
'[email protected] (Jochen Hartmann)')
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import os
import atom
import gdata
import gdata.service
import gdata.youtube
YOUTUBE_SERVER = 'gdata.youtube.com'
YOUTUBE_SERVICE = 'youtube'
YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL = 'https://www.google.com/youtube/accounts/ClientLogin'
YOUTUBE_SUPPORTED_UPLOAD_TYPES = ('mov', 'avi', 'wmv', 'mpg', 'quicktime',
'flv')
YOUTUBE_QUERY_VALID_TIME_PARAMETERS = ('today', 'this_week', 'this_month',
'all_time')
YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS = ('published', 'viewCount', 'rating',
'relevance')
YOUTUBE_QUERY_VALID_RACY_PARAMETERS = ('include', 'exclude')
YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS = ('1', '5', '6')
YOUTUBE_STANDARDFEEDS = ('most_recent', 'recently_featured',
'top_rated', 'most_viewed','watch_on_mobile')
YOUTUBE_UPLOAD_URI = 'http://uploads.gdata.youtube.com/feeds/api/users'
YOUTUBE_UPLOAD_TOKEN_URI = 'http://gdata.youtube.com/action/GetUploadToken'
YOUTUBE_VIDEO_URI = 'http://gdata.youtube.com/feeds/api/videos'
YOUTUBE_USER_FEED_URI = 'http://gdata.youtube.com/feeds/api/users'
YOUTUBE_PLAYLIST_FEED_URI = 'http://gdata.youtube.com/feeds/api/playlists'
YOUTUBE_STANDARD_FEEDS = 'http://gdata.youtube.com/feeds/api/standardfeeds'
YOUTUBE_STANDARD_TOP_RATED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'top_rated')
YOUTUBE_STANDARD_MOST_VIEWED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_viewed')
YOUTUBE_STANDARD_RECENTLY_FEATURED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'recently_featured')
YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'watch_on_mobile')
YOUTUBE_STANDARD_TOP_FAVORITES_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'top_favorites')
YOUTUBE_STANDARD_MOST_RECENT_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_recent')
YOUTUBE_STANDARD_MOST_DISCUSSED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_discussed')
YOUTUBE_STANDARD_MOST_LINKED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_linked')
YOUTUBE_STANDARD_MOST_RESPONDED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_responded')
YOUTUBE_SCHEMA = 'http://gdata.youtube.com/schemas'
YOUTUBE_RATING_LINK_REL = '%s#video.ratings' % YOUTUBE_SCHEMA
YOUTUBE_COMPLAINT_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA,
'complaint-reasons.cat')
YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA,
'subscriptiontypes.cat')
YOUTUBE_COMPLAINT_CATEGORY_TERMS = ('PORN', 'VIOLENCE', 'HATE', 'DANGEROUS',
'RIGHTS', 'SPAM')
YOUTUBE_CONTACT_STATUS = ('accepted', 'rejected')
YOUTUBE_CONTACT_CATEGORY = ('Friends', 'Family')
UNKOWN_ERROR = 1000
YOUTUBE_BAD_REQUEST = 400
YOUTUBE_CONFLICT = 409
YOUTUBE_INTERNAL_SERVER_ERROR = 500
YOUTUBE_INVALID_ARGUMENT = 601
YOUTUBE_INVALID_CONTENT_TYPE = 602
YOUTUBE_NOT_A_VIDEO = 603
YOUTUBE_INVALID_KIND = 604
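# Typical usage sketch (illustrative only; the client id, developer key and
# video id below are placeholders, not real credentials):
#
#   client = YouTubeService(client_id='my-app', developer_key='dev-key')
#   entry = client.GetYouTubeVideoEntry(video_id='abc123xyz')
#   comments = client.GetYouTubeVideoCommentFeed(video_id='abc123xyz')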
class Error(Exception):
"""Base class for errors within the YouTube service."""
pass
class RequestError(Error):
"""Error class that is thrown in response to an invalid HTTP Request."""
pass
class YouTubeError(Error):
"""YouTube service specific error class."""
pass
class YouTubeService(gdata.service.GDataService):
"""Client for the YouTube service.
Performs all documented Google Data YouTube API functions, such as inserting,
updating and deleting videos, comments, playlist, subscriptions etc.
YouTube Service requires authentication for any write, update or delete
actions.
Attributes:
email: An optional string identifying the user. Required only for
authenticated actions.
password: An optional string identifying the user's password.
source: An optional string identifying the name of your application.
server: An optional address of the YouTube API server. gdata.youtube.com
is provided as the default value.
additional_headers: An optional dictionary containing additional headers
to be passed along with each request. Use to store developer key.
client_id: An optional string identifying your application, required for
authenticated requests, along with a developer key.
developer_key: An optional string value. Register your application at
http://code.google.com/apis/youtube/dashboard to obtain a (free) key.
"""
def __init__(self, email=None, password=None, source=None,
server=YOUTUBE_SERVER, additional_headers=None, client_id=None,
developer_key=None, **kwargs):
"""Creates a client for the YouTube service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'gdata.youtube.com'.
client_id: string (optional) Identifies your application, required for
authenticated requests, along with a developer key.
developer_key: string (optional) Register your application at
http://code.google.com/apis/youtube/dashboard to obtain a (free) key.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
if developer_key and not client_id:
raise YouTubeError('You must also specify the clientId')
gdata.service.GDataService.__init__(
self, email=email, password=password, service=YOUTUBE_SERVICE,
source=source, server=server, additional_headers=additional_headers,
**kwargs)
if client_id is not None and developer_key is not None:
self.additional_headers['X-Gdata-Client'] = client_id
self.additional_headers['X-GData-Key'] = 'key=%s' % developer_key
self.auth_service_url = YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL
def GetYouTubeVideoFeed(self, uri):
"""Retrieve a YouTubeVideoFeed.
Args:
uri: A string representing the URI of the feed that is to be retrieved.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.Get(uri, converter=gdata.youtube.YouTubeVideoFeedFromString)
def GetYouTubeVideoEntry(self, uri=None, video_id=None):
"""Retrieve a YouTubeVideoEntry.
Either a uri or a video_id must be provided.
Args:
uri: An optional string representing the URI of the entry that is to
be retrieved.
video_id: An optional string representing the ID of the video.
Returns:
A YouTubeVideoFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeVideoEntry() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the GetYouTubeVideoEntry() method')
elif video_id and not uri:
uri = '%s/%s' % (YOUTUBE_VIDEO_URI, video_id)
return self.Get(uri, converter=gdata.youtube.YouTubeVideoEntryFromString)
def GetYouTubeContactFeed(self, uri=None, username='default'):
"""Retrieve a YouTubeContactFeed.
Either a uri or a username must be provided.
Args:
uri: An optional string representing the URI of the contact feed that
is to be retrieved.
username: An optional string representing the username. Defaults to the
currently authenticated user.
Returns:
A YouTubeContactFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubeContactFeed() method.
"""
if uri is None:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'contacts')
return self.Get(uri, converter=gdata.youtube.YouTubeContactFeedFromString)
def GetYouTubeContactEntry(self, uri):
"""Retrieve a YouTubeContactEntry.
Args:
uri: A string representing the URI of the contact entry that is to
be retrieved.
Returns:
A YouTubeContactEntry if successfully retrieved.
"""
return self.Get(uri, converter=gdata.youtube.YouTubeContactEntryFromString)
def GetYouTubeVideoCommentFeed(self, uri=None, video_id=None):
"""Retrieve a YouTubeVideoCommentFeed.
Either a uri or a video_id must be provided.
Args:
uri: An optional string representing the URI of the comment feed that
is to be retrieved.
video_id: An optional string representing the ID of the video for which
to retrieve the comment feed.
Returns:
A YouTubeVideoCommentFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeVideoCommentFeed() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the GetYouTubeVideoCommentFeed() method')
elif video_id and not uri:
uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'comments')
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoCommentFeedFromString)
def GetYouTubeVideoCommentEntry(self, uri):
"""Retrieve a YouTubeVideoCommentEntry.
Args:
uri: A string representing the URI of the comment entry that is to
be retrieved.
Returns:
A YouTubeCommentEntry if successfully retrieved.
"""
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoCommentEntryFromString)
def GetYouTubeUserFeed(self, uri=None, username=None):
"""Retrieve a YouTubeVideoFeed of user uploaded videos
Either a uri or a username must be provided. This will retrieve list
of videos uploaded by specified user. The uri will be of format
"http://gdata.youtube.com/feeds/api/users/{username}/uploads".
Args:
uri: An optional string representing the URI of the user feed that is
to be retrieved.
username: An optional string representing the username.
Returns:
A YouTubeUserFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubeUserFeed() method.
"""
if uri is None and username is None:
raise YouTubeError('You must provide at least a uri or a username '
'to the GetYouTubeUserFeed() method')
elif username and not uri:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'uploads')
return self.Get(uri, converter=gdata.youtube.YouTubeUserFeedFromString)
def GetYouTubeUserEntry(self, uri=None, username=None):
"""Retrieve a YouTubeUserEntry.
Either a uri or a username must be provided.
Args:
uri: An optional string representing the URI of the user entry that is
to be retrieved.
username: An optional string representing the username.
Returns:
A YouTubeUserEntry if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubeUserEntry() method.
"""
if uri is None and username is None:
raise YouTubeError('You must provide at least a uri or a username '
'to the GetYouTubeUserEntry() method')
elif username and not uri:
uri = '%s/%s' % (YOUTUBE_USER_FEED_URI, username)
return self.Get(uri, converter=gdata.youtube.YouTubeUserEntryFromString)
def GetYouTubePlaylistFeed(self, uri=None, username='default'):
"""Retrieve a YouTubePlaylistFeed (a feed of playlists for a user).
Either a uri or a username must be provided.
Args:
uri: An optional string representing the URI of the playlist feed that
is to be retrieved.
username: An optional string representing the username. Defaults to the
currently authenticated user.
Returns:
A YouTubePlaylistFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubePlaylistFeed() method.
"""
if uri is None:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'playlists')
return self.Get(uri, converter=gdata.youtube.YouTubePlaylistFeedFromString)
def GetYouTubePlaylistEntry(self, uri):
"""Retrieve a YouTubePlaylistEntry.
Args:
uri: A string representing the URI of the playlist feed that is to
be retrieved.
Returns:
A YouTubePlaylistEntry if successfully retrieved.
"""
return self.Get(uri, converter=gdata.youtube.YouTubePlaylistEntryFromString)
def GetYouTubePlaylistVideoFeed(self, uri=None, playlist_id=None):
"""Retrieve a YouTubePlaylistVideoFeed (a feed of videos on a playlist).
Either a uri or a playlist_id must be provided.
Args:
uri: An optional string representing the URI of the playlist video feed
that is to be retrieved.
playlist_id: An optional string representing the Id of the playlist whose
playlist video feed is to be retrieved.
Returns:
A YouTubePlaylistVideoFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a playlist_id to the
GetYouTubePlaylistVideoFeed() method.
"""
if uri is None and playlist_id is None:
raise YouTubeError('You must provide at least a uri or a playlist_id '
'to the GetYouTubePlaylistVideoFeed() method')
elif playlist_id and not uri:
uri = '%s/%s' % (YOUTUBE_PLAYLIST_FEED_URI, playlist_id)
return self.Get(
uri, converter=gdata.youtube.YouTubePlaylistVideoFeedFromString)
def GetYouTubeVideoResponseFeed(self, uri=None, video_id=None):
"""Retrieve a YouTubeVideoResponseFeed.
Either a uri or a playlist_id must be provided.
Args:
uri: An optional string representing the URI of the video response feed
that is to be retrieved.
video_id: An optional string representing the ID of the video whose
response feed is to be retrieved.
Returns:
A YouTubeVideoResponseFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeVideoResponseFeed() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the GetYouTubeVideoResponseFeed() method')
elif video_id and not uri:
uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses')
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoResponseFeedFromString)
def GetYouTubeVideoResponseEntry(self, uri):
"""Retrieve a YouTubeVideoResponseEntry.
Args:
uri: A string representing the URI of the video response entry that
is to be retrieved.
Returns:
A YouTubeVideoResponseEntry if successfully retrieved.
"""
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoResponseEntryFromString)
def GetYouTubeSubscriptionFeed(self, uri=None, username='default'):
"""Retrieve a YouTubeSubscriptionFeed.
Either the uri of the feed or a username must be provided.
Args:
uri: An optional string representing the URI of the feed that is to
be retrieved.
username: An optional string representing the username whose subscription
        feed is to be retrieved. Defaults to the currently authenticated user.
Returns:
A YouTubeVideoSubscriptionFeed if successfully retrieved.
"""
if uri is None:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'subscriptions')
return self.Get(
uri, converter=gdata.youtube.YouTubeSubscriptionFeedFromString)
def GetYouTubeSubscriptionEntry(self, uri):
"""Retrieve a YouTubeSubscriptionEntry.
Args:
uri: A string representing the URI of the entry that is to be retrieved.
Returns:
A YouTubeVideoSubscriptionEntry if successfully retrieved.
"""
return self.Get(
uri, converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
def GetYouTubeRelatedVideoFeed(self, uri=None, video_id=None):
"""Retrieve a YouTubeRelatedVideoFeed.
Either a uri for the feed or a video_id is required.
Args:
uri: An optional string representing the URI of the feed that is to
be retrieved.
video_id: An optional string representing the ID of the video for which
to retrieve the related video feed.
Returns:
A YouTubeRelatedVideoFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeRelatedVideoFeed() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the GetYouTubeRelatedVideoFeed() method')
elif video_id and not uri:
uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'related')
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoFeedFromString)
def GetTopRatedVideoFeed(self):
"""Retrieve the 'top_rated' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_RATED_URI)
def GetMostViewedVideoFeed(self):
"""Retrieve the 'most_viewed' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_VIEWED_URI)
def GetRecentlyFeaturedVideoFeed(self):
"""Retrieve the 'recently_featured' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_RECENTLY_FEATURED_URI)
def GetWatchOnMobileVideoFeed(self):
"""Retrieve the 'watch_on_mobile' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI)
def GetTopFavoritesVideoFeed(self):
"""Retrieve the 'top_favorites' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_FAVORITES_URI)
def GetMostRecentVideoFeed(self):
"""Retrieve the 'most_recent' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RECENT_URI)
def GetMostDiscussedVideoFeed(self):
"""Retrieve the 'most_discussed' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_DISCUSSED_URI)
def GetMostLinkedVideoFeed(self):
"""Retrieve the 'most_linked' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_LINKED_URI)
def GetMostRespondedVideoFeed(self):
"""Retrieve the 'most_responded' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RESPONDED_URI)
def GetUserFavoritesFeed(self, username='default'):
"""Retrieve the favorites feed for a given user.
Args:
username: An optional string representing the username whose favorites
feed is to be retrieved. Defaults to the currently authenticated user.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
favorites_feed_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username,
'favorites')
return self.GetYouTubeVideoFeed(favorites_feed_uri)
def InsertVideoEntry(self, video_entry, filename_or_handle,
youtube_username='default',
content_type='video/quicktime'):
"""Upload a new video to YouTube using the direct upload mechanism.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to upload.
filename_or_handle: A file-like object or file name where the video
will be read from.
youtube_username: An optional string representing the username into whose
        account this video is to be uploaded. Defaults to the currently
authenticated user.
content_type: An optional string representing internet media type
(a.k.a. mime type) of the media object. Currently the YouTube API
supports these types:
o video/mpeg
o video/quicktime
o video/x-msvideo
o video/mp4
o video/x-flv
Returns:
The newly created YouTubeVideoEntry if successful.
Raises:
AssertionError: video_entry must be a gdata.youtube.VideoEntry instance.
YouTubeError: An error occurred trying to read the video file provided.
gdata.service.RequestError: An error occurred trying to upload the video
to the API server.
"""
# We need to perform a series of checks on the video_entry and on the
# file that we plan to upload, such as checking whether we have a valid
# video_entry and that the file is the correct type and readable, prior
# to performing the actual POST request.
try:
assert(isinstance(video_entry, gdata.youtube.YouTubeVideoEntry))
except AssertionError:
raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT,
'body':'`video_entry` must be a gdata.youtube.VideoEntry instance',
'reason':'Found %s, not VideoEntry' % type(video_entry)
})
    try:
      majtype, mintype = content_type.split('/')
      assert(mintype in YOUTUBE_SUPPORTED_UPLOAD_TYPES)
except (ValueError, AssertionError):
raise YouTubeError({'status':YOUTUBE_INVALID_CONTENT_TYPE,
'body':'This is not a valid content type: %s' % content_type,
'reason':'Accepted content types: %s' %
['video/%s' % (t) for t in YOUTUBE_SUPPORTED_UPLOAD_TYPES]})
if (isinstance(filename_or_handle, (str, unicode))
and os.path.exists(filename_or_handle)):
mediasource = gdata.MediaSource()
mediasource.setFile(filename_or_handle, content_type)
elif hasattr(filename_or_handle, 'read'):
import StringIO
if hasattr(filename_or_handle, 'seek'):
filename_or_handle.seek(0)
file_handle = StringIO.StringIO(filename_or_handle.read())
name = 'video'
if hasattr(filename_or_handle, 'name'):
name = filename_or_handle.name
mediasource = gdata.MediaSource(file_handle, content_type,
content_length=file_handle.len, file_name=name)
else:
raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, 'body':
'`filename_or_handle` must be a path name or a file-like object',
'reason': ('Found %s, not path name or object '
'with a .read() method' % type(filename_or_handle))})
upload_uri = '%s/%s/%s' % (YOUTUBE_UPLOAD_URI, youtube_username,
'uploads')
self.additional_headers['Slug'] = mediasource.file_name
# Using a nested try statement to retain Python 2.4 compatibility
try:
try:
return self.Post(video_entry, uri=upload_uri, media_source=mediasource,
converter=gdata.youtube.YouTubeVideoEntryFromString)
      except gdata.service.RequestError as e:
raise YouTubeError(e.args[0])
finally:
      del self.additional_headers['Slug']
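  # A minimal usage sketch for direct upload (the service instance and the
  # metadata entry below are assumptions, not defined in this module):
  #
  #   entry = gdata.youtube.YouTubeVideoEntry(media=my_media_group)
  #   new_entry = yt_service.InsertVideoEntry(entry, '/tmp/clip.mov',
  #                                           content_type='video/quicktime')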
def CheckUploadStatus(self, video_entry=None, video_id=None):
"""Check upload status on a recently uploaded video entry.
Needs authentication. Either video_entry or video_id must be provided.
Args:
video_entry: An optional YouTubeVideoEntry whose upload status to check
video_id: An optional string representing the ID of the uploaded video
whose status is to be checked.
Returns:
A tuple containing (video_upload_state, detailed_message) or None if
no status information is found.
Raises:
YouTubeError: You must provide at least a video_entry or a video_id to the
CheckUploadStatus() method.
"""
if video_entry is None and video_id is None:
      raise YouTubeError('You must provide at least a video_entry or a '
                         'video_id to the CheckUploadStatus() method')
elif video_id and not video_entry:
video_entry = self.GetYouTubeVideoEntry(video_id=video_id)
control = video_entry.control
if control is not None:
draft = control.draft
if draft is not None:
if draft.text == 'yes':
yt_state = control.extension_elements[0]
if yt_state is not None:
state_value = yt_state.attributes['name']
message = ''
if yt_state.text is not None:
message = yt_state.text
return (state_value, message)
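  # Note: if the video is not (or is no longer) a draft, or carries no
  # control element, CheckUploadStatus falls through and returns None.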
def GetFormUploadToken(self, video_entry, uri=YOUTUBE_UPLOAD_TOKEN_URI):
"""Receives a YouTube Token and a YouTube PostUrl from a YouTubeVideoEntry.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to upload (meta-data only).
uri: An optional string representing the URI from where to fetch the
        token information. Defaults to YOUTUBE_UPLOAD_TOKEN_URI.
Returns:
A tuple containing the URL to which to post your video file, along
with the youtube token that must be included with your upload in the
form of: (post_url, youtube_token).
"""
try:
response = self.Post(video_entry, uri)
    except gdata.service.RequestError as e:
raise YouTubeError(e.args[0])
    tree = ElementTree.fromstring(response)
    post_url = None
    youtube_token = None
    for child in tree:
      if child.tag == 'url':
        post_url = child.text
      elif child.tag == 'token':
        youtube_token = child.text
    return (post_url, youtube_token)
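  # Sketch of consuming the returned pair (the hidden form field name is an
  # assumption based on the browser-upload convention, not defined here):
  #
  #   post_url, token = yt_service.GetFormUploadToken(entry)
  #   # Render an HTML form that POSTs the video file to post_url and
  #   # includes the returned token, e.g. as a hidden 'token' form field.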
def UpdateVideoEntry(self, video_entry):
"""Updates a video entry's meta-data.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to update, containing updated
meta-data.
Returns:
An updated YouTubeVideoEntry on success or None.
"""
for link in video_entry.link:
if link.rel == 'edit':
edit_uri = link.href
return self.Put(video_entry, uri=edit_uri,
converter=gdata.youtube.YouTubeVideoEntryFromString)
def DeleteVideoEntry(self, video_entry):
"""Deletes a video entry.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to be deleted.
Returns:
True if entry was deleted successfully.
"""
for link in video_entry.link:
if link.rel == 'edit':
edit_uri = link.href
return self.Delete(edit_uri)
def AddRating(self, rating_value, video_entry):
"""Add a rating to a video entry.
Needs authentication.
Args:
rating_value: The integer value for the rating (between 1 and 5).
video_entry: The YouTubeVideoEntry to be rated.
Returns:
True if the rating was added successfully.
Raises:
YouTubeError: rating_value must be between 1 and 5 in AddRating().
"""
if rating_value < 1 or rating_value > 5:
raise YouTubeError('rating_value must be between 1 and 5 in AddRating()')
entry = gdata.GDataEntry()
rating = gdata.youtube.Rating(min='1', max='5')
rating.extension_attributes['name'] = 'value'
rating.extension_attributes['value'] = str(rating_value)
entry.extension_elements.append(rating)
for link in video_entry.link:
if link.rel == YOUTUBE_RATING_LINK_REL:
rating_uri = link.href
return self.Post(entry, uri=rating_uri)
def AddComment(self, comment_text, video_entry):
"""Add a comment to a video entry.
Needs authentication. Note that each comment that is posted must contain
the video entry that it is to be posted to.
Args:
comment_text: A string representing the text of the comment.
video_entry: The YouTubeVideoEntry to be commented on.
Returns:
True if the comment was added successfully.
"""
content = atom.Content(text=comment_text)
comment_entry = gdata.youtube.YouTubeVideoCommentEntry(content=content)
comment_post_uri = video_entry.comments.feed_link[0].href
return self.Post(comment_entry, uri=comment_post_uri)
def AddVideoResponse(self, video_id_to_respond_to, video_response):
"""Add a video response.
Needs authentication.
Args:
video_id_to_respond_to: A string representing the ID of the video to be
responded to.
video_response: YouTubeVideoEntry to be posted as a response.
Returns:
True if video response was posted successfully.
"""
post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id_to_respond_to,
'responses')
return self.Post(video_response, uri=post_uri)
def DeleteVideoResponse(self, video_id, response_video_id):
"""Delete a video response.
Needs authentication.
Args:
video_id: A string representing the ID of video that contains the
response.
response_video_id: A string representing the ID of the video that was
posted as a response.
Returns:
      True if video response was deleted successfully.
"""
delete_uri = '%s/%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses',
response_video_id)
return self.Delete(delete_uri)
def AddComplaint(self, complaint_text, complaint_term, video_id):
"""Add a complaint for a particular video entry.
Needs authentication.
Args:
complaint_text: A string representing the complaint text.
complaint_term: A string representing the complaint category term.
video_id: A string representing the ID of YouTubeVideoEntry to
complain about.
Returns:
True if posted successfully.
Raises:
YouTubeError: Your complaint_term is not valid.
"""
if complaint_term not in YOUTUBE_COMPLAINT_CATEGORY_TERMS:
raise YouTubeError('Your complaint_term is not valid')
content = atom.Content(text=complaint_text)
category = atom.Category(term=complaint_term,
scheme=YOUTUBE_COMPLAINT_CATEGORY_SCHEME)
complaint_entry = gdata.GDataEntry(content=content, category=[category])
post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'complaints')
return self.Post(complaint_entry, post_uri)
def AddVideoEntryToFavorites(self, video_entry, username='default'):
"""Add a video entry to a users favorite feed.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to add.
username: An optional string representing the username to whose favorite
feed you wish to add the entry. Defaults to the currently
authenticated user.
Returns:
The posted YouTubeVideoEntry if successfully posted.
"""
post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites')
return self.Post(video_entry, post_uri,
converter=gdata.youtube.YouTubeVideoEntryFromString)
def DeleteVideoEntryFromFavorites(self, video_id, username='default'):
"""Delete a video entry from the users favorite feed.
Needs authentication.
Args:
      video_id: A string representing the ID of the video that is to be removed.
username: An optional string representing the username of the user's
favorite feed. Defaults to the currently authenticated user.
Returns:
True if entry was successfully deleted.
"""
edit_link = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites',
video_id)
return self.Delete(edit_link)
def AddPlaylist(self, playlist_title, playlist_description,
playlist_private=None):
"""Add a new playlist to the currently authenticated users account.
Needs authentication.
Args:
playlist_title: A string representing the title for the new playlist.
playlist_description: A string representing the description of the
playlist.
playlist_private: An optional boolean, set to True if the playlist is
to be private.
Returns:
The YouTubePlaylistEntry if successfully posted.
"""
playlist_entry = gdata.youtube.YouTubePlaylistEntry(
title=atom.Title(text=playlist_title),
description=gdata.youtube.Description(text=playlist_description))
if playlist_private:
playlist_entry.private = gdata.youtube.Private()
playlist_post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, 'default',
'playlists')
return self.Post(playlist_entry, playlist_post_uri,
converter=gdata.youtube.YouTubePlaylistEntryFromString)
def UpdatePlaylist(self, playlist_id, new_playlist_title,
new_playlist_description, playlist_private=None,
username='default'):
"""Update a playlist with new meta-data.
Needs authentication.
Args:
playlist_id: A string representing the ID of the playlist to be updated.
new_playlist_title: A string representing a new title for the playlist.
new_playlist_description: A string representing a new description for the
playlist.
playlist_private: An optional boolean, set to True if the playlist is
to be private.
username: An optional string representing the username whose playlist is
to be updated. Defaults to the currently authenticated user.
Returns:
A YouTubePlaylistEntry if the update was successful.
"""
updated_playlist = gdata.youtube.YouTubePlaylistEntry(
title=atom.Title(text=new_playlist_title),
description=gdata.youtube.Description(text=new_playlist_description))
if playlist_private:
updated_playlist.private = gdata.youtube.Private()
playlist_put_uri = '%s/%s/playlists/%s' % (YOUTUBE_USER_FEED_URI, username,
playlist_id)
return self.Put(updated_playlist, playlist_put_uri,
converter=gdata.youtube.YouTubePlaylistEntryFromString)
def DeletePlaylist(self, playlist_uri):
"""Delete a playlist from the currently authenticated users playlists.
Needs authentication.
Args:
playlist_uri: A string representing the URI of the playlist that is
to be deleted.
Returns:
True if successfully deleted.
"""
return self.Delete(playlist_uri)
def AddPlaylistVideoEntryToPlaylist(
self, playlist_uri, video_id, custom_video_title=None,
custom_video_description=None):
"""Add a video entry to a playlist, optionally providing a custom title
and description.
Needs authentication.
Args:
playlist_uri: A string representing the URI of the playlist to which this
video entry is to be added.
video_id: A string representing the ID of the video entry to add.
custom_video_title: An optional string representing a custom title for
the video (only shown on the playlist).
custom_video_description: An optional string representing a custom
description for the video (only shown on the playlist).
Returns:
A YouTubePlaylistVideoEntry if successfully posted.
"""
playlist_video_entry = gdata.youtube.YouTubePlaylistVideoEntry(
atom_id=atom.Id(text=video_id))
if custom_video_title:
playlist_video_entry.title = atom.Title(text=custom_video_title)
if custom_video_description:
playlist_video_entry.description = gdata.youtube.Description(
text=custom_video_description)
return self.Post(playlist_video_entry, playlist_uri,
converter=gdata.youtube.YouTubePlaylistVideoEntryFromString)
def UpdatePlaylistVideoEntryMetaData(
self, playlist_uri, playlist_entry_id, new_video_title,
new_video_description, new_video_position):
"""Update the meta data for a YouTubePlaylistVideoEntry.
Needs authentication.
Args:
playlist_uri: A string representing the URI of the playlist that contains
the entry to be updated.
playlist_entry_id: A string representing the ID of the entry to be
updated.
new_video_title: A string representing the new title for the video entry.
new_video_description: A string representing the new description for
the video entry.
new_video_position: An integer representing the new position on the
playlist for the video.
Returns:
A YouTubePlaylistVideoEntry if the update was successful.
"""
playlist_video_entry = gdata.youtube.YouTubePlaylistVideoEntry(
title=atom.Title(text=new_video_title),
description=gdata.youtube.Description(text=new_video_description),
position=gdata.youtube.Position(text=str(new_video_position)))
playlist_put_uri = playlist_uri + '/' + playlist_entry_id
return self.Put(playlist_video_entry, playlist_put_uri,
converter=gdata.youtube.YouTubePlaylistVideoEntryFromString)
def DeletePlaylistVideoEntry(self, playlist_uri, playlist_video_entry_id):
"""Delete a playlist video entry from a playlist.
Needs authentication.
Args:
      playlist_uri: A URI representing the playlist from which the playlist
        video entry is to be removed.
      playlist_video_entry_id: A string representing the ID of the playlist
        video entry that is to be removed.
Returns:
True if entry was successfully deleted.
"""
delete_uri = '%s/%s' % (playlist_uri, playlist_video_entry_id)
return self.Delete(delete_uri)
  def AddSubscriptionToChannel(self, username_to_subscribe_to,
                               my_username='default'):
"""Add a new channel subscription to the currently authenticated users
account.
Needs authentication.
Args:
      username_to_subscribe_to: A string representing the username of the
        channel to which we want to subscribe.
      my_username: An optional string representing the name of the user who
        is to be subscribed. Defaults to the currently authenticated user.
Returns:
A new YouTubeSubscriptionEntry if successfully posted.
"""
subscription_category = atom.Category(
scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME,
term='channel')
subscription_username = gdata.youtube.Username(
text=username_to_subscribe_to)
subscription_entry = gdata.youtube.YouTubeSubscriptionEntry(
category=subscription_category,
username=subscription_username)
post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'subscriptions')
return self.Post(subscription_entry, post_uri,
converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
  def AddSubscriptionToFavorites(self, username, my_username='default'):
"""Add a new subscription to a users favorites to the currently
authenticated user's account.
Needs authentication
Args:
username: A string representing the username of the user's favorite feed
to subscribe to.
my_username: An optional string representing the username of the user
        that is to be subscribed. Defaults to the currently authenticated user.
Returns:
A new YouTubeSubscriptionEntry if successful.
"""
subscription_category = atom.Category(
scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME,
term='favorites')
subscription_username = gdata.youtube.Username(text=username)
subscription_entry = gdata.youtube.YouTubeSubscriptionEntry(
category=subscription_category,
username=subscription_username)
post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'subscriptions')
return self.Post(subscription_entry, post_uri,
converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
  def AddSubscriptionToQuery(self, query, my_username='default'):
"""Add a new subscription to a specific keyword query to the currently
authenticated user's account.
    Needs authentication.
Args:
query: A string representing the keyword query to subscribe to.
my_username: An optional string representing the username of the user
        that is to be subscribed. Defaults to the currently authenticated user.
Returns:
A new YouTubeSubscriptionEntry if successful.
"""
subscription_category = atom.Category(
scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME,
term='query')
subscription_query_string = gdata.youtube.QueryString(text=query)
subscription_entry = gdata.youtube.YouTubeSubscriptionEntry(
category=subscription_category,
query_string=subscription_query_string)
post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'subscriptions')
return self.Post(subscription_entry, post_uri,
converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
def DeleteSubscription(self, subscription_uri):
"""Delete a subscription from the currently authenticated user's account.
Needs authentication.
Args:
subscription_uri: A string representing the URI of the subscription that
is to be deleted.
Returns:
True if deleted successfully.
"""
return self.Delete(subscription_uri)
def AddContact(self, contact_username, my_username='default'):
"""Add a new contact to the currently authenticated user's contact feed.
Needs authentication.
Args:
contact_username: A string representing the username of the contact
that you wish to add.
my_username: An optional string representing the username to whose
        contact feed the new contact is to be added.
Returns:
A YouTubeContactEntry if added successfully.
"""
contact_category = atom.Category(
        scheme='http://gdata.youtube.com/schemas/2007/contact.cat',
        term='Friends')
contact_username = gdata.youtube.Username(text=contact_username)
contact_entry = gdata.youtube.YouTubeContactEntry(
category=contact_category,
username=contact_username)
contact_post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'contacts')
return self.Post(contact_entry, contact_post_uri,
converter=gdata.youtube.YouTubeContactEntryFromString)
def UpdateContact(self, contact_username, new_contact_status,
new_contact_category, my_username='default'):
"""Update a contact, providing a new status and a new category.
Needs authentication.
Args:
contact_username: A string representing the username of the contact
that is to be updated.
new_contact_status: A string representing the new status of the contact.
This can either be set to 'accepted' or 'rejected'.
new_contact_category: A string representing the new category for the
contact, either 'Friends' or 'Family'.
my_username: An optional string representing the username of the user
whose contact feed we are modifying. Defaults to the currently
authenticated user.
Returns:
      A YouTubeContactEntry if updated successfully.
Raises:
      YouTubeError: The new contact status or the new contact category is not
        within the accepted values.
"""
if new_contact_status not in YOUTUBE_CONTACT_STATUS:
raise YouTubeError('New contact status must be one of %s' %
(' '.join(YOUTUBE_CONTACT_STATUS)))
if new_contact_category not in YOUTUBE_CONTACT_CATEGORY:
raise YouTubeError('New contact category must be one of %s' %
(' '.join(YOUTUBE_CONTACT_CATEGORY)))
contact_category = atom.Category(
scheme='http://gdata.youtube.com/schemas/2007/contact.cat',
term=new_contact_category)
contact_status = gdata.youtube.Status(text=new_contact_status)
contact_entry = gdata.youtube.YouTubeContactEntry(
category=contact_category,
status=contact_status)
contact_put_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'contacts', contact_username)
return self.Put(contact_entry, contact_put_uri,
converter=gdata.youtube.YouTubeContactEntryFromString)
def DeleteContact(self, contact_username, my_username='default'):
"""Delete a contact from a users contact feed.
Needs authentication.
Args:
contact_username: A string representing the username of the contact
that is to be deleted.
my_username: An optional string representing the username of the user's
contact feed from which to delete the contact. Defaults to the
currently authenticated user.
Returns:
      True if the contact was deleted successfully.
"""
contact_edit_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'contacts', contact_username)
return self.Delete(contact_edit_uri)
def _GetDeveloperKey(self):
"""Getter for Developer Key property.
Returns:
If the developer key has been set, a string representing the developer key
is returned or None.
"""
if 'X-GData-Key' in self.additional_headers:
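      # The header value is stored as 'key=<developer_key>'; strip the
      # 4-character 'key=' prefix before returning it.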
return self.additional_headers['X-GData-Key'][4:]
else:
return None
def _SetDeveloperKey(self, developer_key):
"""Setter for Developer Key property.
Sets the developer key in the 'X-GData-Key' header. The actual value that
is set is 'key=' plus the developer_key that was passed.
"""
self.additional_headers['X-GData-Key'] = 'key=' + developer_key
developer_key = property(_GetDeveloperKey, _SetDeveloperKey,
doc="""The Developer Key property""")
def _GetClientId(self):
"""Getter for Client Id property.
Returns:
If the client_id has been set, a string representing it is returned
or None.
"""
if 'X-Gdata-Client' in self.additional_headers:
return self.additional_headers['X-Gdata-Client']
else:
return None
def _SetClientId(self, client_id):
"""Setter for Client Id property.
Sets the 'X-Gdata-Client' header.
"""
self.additional_headers['X-Gdata-Client'] = client_id
client_id = property(_GetClientId, _SetClientId,
doc="""The ClientId property""")
def Query(self, uri):
"""Performs a query and returns a resulting feed or entry.
Args:
uri: A string representing the URI of the feed that is to be queried.
Returns:
On success, a tuple in the form:
(boolean succeeded=True, ElementTree._Element result)
On failure, a tuple in the form:
(boolean succeeded=False, {'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response})
"""
result = self.Get(uri)
return result
def YouTubeQuery(self, query):
"""Performs a YouTube specific query and returns a resulting feed or entry.
Args:
      query: A Query object or one of its sub-classes (YouTubeVideoQuery,
YouTubeUserQuery or YouTubePlaylistQuery).
Returns:
      Depending on the type of Query object submitted, returns either a
      YouTubeVideoFeed, a YouTubeUserFeed or a YouTubePlaylistFeed. If the
      Query object provided was not YouTube-related, a tuple is returned.
On success the tuple will be in this form:
(boolean succeeded=True, ElementTree._Element result)
On failure, the tuple will be in this form:
(boolean succeeded=False, {'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server response})
"""
result = self.Query(query.ToUri())
if isinstance(query, YouTubeVideoQuery):
return gdata.youtube.YouTubeVideoFeedFromString(result.ToString())
elif isinstance(query, YouTubeUserQuery):
return gdata.youtube.YouTubeUserFeedFromString(result.ToString())
elif isinstance(query, YouTubePlaylistQuery):
return gdata.youtube.YouTubePlaylistFeedFromString(result.ToString())
else:
return result
class YouTubeVideoQuery(gdata.service.Query):
"""Subclasses gdata.service.Query to represent a YouTube Data API query.
Attributes are set dynamically via properties. Properties correspond to
the standard Google Data API query parameters with YouTube Data API
extensions. Please refer to the API documentation for details.
Attributes:
vq: The vq parameter, which is only supported for video feeds, specifies a
search query term. Refer to API documentation for further details.
orderby: The orderby parameter, which is only supported for video feeds,
specifies the value that will be used to sort videos in the search
result set. Valid values for this parameter are relevance, published,
viewCount and rating.
time: The time parameter, which is only available for the top_rated,
top_favorites, most_viewed, most_discussed, most_linked and
most_responded standard feeds, restricts the search to videos uploaded
within the specified time. Valid values for this parameter are today
(1 day), this_week (7 days), this_month (1 month) and all_time.
The default value for this parameter is all_time.
format: The format parameter specifies that videos must be available in a
particular video format. Refer to the API documentation for details.
racy: The racy parameter allows a search result set to include restricted
content as well as standard content. Valid values for this parameter
are include and exclude. By default, restricted content is excluded.
lr: The lr parameter restricts the search to videos that have a title,
description or keywords in a specific language. Valid values for the lr
parameter are ISO 639-1 two-letter language codes.
restriction: The restriction parameter identifies the IP address that
should be used to filter videos that can only be played in specific
countries.
location: A string of geo coordinates. Note that this is not used when the
search is performed but rather to filter the returned videos for ones
that match to the location entered.
"""
def __init__(self, video_id=None, feed_type=None, text_query=None,
params=None, categories=None):
if feed_type in YOUTUBE_STANDARDFEEDS:
feed = 'http://%s/feeds/standardfeeds/%s' % (YOUTUBE_SERVER, feed_type)
    elif feed_type in ('responses', 'comments') and video_id:
feed = 'http://%s/feeds/videos/%s/%s' % (YOUTUBE_SERVER, video_id,
feed_type)
else:
feed = 'http://%s/feeds/videos' % (YOUTUBE_SERVER)
gdata.service.Query.__init__(self, feed, text_query=text_query,
params=params, categories=categories)
def _GetVideoQuery(self):
if 'vq' in self:
return self['vq']
else:
return None
def _SetVideoQuery(self, val):
self['vq'] = val
vq = property(_GetVideoQuery, _SetVideoQuery,
doc="""The video query (vq) query parameter""")
def _GetOrderBy(self):
if 'orderby' in self:
return self['orderby']
else:
return None
def _SetOrderBy(self, val):
if val not in YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS:
      if not val.startswith('relevance_lang_'):
raise YouTubeError('OrderBy must be one of: %s ' %
' '.join(YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS))
self['orderby'] = val
orderby = property(_GetOrderBy, _SetOrderBy,
doc="""The orderby query parameter""")
def _GetTime(self):
if 'time' in self:
return self['time']
else:
return None
def _SetTime(self, val):
if val not in YOUTUBE_QUERY_VALID_TIME_PARAMETERS:
raise YouTubeError('Time must be one of: %s ' %
' '.join(YOUTUBE_QUERY_VALID_TIME_PARAMETERS))
self['time'] = val
time = property(_GetTime, _SetTime,
doc="""The time query parameter""")
def _GetFormat(self):
if 'format' in self:
return self['format']
else:
return None
def _SetFormat(self, val):
if val not in YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS:
raise YouTubeError('Format must be one of: %s ' %
' '.join(YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS))
self['format'] = val
format = property(_GetFormat, _SetFormat,
doc="""The format query parameter""")
def _GetRacy(self):
if 'racy' in self:
return self['racy']
else:
return None
def _SetRacy(self, val):
if val not in YOUTUBE_QUERY_VALID_RACY_PARAMETERS:
raise YouTubeError('Racy must be one of: %s ' %
' '.join(YOUTUBE_QUERY_VALID_RACY_PARAMETERS))
self['racy'] = val
racy = property(_GetRacy, _SetRacy,
doc="""The racy query parameter""")
def _GetLanguageRestriction(self):
if 'lr' in self:
return self['lr']
else:
return None
def _SetLanguageRestriction(self, val):
self['lr'] = val
lr = property(_GetLanguageRestriction, _SetLanguageRestriction,
doc="""The lr (language restriction) query parameter""")
def _GetIPRestriction(self):
if 'restriction' in self:
return self['restriction']
else:
return None
def _SetIPRestriction(self, val):
self['restriction'] = val
restriction = property(_GetIPRestriction, _SetIPRestriction,
doc="""The restriction query parameter""")
def _GetLocation(self):
if 'location' in self:
return self['location']
else:
return None
def _SetLocation(self, val):
self['location'] = val
location = property(_GetLocation, _SetLocation,
doc="""The location query parameter""")
class YouTubeUserQuery(YouTubeVideoQuery):
"""Subclasses YouTubeVideoQuery to perform user-specific queries.
Attributes are set dynamically via properties. Properties correspond to
the standard Google Data API query parameters with YouTube Data API
extensions.
"""
def __init__(self, username=None, feed_type=None, subscription_id=None,
text_query=None, params=None, categories=None):
uploads_favorites_playlists = ('uploads', 'favorites', 'playlists')
    if feed_type == 'subscriptions' and subscription_id and username:
feed = "http://%s/feeds/users/%s/%s/%s" % (YOUTUBE_SERVER, username,
feed_type, subscription_id)
    elif feed_type == 'subscriptions' and not subscription_id and username:
feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username,
feed_type)
elif feed_type in uploads_favorites_playlists:
feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username,
feed_type)
else:
feed = "http://%s/feeds/users" % (YOUTUBE_SERVER)
    gdata.service.Query.__init__(self, feed, text_query=text_query,
                                 params=params, categories=categories)
class YouTubePlaylistQuery(YouTubeVideoQuery):
"""Subclasses YouTubeVideoQuery to perform playlist-specific queries.
Attributes are set dynamically via properties. Properties correspond to
the standard Google Data API query parameters with YouTube Data API
extensions.
"""
def __init__(self, playlist_id, text_query=None, params=None,
categories=None):
if playlist_id:
feed = "http://%s/feeds/playlists/%s" % (YOUTUBE_SERVER, playlist_id)
else:
feed = "http://%s/feeds/playlists" % (YOUTUBE_SERVER)
    gdata.service.Query.__init__(self, feed, text_query=text_query,
                                 params=params, categories=categories)
| apache-2.0 | 8,379,104,848,166,723,000 | 36.124359 | 94 | 0.67191 | false |
ds-hwang/chromium-crosswalk | build/download_gold_plugin.py | 19 | 1658 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to download LLVM gold plugin from google storage."""
import find_depot_tools
import json
import os
import shutil
import subprocess
import sys
import zipfile
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CHROME_SRC = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
DEPOT_PATH = find_depot_tools.add_depot_tools_to_path()
GSUTIL_PATH = os.path.join(DEPOT_PATH, 'gsutil.py')
LLVM_BUILD_PATH = os.path.join(CHROME_SRC, 'third_party', 'llvm-build',
'Release+Asserts')
CLANG_UPDATE_PY = os.path.join(CHROME_SRC, 'tools', 'clang', 'scripts',
'update.py')
CLANG_REVISION = os.popen(CLANG_UPDATE_PY + ' --print-revision').read().rstrip()
CLANG_BUCKET = 'gs://chromium-browser-clang/Linux_x64'
def main():
targz_name = 'llvmgold-%s.tgz' % CLANG_REVISION
remote_path = '%s/%s' % (CLANG_BUCKET, targz_name)
os.chdir(LLVM_BUILD_PATH)
# TODO(pcc): Fix gsutil.py cp url file < /dev/null 2>&0
# (currently aborts with exit code 1,
# https://github.com/GoogleCloudPlatform/gsutil/issues/289) or change the
# stdin->stderr redirect in update.py to do something else (crbug.com/494442).
subprocess.check_call(['python', GSUTIL_PATH,
'cp', remote_path, targz_name],
stderr=open('/dev/null', 'w'))
subprocess.check_call(['tar', 'xzf', targz_name])
os.remove(targz_name)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -3,897,716,713,393,981,400 | 32.836735 | 80 | 0.658022 | false |
ah-anssi/SecuML | SecuML/experiments/ExperimentFactory.py | 1 | 1834 | # SecuML
# Copyright (C) 2016-2017 ANSSI
#
# SecuML is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# SecuML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with SecuML. If not, see <http://www.gnu.org/licenses/>.
import json
from SecuML.experiments import experiment_db_tools
from SecuML.experiments.Tools import dir_exp_tools
experiment_factory = None
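# Module-level singleton; obtain it through getFactory() rather than
# instantiating ExperimentFactory directly.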
def getFactory():
global experiment_factory
if experiment_factory is None:
experiment_factory = ExperimentFactory()
return experiment_factory
class ExperimentFactory(object):
def __init__(self):
self.register = {}
def registerClass(self, class_name, class_obj):
self.register[class_name] = class_obj
def fromJson(self, experiment_id, session):
project, dataset = experiment_db_tools.getProjectDataset(session,
experiment_id)
obj_filename = dir_exp_tools.getExperimentConfigurationFilename(project,
dataset,
experiment_id)
with open(obj_filename, 'r') as obj_file:
obj_dict = json.load(obj_file)
class_name = obj_dict['__type__']
obj = self.register[class_name].fromJson(obj_dict, session)
return obj
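# Usage sketch (the experiment class and its name below are assumptions;
# concrete classes register themselves elsewhere):
#
#   getFactory().registerClass('MyExperiment', MyExperiment)
#   experiment = getFactory().fromJson(experiment_id, session)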
| gpl-2.0 | 6,305,072,350,794,199,000 | 35.68 | 86 | 0.633588 | false |
gabrieladt/kops-ec2-autoscaler | autoscaler/kube.py | 1 | 13310 | import datetime
import json
import logging
from dateutil.parser import parse as dateutil_parse
import pykube.exceptions
import autoscaler.utils as utils
logger = logging.getLogger(__name__)
class KubePodStatus(object):
RUNNING = 'Running'
PENDING = 'Pending'
CONTAINER_CREATING = 'ContainerCreating'
SUCCEEDED = 'Succeeded'
FAILED = 'Failed'
_CORDON_LABEL = 'openai/cordoned-by-autoscaler'
class KubePod(object):
_DRAIN_GRACE_PERIOD = datetime.timedelta(seconds=60*60)
def __init__(self, pod):
self.original = pod
metadata = pod.obj['metadata']
self.name = metadata['name']
self.namespace = metadata['namespace']
self.node_name = pod.obj['spec'].get('nodeName')
self.status = pod.obj['status']['phase']
self.uid = metadata['uid']
self.selectors = pod.obj['spec'].get('nodeSelector', {})
# TODO: Remove this, after everyone has migrated off reservations
if 'openai.org/reservation-id' in self.selectors:
del self.selectors['openai.org/reservation-id']
self.labels = metadata.get('labels', {})
self.annotations = metadata.get('annotations', {})
self.owner = self.labels.get('owner', None)
self.creation_time = dateutil_parse(metadata['creationTimestamp'])
self.start_time = dateutil_parse(pod.obj['status']['startTime']) if 'startTime' in pod.obj['status'] else None
self.scheduled_time = None
for condition in pod.obj['status'].get('conditions', []):
if condition['type'] == 'PodScheduled' and condition['status'] == 'True':
self.scheduled_time = dateutil_parse(condition['lastTransitionTime'])
# TODO: refactor
requests = [c.get('resources', {}).get('requests', {}) for c in pod.obj['spec']['containers']]
resource_requests = {}
for d in requests:
for k, v in d.items():
unitless_v = utils.parse_SI(v)
resource_requests[k] = resource_requests.get(k, 0.0) + unitless_v
self.resources = KubeResource(pods=1, **resource_requests)
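        # Track tolerations by effect; only existence-based ('Exists')
        # tolerations are recorded, either as wildcards (no key) or as
        # per-key sets. Equality tolerations are not supported here.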
self.no_schedule_wildcard_toleration = False
self.no_execute_wildcard_toleration = False
self.no_schedule_existential_tolerations = set()
self.no_execute_existential_tolerations = set()
for toleration in pod.obj['spec'].get('tolerations', []):
if toleration.get('operator', 'Equal') == 'Exists':
effect = toleration.get('effect')
if effect is None or effect == 'NoSchedule':
if 'key' not in toleration:
self.no_schedule_wildcard_toleration = True
else:
self.no_schedule_existential_tolerations.add(toleration['key'])
if effect is None or effect == 'NoExecute':
if 'key' not in toleration:
self.no_execute_wildcard_toleration = True
else:
self.no_execute_existential_tolerations.add(toleration['key'])
else:
                logger.warning("Equality tolerations not implemented. Pod {} has an equality toleration".format(pod))
def is_mirrored(self):
created_by = json.loads(self.annotations.get('kubernetes.io/created-by', '{}'))
is_daemonset = created_by.get('reference', {}).get('kind') == 'DaemonSet'
return is_daemonset or self.annotations.get('kubernetes.io/config.mirror')
def is_replicated(self):
created_by = json.loads(self.annotations.get('kubernetes.io/created-by', '{}'))
return created_by
def is_critical(self):
return utils.parse_bool_label(self.labels.get('openai/do-not-drain'))
def is_in_drain_grace_period(self):
"""
determines whether the pod is in a grace period for draining
this prevents us from draining pods that are too new
"""
return (self.scheduled_time and
(datetime.datetime.now(self.scheduled_time.tzinfo) - self.scheduled_time) < self._DRAIN_GRACE_PERIOD)
def is_drainable(self):
"""
a pod is considered drainable if:
- it's a daemon
- it's a non-critical replicated pod that has exceeded grace period
"""
return (self.is_mirrored() or
(self.is_replicated() and not self.is_critical() and not self.is_in_drain_grace_period()))
def delete(self):
logger.info('Deleting Pod %s/%s', self.namespace, self.name)
return self.original.delete()
def __hash__(self):
return hash(self.uid)
def __eq__(self, other):
return self.uid == other.uid
def __str__(self):
return 'KubePod({namespace}, {name})'.format(
namespace=self.namespace, name=self.name)
def __repr__(self):
return str(self)
def reverse_bytes(value):
assert len(value) % 2 == 0
result = ""
for i in range(len(value), 0, -2):
result += value[i - 2: i]
return result
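# Example: reverse_bytes('11223344') -> '44332211'. Used below to convert the
# big-endian byte groups of an Azure VM unique id into the little-endian form
# reported by the platform.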
class KubeNode(object):
_HEARTBEAT_GRACE_PERIOD = datetime.timedelta(seconds=60*60)
def __init__(self, node):
self.original = node
self.pykube_node = node
metadata = node.obj['metadata']
self.name = metadata['name']
self.instance_id, self.region, self.instance_type, self.provider = self._get_instance_data()
self.capacity = KubeResource(**node.obj['status']['capacity'])
self.used_capacity = KubeResource()
self.creation_time = dateutil_parse(metadata['creationTimestamp'])
last_heartbeat_time = self.creation_time
for condition in node.obj['status'].get('conditions', []):
if condition.get('type') == 'Ready':
last_heartbeat_time = dateutil_parse(condition['lastHeartbeatTime'])
self.last_heartbeat_time = last_heartbeat_time
self.no_schedule_taints = {}
self.no_execute_taints = {}
for taint in node.obj['spec'].get('taints', []):
            try:
                if taint['effect'] == 'NoSchedule':
                    self.no_schedule_taints[taint['key']] = taint['value']
                if taint['effect'] == 'NoExecute':
                    self.no_execute_taints[taint['key']] = taint['value']
            except KeyError:
                logger.info("Taint missing a required field: %s", taint)
def _get_instance_data(self):
"""
returns a tuple (instance id, region, instance type)
"""
labels = self.original.obj['metadata'].get('labels', {})
instance_type = labels.get('aws/type', labels.get('beta.kubernetes.io/instance-type'))
provider = self.original.obj['spec'].get('providerID', '')
if provider.startswith('aws://'):
az, instance_id = tuple(provider.split('/')[-2:])
if az and instance_id:
return (instance_id, az[:-1], instance_type, 'aws')
if labels.get('aws/id') and labels.get('aws/az'):
instance_id = labels['aws/id']
region = labels['aws/az'][:-1]
return (instance_id, region, instance_type, 'aws')
assert provider.startswith('azure:////'), provider
# Id is in wrong order: https://azure.microsoft.com/en-us/blog/accessing-and-using-azure-vm-unique-id/
big_endian_vm_id = provider.replace('azure:////', '')
parts = big_endian_vm_id.split('-')
instance_id = '-'.join([reverse_bytes(parts[0]),
reverse_bytes(parts[1]),
reverse_bytes(parts[2]),
parts[3],
parts[4]]).lower()
instance_type = labels['azure/type']
return (instance_id, 'placeholder', instance_type, 'azure')
@property
def selectors(self):
return self.original.obj['metadata'].get('labels', {})
@property
def unschedulable(self):
return self.original.obj['spec'].get('unschedulable', False)
@property
def can_uncordon(self):
return utils.parse_bool_label(self.selectors.get(_CORDON_LABEL))
def drain(self, pods, notifier=None):
for pod in pods:
if pod.is_drainable() and not pod.is_mirrored():
pod.delete()
logger.info("drained %s", self)
if notifier:
notifier.notify_drained_node(self, pods)
def uncordon(self):
if not utils.parse_bool_label(self.selectors.get(_CORDON_LABEL)):
logger.debug('uncordon %s ignored', self)
return False
try:
self.original.reload()
self.original.obj['spec']['unschedulable'] = False
self.original.update()
logger.info("uncordoned %s", self)
return True
except pykube.exceptions.HTTPError as ex:
logger.info("uncordon failed %s %s", self, ex)
return False
def cordon(self):
try:
self.original.reload()
self.original.obj['spec']['unschedulable'] = True
self.original.obj['metadata'].setdefault('labels', {})[_CORDON_LABEL] = 'true'
self.original.update()
logger.info("cordoned %s", self)
return True
except pykube.exceptions.HTTPError as ex:
logger.info("cordon failed %s %s", self, ex)
return False
def delete(self):
try:
self.original.delete()
logger.info("deleted %s", self)
return True
except pykube.exceptions.HTTPError as ex:
logger.info("delete failed %s %s", self, ex)
return False
def count_pod(self, pod):
assert isinstance(pod, KubePod)
self.used_capacity += pod.resources
def can_fit(self, resources):
assert isinstance(resources, KubeResource)
left = self.capacity - (self.used_capacity + resources)
return left.possible
def is_match(self, pod: KubePod):
"""
whether this node matches all the selectors on the pod
"""
for label, value in pod.selectors.items():
if self.selectors.get(label) != value:
return False
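        # Label selectors match; the node's taints must also be tolerated.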
for key in self.no_schedule_taints:
if not (pod.no_schedule_wildcard_toleration or key in pod.no_schedule_existential_tolerations):
return False
for key in self.no_execute_taints:
if not (pod.no_execute_wildcard_toleration or key in pod.no_execute_existential_tolerations):
return False
return True
def is_managed(self):
"""
an instance is managed if we know its instance ID in ec2.
"""
return self.instance_id is not None
def is_detached(self):
return utils.parse_bool_label(self.selectors.get('openai/detached'))
def is_dead(self):
return datetime.datetime.now(self.last_heartbeat_time.tzinfo) - self.last_heartbeat_time > self._HEARTBEAT_GRACE_PERIOD
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return self.name == other.name
def __str__(self):
return "{}: {} ({})".format(self.name, self.instance_id,
utils.selectors_to_hash(self.selectors))
class KubeResource(object):
def __init__(self, **kwargs):
self.raw = dict((k, utils.parse_resource(v))
for (k, v) in kwargs.items())
def __add__(self, other):
keys = set(self.raw.keys()) | set(other.raw.keys())
raw_diff = dict((k, self.raw.get(k, 0) + other.raw.get(k, 0))
for k in keys)
return KubeResource(**raw_diff)
def __sub__(self, other):
keys = set(self.raw.keys()) | set(other.raw.keys())
raw_diff = dict((k, self.raw.get(k, 0) - other.raw.get(k, 0))
for k in keys)
return KubeResource(**raw_diff)
def __mul__(self, multiplier):
new_raw = dict((k, v * multiplier) for k, v in self.raw.items())
return KubeResource(**new_raw)
def __rmul__(self, multiplier):
return self.__mul__(multiplier)
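    # Note: Python 3 does not invoke __cmp__ for comparison operators; the
    # method below presumably has to be called directly by interested callers
    # (an assumption based on the surrounding Python 3 code).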
def __cmp__(self, other):
"""
should return a negative integer if self < other,
zero if self == other, a positive integer if self > other.
we consider self to be greater than other if it exceeds
the resource amount in other in more resource types.
e.g. if self = {cpu: 4, memory: 1K, gpu: 1},
other = {cpu: 2, memory: 2K}, then self exceeds the resource
amount in other in both cpu and gpu, while other exceeds
the resource amount in self in only memory, so self > other.
"""
resource_diff = (self - other).raw
num_resource_types = len(resource_diff)
num_eq = sum(1 for v in resource_diff.values() if v == 0)
num_less = sum(1 for v in resource_diff.values() if v < 0)
num_more = num_resource_types - num_eq - num_less
return num_more - num_less
def __str__(self):
return str(self.raw)
def get(self, key, default=None):
return self.raw.get(key, default)
@property
def possible(self):
return all([x >= 0 for x in self.raw.values()])
| mit | 4,107,491,487,797,331,000 | 37.137536 | 127 | 0.581518 | false |
AgostonSzepessy/servo | tests/wpt/css-tests/tools/runner/report.py | 278 | 9660 | import argparse
import json
import sys
from cgi import escape
from collections import defaultdict
import types
def html_escape(item, escape_quote=False):
if isinstance(item, types.StringTypes):
rv = escape(item)
if escape_quote:
rv = rv.replace('"', """)
return rv
else:
return item
class Raw(object):
"""Simple wrapper around a string to stop it being escaped by html_escape"""
def __init__(self, value):
self.value = value
def __unicode__(self):
return unicode(self.value)
class Node(object):
"""Node structure used when building HTML"""
def __init__(self, name, attrs, children):
#Need list of void elements
self.name = name
self.attrs = attrs
self.children = children
def __unicode__(self):
if self.attrs:
#Need to escape
attrs_unicode = " " + " ".join("%s=\"%s\"" % (html_escape(key),
html_escape(value,
escape_quote=True))
for key, value in self.attrs.iteritems())
else:
attrs_unicode = ""
return "<%s%s>%s</%s>\n" % (self.name,
attrs_unicode,
"".join(unicode(html_escape(item))
for item in self.children),
self.name)
def __str__(self):
return unicode(self).encode("utf8")
class RootNode(object):
"""Special Node representing the document root"""
def __init__(self, *children):
self.children = ["<!DOCTYPE html>"] + list(children)
def __unicode__(self):
return "".join(unicode(item) for item in self.children)
def __str__(self):
return unicode(self).encode("utf8")
def flatten(iterable):
"""Flatten a list of lists by one level so that
[1,["abc"], "def",[2, [3]]]
becomes
[1, "abc", "def", 2, [3]]"""
rv = []
for item in iterable:
if hasattr(item, "__iter__") and not isinstance(item, types.StringTypes):
rv.extend(item)
else:
rv.append(item)
return rv
class HTML(object):
"""Simple HTML templating system. An instance of this class can create
element nodes by calling methods with the same name as the element,
passing in children as positional arguments or as a list, and attributes
as keyword arguments, with _ replacing - and trailing _ for python keywords
e.g.
h = HTML()
print h.html(
html.head(),
html.body([html.h1("Hello World!")], class_="body-class")
)
Would give
<!DOCTYPE html><html><head></head><body class="body-class"><h1>Hello World!</h1></body></html>"""
def __getattr__(self, name):
def make_html(self, *content, **attrs):
for attr_name in attrs.keys():
if "_" in attr_name:
new_name = attr_name.replace("_", "-")
if new_name.endswith("-"):
new_name = new_name[:-1]
attrs[new_name] = attrs.pop(attr_name)
return Node(name, attrs, flatten(content))
method = types.MethodType(make_html, self, HTML)
setattr(self, name, method)
return method
def __call__(self, *children):
return RootNode(*flatten(children))
h = HTML()
class TestResult(object):
"""Simple holder for the results of a single test in a single UA"""
def __init__(self, test):
self.test = test
self.results = {}
def __cmp__(self, other):
return self.test == other.test
def __hash__(self):
return hash(self.test)
def load_data(args):
"""Load data treating args as a list of UA name, filename pairs"""
pairs = []
for i in xrange(0, len(args), 2):
pairs.append(args[i:i+2])
rv = {}
for UA, filename in pairs:
with open(filename) as f:
rv[UA] = json.load(f)
return rv
def test_id(id):
"""Convert a test id in JSON into an immutable object that
can be used as a dictionary key"""
if isinstance(id, list):
return tuple(id)
else:
return id
def all_tests(data):
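    # Map each test id to the set of subtest names observed in any UA's
    # results.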
tests = defaultdict(set)
for UA, results in data.iteritems():
for result in results["results"]:
id = test_id(result["test"])
tests[id] |= set(subtest["name"] for subtest in result["subtests"])
return tests
def group_results(data):
"""Produce a list of UAs and a dictionary mapping specific tests to their
status in all UAs e.g.
["UA1", "UA2"], {"test_id":{"harness":{"UA1": (status1, message1),
"UA2": (status2, message2)},
"subtests":{"subtest1": "UA1": (status1-1, message1-1),
"UA2": (status2-1, message2-1)}}}
Status and message are None if the test didn't run in a particular UA.
Message is None if the test didn't produce a message"""
tests = all_tests(data)
UAs = data.keys()
def result():
return {
"harness": dict((UA, (None, None)) for UA in UAs),
"subtests": None # init this later
}
results_by_test = defaultdict(result)
for UA, results in data.iteritems():
for test_data in results["results"]:
id = test_id(test_data["test"])
result = results_by_test[id]
if result["subtests"] is None:
result["subtests"] = dict(
(name, dict((UA, (None, None)) for UA in UAs)) for name in tests[id]
)
result["harness"][UA] = (test_data["status"], test_data["message"])
for subtest in test_data["subtests"]:
result["subtests"][subtest["name"]][UA] = (subtest["status"],
subtest["message"])
return UAs, results_by_test
def status_cell(status, message=None):
"""Produce a table cell showing the status of a test"""
status = status if status is not None else "NONE"
kwargs = {}
if message:
kwargs["title"] = message
status_text = status.title()
return h.td(status_text, class_="status " + status,
**kwargs)
def test_link(test_id, subtest=None):
"""Produce an <a> element linking to a test"""
if isinstance(test_id, types.StringTypes):
rv = [h.a(test_id, href=test_id)]
else:
rv = [h.a(test_id[0], href=test_id[0]),
" %s " % test_id[1],
h.a(test_id[2], href=test_id[2])]
if subtest is not None:
rv.append(" [%s]" % subtest)
return rv
def summary(UAs, results_by_test):
"""Render the implementation report summary"""
not_passing = []
for test, results in results_by_test.iteritems():
if not any(item[0] in ("PASS", "OK") for item in results["harness"].values()):
not_passing.append((test, None))
for subtest_name, subtest_results in results["subtests"].iteritems():
if not any(item[0] == "PASS" for item in subtest_results.values()):
not_passing.append((test, subtest_name))
if not_passing:
rv = [
h.p("The following tests failed to pass in all UAs:"),
h.ul([h.li(test_link(test, subtest))
for test, subtest in not_passing])
]
else:
rv = "All tests passed in at least one UA"
return rv
def result_rows(UAs, test, result):
"""Render the results for each test run"""
yield h.tr(
h.td(
test_link(test),
rowspan=(1 + len(result["subtests"]))
),
h.td(),
[status_cell(status, message)
for UA, (status, message) in sorted(result["harness"].items())],
class_="test"
)
for name, subtest_result in sorted(result["subtests"].iteritems()):
yield h.tr(
h.td(name),
[status_cell(status, message)
for UA, (status, message) in sorted(subtest_result.items())],
class_="subtest"
)
def result_bodies(UAs, results_by_test):
return [h.tbody(result_rows(UAs, test, result))
for test, result in sorted(results_by_test.iteritems())]
def generate_html(UAs, results_by_test):
"""Generate all the HTML output"""
doc = h(h.html([
h.head(h.meta(charset="utf8"),
h.title("Implementation Report"),
h.link(href="report.css", rel="stylesheet")),
h.body(h.h1("Implementation Report"),
h.h2("Summary"),
summary(UAs, results_by_test),
h.h2("Full Results"),
h.table(
h.thead(
h.tr(
h.th("Test"),
h.th("Subtest"),
[h.th(UA) for UA in sorted(UAs)]
)
),
result_bodies(UAs, results_by_test)
)
)
]))
return doc
def main(filenames):
data = load_data(filenames)
UAs, results_by_test = group_results(data)
return generate_html(UAs, results_by_test)
if __name__ == "__main__":
if not sys.argv[1:]:
print """Please supply a list of UA name, filename pairs e.g.
python report.py Firefox firefox.json Chrome chrome.json IE internet_explorer.json"""
print main(sys.argv[1:])
| mpl-2.0 | -7,210,168,331,505,407,000 | 30.16129 | 101 | 0.530021 | false |
zaccoz/odoo | addons/l10n_be_intrastat/__openerp__.py | 257 | 1631 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Business Applications
# Copyright (C) 2014-2015 Odoo S.A. <http://www.odoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Belgian Intrastat Declaration',
'version': '1.0',
'category': 'Reporting',
'description': """
Generates Intrastat XML report for declaration
Based on invoices.
""",
'author': 'Odoo SA',
'depends': ['report_intrastat', 'sale_stock', 'account_accountant', 'l10n_be'],
'data': [
'data/regions.xml',
'data/report.intrastat.code.csv',
'data/transaction.codes.xml',
'data/transport.modes.xml',
'security/groups.xml',
'security/ir.model.access.csv',
'l10n_be_intrastat.xml',
'wizard/l10n_be_intrastat_xml_view.xml',
],
'installable': True,
}
| agpl-3.0 | -2,219,014,505,261,441,500 | 36.930233 | 83 | 0.59534 | false |
qrkourier/ansible | lib/ansible/modules/system/sefcontext.py | 56 | 7940 | #!/usr/bin/python
# (c) 2016, Dag Wieers <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: sefcontext
short_description: Manages SELinux file context mapping definitions
description:
- Manages SELinux file context mapping definitions.
- Similar to the C(semanage fcontext) command.
version_added: '2.2'
options:
target:
description:
- Target path (expression).
required: yes
aliases: [ path ]
ftype:
description:
- File type.
default: a
setype:
description:
- SELinux type for the specified target.
required: yes
seuser:
description:
- SELinux user for the specified target.
selevel:
description:
- SELinux range for the specified target.
aliases: [ serange ]
state:
description:
- Desired boolean value.
choices: [ absent, present ]
default: present
reload:
description:
- Reload SELinux policy after commit.
type: bool
default: 'yes'
notes:
- The changes are persistent across reboots
requirements:
- libselinux-python
- policycoreutils-python
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
# Allow apache to modify files in /srv/git_repos
- sefcontext:
target: '/srv/git_repos(/.*)?'
setype: httpd_git_rw_content_t
state: present
'''
RETURN = r'''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils._text import to_native
try:
import selinux
HAVE_SELINUX = True
except ImportError:
HAVE_SELINUX = False
try:
import seobject
HAVE_SEOBJECT = True
except ImportError:
HAVE_SEOBJECT = False
# Add missing entries (backward compatible)
if HAVE_SEOBJECT:
seobject.file_types.update(dict(
a=seobject.SEMANAGE_FCONTEXT_ALL,
b=seobject.SEMANAGE_FCONTEXT_BLOCK,
c=seobject.SEMANAGE_FCONTEXT_CHAR,
d=seobject.SEMANAGE_FCONTEXT_DIR,
f=seobject.SEMANAGE_FCONTEXT_REG,
l=seobject.SEMANAGE_FCONTEXT_LINK,
p=seobject.SEMANAGE_FCONTEXT_PIPE,
s=seobject.SEMANAGE_FCONTEXT_SOCK,
))
# Make backward compatible
option_to_file_type_str = dict(
a='all files',
b='block device',
c='character device',
d='directory',
f='regular file',
l='symbolic link',
p='named pipe',
s='socket file',
)
def semanage_fcontext_exists(sefcontext, target, ftype):
''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. '''
    # Beware that records are keyed by a string representation of the file_type
record = (target, option_to_file_type_str[ftype])
records = sefcontext.get_all()
try:
return records[record]
except KeyError:
return None
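# Illustrative sketch (not part of the upstream module): given a
# seobject.fcontextRecords instance, a lookup for an "all files" mapping uses
# the target path plus the string form of the file type as the record key.
# The target path below is an assumption used only for this example.
#
#   sefcontext = seobject.fcontextRecords()
#   exists = semanage_fcontext_exists(sefcontext, '/srv/git_repos(/.*)?', 'a')
#   # `exists` is None, or a (seuser, serole, setype, serange) tuple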
def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''):
''' Add or modify SELinux file context mapping definition to the policy. '''
changed = False
prepared_diff = ''
try:
sefcontext = seobject.fcontextRecords(sestore)
sefcontext.set_reload(do_reload)
exists = semanage_fcontext_exists(sefcontext, target, ftype)
if exists:
# Modify existing entry
orig_seuser, orig_serole, orig_setype, orig_serange = exists
if seuser is None:
seuser = orig_seuser
if serange is None:
serange = orig_serange
if setype != orig_setype or seuser != orig_seuser or serange != orig_serange:
if not module.check_mode:
sefcontext.modify(target, setype, ftype, serange, seuser)
changed = True
if module._diff:
prepared_diff += '# Change to semanage file context mappings\n'
prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange)
else:
# Add missing entry
if seuser is None:
seuser = 'system_u'
if serange is None:
serange = 's0'
if not module.check_mode:
sefcontext.add(target, setype, ftype, serange, seuser)
changed = True
if module._diff:
prepared_diff += '# Addition to semanage file context mappings\n'
prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange)
except Exception:
e = get_exception()
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
if module._diff and prepared_diff:
result['diff'] = dict(prepared=prepared_diff)
module.exit_json(changed=changed, seuser=seuser, serange=serange, **result)
def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''):
''' Delete SELinux file context mapping definition from the policy. '''
changed = False
prepared_diff = ''
try:
sefcontext = seobject.fcontextRecords(sestore)
sefcontext.set_reload(do_reload)
exists = semanage_fcontext_exists(sefcontext, target, ftype)
if exists:
# Remove existing entry
orig_seuser, orig_serole, orig_setype, orig_serange = exists
if not module.check_mode:
sefcontext.delete(target, ftype)
changed = True
if module._diff:
prepared_diff += '# Deletion to semanage file context mappings\n'
prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3])
except Exception:
e = get_exception()
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
if module._diff and prepared_diff:
result['diff'] = dict(prepared=prepared_diff)
module.exit_json(changed=changed, **result)
def main():
module = AnsibleModule(
argument_spec=dict(
target=dict(required=True, aliases=['path']),
ftype=dict(type='str', default='a', choices=option_to_file_type_str.keys()),
setype=dict(type='str', required=True),
seuser=dict(type='str'),
selevel=dict(type='str', aliases=['serange']),
state=dict(type='str', default='present', choices=['absent', 'present']),
reload=dict(type='bool', default=True),
),
supports_check_mode=True,
)
if not HAVE_SELINUX:
module.fail_json(msg="This module requires libselinux-python")
if not HAVE_SEOBJECT:
module.fail_json(msg="This module requires policycoreutils-python")
if not selinux.is_selinux_enabled():
module.fail_json(msg="SELinux is disabled on this host.")
target = module.params['target']
ftype = module.params['ftype']
setype = module.params['setype']
seuser = module.params['seuser']
serange = module.params['selevel']
state = module.params['state']
do_reload = module.params['reload']
result = dict(target=target, ftype=ftype, setype=setype, state=state)
if state == 'present':
semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser)
elif state == 'absent':
semanage_fcontext_delete(module, result, target, ftype, do_reload)
else:
module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
if __name__ == '__main__':
main()
| gpl-3.0 | 5,549,401,864,826,840,000 | 30.137255 | 140 | 0.619395 | false |
jeffzheng1/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py | 82 | 6157 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
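    # by default, each reader thread enqueues batch_size / num_threads rows
    # per call (at least 1)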
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform.parameter
def data(self):
return self._data
@transform.parameter
def num_threads(self):
return self._num_threads
@transform.parameter
def enqueue_size(self):
return self._enqueue_size
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@transform.parameter
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + list(self._data.keys()))
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
| apache-2.0 | -174,137,666,437,107,840 | 33.016575 | 84 | 0.585025 | false |
zahodi/ansible | lib/ansible/utils/module_docs_fragments/dellos10.py | 42 | 2591 | #
# (c) 2015, Peter Sprygada <[email protected]>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device.
required: false
default: 22
username:
description:
- User to authenticate the SSH session to the remote device. If the
value is not specified in the task, the value of environment variable
C(ANSIBLE_NET_USERNAME) will be used instead.
required: false
password:
description:
- Password to authenticate the SSH session to the remote device. If the
value is not specified in the task, the value of environment variable
C(ANSIBLE_NET_PASSWORD) will be used instead.
required: false
default: null
ssh_keyfile:
description:
- Path to an ssh key used to authenticate the SSH session to the remote
device. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
required: false
timeout:
description:
- Specifies idle timeout (in seconds) for the connection. Useful if the
console freezes before continuing. For example when saving
configurations.
required: false
default: 10
provider:
description:
- Convenience method that allows all I(dellos10) arguments to be passed as
a dict object. All constraints (required, choices, etc) must be
met either by individual arguments or values in this dict.
required: false
default: null
"""
| gpl-3.0 | -1,529,598,488,391,523,300 | 34.986111 | 80 | 0.708607 | false |
ceibal-tatu/software-center | tests/test_login_backend.py | 3 | 1423 | import os
import unittest
from tests.utils import (
setup_test_env,
)
setup_test_env()
from softwarecenter.backend.login import get_login_backend
from softwarecenter.backend.login_impl.login_sso import (
LoginBackendDbusSSO)
from softwarecenter.backend.login_impl.login_fake import (
LoginBackendDbusSSOFake,
)
class TestLoginBackend(unittest.TestCase):
""" tests the login backend stuff """
def test_fake_and_real_provide_similar_methods(self):
""" test if the real and fake login provide the same functions """
login_real = LoginBackendDbusSSO
login_fake = LoginBackendDbusSSOFake
# ensure that both fake and real implement the same methods
self.assertEqual(
set([x for x in dir(login_real) if not x.startswith("_")]),
set([x for x in dir(login_fake) if not x.startswith("_")]))
def test_get_login_backend(self):
# test that we get the real one
self.assertEqual(type(get_login_backend(None, None, None)),
LoginBackendDbusSSO)
# test that we get the fake one
os.environ["SOFTWARE_CENTER_FAKE_REVIEW_API"] = "1"
self.assertEqual(type(get_login_backend(None, None, None)),
LoginBackendDbusSSOFake)
# clean the environment
del os.environ["SOFTWARE_CENTER_FAKE_REVIEW_API"]
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 | -1,117,801,081,300,315,900 | 33.707317 | 74 | 0.654252 | false |
Thraxis/SickRage | sickbeard/name_parser/parser.py | 3 | 24350 | # coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import time
import re
import os.path
import regexes
import sickbeard
from sickbeard import logger, helpers, scene_numbering, common, scene_exceptions, db
from sickrage.helper.common import remove_extension
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
from dateutil import parser
class NameParser(object):
ALL_REGEX = 0
NORMAL_REGEX = 1
ANIME_REGEX = 2
    def __init__(self, file_name=True, showObj=None, tryIndexers=False, naming_pattern=False, parse_method=None):
self.file_name = file_name
self.showObj = showObj
self.tryIndexers = tryIndexers
self.naming_pattern = naming_pattern
if (self.showObj and not self.showObj.is_anime) or parse_method == 'normal':
self._compile_regexes(self.NORMAL_REGEX)
elif (self.showObj and self.showObj.is_anime) or parse_method == 'anime':
self._compile_regexes(self.ANIME_REGEX)
else:
self._compile_regexes(self.ALL_REGEX)
@staticmethod
def clean_series_name(series_name):
"""Cleans up series name by removing any . and _
characters, along with any trailing hyphens.
Is basically equivalent to replacing all _ and . with a
space, but handles decimal numbers in string, for example:
        >>> clean_series_name("an.example.1.0.test")
        'an example 1.0 test'
        >>> clean_series_name("an_example_1.0_test")
        'an example 1.0 test'
Stolen from dbr's tvnamer
"""
series_name = re.sub(r"(\D)\.(?!\s)(\D)", "\\1 \\2", series_name)
series_name = re.sub(r"(\d)\.(\d{4})", "\\1 \\2", series_name) # if it ends in a year then don't keep the dot
series_name = re.sub(r"(\D)\.(?!\s)", "\\1 ", series_name)
series_name = re.sub(r"\.(?!\s)(\D)", " \\1", series_name)
series_name = series_name.replace("_", " ")
series_name = re.sub(r"-$", "", series_name)
series_name = re.sub(r"^\[.*\]", "", series_name)
return series_name.strip()
def _compile_regexes(self, regexMode):
if regexMode == self.ANIME_REGEX:
dbg_str = u"ANIME"
uncompiled_regex = [regexes.anime_regexes]
elif regexMode == self.NORMAL_REGEX:
dbg_str = u"NORMAL"
uncompiled_regex = [regexes.normal_regexes]
else:
dbg_str = u"ALL"
uncompiled_regex = [regexes.normal_regexes, regexes.anime_regexes]
self.compiled_regexes = []
for regexItem in uncompiled_regex:
for cur_pattern_num, (cur_pattern_name, cur_pattern) in enumerate(regexItem):
try:
cur_regex = re.compile(cur_pattern, re.VERBOSE | re.IGNORECASE)
except re.error, errormsg:
logger.log(u"WARNING: Invalid episode_pattern using %s regexs, %s. %s" % (dbg_str, errormsg, cur_pattern))
else:
self.compiled_regexes.append((cur_pattern_num, cur_pattern_name, cur_regex))
def _parse_string(self, name):
if not name:
return
matches = []
bestResult = None
for (cur_regex_num, cur_regex_name, cur_regex) in self.compiled_regexes:
match = cur_regex.match(name)
if not match:
continue
result = ParseResult(name)
result.which_regex = [cur_regex_name]
result.score = 0 - cur_regex_num
named_groups = match.groupdict().keys()
if 'series_name' in named_groups:
result.series_name = match.group('series_name')
if result.series_name:
result.series_name = self.clean_series_name(result.series_name)
result.score += 1
if 'series_num' in named_groups and match.group('series_num'):
result.score += 1
if 'season_num' in named_groups:
tmp_season = int(match.group('season_num'))
if cur_regex_name == 'bare' and tmp_season in (19, 20):
continue
result.season_number = tmp_season
result.score += 1
if 'ep_num' in named_groups:
ep_num = self._convert_number(match.group('ep_num'))
if 'extra_ep_num' in named_groups and match.group('extra_ep_num'):
result.episode_numbers = range(ep_num, self._convert_number(match.group('extra_ep_num')) + 1)
result.score += 1
else:
result.episode_numbers = [ep_num]
result.score += 1
if 'ep_ab_num' in named_groups:
ep_ab_num = self._convert_number(match.group('ep_ab_num'))
if 'extra_ab_ep_num' in named_groups and match.group('extra_ab_ep_num'):
result.ab_episode_numbers = range(ep_ab_num,
self._convert_number(match.group('extra_ab_ep_num')) + 1)
result.score += 1
else:
result.ab_episode_numbers = [ep_ab_num]
result.score += 1
if 'air_date' in named_groups:
air_date = match.group('air_date')
try:
result.air_date = parser.parse(air_date, fuzzy=True).date()
result.score += 1
except Exception:
continue
if 'extra_info' in named_groups:
tmp_extra_info = match.group('extra_info')
# Show.S04.Special or Show.S05.Part.2.Extras is almost certainly not every episode in the season
if tmp_extra_info and cur_regex_name == 'season_only' and re.search(
r'([. _-]|^)(special|extra)s?\w*([. _-]|$)', tmp_extra_info, re.I):
continue
result.extra_info = tmp_extra_info
result.score += 1
if 'release_group' in named_groups:
result.release_group = match.group('release_group')
result.score += 1
if 'version' in named_groups:
# assigns version to anime file if detected using anime regex. Non-anime regex receives -1
version = match.group('version')
if version:
result.version = version
else:
result.version = 1
else:
result.version = -1
matches.append(result)
if len(matches):
# pick best match with highest score based on placement
bestResult = max(sorted(matches, reverse=True, key=lambda x: x.which_regex), key=lambda x: x.score)
show = None
if not self.naming_pattern:
# try and create a show object for this result
show = helpers.get_show(bestResult.series_name, self.tryIndexers)
# confirm passed in show object indexer id matches result show object indexer id
if show:
if self.showObj and show.indexerid != self.showObj.indexerid:
show = None
bestResult.show = show
elif not show and self.showObj:
bestResult.show = self.showObj
# if this is a naming pattern test or result doesn't have a show object then return best result
if not bestResult.show or self.naming_pattern:
return bestResult
# get quality
bestResult.quality = common.Quality.nameQuality(name, bestResult.show.is_anime)
new_episode_numbers = []
new_season_numbers = []
new_absolute_numbers = []
# if we have an air-by-date show then get the real season/episode numbers
if bestResult.is_air_by_date:
airdate = bestResult.air_date.toordinal()
myDB = db.DBConnection()
sql_result = myDB.select(
"SELECT season, episode FROM tv_episodes WHERE showid = ? and indexer = ? and airdate = ?",
[bestResult.show.indexerid, bestResult.show.indexer, airdate])
season_number = None
episode_numbers = []
if sql_result:
season_number = int(sql_result[0][0])
episode_numbers = [int(sql_result[0][1])]
if not season_number or not len(episode_numbers):
try:
lINDEXER_API_PARMS = sickbeard.indexerApi(bestResult.show.indexer).api_params.copy()
if bestResult.show.lang:
lINDEXER_API_PARMS['language'] = bestResult.show.lang
t = sickbeard.indexerApi(bestResult.show.indexer).indexer(**lINDEXER_API_PARMS)
epObj = t[bestResult.show.indexerid].airedOn(bestResult.air_date)[0]
season_number = int(epObj["seasonnumber"])
episode_numbers = [int(epObj["episodenumber"])]
except sickbeard.indexer_episodenotfound:
logger.log(u"Unable to find episode with date " + str(bestResult.air_date) + " for show " + bestResult.show.name + ", skipping", logger.WARNING)
episode_numbers = []
except sickbeard.indexer_error, e:
logger.log(u"Unable to contact " + sickbeard.indexerApi(bestResult.show.indexer).name + ": " + ex(e), logger.WARNING)
episode_numbers = []
for epNo in episode_numbers:
s = season_number
e = epNo
if bestResult.show.is_scene:
(s, e) = scene_numbering.get_indexer_numbering(bestResult.show.indexerid,
bestResult.show.indexer,
season_number,
epNo)
new_episode_numbers.append(e)
new_season_numbers.append(s)
elif bestResult.show.is_anime and len(bestResult.ab_episode_numbers):
scene_season = scene_exceptions.get_scene_exception_by_name(bestResult.series_name)[1]
for epAbsNo in bestResult.ab_episode_numbers:
a = epAbsNo
if bestResult.show.is_scene:
a = scene_numbering.get_indexer_absolute_numbering(bestResult.show.indexerid,
bestResult.show.indexer, epAbsNo,
True, scene_season)
(s, e) = helpers.get_all_episodes_from_absolute_number(bestResult.show, [a])
new_absolute_numbers.append(a)
new_episode_numbers.extend(e)
new_season_numbers.append(s)
elif bestResult.season_number and len(bestResult.episode_numbers):
for epNo in bestResult.episode_numbers:
s = bestResult.season_number
e = epNo
if bestResult.show.is_scene:
(s, e) = scene_numbering.get_indexer_numbering(bestResult.show.indexerid,
bestResult.show.indexer,
bestResult.season_number,
epNo)
if bestResult.show.is_anime:
a = helpers.get_absolute_number_from_season_and_episode(bestResult.show, s, e)
if a:
new_absolute_numbers.append(a)
new_episode_numbers.append(e)
new_season_numbers.append(s)
            # need to do a quick sanity check here. It's possible that we now have episodes
# from more than one season (by tvdb numbering), and this is just too much
# for sickbeard, so we'd need to flag it.
new_season_numbers = list(set(new_season_numbers)) # remove duplicates
if len(new_season_numbers) > 1:
raise InvalidNameException("Scene numbering results episodes from "
"seasons %s, (i.e. more than one) and "
"sickrage does not support this. "
"Sorry." % (str(new_season_numbers)))
# I guess it's possible that we'd have duplicate episodes too, so lets
# eliminate them
new_episode_numbers = list(set(new_episode_numbers))
new_episode_numbers.sort()
# maybe even duplicate absolute numbers so why not do them as well
new_absolute_numbers = list(set(new_absolute_numbers))
new_absolute_numbers.sort()
if len(new_absolute_numbers):
bestResult.ab_episode_numbers = new_absolute_numbers
if len(new_season_numbers) and len(new_episode_numbers):
bestResult.episode_numbers = new_episode_numbers
bestResult.season_number = new_season_numbers[0]
if bestResult.show.is_scene:
logger.log(
u"Converted parsed result " + bestResult.original_name + " into " + str(bestResult).decode('utf-8',
'xmlcharrefreplace'),
logger.DEBUG)
# CPU sleep
time.sleep(0.02)
return bestResult
def _combine_results(self, first, second, attr):
# if the first doesn't exist then return the second or nothing
if not first:
if not second:
return None
else:
return getattr(second, attr)
# if the second doesn't exist then return the first
if not second:
return getattr(first, attr)
a = getattr(first, attr)
b = getattr(second, attr)
# if a is good use it
if a is not None or (isinstance(a, list) and a):
return a
# if not use b (if b isn't set it'll just be default)
else:
return b
@staticmethod
def _unicodify(obj, encoding="utf-8"):
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding, 'replace')
return obj
@staticmethod
def _convert_number(org_number):
"""
Convert org_number into an integer
org_number: integer or representation of a number: string or unicode
Try force converting to int first, on error try converting from Roman numerals
returns integer or 0
"""
try:
# try forcing to int
if org_number:
number = int(org_number)
else:
number = 0
except Exception:
# on error try converting from Roman numerals
roman_to_int_map = (
('M', 1000), ('CM', 900), ('D', 500), ('CD', 400), ('C', 100),
('XC', 90), ('L', 50), ('XL', 40), ('X', 10),
('IX', 9), ('V', 5), ('IV', 4), ('I', 1)
)
roman_numeral = str(org_number).upper()
number = 0
index = 0
for numeral, integer in roman_to_int_map:
while roman_numeral[index:index + len(numeral)] == numeral:
number += integer
index += len(numeral)
return number
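    # Illustrative examples (not part of the upstream module):
    #   _convert_number('12')   -> 12   (plain int conversion)
    #   _convert_number('XIV')  -> 14   (falls back to Roman numeral parsing)
    #   _convert_number(None)   -> 0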
def parse(self, name, cache_result=True):
name = self._unicodify(name)
if self.naming_pattern:
cache_result = False
cached = name_parser_cache.get(name)
if cached:
return cached
# break it into parts if there are any (dirname, file name, extension)
dir_name, file_name = ek(os.path.split, name)
if self.file_name:
base_file_name = remove_extension(file_name)
else:
base_file_name = file_name
# set up a result to use
final_result = ParseResult(name)
# try parsing the file name
file_name_result = self._parse_string(base_file_name)
# use only the direct parent dir
dir_name = ek(os.path.basename, dir_name)
# parse the dirname for extra info if needed
dir_name_result = self._parse_string(dir_name)
# build the ParseResult object
final_result.air_date = self._combine_results(file_name_result, dir_name_result, 'air_date')
# anime absolute numbers
final_result.ab_episode_numbers = self._combine_results(file_name_result, dir_name_result, 'ab_episode_numbers')
# season and episode numbers
final_result.season_number = self._combine_results(file_name_result, dir_name_result, 'season_number')
final_result.episode_numbers = self._combine_results(file_name_result, dir_name_result, 'episode_numbers')
# if the dirname has a release group/show name I believe it over the filename
final_result.series_name = self._combine_results(dir_name_result, file_name_result, 'series_name')
final_result.extra_info = self._combine_results(dir_name_result, file_name_result, 'extra_info')
final_result.release_group = self._combine_results(dir_name_result, file_name_result, 'release_group')
final_result.version = self._combine_results(dir_name_result, file_name_result, 'version')
final_result.which_regex = []
if final_result == file_name_result:
final_result.which_regex = file_name_result.which_regex
elif final_result == dir_name_result:
final_result.which_regex = dir_name_result.which_regex
else:
if file_name_result:
final_result.which_regex += file_name_result.which_regex
if dir_name_result:
final_result.which_regex += dir_name_result.which_regex
final_result.show = self._combine_results(file_name_result, dir_name_result, 'show')
final_result.quality = self._combine_results(file_name_result, dir_name_result, 'quality')
if not final_result.show:
raise InvalidShowException(
"Unable to parse " + name.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace'))
# if there's no useful info in it then raise an exception
if final_result.season_number is None and not final_result.episode_numbers and final_result.air_date is None and not final_result.ab_episode_numbers and not final_result.series_name:
raise InvalidNameException("Unable to parse " + name.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace'))
if cache_result:
name_parser_cache.add(name, final_result)
logger.log(u"Parsed " + name + " into " + str(final_result).decode('utf-8', 'xmlcharrefreplace'), logger.DEBUG)
return final_result
class ParseResult(object):
def __init__(self,
original_name,
series_name=None,
season_number=None,
episode_numbers=None,
extra_info=None,
release_group=None,
air_date=None,
ab_episode_numbers=None,
show=None,
score=None,
quality=None,
version=None
):
self.original_name = original_name
self.series_name = series_name
self.season_number = season_number
if not episode_numbers:
self.episode_numbers = []
else:
self.episode_numbers = episode_numbers
if not ab_episode_numbers:
self.ab_episode_numbers = []
else:
self.ab_episode_numbers = ab_episode_numbers
if not quality:
self.quality = common.Quality.UNKNOWN
else:
self.quality = quality
self.extra_info = extra_info
self.release_group = release_group
self.air_date = air_date
self.which_regex = []
self.show = show
self.score = score
self.version = version
def __eq__(self, other):
if not other:
return False
if self.series_name != other.series_name:
return False
if self.season_number != other.season_number:
return False
if self.episode_numbers != other.episode_numbers:
return False
if self.extra_info != other.extra_info:
return False
if self.release_group != other.release_group:
return False
if self.air_date != other.air_date:
return False
if self.ab_episode_numbers != other.ab_episode_numbers:
return False
if self.show != other.show:
return False
if self.score != other.score:
return False
if self.quality != other.quality:
return False
if self.version != other.version:
return False
return True
def __str__(self):
if self.series_name is not None:
to_return = self.series_name + u' - '
else:
to_return = u''
if self.season_number is not None:
to_return += 'S' + str(self.season_number).zfill(2)
if self.episode_numbers and len(self.episode_numbers):
for e in self.episode_numbers:
to_return += 'E' + str(e).zfill(2)
if self.is_air_by_date:
to_return += str(self.air_date)
if self.ab_episode_numbers:
to_return += ' [ABS: ' + str(self.ab_episode_numbers) + ']'
if self.version and self.is_anime is True:
to_return += ' [ANIME VER: ' + str(self.version) + ']'
if self.release_group:
to_return += ' [GROUP: ' + self.release_group + ']'
to_return += ' [ABD: ' + str(self.is_air_by_date) + ']'
to_return += ' [ANIME: ' + str(self.is_anime) + ']'
to_return += ' [whichReg: ' + str(self.which_regex) + ']'
return to_return.encode('utf-8')
@property
def is_air_by_date(self):
if self.air_date:
return True
return False
@property
def is_anime(self):
if len(self.ab_episode_numbers):
return True
return False
class NameParserCache(object):
_previous_parsed = {}
_cache_size = 100
def add(self, name, parse_result):
self._previous_parsed[name] = parse_result
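        # keep the cache bounded; plain dicts are unordered in Python 2, so
        # this evicts an arbitrary entry rather than the least recently used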
while len(self._previous_parsed) > self._cache_size:
del self._previous_parsed[self._previous_parsed.keys()[0]]
def get(self, name):
if name in self._previous_parsed:
logger.log(u"Using cached parse result for: " + name, logger.DEBUG)
return self._previous_parsed[name]
name_parser_cache = NameParserCache()
class InvalidNameException(Exception):
"""The given release name is not valid"""
class InvalidShowException(Exception):
"""The given show name is not valid"""
| gpl-3.0 | -9,206,293,839,359,749,000 | 38.787582 | 190 | 0.547515 | false |
mgraupe/acq4 | acq4/pyqtgraph/widgets/JoystickButton.py | 52 | 2460 | from ..Qt import QtGui, QtCore
__all__ = ['JoystickButton']
class JoystickButton(QtGui.QPushButton):
sigStateChanged = QtCore.Signal(object, object) ## self, state
def __init__(self, parent=None):
QtGui.QPushButton.__init__(self, parent)
self.radius = 200
self.setCheckable(True)
self.state = None
self.setState(0,0)
self.setFixedWidth(50)
self.setFixedHeight(50)
def mousePressEvent(self, ev):
self.setChecked(True)
self.pressPos = ev.pos()
ev.accept()
def mouseMoveEvent(self, ev):
dif = ev.pos()-self.pressPos
self.setState(dif.x(), -dif.y())
def mouseReleaseEvent(self, ev):
self.setChecked(False)
self.setState(0,0)
def wheelEvent(self, ev):
ev.accept()
def doubleClickEvent(self, ev):
ev.accept()
def getState(self):
return self.state
def setState(self, *xy):
xy = list(xy)
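        # normalise the drag vector: nxy holds the unit direction, d the
        # magnitude, clamped to self.radius and squared below for finer
        # control near the centre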
d = (xy[0]**2 + xy[1]**2)**0.5
nxy = [0,0]
for i in [0,1]:
if xy[i] == 0:
nxy[i] = 0
else:
nxy[i] = xy[i]/d
if d > self.radius:
d = self.radius
d = (d/self.radius)**2
xy = [nxy[0]*d, nxy[1]*d]
w2 = self.width()/2.
        h2 = self.height()/2.
self.spotPos = QtCore.QPoint(w2*(1+xy[0]), h2*(1-xy[1]))
self.update()
if self.state == xy:
return
self.state = xy
self.sigStateChanged.emit(self, self.state)
def paintEvent(self, ev):
QtGui.QPushButton.paintEvent(self, ev)
p = QtGui.QPainter(self)
p.setBrush(QtGui.QBrush(QtGui.QColor(0,0,0)))
p.drawEllipse(self.spotPos.x()-3,self.spotPos.y()-3,6,6)
def resizeEvent(self, ev):
self.setState(*self.state)
QtGui.QPushButton.resizeEvent(self, ev)
if __name__ == '__main__':
app = QtGui.QApplication([])
w = QtGui.QMainWindow()
b = JoystickButton()
w.setCentralWidget(b)
w.show()
w.resize(100, 100)
def fn(b, s):
print("state changed:", s)
b.sigStateChanged.connect(fn)
## Start Qt event loop unless running in interactive mode.
import sys
if sys.flags.interactive != 1:
app.exec_()
| mit | -3,343,453,610,123,480,000 | 24.905263 | 67 | 0.511382 | false |
AnderEnder/ansible-modules-extras | web_infrastructure/jenkins_plugin.py | 12 | 25559 | #!/usr/bin/python
# encoding: utf-8
# (c) 2016, Jiri Tyr <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.urls import url_argument_spec
import base64
import hashlib
import json
import os
import tempfile
import time
import urllib
DOCUMENTATION = '''
---
module: jenkins_plugin
author: Jiri Tyr (@jtyr)
version_added: '2.2'
short_description: Add or remove Jenkins plugin
description:
- Ansible module which helps to manage Jenkins plugins.
options:
group:
required: false
default: jenkins
description:
- Name of the Jenkins group on the OS.
jenkins_home:
required: false
default: /var/lib/jenkins
description:
- Home directory of the Jenkins user.
mode:
required: false
    default: '0644'
description:
- File mode applied on versioned plugins.
name:
required: true
description:
- Plugin name.
owner:
required: false
default: jenkins
description:
- Name of the Jenkins user on the OS.
params:
required: false
default: null
description:
- Option used to allow the user to overwrite any of the other options. To
remove an option, set the value of the option to C(null).
state:
required: false
choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
default: present
description:
- Desired plugin state.
      - If C(latest) is set, the check for a new version will be performed
        every time. This is suitable for keeping the plugin up-to-date.
timeout:
required: false
default: 30
description:
- Server connection timeout in secs.
updates_expiration:
required: false
default: 86400
description:
- Number of seconds after which a new copy of the I(update-center.json)
file is downloaded. This is used to avoid the need to download the
plugin to calculate its checksum when C(latest) is specified.
- Set it to C(0) if no cache file should be used. In that case, the
plugin file will always be downloaded to calculate its checksum when
C(latest) is specified.
updates_url:
required: false
default: https://updates.jenkins-ci.org
description:
- URL of the Update Centre.
- Used as the base URL to download the plugins and the
I(update-center.json) JSON file.
url:
required: false
default: http://localhost:8080
description:
- URL of the Jenkins server.
version:
required: false
default: null
description:
- Plugin version number.
- If this option is specified, all plugin dependencies must be installed
manually.
- It might take longer to verify that the correct version is installed.
This is especially true if a specific version number is specified.
with_dependencies:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- Defines whether to install plugin dependencies.
notes:
  - Plugin installation should be run under root or the same user which owns
    the plugin files on the disk. Only when the plugin is not installed yet and
    no version is specified is the installation performed via the API, which
    requires only the Web UI credentials.
- It's necessary to notify the handler or call the I(service) module to
restart the Jenkins service after a new plugin was installed.
  - Pinning works only if the plugin is installed and the Jenkins service was
    successfully restarted after the plugin installation.
- It is not possible to run the module remotely by changing the I(url)
parameter to point to the Jenkins server. The module must be used on the
host where Jenkins runs as it needs direct access to the plugin files.
'''
EXAMPLES = '''
- name: Install plugin
jenkins_plugin:
name: build-pipeline-plugin
- name: Install plugin without its dependencies
jenkins_plugin:
name: build-pipeline-plugin
with_dependencies: no
- name: Make sure the plugin is always up-to-date
jenkins_plugin:
name: token-macro
state: latest
- name: Install specific version of the plugin
jenkins_plugin:
name: token-macro
version: 1.15
- name: Pin the plugin
jenkins_plugin:
name: token-macro
state: pinned
- name: Unpin the plugin
jenkins_plugin:
name: token-macro
state: unpinned
- name: Enable the plugin
jenkins_plugin:
name: token-macro
state: enabled
- name: Disable the plugin
jenkins_plugin:
name: token-macro
state: disabled
- name: Uninstall plugin
jenkins_plugin:
name: build-pipeline-plugin
state: absent
#
# Example of how to use the params
#
# Define a variable and specify all default parameters you want to use across
# all jenkins_plugin calls:
#
# my_jenkins_params:
# url_username: admin
# url_password: p4ssw0rd
# url: http://localhost:8888
#
- name: Install plugin
jenkins_plugin:
name: build-pipeline-plugin
params: "{{ my_jenkins_params }}"
#
# Example of a Play which handles Jenkins restarts during the state changes
#
- name: Jenkins Master play
hosts: jenkins-master
vars:
my_jenkins_plugins:
token-macro:
enabled: yes
build-pipeline-plugin:
version: 1.4.9
pinned: no
enabled: yes
tasks:
- name: Install plugins without a specific version
jenkins_plugin:
name: "{{ item.key }}"
register: my_jenkins_plugin_unversioned
when: >
'version' not in item.value
with_dict: my_jenkins_plugins
- name: Install plugins with a specific version
jenkins_plugin:
name: "{{ item.key }}"
version: "{{ item.value['version'] }}"
register: my_jenkins_plugin_versioned
when: >
'version' in item.value
with_dict: my_jenkins_plugins
- name: Initiate the fact
set_fact:
jenkins_restart_required: no
- name: Check if restart is required by any of the versioned plugins
set_fact:
jenkins_restart_required: yes
when: item.changed
with_items: my_jenkins_plugin_versioned.results
- name: Check if restart is required by any of the unversioned plugins
set_fact:
jenkins_restart_required: yes
when: item.changed
with_items: my_jenkins_plugin_unversioned.results
- name: Restart Jenkins if required
service:
name: jenkins
state: restarted
when: jenkins_restart_required
# Requires python-httplib2 to be installed on the guest
- name: Wait for Jenkins to start up
uri:
url: http://localhost:8080
status_code: 200
timeout: 5
register: jenkins_service_status
# Keep trying for 5 mins in 5 sec intervals
retries: 60
delay: 5
until: >
'status' in jenkins_service_status and
jenkins_service_status['status'] == 200
when: jenkins_restart_required
- name: Reset the fact
set_fact:
jenkins_restart_required: no
when: jenkins_restart_required
- name: Plugin pinning
jenkins_plugin:
name: "{{ item.key }}"
state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}"
when: >
'pinned' in item.value
with_dict: my_jenkins_plugins
- name: Plugin enabling
jenkins_plugin:
name: "{{ item.key }}"
state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}"
when: >
'enabled' in item.value
with_dict: my_jenkins_plugins
'''
RETURN = '''
plugin:
description: plugin name
returned: success
type: string
sample: build-pipeline-plugin
state:
description: state of the target, after execution
returned: success
type: string
sample: "present"
'''
class JenkinsPlugin(object):
def __init__(self, module):
# To be able to call fail_json
self.module = module
# Shortcuts for the params
self.params = self.module.params
self.url = self.params['url']
self.timeout = self.params['timeout']
# Crumb
self.crumb = {}
if self._csrf_enabled():
self.crumb = self._get_crumb()
# Get list of installed plugins
self._get_installed_plugins()
def _csrf_enabled(self):
csrf_data = self._get_json_data(
"%s/%s" % (self.url, "api/json"), 'CSRF')
return csrf_data["useCrumbs"]
def _get_json_data(self, url, what, **kwargs):
# Get the JSON data
r = self._get_url_data(url, what, **kwargs)
# Parse the JSON data
try:
json_data = json.load(r)
except Exception:
e = get_exception()
self.module.fail_json(
msg="Cannot parse %s JSON data." % what,
details=e.message)
return json_data
def _get_url_data(
self, url, what=None, msg_status=None, msg_exception=None,
**kwargs):
# Compose default messages
if msg_status is None:
msg_status = "Cannot get %s" % what
if msg_exception is None:
msg_exception = "Retrieval of %s failed." % what
# Get the URL data
try:
response, info = fetch_url(
self.module, url, timeout=self.timeout, **kwargs)
if info['status'] != 200:
self.module.fail_json(msg=msg_status, details=info['msg'])
except Exception:
e = get_exception()
self.module.fail_json(msg=msg_exception, details=e.message)
return response
def _get_crumb(self):
crumb_data = self._get_json_data(
"%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
ret = {
crumb_data['crumbRequestField']: crumb_data['crumb']
}
else:
self.module.fail_json(
msg="Required fields not found in the Crum response.",
details=crumb_data)
return ret
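    # Note (assumption, not upstream code): a typical crumb response looks
    # like {"crumbRequestField": "Jenkins-Crumb", "crumb": "<hex token>"},
    # so self.crumb becomes {"Jenkins-Crumb": "<hex token>"} and is merged
    # into every form POSTed below.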
def _get_installed_plugins(self):
plugins_data = self._get_json_data(
"%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
'list of plugins')
# Check if we got valid data
if 'plugins' not in plugins_data:
self.module.fail_json(msg="No valid plugin data found.")
# Create final list of installed/pined plugins
self.is_installed = False
self.is_pinned = False
self.is_enabled = False
for p in plugins_data['plugins']:
if p['shortName'] == self.params['name']:
self.is_installed = True
if p['pinned']:
self.is_pinned = True
if p['enabled']:
self.is_enabled = True
break
def install(self):
changed = False
plugin_file = (
'%s/plugins/%s.jpi' % (
self.params['jenkins_home'],
self.params['name']))
if not self.is_installed and self.params['version'] is None:
if not self.module.check_mode:
# Install the plugin (with dependencies)
install_script = (
'd = Jenkins.instance.updateCenter.getPlugin("%s")'
'.deploy(); d.get();' % self.params['name'])
if self.params['with_dependencies']:
install_script = (
'Jenkins.instance.updateCenter.getPlugin("%s")'
'.getNeededDependencies().each{it.deploy()}; %s' % (
self.params['name'], install_script))
script_data = {
'script': install_script
}
script_data.update(self.crumb)
data = urllib.urlencode(script_data)
# Send the installation request
r = self._get_url_data(
"%s/scriptText" % self.url,
msg_status="Cannot install plugin.",
msg_exception="Plugin installation has failed.",
data=data)
changed = True
else:
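            # The plugin is already installed or a specific version was
            # requested: manage the .jpi file on disk directly and compare
            # checksums to decide whether a download is needed.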
# Check if the plugin directory exists
if not os.path.isdir(self.params['jenkins_home']):
self.module.fail_json(
msg="Jenkins home directory doesn't exist.")
md5sum_old = None
if os.path.isfile(plugin_file):
# Make the checksum of the currently installed plugin
md5sum_old = hashlib.md5(
open(plugin_file, 'rb').read()).hexdigest()
if self.params['version'] in [None, 'latest']:
# Take latest version
plugin_url = (
"%s/latest/%s.hpi" % (
self.params['updates_url'],
self.params['name']))
else:
# Take specific version
plugin_url = (
"{0}/download/plugins/"
"{1}/{2}/{1}.hpi".format(
self.params['updates_url'],
self.params['name'],
self.params['version']))
if (
self.params['updates_expiration'] == 0 or
self.params['version'] not in [None, 'latest'] or
md5sum_old is None):
# Download the plugin file directly
r = self._download_plugin(plugin_url)
# Write downloaded plugin into file if checksums don't match
if md5sum_old is None:
# No previously installed plugin
if not self.module.check_mode:
self._write_file(plugin_file, r)
changed = True
else:
# Get data for the MD5
data = r.read()
# Make new checksum
md5sum_new = hashlib.md5(data).hexdigest()
# If the checksum is different from the currently installed
# plugin, store the new plugin
if md5sum_old != md5sum_new:
if not self.module.check_mode:
self._write_file(plugin_file, data)
changed = True
else:
# Check for update from the updates JSON file
plugin_data = self._download_updates()
try:
sha1_old = hashlib.sha1(open(plugin_file, 'rb').read())
except Exception:
e = get_exception()
self.module.fail_json(
msg="Cannot calculate SHA1 of the old plugin.",
details=e.message)
sha1sum_old = base64.b64encode(sha1_old.digest())
# If the latest version changed, download it
if sha1sum_old != plugin_data['sha1']:
if not self.module.check_mode:
r = self._download_plugin(plugin_url)
self._write_file(plugin_file, r)
changed = True
# Change file attributes if needed
if os.path.isfile(plugin_file):
params = {
'dest': plugin_file
}
params.update(self.params)
file_args = self.module.load_file_common_arguments(params)
if not self.module.check_mode:
# Not sure how to run this in the check mode
changed = self.module.set_fs_attributes_if_different(
file_args, changed)
else:
# See the comment above
changed = True
return changed
def _download_updates(self):
updates_filename = 'jenkins-plugin-cache.json'
updates_dir = os.path.expanduser('~/.ansible/tmp')
updates_file = "%s/%s" % (updates_dir, updates_filename)
download_updates = True
# Check if we need to download new updates file
if os.path.isfile(updates_file):
# Get timestamp when the file was changed last time
ts_file = os.stat(updates_file).st_mtime
ts_now = time.time()
if ts_now - ts_file < self.params['updates_expiration']:
download_updates = False
updates_file_orig = updates_file
# Download the updates file if needed
if download_updates:
url = "%s/update-center.json" % self.params['updates_url']
# Get the data
r = self._get_url_data(
url,
msg_status="Remote updates not found.",
msg_exception="Updates download failed.")
# Write the updates file
            # mkstemp() returns a (fd, path) tuple; keep the path and close the fd
            fd_tmp, updates_file = tempfile.mkstemp()
            os.close(fd_tmp)
try:
fd = open(updates_file, 'wb')
except IOError:
e = get_exception()
self.module.fail_json(
msg="Cannot open the tmp updates file %s." % updates_file,
details=str(e))
fd.write(r.read())
try:
fd.close()
except IOError:
e = get_exception()
self.module.fail_json(
msg="Cannot close the tmp updates file %s." % updates_file,
detail=str(e))
# Open the updates file
try:
f = open(updates_file)
except IOError:
e = get_exception()
self.module.fail_json(
msg="Cannot open temporal updates file.",
details=str(e))
i = 0
for line in f:
# Read only the second line
if i == 1:
try:
data = json.loads(line)
except Exception:
e = get_exception()
self.module.fail_json(
msg="Cannot load JSON data from the tmp updates file.",
details=e.message)
break
i += 1
# Move the updates file to the right place if we could read it
if download_updates:
# Make sure the destination directory exists
if not os.path.isdir(updates_dir):
try:
os.makedirs(updates_dir, int('0700', 8))
except OSError:
e = get_exception()
self.module.fail_json(
msg="Cannot create temporal directory.",
details=e.message)
self.module.atomic_move(updates_file, updates_file_orig)
# Check if we have the plugin data available
if 'plugins' not in data or self.params['name'] not in data['plugins']:
self.module.fail_json(
msg="Cannot find plugin data in the updates file.")
return data['plugins'][self.params['name']]
def _download_plugin(self, plugin_url):
# Download the plugin
r = self._get_url_data(
plugin_url,
msg_status="Plugin not found.",
msg_exception="Plugin download failed.")
return r
def _write_file(self, f, data):
# Store the plugin into a temp file and then move it
        # mkstemp() returns a (fd, path) tuple; keep the path and close the fd
        fd_tmp, tmp_f = tempfile.mkstemp()
        os.close(fd_tmp)
try:
fd = open(tmp_f, 'wb')
except IOError:
e = get_exception()
self.module.fail_json(
msg='Cannot open the temporal plugin file %s.' % tmp_f,
details=str(e))
if isinstance(data, str):
d = data
else:
d = data.read()
fd.write(d)
try:
fd.close()
except IOError:
e = get_exception()
self.module.fail_json(
msg='Cannot close the temporal plugin file %s.' % tmp_f,
details=str(e))
# Move the file onto the right place
self.module.atomic_move(tmp_f, f)
def uninstall(self):
changed = False
# Perform the action
if self.is_installed:
if not self.module.check_mode:
self._pm_query('doUninstall', 'Uninstallation')
changed = True
return changed
def pin(self):
return self._pinning('pin')
def unpin(self):
return self._pinning('unpin')
def _pinning(self, action):
changed = False
# Check if the plugin is pinned/unpinned
if (
action == 'pin' and not self.is_pinned or
action == 'unpin' and self.is_pinned):
# Perform the action
if not self.module.check_mode:
self._pm_query(action, "%sning" % action.capitalize())
changed = True
return changed
def enable(self):
return self._enabling('enable')
def disable(self):
return self._enabling('disable')
def _enabling(self, action):
changed = False
# Check if the plugin is pinned/unpinned
if (
action == 'enable' and not self.is_enabled or
action == 'disable' and self.is_enabled):
# Perform the action
if not self.module.check_mode:
self._pm_query(
"make%sd" % action.capitalize(),
"%sing" % action[:-1].capitalize())
changed = True
return changed
def _pm_query(self, action, msg):
url = "%s/pluginManager/plugin/%s/%s" % (
self.params['url'], self.params['name'], action)
data = urllib.urlencode(self.crumb)
# Send the request
self._get_url_data(
url,
msg_status="Plugin not found. %s" % url,
msg_exception="%s has failed." % msg,
data=data)
def main():
# Module arguments
argument_spec = url_argument_spec()
argument_spec.update(
group=dict(default='jenkins'),
jenkins_home=dict(default='/var/lib/jenkins'),
mode=dict(default='0644', type='raw'),
name=dict(required=True),
owner=dict(default='jenkins'),
params=dict(type='dict'),
state=dict(
choices=[
'present',
'absent',
'pinned',
'unpinned',
'enabled',
'disabled',
'latest'],
default='present'),
timeout=dict(default=30, type="int"),
updates_expiration=dict(default=86400, type="int"),
updates_url=dict(default='https://updates.jenkins-ci.org'),
url=dict(default='http://localhost:8080'),
url_password=dict(no_log=True),
version=dict(),
with_dependencies=dict(default=True, type='bool'),
)
# Module settings
module = AnsibleModule(
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True,
)
# Update module parameters by user's parameters if defined
if 'params' in module.params and isinstance(module.params['params'], dict):
module.params.update(module.params['params'])
# Remove the params
module.params.pop('params', None)
# Force basic authentication
module.params['force_basic_auth'] = True
# Convert timeout to float
try:
module.params['timeout'] = float(module.params['timeout'])
except ValueError:
e = get_exception()
module.fail_json(
msg='Cannot convert %s to float.' % module.params['timeout'],
details=str(e))
# Set version to latest if state is latest
if module.params['state'] == 'latest':
module.params['state'] = 'present'
module.params['version'] = 'latest'
# Create some shortcuts
name = module.params['name']
state = module.params['state']
# Initial change state of the task
changed = False
# Instantiate the JenkinsPlugin object
jp = JenkinsPlugin(module)
# Perform action depending on the requested state
if state == 'present':
changed = jp.install()
elif state == 'absent':
changed = jp.uninstall()
elif state == 'pinned':
changed = jp.pin()
elif state == 'unpinned':
changed = jp.unpin()
elif state == 'enabled':
changed = jp.enable()
elif state == 'disabled':
changed = jp.disable()
# Print status of the change
module.exit_json(changed=changed, plugin=name, state=state)
if __name__ == '__main__':
main()
| gpl-3.0 | 2,062,749,394,826,865,700 | 29.793976 | 79 | 0.562581 | false |