# orm/query.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`_query.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`_query.Query` class should not be confused with the
:class:`_expression.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
import itertools
import operator
import types
from . import exc as orm_exc
from . import interfaces
from . import loading
from . import util as orm_util
from .base import _assertions
from .context import _column_descriptions
from .context import _legacy_determine_last_joined_entity
from .context import _legacy_filter_by_entity_zero
from .context import LABEL_STYLE_LEGACY_ORM
from .context import ORMCompileState
from .context import ORMFromStatementCompileState
from .context import QueryContext
from .interfaces import ORMColumnsClauseRole
from .util import aliased
from .util import AliasedClass
from .util import object_mapper
from .util import with_parent
from .util import with_polymorphic
from .. import exc as sa_exc
from .. import inspect
from .. import inspection
from .. import log
from .. import sql
from .. import util
from ..sql import coercions
from ..sql import elements
from ..sql import expression
from ..sql import roles
from ..sql import Select
from ..sql import util as sql_util
from ..sql import visitors
from ..sql.annotation import SupportsCloneAnnotations
from ..sql.base import _entity_namespace_key
from ..sql.base import _generative
from ..sql.base import Executable
from ..sql.selectable import _MemoizedSelectEntities
from ..sql.selectable import _SelectFromElements
from ..sql.selectable import ForUpdateArg
from ..sql.selectable import GroupedElement
from ..sql.selectable import HasHints
from ..sql.selectable import HasPrefixes
from ..sql.selectable import HasSuffixes
from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from ..sql.selectable import SelectBase
from ..sql.selectable import SelectStatementGrouping
from ..sql.visitors import InternalTraversal
from ..util import collections_abc
__all__ = ["Query", "QueryContext", "aliased"]
@inspection._self_inspects
@log.class_logger
class Query(
_SelectFromElements,
SupportsCloneAnnotations,
HasPrefixes,
HasSuffixes,
HasHints,
Executable,
):
"""ORM-level SQL construction object.
:class:`_query.Query`
is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`_query.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`_query.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`, and in
less common cases by instantiating the :class:`_query.Query` directly and
associating with a :class:`.Session` using the
:meth:`_query.Query.with_session`
method.
For a full walk through of :class:`_query.Query` usage, see the
:ref:`ormtutorial_toplevel`.
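As a rough illustration of the generative pattern (assuming a
hypothetical ``User`` mapped class and a :class:`.Session` named
``session``, as in the other examples in this module; each call below
returns a new :class:`_query.Query` with additional criteria)::
q = session.query(User)
q = q.filter(User.name.like('e%'))
q = q.order_by(User.id)
users = q.all()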
"""
# elements that are in Core and can be cached in the same way
_where_criteria = ()
_having_criteria = ()
_order_by_clauses = ()
_group_by_clauses = ()
_limit_clause = None
_offset_clause = None
_distinct = False
_distinct_on = ()
_for_update_arg = None
_correlate = ()
_auto_correlate = True
_from_obj = ()
_setup_joins = ()
_legacy_setup_joins = ()
_label_style = LABEL_STYLE_LEGACY_ORM
_memoized_select_entities = ()
_compile_options = ORMCompileState.default_compile_options
load_options = QueryContext.default_load_options + {
"_legacy_uniquing": True
}
_params = util.EMPTY_DICT
# local Query builder state, not needed for
# compilation or execution
_aliased_generation = None
_enable_assertions = True
_last_joined_entity = None
_statement = None
# mirrors that of ClauseElement, used to propagate the "orm"
# plugin as well as the "subject" of the plugin, e.g. the mapper
# we are querying against.
_propagate_attrs = util.immutabledict()
def __init__(self, entities, session=None):
"""Construct a :class:`_query.Query` directly.
E.g.::
q = Query([User, Address], session=some_session)
The above is equivalent to::
q = some_session.query(User, Address)
:param entities: a sequence of entities and/or SQL expressions.
:param session: a :class:`.Session` with which the
:class:`_query.Query`
will be associated. Optional; a :class:`_query.Query`
can be associated
with a :class:`.Session` generatively via the
:meth:`_query.Query.with_session` method as well.
.. seealso::
:meth:`.Session.query`
:meth:`_query.Query.with_session`
"""
self.session = session
self._set_entities(entities)
def _set_propagate_attrs(self, values):
self._propagate_attrs = util.immutabledict(values)
return self
def _set_entities(self, entities):
self._raw_columns = [
coercions.expect(
roles.ColumnsClauseRole,
ent,
apply_propagate_attrs=self,
post_inspect=True,
)
for ent in util.to_list(entities)
]
def _entity_from_pre_ent_zero(self):
if not self._raw_columns:
return None
ent = self._raw_columns[0]
if "parententity" in ent._annotations:
return ent._annotations["parententity"]
elif isinstance(ent, ORMColumnsClauseRole):
return ent.entity
elif "bundle" in ent._annotations:
return ent._annotations["bundle"]
else:
# label, other SQL expression
for element in visitors.iterate(ent):
if "parententity" in element._annotations:
return element._annotations["parententity"]
else:
return None
def _only_full_mapper_zero(self, methname):
if (
len(self._raw_columns) != 1
or "parententity" not in self._raw_columns[0]._annotations
or not self._raw_columns[0].is_selectable
):
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname
)
return self._raw_columns[0]._annotations["parententity"]
def _set_select_from(self, obj, set_base_alias):
fa = [
coercions.expect(
roles.StrictFromClauseRole,
elem,
allow_select=True,
apply_propagate_attrs=self,
)
for elem in obj
]
self._compile_options += {"_set_base_alias": set_base_alias}
self._from_obj = tuple(fa)
@_generative
def _set_lazyload_from(self, state):
self.load_options += {"_lazy_loaded_from": state}
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False
)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if (
self._where_criteria
or self._statement is not None
or self._from_obj
or self._legacy_setup_joins
or self._limit_clause is not None
or self._offset_clause is not None
or self._group_by_clauses
or (order_by and self._order_by_clauses)
or (distinct and self._distinct)
):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth
)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = self._legacy_setup_joins = ()
if self._statement is not None:
self._compile_options += {"_statement": None}
self._where_criteria = ()
self._distinct = False
self._order_by_clauses = self._group_by_clauses = ()
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by_clauses:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth
)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
(
"Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion."
)
% meth
)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit_clause is not None or self._offset_clause is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. Call %s() before limit() or offset() "
"are applied." % (meth, meth)
)
@property
def _has_row_limiting_clause(self):
return (
self._limit_clause is not None or self._offset_clause is not None
)
def _get_options(
self,
populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None,
identity_token=None,
):
load_options = {}
compile_options = {}
if version_check:
load_options["_version_check"] = version_check
if populate_existing:
load_options["_populate_existing"] = populate_existing
if refresh_state:
load_options["_refresh_state"] = refresh_state
compile_options["_for_refresh_state"] = True
if only_load_props:
compile_options["_only_load_props"] = frozenset(only_load_props)
if identity_token:
load_options["_refresh_identity_token"] = identity_token
if load_options:
self.load_options += load_options
if compile_options:
self._compile_options += compile_options
return self
def _clone(self):
return self._generate()
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless :meth:`_query.Query.with_labels`
is called first.
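For example, the SQL to be rendered can be inspected by simply
printing the statement (a minimal sketch, assuming an existing
``query`` object)::
print(query.statement)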
"""
# .statement can return the direct future.Select() construct here, as
# long as we are not using subsequent adaption features that
# are made against raw entities, e.g. from_self(), with_polymorphic(),
# select_entity_from(). If these features are being used, then
# the Select() we return will not have the correct .selected_columns
# collection and will not embed in subsequent queries correctly.
# We could find a way to make this collection "correct", however
# this would not be too different from doing the full compile as
# we are doing in any case, the Select() would still not have the
# proper state for other attributes like whereclause, order_by,
# and these features are all deprecated in any case.
#
# for these reasons, Query is not a Select, it remains an ORM
# object for which __clause_element__() must be called in order for
# it to provide a real expression object.
#
# from there, it starts to look much like Query itself won't be
# passed into the execute process and won't generate its own cache
# key; this will all occur in terms of the ORM-enabled Select.
if (
not self._compile_options._set_base_alias
and not self._compile_options._with_polymorphic_adapt_map
):
# if we don't have legacy top level aliasing features in use
# then convert to a future select() directly
stmt = self._statement_20(for_statement=True)
else:
stmt = self._compile_state(for_statement=True).statement
if self._params:
stmt = stmt.params(self._params)
return stmt
def _final_statement(self, legacy_query_style=True):
"""Return the 'final' SELECT statement for this :class:`.Query`.
This is the Core-only select() that will be rendered by a complete
compilation of this query, and is what .statement used to return
in 1.3.
This method creates a complete compile state so is fairly expensive.
"""
q = self._clone()
return q._compile_state(
use_legacy_query_style=legacy_query_style
).statement
def _statement_20(self, for_statement=False, use_legacy_query_style=True):
# TODO: this event needs to be deprecated, as it currently applies
# only to ORM query and occurs at this spot that is now more
# or less an artificial spot
if self.dispatch.before_compile:
for fn in self.dispatch.before_compile:
new_query = fn(self)
if new_query is not None and new_query is not self:
self = new_query
if not fn._bake_ok:
self._compile_options += {"_bake_ok": False}
compile_options = self._compile_options
compile_options += {
"_for_statement": for_statement,
"_use_legacy_query_style": use_legacy_query_style,
}
if self._statement is not None:
stmt = FromStatement(self._raw_columns, self._statement)
stmt.__dict__.update(
_with_options=self._with_options,
_with_context_options=self._with_context_options,
_compile_options=compile_options,
_execution_options=self._execution_options,
_propagate_attrs=self._propagate_attrs,
)
else:
# Query / select() internal attributes are 99% cross-compatible
stmt = Select._create_raw_select(**self.__dict__)
stmt.__dict__.update(
_label_style=self._label_style,
_compile_options=compile_options,
_propagate_attrs=self._propagate_attrs,
)
stmt.__dict__.pop("session", None)
# ensure the ORM context is used to compile the statement, even
# if it has no ORM entities. This is so ORM-only things like
# _legacy_joins are picked up that wouldn't be picked up by the
# Core statement context
if "compile_state_plugin" not in stmt._propagate_attrs:
stmt._propagate_attrs = stmt._propagate_attrs.union(
{"compile_state_plugin": "orm", "plugin_subject": None}
)
return stmt
def subquery(
self,
name=None,
with_labels=False,
reduce_columns=False,
):
"""Return the full SELECT statement represented by
this :class:`_query.Query`, embedded within an
:class:`_expression.Alias`.
Eager JOIN generation within the query is disabled.
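As a rough sketch (assuming the hypothetical ``User`` / ``Address``
mapping used in the other examples here, and ``func`` imported from
``sqlalchemy``), a per-user address count can be computed in a
subquery and joined back to ``User``::
subq = session.query(
Address.user_id,
func.count(Address.id).label("address_count")
).group_by(Address.user_id).subquery()
q = session.query(User, subq.c.address_count).outerjoin(
subq, User.id == subq.c.user_id
)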
:param name: string name to be assigned as the alias;
this is passed through to :meth:`_expression.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`_query.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True,
:meth:`_expression.Select.reduce_columns` will
be called on the resulting :func:`_expression.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
def cte(self, name=None, recursive=False, nesting=False):
r"""Return the full SELECT statement represented by this
:class:`_query.Query` represented as a common table expression (CTE).
Parameters and usage are the same as those of the
:meth:`_expression.SelectBase.cte` method; see that method for
further details.
Here is the `PostgreSQL WITH
RECURSIVE example
<https://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`_orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\
filter(Part.part=="our part").\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`_expression.HasCTE.cte`
"""
return self.enable_eagerloads(False).statement.cte(
name=name, recursive=recursive, nesting=nesting
)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`_query.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
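As a rough sketch (assuming the hypothetical ``User`` / ``Address``
mapping from the other examples and ``func`` imported from
``sqlalchemy``; the inner query is expected to auto-correlate against
the enclosing ``User`` query)::
address_count = session.query(
func.count(Address.id)
).filter(Address.user_id == User.id).label("address_count")
q = session.query(User.name, address_count)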
"""
return self.enable_eagerloads(False).statement.label(name)
@util.deprecated(
"1.4",
"The :meth:`_query.Query.as_scalar` method is deprecated and will be "
"removed in a future release. Please refer to "
":meth:`_query.Query.scalar_subquery`.",
)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`_query.Query`, converted to a scalar subquery.
"""
return self.scalar_subquery()
def scalar_subquery(self):
"""Return the full SELECT statement represented by this
:class:`_query.Query`, converted to a scalar subquery.
Analogous to
:meth:`sqlalchemy.sql.expression.SelectBase.scalar_subquery`.
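As a minimal sketch (assuming the hypothetical ``Address`` mapping
and ``func`` imported from ``sqlalchemy``), the scalar subquery can
be compared against columns in an enclosing query::
max_id = session.query(func.max(Address.id)).scalar_subquery()
newest_address = session.query(Address).filter(
Address.id == max_id
).one()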
.. versionchanged:: 1.4 The :meth:`_query.Query.scalar_subquery`
method replaces the :meth:`_query.Query.as_scalar` method.
"""
return self.enable_eagerloads(False).statement.scalar_subquery()
@property
def selectable(self):
"""Return the :class:`_expression.Select` object emitted by this
:class:`_query.Query`.
Used for :func:`_sa.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return (
self._with_compile_options(
_enable_eagerloads=False, _render_for_subquery=True
)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.statement
)
@_generative
def only_return_tuples(self, value):
"""When set to True, the query results will always be a tuple.
This is specifically for single element queries. The default is False.
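As a minimal sketch (assuming a hypothetical ``User`` mapped class),
a single-entity query normally returns ``User`` objects directly,
whereas with this flag each result row is a one-element tuple::
session.query(User).first()  # a User object
session.query(User).only_return_tuples(True).first()  # (User,)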
.. versionadded:: 1.2.5
.. seealso::
:meth:`_query.Query.is_single_entity`
"""
self.load_options += dict(_only_return_tuples=value)
@property
def is_single_entity(self):
"""Indicates if this :class:`_query.Query`
returns tuples or single entities.
Returns True if this query returns a single entity for each instance
in its result list, and False if this query returns a tuple of entities
for each result.
.. versionadded:: 1.3.11
.. seealso::
:meth:`_query.Query.only_return_tuples`
"""
return (
not self.load_options._only_return_tuples
and len(self._raw_columns) == 1
and "parententity" in self._raw_columns[0]._annotations
and isinstance(
self._raw_columns[0]._annotations["parententity"],
ORMColumnsClauseRole,
)
)
@_generative
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`_query.Query.yield_per`.
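As a minimal sketch (assuming a hypothetical ``User`` entity that is
configured with eager loaders), eager loads may be switched off
before extracting the underlying statement for embedding elsewhere::
stmt = session.query(User).enable_eagerloads(False).statement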
"""
self._compile_options += {"_enable_eagerloads": value}
@_generative
def _with_compile_options(self, **opt):
self._compile_options += opt
@util.deprecated_20(
":meth:`_orm.Query.with_labels` and :meth:`_orm.Query.apply_labels`",
alternative="Use set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) "
"instead.",
)
def with_labels(self):
return self.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
apply_labels = with_labels
@property
def get_label_style(self):
"""
Retrieve the current label style.
.. versionadded:: 1.4
"""
return self._label_style
def set_label_style(self, style):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
.. note:: The :meth:`_query.Query.set_label_style` method *only* applies
to the output of :attr:`_query.Query.statement`, and *not* to any of
the result-row invoking systems of :class:`_query.Query` itself,
e.g.
:meth:`_query.Query.first`, :meth:`_query.Query.all`, etc.
To execute
a query using :meth:`_query.Query.set_label_style`, invoke the
:attr:`_query.Query.statement` using :meth:`.Session.execute`::
result = session.execute(
query
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.statement
)
.. versionadded:: 1.4
""" # noqa
if self._label_style is not style:
self = self._generate()
self._label_style = style
return self
@_generative
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return sql.elements.BooleanClauseList._construct_for_whereclause(
self._where_criteria
)
@_generative
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._compile_options += {"_current_path": path}
@_generative
@_assertions(_no_clauseelement_condition)
@util.deprecated_20(
":meth:`_orm.Query.with_polymorphic`",
alternative="Use the orm.with_polymorphic() standalone function",
)
def with_polymorphic(
self, cls_or_mappers, selectable=None, polymorphic_on=None
):
"""Load columns for inheriting classes.
This is a legacy method which is replaced by the
:func:`_orm.with_polymorphic` function.
.. warning:: The :meth:`_orm.Query.with_polymorphic` method does
**not** support 1.4/2.0 style features including
:func:`_orm.with_loader_criteria`. Please migrate code
to use :func:`_orm.with_polymorphic`.
:meth:`_query.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`_query.Query`.
The "main" mapped class here means the :class:`_query.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
.. seealso::
:ref:`with_polymorphic` - illustrates current patterns
"""
entity = _legacy_filter_by_entity_zero(self)
wp = with_polymorphic(
entity,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on,
)
self._compile_options = self._compile_options.add_to_element(
"_with_polymorphic_adapt_map", ((entity, inspect(wp)),)
)
@_generative
def yield_per(self, count):
r"""Yield only ``count`` rows at a time.
The purpose of this method is when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to declare
very large areas of memory which is both time consuming and leads
to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
As of SQLAlchemy 1.4, the :meth:`_orm.Query.yield_per` method is
equivalent to using the ``yield_per`` execution option at the ORM
level. See the section :ref:`orm_queryguide_yield_per` for further
background on this option.
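A minimal sketch of batched iteration, assuming a hypothetical
``User`` entity (``process()`` stands in for application logic)::
for user in session.query(User).yield_per(1000):
    process(user)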
"""
self.load_options += {"_yield_per": count}
@util.deprecated_20(
":meth:`_orm.Query.get`",
alternative="The method is now available as :meth:`_orm.Session.get`",
becomes_legacy=True,
)
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
some_object = session.query(VersionedFoo).get(
{"id": 5, "version_id": 10})
:meth:`_query.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`_query.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`_query.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`_query.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`_query.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
:param ident: A scalar, tuple, or dictionary representing the
primary key. For a composite (e.g. multiple column) primary key,
a tuple or dictionary should be passed.
For a single-column primary key, the scalar calling form is typically
the most expedient. If the primary key of a row is the value "5",
the call looks like::
my_object = query.get(5)
The tuple form contains primary key values typically in
the order in which they correspond to the mapped
:class:`_schema.Table`
object's primary key columns, or if the
:paramref:`_orm.Mapper.primary_key` configuration parameter were
used, in
the order used for that parameter. For example, if the primary key
of a row is represented by the integer
digits "5, 10" the call would look like::
my_object = query.get((5, 10))
The dictionary form should include as keys the mapped attribute names
corresponding to each element of the primary key. If the mapped class
has the attributes ``id``, ``version_id`` as the attributes which
store the object's primary key value, the call would look like::
my_object = query.get({"id": 5, "version_id": 10})
.. versionadded:: 1.3 the :meth:`_query.Query.get`
method now optionally
accepts a dictionary of attribute names to values in order to
indicate a primary key identifier.
:return: The object instance, or ``None``.
"""
self._no_criterion_assertion("get", order_by=False, distinct=False)
# we still implement _get_impl() so that baked query can override
# it
return self._get_impl(ident, loading.load_on_pk_identity)
def _get_impl(self, primary_key_identity, db_load_fn, identity_token=None):
mapper = self._only_full_mapper_zero("get")
return self.session._get_impl(
mapper,
primary_key_identity,
db_load_fn,
populate_existing=self.load_options._populate_existing,
with_for_update=self._for_update_arg,
options=self._with_options,
identity_token=identity_token,
execution_options=self._execution_options,
)
@property
def lazy_loaded_from(self):
"""An :class:`.InstanceState` that is using this :class:`_query.Query`
for a lazy load operation.
.. deprecated:: 1.4 This attribute should be viewed via the
:attr:`.ORMExecuteState.lazy_loaded_from` attribute, within
the context of the :meth:`.SessionEvents.do_orm_execute`
event.
.. seealso::
:attr:`.ORMExecuteState.lazy_loaded_from`
"""
return self.load_options._lazy_loaded_from
@property
def _current_path(self):
return self._compile_options._current_path
@_generative
def correlate(self, *fromclauses):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`_expression.Select.correlate`
after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`_query.Query.from_self` is used, or when
a subquery as returned by :meth:`_query.Query.subquery` is
embedded in another :func:`_expression.select` construct.
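As a rough sketch (assuming the hypothetical ``User`` / ``Address``
mapping from the other examples and ``func`` imported from
``sqlalchemy``; the explicit correlation ensures the ``user`` table
is not repeated in the inner FROM)::
address_count = (
    session.query(func.count(Address.id))
    .filter(Address.user_id == User.id)
    .correlate(User)
    .scalar_subquery()
)
q = session.query(User.name, address_count.label("address_count"))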
"""
self._auto_correlate = False
if fromclauses and fromclauses[0] in {None, False}:
self._correlate = ()
else:
self._correlate = set(self._correlate).union(
coercions.expect(roles.FromClauseRole, f) for f in fromclauses
)
@_generative
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
As of SQLAlchemy 1.4, the :meth:`_orm.Query.autoflush` method
is equivalent to using the ``autoflush`` execution option at the
ORM level. See the section :ref:`orm_queryguide_autoflush` for
further background on this option.
"""
self.load_options += {"_autoflush": setting}
@_generative
def populate_existing(self):
"""Return a :class:`_query.Query`
that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
As of SQLAlchemy 1.4, the :meth:`_orm.Query.populate_existing` method
is equivalent to using the ``populate_existing`` execution option at
the ORM level. See the section :ref:`orm_queryguide_populate_existing`
for further background on this option.
"""
self.load_options += {"_populate_existing": True}
@_generative
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`_query.Query._invoke_all_eagers`.
"""
self.load_options += {"_invoke_all_eagers": value}
@util.deprecated_20(
":meth:`_orm.Query.with_parent`",
alternative="Use the :func:`_orm.with_parent` standalone construct.",
becomes_legacy=True,
)
@util.preload_module("sqlalchemy.orm.relationships")
def with_parent(self, instance, property=None, from_entity=None): # noqa
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`_orm.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to
:meth:`_query.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`_query.Query` object's target mapper.
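As a rough sketch (assuming the usual hypothetical ``User.addresses``
relationship and an already-loaded ``some_user`` instance)::
addresses = session.query(Address).with_parent(
    some_user, User.addresses
).all()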
:param instance:
An instance which has some :func:`_orm.relationship`.
:param property:
String property name, or class-bound attribute, which indicates
what relationship from the instance should be used to reconcile the
parent/child relationship.
:param from_entity:
Entity in which to consider as the left side. This defaults to the
"zero" entity of the :class:`_query.Query` itself.
"""
relationships = util.preloaded.orm_relationships
if from_entity:
entity_zero = inspect(from_entity)
else:
entity_zero = _legacy_filter_by_entity_zero(self)
if property is None:
# TODO: deprecate, property has to be supplied
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if (
isinstance(prop, relationships.RelationshipProperty)
and prop.mapper is entity_zero.mapper
):
property = prop # noqa
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'"
% (
entity_zero.mapper.class_.__name__,
instance.__class__.__name__,
)
)
return self.filter(with_parent(instance, property, entity_zero.entity))
@_generative
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
# TODO: deprecate
entity = aliased(entity, alias)
self._raw_columns = list(self._raw_columns)
self._raw_columns.append(
coercions.expect(
roles.ColumnsClauseRole, entity, apply_propagate_attrs=self
)
)
@_generative
def with_session(self, session):
"""Return a :class:`_query.Query` that will use the given
:class:`.Session`.
While the :class:`_query.Query`
object is normally instantiated using the
:meth:`.Session.query` method, it is legal to build the
:class:`_query.Query`
directly without necessarily using a :class:`.Session`. Such a
:class:`_query.Query` object, or any :class:`_query.Query`
already associated
with a different :class:`.Session`, can produce a new
:class:`_query.Query`
object associated with a target session using this method::
from sqlalchemy.orm import Query
query = Query([MyClass]).filter(MyClass.id == 5)
result = query.with_session(my_session).one()
"""
self.session = session
@util.deprecated_20(
":meth:`_query.Query.from_self`",
alternative="The new approach is to use the :func:`.orm.aliased` "
"construct in conjunction with a subquery. See the section "
":ref:`Selecting from the query itself as a subquery "
"<migration_20_query_from_self>` in the 2.0 migration notes for an "
"example.",
)
def from_self(self, *entities):
r"""return a Query that selects from this Query's
SELECT statement.
:meth:`_query.Query.from_self` essentially turns the SELECT statement
into a SELECT of itself. Given a query such as::
q = session.query(User).filter(User.name.like('e%'))
Given the :meth:`_query.Query.from_self` version::
q = session.query(User).filter(User.name.like('e%')).from_self()
This query renders as:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1) AS anon_1
There are lots of cases where :meth:`_query.Query.from_self`
may be useful.
A simple one is where above, we may want to apply a row LIMIT to
the set of user objects we query against, and then apply additional
joins against that row-limited set::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self().\
join(User.addresses).filter(Address.email.like('q%'))
The above query joins to the ``Address`` entity but only against the
first five results of the ``User`` query:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1
**Automatic Aliasing**
Another key behavior of :meth:`_query.Query.from_self`
is that it applies
**automatic aliasing** to the entities inside the subquery, when
they are referenced on the outside. Above, if we continue to
refer to the ``User`` entity without any additional aliasing applied
to it, those references will be in terms of the subquery::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self().\
join(User.addresses).filter(Address.email.like('q%')).\
order_by(User.name)
The ORDER BY against ``User.name`` is aliased to be in terms of the
inner subquery:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1 ORDER BY anon_1.user_name
The automatic aliasing feature only works in a **limited** way,
for simple filters and orderings. More ambitious constructions
such as referring to the entity in joins should prefer to use
explicit subquery objects, typically making use of the
:meth:`_query.Query.subquery`
method to produce an explicit subquery object.
Always test the structure of queries by viewing the SQL to ensure
a particular structure does what's expected!
**Changing the Entities**
:meth:`_query.Query.from_self`
also includes the ability to modify what
columns are being queried. In our example, we want ``User.id``
to be queried by the inner query, so that we can join to the
``Address`` entity on the outside, but we only wanted the outer
query to return the ``Address.email`` column::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self(Address.email).\
join(User.addresses).filter(Address.email.like('q%'))
yielding:
.. sourcecode:: sql
SELECT address.email AS address_email
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1
**Looking out for Inner / Outer Columns**
Keep in mind that when referring to columns that originate from
inside the subquery, we need to ensure they are present in the
columns clause of the subquery itself; this is an ordinary aspect of
SQL. For example, if we wanted to load from a joined entity inside
the subquery using :func:`.contains_eager`, we need to add those
columns. Below illustrates a join of ``Address`` to ``User``,
then a subquery, and then we'd like :func:`.contains_eager` to access
the ``User`` columns::
q = session.query(Address).join(Address.user).\
filter(User.name.like('e%'))
q = q.add_entity(User).from_self().\
options(contains_eager(Address.user))
We use :meth:`_query.Query.add_entity` above **before** we call
:meth:`_query.Query.from_self`
so that the ``User`` columns are present
in the inner subquery, so that they are available to the
:func:`.contains_eager` modifier we are using on the outside,
producing:
.. sourcecode:: sql
SELECT anon_1.address_id AS anon_1_address_id,
anon_1.address_email AS anon_1_address_email,
anon_1.address_user_id AS anon_1_address_user_id,
anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (
SELECT address.id AS address_id,
address.email AS address_email,
address.user_id AS address_user_id,
"user".id AS user_id,
"user".name AS user_name
FROM address JOIN "user" ON "user".id = address.user_id
WHERE "user".name LIKE :name_1) AS anon_1
If we didn't call ``add_entity(User)``, but still asked
:func:`.contains_eager` to load the ``User`` entity, it would be
forced to add the table on the outside without the correct
join criteria - note the ``anon_1, "user"`` phrase at
the end:
.. sourcecode:: sql
-- incorrect query
SELECT anon_1.address_id AS anon_1_address_id,
anon_1.address_email AS anon_1_address_email,
anon_1.address_user_id AS anon_1_address_user_id,
"user".id AS user_id,
"user".name AS user_name
FROM (
SELECT address.id AS address_id,
address.email AS address_email,
address.user_id AS address_user_id
FROM address JOIN "user" ON "user".id = address.user_id
WHERE "user".name LIKE :name_1) AS anon_1, "user"
:param \*entities: optional list of entities which will replace
those being selected.
"""
return self._from_self(*entities)
def _from_self(self, *entities):
fromclause = (
self.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.correlate(None)
.subquery()
._anonymous_fromclause()
)
q = self._from_selectable(fromclause)
if entities:
q._set_entities(entities)
return q
@_generative
def _set_enable_single_crit(self, val):
self._compile_options += {"_enable_single_crit": val}
@_generative
def _from_selectable(self, fromclause, set_entity_from=True):
for attr in (
"_where_criteria",
"_order_by_clauses",
"_group_by_clauses",
"_limit_clause",
"_offset_clause",
"_last_joined_entity",
"_legacy_setup_joins",
"_memoized_select_entities",
"_distinct",
"_distinct_on",
"_having_criteria",
"_prefixes",
"_suffixes",
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], set_entity_from)
self._compile_options += {
"_enable_single_crit": False,
}
# this enables clause adaptation for non-ORM
# expressions.
# legacy. see test/orm/test_froms.py for various
# "oldstyle" tests that rely on this and the corresponding
# "newstyle" that do not.
self._compile_options += {"_orm_only_from_obj_alias": False}
@util.deprecated(
"1.4",
":meth:`_query.Query.values` "
"is deprecated and will be removed in a "
"future release. Please use :meth:`_query.Query.with_entities`",
)
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns
"""
if not columns:
return iter(())
q = self._clone().enable_eagerloads(False)
q._set_entities(columns)
if not q.load_options._yield_per:
q.load_options += {"_yield_per": 10}
return iter(q)
_values = values
@util.deprecated(
"1.4",
":meth:`_query.Query.value` "
"is deprecated and will be removed in a "
"future release. Please use :meth:`_query.Query.with_entities` "
"in combination with :meth:`_query.Query.scalar`",
)
def value(self, column):
"""Return a scalar result corresponding to the given
column expression.
"""
try:
return next(self.values(column))[0]
except StopIteration:
return None
@_generative
def with_entities(self, *entities):
r"""Return a new :class:`_query.Query`
replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\
join(User.address).\
filter(User.name.like('%ed%')).\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\
order_by(None).\
filter(User.id==5).\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\
limit(1)
"""
_MemoizedSelectEntities._generate_for_statement(self)
self._set_entities(entities)
@_generative
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._raw_columns = list(self._raw_columns)
self._raw_columns.extend(
coercions.expect(
roles.ColumnsClauseRole,
c,
apply_propagate_attrs=self,
post_inspect=True,
)
for c in column
)
@util.deprecated(
"1.4",
":meth:`_query.Query.add_column` "
"is deprecated and will be removed in a "
"future release. Please use :meth:`_query.Query.add_columns`",
)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
"""
return self.add_columns(column)
@_generative
def options(self, *args):
"""Return a new :class:`_query.Query` object,
applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded.
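As a minimal sketch (assuming the hypothetical ``User.addresses``
relationship used in the other examples here)::
from sqlalchemy.orm import joinedload
users = session.query(User).options(
    joinedload(User.addresses)
).all()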
.. seealso::
:ref:`deferred_options`
:ref:`relationship_loader_options`
"""
opts = tuple(util.flatten_iterator(args))
if self._compile_options._current_path:
for opt in opts:
if opt._is_legacy_option:
opt.process_query_conditionally(self)
else:
for opt in opts:
if opt._is_legacy_option:
opt.process_query(self)
self._with_options += opts
def with_transformation(self, fn):
"""Return a new :class:`_query.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`_query.Query`
objects. See the example at :ref:`hybrid_transformers`.
"""
return fn(self)
def get_execution_options(self):
"""Get the non-SQL options which will take effect during execution.
.. versionadded:: 1.3
.. seealso::
:meth:`_query.Query.execution_options`
"""
return self._execution_options
@_generative
def execution_options(self, **kwargs):
"""Set non-SQL options which take effect during execution.
Options allowed here include all of those accepted by
:meth:`_engine.Connection.execution_options`, as well as a series
of ORM specific options:
``populate_existing=True`` - equivalent to using
:meth:`_orm.Query.populate_existing`
``autoflush=True|False`` - equivalent to using
:meth:`_orm.Query.autoflush`
``yield_per=<value>`` - equivalent to using
:meth:`_orm.Query.yield_per`
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method or execution option is used.
The execution options may also be specified on a per execution basis
when using :term:`2.0 style` queries via the
:paramref:`_orm.Session.execution_options` parameter.
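As a minimal sketch (assuming a hypothetical ``User`` entity), the
ORM-specific options may be combined in a single call::
q = session.query(User).execution_options(
    populate_existing=True, autoflush=False, yield_per=100
)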
.. versionadded:: 1.4 - added ORM options to
:meth:`_orm.Query.execution_options`
.. seealso::
:ref:`engine_stream_results`
:meth:`_query.Query.get_execution_options`
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative
def with_for_update(
self,
read=False,
nowait=False,
of=None,
skip_locked=False,
key_share=False,
):
"""return a new :class:`_query.Query`
with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`_expression.GenerativeSelect.with_for_update`.
When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).populate_existing().with_for_update(nowait=True, of=User)
The above query on a PostgreSQL backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. warning::
Using ``with_for_update`` in the context of eager loading
relationships is not officially supported or recommended by
SQLAlchemy and may not work with certain queries on various
database backends. When ``with_for_update`` is successfully used
with a query that involves :func:`_orm.joinedload`, SQLAlchemy will
attempt to emit SQL that locks all involved tables.
.. note:: It is generally a good idea to combine the use of the
:meth:`_orm.Query.populate_existing` method when using the
:meth:`_orm.Query.with_for_update` method. The purpose of
:meth:`_orm.Query.populate_existing` is to force all the data read
from the SELECT to be populated into the ORM objects returned,
even if these objects are already in the :term:`identity map`.
.. seealso::
:meth:`_expression.GenerativeSelect.with_for_update`
- Core level method with
full argument and behavioral description.
:meth:`_orm.Query.populate_existing` - overwrites attributes of
objects already loaded in the identity map.
""" # noqa: E501
self._for_update_arg = ForUpdateArg(
read=read,
nowait=nowait,
of=of,
skip_locked=skip_locked,
key_share=key_share,
)
@_generative
def params(self, *args, **kwargs):
r"""Add values for bind parameters which may have been
specified in filter().
Parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
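As a rough sketch (assuming a hypothetical ``User`` entity and
``bindparam`` imported from ``sqlalchemy``)::
from sqlalchemy import bindparam
q = session.query(User).filter(User.name == bindparam('uname'))
ed_user = q.params(uname='ed').one()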
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary."
)
self._params = self._params.union(kwargs)
def where(self, *criterion):
"""A synonym for :meth:`.Query.filter`.
.. versionadded:: 1.4
"""
return self.filter(*criterion)
@_generative
@_assertions(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
r"""Apply the given filtering criterion to a copy
of this :class:`_query.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria may be specified as comma separated; the effect
is that they will be joined together using the :func:`.and_`
function::
session.query(MyClass).\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`_expression.text`
construct.
.. seealso::
:meth:`_query.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = coercions.expect(
roles.WhereHavingRole, criterion, apply_propagate_attrs=self
)
# legacy vvvvvvvvvvvvvvvvvvvvvvvvvvv
if self._aliased_generation:
criterion = sql_util._deep_annotate(
criterion, {"aliased_generation": self._aliased_generation}
)
# legacy ^^^^^^^^^^^^^^^^^^^^^^^^^^^
self._where_criteria += (criterion,)
@util.memoized_property
def _last_joined_entity(self):
if self._legacy_setup_joins:
return _legacy_determine_last_joined_entity(
self._legacy_setup_joins, self._entity_from_pre_ent_zero()
)
else:
return None
def _filter_by_zero(self):
"""for the filter_by() method, return the target entity for which
we will attempt to derive an expression from based on string name.
"""
if self._legacy_setup_joins:
_last_joined_entity = self._last_joined_entity
if _last_joined_entity is not None:
return _last_joined_entity
# discussion related to #7239
# special check determines if we should try to derive attributes
# for filter_by() from the "from object", i.e., if the user
# called query.select_from(some selectable).filter_by(some_attr=value).
# We don't want to do that in the case that methods like
# from_self(), select_entity_from(), or a set op like union() were
# called; while these methods also place a
# selectable in the _from_obj collection, they also set up
# the _set_base_alias boolean which turns on the whole "adapt the
# entity to this selectable" thing, meaning the query still continues
# to construct itself in terms of the lead entity that was passed
# to query(), e.g. query(User).from_self() is still in terms of User,
# and not the subquery that from_self() created. This feature of
# "implicitly adapt all occurrences of entity X to some arbitrary
# subquery" is the main thing I am trying to do away with in 2.0 as
# users should now use aliased() for that, but I can't entirely get
# rid of it due to query.union() and other set ops relying upon it.
#
# compare this to the base Select()._filter_by_zero() which can
# just return self._from_obj[0] if present, because there is no
# "_set_base_alias" feature.
#
# IOW, this conditional essentially detects if
# "select_from(some_selectable)" has been called, as opposed to
# "select_entity_from()", "from_self()"
# or "union() / some_set_op()".
if self._from_obj and not self._compile_options._set_base_alias:
return self._from_obj[0]
return self._raw_columns[0]
def filter_by(self, **kwargs):
r"""Apply the given filtering criterion to a copy
of this :class:`_query.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria may be specified as comma separated; the effect
is that they will be joined together using the :func:`.and_`
function::
session.query(MyClass).\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`_query.Query.join`.
.. seealso::
:meth:`_query.Query.filter` - filter on SQL expressions.
"""
from_entity = self._filter_by_zero()
if from_entity is None:
raise sa_exc.InvalidRequestError(
"Can't use filter_by when the first entity '%s' of a query "
"is not a mapped class. Please use the filter method instead, "
"or change the order of the entities in the query"
% self._query_entity_zero()
)
clauses = [
_entity_namespace_key(from_entity, key) == value
for key, value in kwargs.items()
]
return self.filter(*clauses)
@_generative
@_assertions(_no_statement_condition, _no_limit_offset)
def order_by(self, *clauses):
"""Apply one or more ORDER BY criteria to the query and return
the newly resulting :class:`_query.Query`.
e.g.::
q = session.query(Entity).order_by(Entity.id, Entity.name)
All existing ORDER BY criteria may be cancelled by passing
``None`` by itself. New ORDER BY criteria may then be added by
invoking :meth:`_orm.Query.order_by` again, e.g.::
# will erase all ORDER BY and ORDER BY new_col alone
q = q.order_by(None).order_by(new_col)
.. seealso::
These sections describe ORDER BY in terms of :term:`2.0 style`
invocation but apply to :class:`_orm.Query` as well:
:ref:`tutorial_order_by` - in the :ref:`unified_tutorial`
:ref:`tutorial_order_by_label` - in the :ref:`unified_tutorial`
"""
if len(clauses) == 1 and (clauses[0] is None or clauses[0] is False):
self._order_by_clauses = ()
else:
criterion = tuple(
coercions.expect(roles.OrderByRole, clause)
for clause in clauses
)
# legacy vvvvvvvvvvvvvvvvvvvvvvvvvvv
if self._aliased_generation:
criterion = tuple(
[
sql_util._deep_annotate(
o, {"aliased_generation": self._aliased_generation}
)
for o in criterion
]
)
# legacy ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
self._order_by_clauses += criterion
@_generative
@_assertions(_no_statement_condition, _no_limit_offset)
def group_by(self, *clauses):
"""Apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`_query.Query`.
All existing GROUP BY settings can be suppressed by
passing ``None`` - this will suppress any GROUP BY configured
on mappers as well.
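As a rough sketch (assuming the hypothetical ``User`` / ``Address``
mapping from the other examples and ``func`` imported from
``sqlalchemy``)::
q = session.query(User.name, func.count(Address.id)).join(
    User.addresses
).group_by(User.name)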
.. seealso::
These sections describe GROUP BY in terms of :term:`2.0 style`
invocation but apply to :class:`_orm.Query` as well:
:ref:`tutorial_group_by_w_aggregates` - in the
:ref:`unified_tutorial`
:ref:`tutorial_order_by_label` - in the :ref:`unified_tutorial`
"""
if len(clauses) == 1 and (clauses[0] is None or clauses[0] is False):
self._group_by_clauses = ()
else:
criterion = tuple(
coercions.expect(roles.GroupByRole, clause)
for clause in clauses
)
# legacy vvvvvvvvvvvvvvvvvvvvvvvvvvv
if self._aliased_generation:
criterion = tuple(
[
sql_util._deep_annotate(
o, {"aliased_generation": self._aliased_generation}
)
for o in criterion
]
)
# legacy ^^^^^^^^^^^^^^^^^^^^^^^^^^
self._group_by_clauses += criterion
@_generative
@_assertions(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
r"""Apply a HAVING criterion to the query and return the
newly resulting :class:`_query.Query`.
:meth:`_query.Query.having` is used in conjunction with
:meth:`_query.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, e.g.::
q = session.query(User.id).\
join(User.addresses).\
group_by(User.id).\
having(func.count(Address.id) > 2)
"""
self._having_criteria += (
coercions.expect(
roles.WhereHavingRole, criterion, apply_propagate_attrs=self
),
)
def _set_op(self, expr_fn, *q):
return self._from_selectable(expr_fn(*([self] + list(q))).subquery())
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`_query.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._set_op(expression.union, *q)
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.union_all, *q)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.intersect, *q)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.intersect_all, *q)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.except_, *q)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.except_all, *q)
def _next_aliased_generation(self):
if "_aliased_generation_counter" not in self.__dict__:
self._aliased_generation_counter = 0
self._aliased_generation_counter += 1
return self._aliased_generation_counter
@_generative
@_assertions(_no_statement_condition, _no_limit_offset)
def join(self, target, *props, **kwargs):
r"""Create a SQL JOIN against this :class:`_query.Query`
object's criterion
and apply generatively, returning the newly resulting
:class:`_query.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`_query.Query.join`
is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`_query.Query.join` along
``User.addresses`` will result in SQL approximately equivalent to::
SELECT user.id, user.name
FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`_query.Query.join` as the "on clause", that is, it indicates
how the "ON" portion of the JOIN should be constructed.
To construct a chain of joins, multiple :meth:`_query.Query.join`
calls may be used. The relationship-bound attribute implies both
the left and right side of the join at once::
q = session.query(User).\
join(User.orders).\
join(Order.items).\
join(Item.keywords)
.. note:: as seen in the above example, **the order in which each
call to the join() method occurs is important**. Query would not,
for example, know how to join correctly if we were to specify
``User``, then ``Item``, then ``Order``, in our chain of joins; in
such a case, depending on the arguments passed, it may raise an
error that it doesn't know how to join, or it may produce invalid
SQL in which case the database will raise an error. In correct
practice, the
:meth:`_query.Query.join` method is invoked in such a way that lines
up with how we would want the JOIN clauses in SQL to be
rendered, and each call should represent a clear link from what
precedes it.
**Joins to a Target Entity or Selectable**
A second form of :meth:`_query.Query.join` allows any mapped entity or
core selectable construct as a target. In this usage,
:meth:`_query.Query.join` will attempt to create a JOIN along the
natural foreign key relationship between two entities::
q = session.query(User).join(Address)
In the above calling form, :meth:`_query.Query.join` is called upon to
create the "on clause" automatically for us. This calling form will
ultimately raise an error if either there are no foreign keys between
the two entities, or if there are multiple foreign key linkages between
the target entity and the entity or entities already present on the
left side such that creating a join requires more information. Note
that when indicating a join to a target without any ON clause, ORM
configured relationships are not taken into account.
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. An example that includes
a SQL expression as the ON clause is as follows::
q = session.query(User).join(Address, User.id==Address.user_id)
The above form may also use a relationship-bound attribute as the
ON clause as well::
q = session.query(User).join(Address, User.addresses)
The above syntax can be useful for the case where we wish
to join to an alias of a particular target entity. If we wanted
to join to ``Address`` twice, it could be achieved using two
aliases set up using the :func:`~sqlalchemy.orm.aliased` function::
a1 = aliased(Address)
a2 = aliased(Address)
q = session.query(User).\
join(a1, User.addresses).\
join(a2, User.addresses).\
filter(a1.email_address=='[email protected]').\
filter(a2.email_address=='[email protected]')
The relationship-bound calling form can also specify a target entity
using the :meth:`_orm.PropComparator.of_type` method; a query
equivalent to the one above would be::
a1 = aliased(Address)
a2 = aliased(Address)
q = session.query(User).\
join(User.addresses.of_type(a1)).\
join(User.addresses.of_type(a2)).\
filter(a1.email_address == '[email protected]').\
filter(a2.email_address == '[email protected]')
**Augmenting Built-in ON Clauses**
As a substitute for providing a full custom ON condition for an
existing relationship, the :meth:`_orm.PropComparator.and_` function
may be applied to a relationship attribute to augment additional
criteria into the ON clause; the additional criteria will be combined
with the default criteria using AND::
q = session.query(User).join(
User.addresses.and_(Address.email_address != '[email protected]')
)
.. versionadded:: 1.4
**Joining to Tables and Subqueries**
The target of a join may also be any table or SELECT statement,
which may be related to a target entity or not. Use the
appropriate ``.subquery()`` method in order to make a subquery
out of a query::
subq = session.query(Address).\
filter(Address.email_address == '[email protected]').\
subquery()
q = session.query(User).join(
subq, User.id == subq.c.user_id
)
Joining to a subquery in terms of a specific relationship and/or
target entity may be achieved by linking the subquery to the
entity using :func:`_orm.aliased`::
subq = session.query(Address).\
filter(Address.email_address == '[email protected]').\
subquery()
address_subq = aliased(Address, subq)
q = session.query(User).join(
User.addresses.of_type(address_subq)
)
**Controlling what to Join From**
In cases where the left side of the current state of
:class:`_query.Query` is not in line with what we want to join from,
the :meth:`_query.Query.select_from` method may be used::
q = session.query(Address).select_from(User).\
join(User.addresses).\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Legacy Features of Query.join()**
.. deprecated:: 1.4 The following features are deprecated and will
be removed in SQLAlchemy 2.0.
The :meth:`_query.Query.join` method currently supports several
usage patterns and arguments that are considered to be legacy
as of SQLAlchemy 1.3. A deprecation path will follow
in the 1.4 series for the following features:
* Joining on relationship names rather than attributes::
session.query(User).join("addresses")
**Why it's legacy**: the string name does not provide enough context
for :meth:`_query.Query.join` to always know what is desired,
notably in that there is no indication of what the left side
of the join should be. This gives rise to flags like
``from_joinpoint`` as well as the ability to place several
join clauses in a single :meth:`_query.Query.join` call
which don't solve the problem fully while also
adding new calling styles that are unnecessary and expensive to
accommodate internally.
**Modern calling pattern**: Use the actual relationship,
e.g. ``User.addresses`` in the above case::
session.query(User).join(User.addresses)
* Automatic aliasing with the ``aliased=True`` flag::
session.query(Node).join(Node.children, aliased=True).\
filter(Node.name == 'some name')
**Why it's legacy**: the automatic aliasing feature of
:class:`_query.Query` is intensely complicated, both in its internal
implementation as well as in its observed behavior, and is almost
never used. It is difficult to know upon inspection where and when
its aliasing of a target entity, ``Node`` in the above case, will be
applied and when it won't, and additionally the feature has to use
very elaborate heuristics to achieve this implicit behavior.
**Modern calling pattern**: Use the :func:`_orm.aliased` construct
explicitly::
from sqlalchemy.orm import aliased
n1 = aliased(Node)
session.query(Node).join(Node.children.of_type(n1)).\
filter(n1.name == 'some name')
* Multiple joins in one call::
session.query(User).join("orders", "items")
session.query(User).join(User.orders, Order.items)
session.query(User).join(
(Order, User.orders),
(Item, Item.order_id == Order.id)
)
session.query(User).join(Order, Item)
# ... and several more forms actually
**Why it's legacy**: being able to chain multiple ON clauses in one
call to :meth:`_query.Query.join` is yet another attempt to solve
the problem of being able to specify what entity to join from,
and is the source of a large variety of potential calling patterns
that are internally expensive and complicated to parse and
accommodate.
**Modern calling pattern**: Use relationship-bound attributes
or SQL-oriented ON clauses within separate calls, so that
each call to :meth:`_query.Query.join` knows what the left
side should be::
session.query(User).join(User.orders).join(
Item, Item.order_id == Order.id)
:param \*props: Incoming arguments for :meth:`_query.Query.join`,
the props collection in modern use should be considered to be a one
or two argument form, either as a single "target" entity or ORM
attribute-bound relationship, or as a target entity plus an "on
clause" which may be a SQL expression or ORM attribute-bound
relationship.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`_query.Query.outerjoin` method were called.
:param full=False: render FULL OUTER JOIN; implies ``isouter``.
.. versionadded:: 1.1
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. note:: This flag is considered legacy.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`_query.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`_query.Query.reset_joinpoint` is called.
.. note:: This flag is considered legacy.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`_query.Query.join` is used for inheritance relationships.
:func:`_orm.join` - a standalone ORM-level join function,
used internally by :meth:`_query.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter, full = (
kwargs.pop("aliased", False),
kwargs.pop("from_joinpoint", False),
kwargs.pop("isouter", False),
kwargs.pop("full", False),
)
if aliased or from_joinpoint:
util.warn_deprecated_20(
"The ``aliased`` and ``from_joinpoint`` keyword arguments "
"to Query.join() are deprecated and will be removed "
"in SQLAlchemy 2.0."
)
if kwargs:
raise TypeError(
"unknown arguments: %s" % ", ".join(sorted(kwargs))
)
# legacy vvvvvvvvvvvvvvvvvvvvvvvvvvv
if not from_joinpoint:
self._last_joined_entity = None
self._aliased_generation = None
# legacy ^^^^^^^^^^^^^^^^^^^^^^^^^^^
if props:
onclause, legacy = props[0], props[1:]
else:
onclause = legacy = None
if not legacy and onclause is None and not isinstance(target, tuple):
# non legacy argument form
_props = [(target,)]
elif (
not legacy
and isinstance(
target,
(
expression.Selectable,
type,
AliasedClass,
types.FunctionType,
),
)
and isinstance(
onclause,
(
elements.ColumnElement,
str,
interfaces.PropComparator,
types.FunctionType,
),
)
):
# non legacy argument form
_props = [(target, onclause)]
else:
# legacy forms. more time consuming :)
_props = []
_single = []
for prop in (target,) + props:
if isinstance(prop, tuple):
util.warn_deprecated_20(
"Query.join() will no longer accept tuples as "
"arguments in SQLAlchemy 2.0."
)
if _single:
_props.extend((_s,) for _s in _single)
_single = []
# this checks for an extremely ancient calling form of
# reversed tuples.
if isinstance(prop[0], (str, interfaces.PropComparator)):
prop = (prop[1], prop[0])
_props.append(prop)
else:
_single.append(prop)
if _single:
_props.extend((_s,) for _s in _single)
# legacy vvvvvvvvvvvvvvvvvvvvvvvvvvv
if aliased:
self._aliased_generation = self._next_aliased_generation()
if self._aliased_generation:
_props = [
(
prop[0],
sql_util._deep_annotate(
prop[1],
{"aliased_generation": self._aliased_generation},
)
if isinstance(prop[1], expression.ClauseElement)
else prop[1],
)
if len(prop) == 2
else prop
for prop in _props
]
# legacy ^^^^^^^^^^^^^^^^^^^^^^^^^^^
joins_to_add = tuple(
(
coercions.expect(
roles.JoinTargetRole,
prop[0],
legacy=True,
apply_propagate_attrs=self,
),
(
coercions.expect(roles.OnClauseRole, prop[1], legacy=True)
# if not isinstance(prop[1], str)
# else prop[1]
)
if len(prop) == 2
else None,
None,
{
"isouter": isouter,
"aliased": aliased,
"from_joinpoint": True if i > 0 else from_joinpoint,
"full": full,
"aliased_generation": self._aliased_generation,
},
)
for i, prop in enumerate(_props)
)
if len(joins_to_add) > 1:
util.warn_deprecated_20(
"Passing a chain of multiple join conditions to Query.join() "
"is deprecated and will be removed in SQLAlchemy 2.0. "
"Please use individual join() calls per relationship."
)
self._legacy_setup_joins += joins_to_add
self.__dict__.pop("_last_joined_entity", None)
def outerjoin(self, target, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
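For example, assuming the ``User`` / ``Address`` mapping used in the
:meth:`_query.Query.join` examples::
q = session.query(User).outerjoin(User.addresses)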
"""
kwargs["isouter"] = True
return self.join(target, *props, **kwargs)
@_generative
@_assertions(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
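A short sketch, assuming a ``Node`` mapping as used in the legacy
``aliased=True`` examples of :meth:`~.Query.join`::
q = session.query(Node).\
join(Node.children, aliased=True).\
filter(Node.name == 'child').\
reset_joinpoint().\
filter(Node.name == 'parent')
Criteria applied after :meth:`~.Query.reset_joinpoint` refer to the
base ``Node`` entity again rather than to the automatic alias.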
"""
self._last_joined_entity = None
self._aliased_generation = None
@_generative
@_assertions(_no_clauseelement_condition)
def select_from(self, *from_obj):
r"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\
join(User.addresses).\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@util.deprecated_20(
":meth:`_orm.Query.select_entity_from`",
alternative="Use the :func:`_orm.aliased` construct instead",
)
@_generative
@_assertions(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
r"""Set the FROM clause of this :class:`_query.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
The :meth:`_query.Query.select_entity_from`
method supplies an alternative
approach to the use case of applying an :func:`.aliased` construct
explicitly throughout a query. Instead of referring to the
:func:`.aliased` construct explicitly,
:meth:`_query.Query.select_entity_from` automatically *adapts* all
occurrences of the entity to the target selectable.
Given a case for :func:`.aliased` such as selecting ``User``
objects from a SELECT statement::
select_stmt = select(User).where(User.id == 7)
user_alias = aliased(User, select_stmt)
q = session.query(user_alias).\
filter(user_alias.name == 'ed')
Above, we apply the ``user_alias`` object explicitly throughout the
query. When it's not feasible for ``user_alias`` to be referenced
explicitly in many places, :meth:`_query.Query.select_entity_from`
may be
used at the start of the query to adapt the existing ``User`` entity::
q = session.query(User).\
select_entity_from(select_stmt.subquery()).\
filter(User.name == 'ed')
Above, the generated SQL will show that the ``User`` entity is
adapted to our statement, even in the case of the WHERE clause:
.. sourcecode:: sql
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
The :meth:`_query.Query.select_entity_from` method is similar to the
:meth:`_query.Query.select_from` method,
in that it sets the FROM clause
of the query. The difference is that it additionally applies
adaptation to the other parts of the query that refer to the
primary entity. If above we had used :meth:`_query.Query.select_from`
instead, the SQL generated would have been:
.. sourcecode:: sql
-- uses plain select_from(), not select_entity_from()
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
To supply textual SQL to the :meth:`_query.Query.select_entity_from`
method,
we can make use of the :func:`_expression.text` construct. However,
the
:func:`_expression.text`
construct needs to be aligned with the columns of our
entity, which is achieved by making use of the
:meth:`_expression.TextClause.columns` method::
text_stmt = text("select id, name from user").columns(
User.id, User.name).subquery()
q = session.query(User).select_entity_from(text_stmt)
:meth:`_query.Query.select_entity_from` itself accepts an
:func:`.aliased`
object, so that the special options of :func:`.aliased` such as
:paramref:`.aliased.adapt_on_names` may be used within the
scope of the :meth:`_query.Query.select_entity_from`
method's adaptation
services. Suppose
a view ``user_view`` also returns rows from ``user``. If
we reflect this view into a :class:`_schema.Table`, this view has no
relationship to the :class:`_schema.Table` to which we are mapped,
however
we can use name matching to select from it::
user_view = Table('user_view', metadata,
autoload_with=engine)
user_view_alias = aliased(
User, user_view, adapt_on_names=True)
q = session.query(User).\
select_entity_from(user_view_alias).\
order_by(User.name)
.. versionchanged:: 1.1.7 The :meth:`_query.Query.select_entity_from`
method now accepts an :func:`.aliased` object as an alternative
to a :class:`_expression.FromClause` object.
:param from_obj: a :class:`_expression.FromClause`
object that will replace
the FROM clause of this :class:`_query.Query`.
It also may be an instance
of :func:`.aliased`.
.. seealso::
:meth:`_query.Query.select_from`
"""
self._set_select_from([from_obj], True)
self._compile_options += {"_enable_single_crit": False}
def __getitem__(self, item):
return orm_util._getitem(
self,
item,
allow_negative=not self.session or not self.session.future,
)
@_generative
@_assertions(_no_statement_condition)
def slice(self, start, stop):
"""Computes the "slice" of the :class:`_query.Query` represented by
the given indices and returns the resulting :class:`_query.Query`.
The start and stop indices behave like the arguments to Python's
built-in :func:`range` function. This method provides an
alternative to using ``LIMIT``/``OFFSET`` to get a slice of the
query.
For example, ::
session.query(User).order_by(User.id).slice(1, 3)
renders as
.. sourcecode:: sql
SELECT users.id AS users_id,
users.name AS users_name
FROM users ORDER BY users.id
LIMIT ? OFFSET ?
(2, 1)
.. seealso::
:meth:`_query.Query.limit`
:meth:`_query.Query.offset`
"""
self._limit_clause, self._offset_clause = sql_util._make_slice(
self._limit_clause, self._offset_clause, start, stop
)
@_generative
@_assertions(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
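For example, assuming the ``User`` mapping used in other examples
in this module, the first ten rows may be selected as::
q = session.query(User).order_by(User.id).limit(10)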
"""
self._limit_clause = sql_util._offset_or_limit_clause(limit)
@_generative
@_assertions(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
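For example, combined with :meth:`_query.Query.limit` and assuming
the ``User`` mapping used in other examples in this module::
q = session.query(User).order_by(User.id).limit(10).offset(20)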
"""
self._offset_clause = sql_util._offset_or_limit_clause(offset)
@_generative
@_assertions(_no_statement_condition)
def distinct(self, *expr):
r"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
.. note::
The ORM-level :meth:`.distinct` call includes logic that will
automatically add columns from the ORDER BY of the query to the
columns clause of the SELECT statement, to satisfy the common need
of the database backend that ORDER BY columns be part of the SELECT
list when DISTINCT is used. These columns *are not* added to the
list of columns actually fetched by the :class:`_query.Query`,
however,
so would not affect results. The columns are passed through when
using the :attr:`_query.Query.statement` accessor, however.
.. deprecated:: 2.0 This logic is deprecated and will be removed
in SQLAlchemy 2.0. See :ref:`migration_20_query_distinct`
for a description of this use case in 2.0.
:param \*expr: optional column expressions. When present,
the PostgreSQL dialect will render a ``DISTINCT ON (<expressions>)``
construct.
.. deprecated:: 1.4 Using \*expr in other dialects is deprecated
and will raise :class:`_exc.CompileError` in a future version.
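For example, on the PostgreSQL backend and assuming the ``User``
mapping used in other examples, a ``DISTINCT ON`` may be rendered as::
q = session.query(User.id, User.name).distinct(User.name)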
"""
if expr:
self._distinct = True
self._distinct_on = self._distinct_on + tuple(
coercions.expect(roles.ByOfRole, e) for e in expr
)
else:
self._distinct = True
def all(self):
"""Return the results represented by this :class:`_query.Query`
as a list.
This results in an execution of the underlying SQL statement.
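For example, assuming the ``User`` mapping used in other examples::
users = session.query(User).filter(User.name.like('%ed%')).all()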
.. warning:: The :class:`_query.Query` object,
when asked to return either
a sequence or iterator that consists of full ORM-mapped entities,
will **deduplicate entries based on primary key**. See the FAQ for
more details.
.. seealso::
:ref:`faq_query_deduplicating`
"""
return self._iter().all()
@_generative
@_assertions(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`_expression.text`
or :func:`_expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this
:class:`_query.Query`.
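For example, assuming the ``User`` mapping and ``users`` table used in
other examples, a textual statement may be linked to the entity as::
from sqlalchemy import text
users = session.query(User).from_statement(
text("SELECT * FROM users WHERE name=:name")).params(name='ed').all()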
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = coercions.expect(
roles.SelectStatementRole, statement, apply_propagate_attrs=self
)
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
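For example, assuming the ``User`` mapping used in other examples, the
lowest-id row, or ``None`` if there is no match, may be retrieved as::
user = session.query(User).order_by(User.id).first()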
Calling :meth:`_query.Query.first`
results in an execution of the underlying
query.
.. seealso::
:meth:`_query.Query.one`
:meth:`_query.Query.one_or_none`
"""
# replicates limit(1) behavior
if self._statement is not None:
return self._iter().first()
else:
return self.limit(1)._iter().first()
def one_or_none(self):
"""Return at most one result or raise an exception.
Returns ``None`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that returns only scalar values
as opposed to full identity-mapped entities.
Calling :meth:`_query.Query.one_or_none`
results in an execution of the
underlying query.
.. versionadded:: 1.0.9
Added :meth:`_query.Query.one_or_none`
.. seealso::
:meth:`_query.Query.first`
:meth:`_query.Query.one`
"""
return self._iter().one_or_none()
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that returns only scalar values
as opposed to full identity-mapped entities.
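For example, assuming the ``User`` mapping used in other examples::
user = session.query(User).filter(User.id == 5).one()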
Calling :meth:`.one` results in an execution of the underlying query.
.. seealso::
:meth:`_query.Query.first`
:meth:`_query.Query.one_or_none`
"""
return self._iter().one()
def scalar(self):
"""Return the first element of the first result or None
if no rows are present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
# TODO: not sure why we can't use result.scalar() here
try:
ret = self.one()
if not isinstance(ret, collections_abc.Sequence):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
return self._iter().__iter__()
def _iter(self):
# new style execution.
params = self._params
statement = self._statement_20()
result = self.session.execute(
statement,
params,
execution_options={"_sa_orm_load_options": self.load_options},
)
# legacy: automatically set scalars, unique
if result._attributes.get("is_single_entity", False):
result = result.scalars()
if (
result._attributes.get("filtered", False)
and not self.load_options._yield_per
):
result = result.unique()
return result
def __str__(self):
statement = self._statement_20()
try:
bind = (
self._get_bind_args(statement, self.session.get_bind)
if self.session
else None
)
except sa_exc.UnboundExecutionError:
bind = None
return str(statement.compile(bind))
def _get_bind_args(self, statement, fn, **kw):
return fn(clause=statement, **kw)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`_query.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
'entity': User
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
'entity': User
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias,
'entity': user_alias
}
]
"""
return _column_descriptions(self, legacy=True)
def instances(self, result_proxy, context=None):
"""Return an ORM result given a :class:`_engine.CursorResult` and
:class:`.QueryContext`.
"""
if context is None:
util.warn_deprecated(
"Using the Query.instances() method without a context "
"is deprecated and will be disallowed in a future release. "
"Please make use of :meth:`_query.Query.from_statement` "
"for linking ORM results to arbitrary select constructs.",
version="1.4",
)
compile_state = self._compile_state(for_statement=False)
context = QueryContext(
compile_state,
compile_state.statement,
self._params,
self.session,
self.load_options,
)
result = loading.instances(result_proxy, context)
# legacy: automatically set scalars, unique
if result._attributes.get("is_single_entity", False):
result = result.scalars()
if result._attributes.get("filtered", False):
result = result.unique()
return result
@util.deprecated_20(
":meth:`_orm.Query.merge_result`",
alternative="The method is superseded by the "
":func:`_orm.merge_frozen_result` function.",
becomes_legacy=True,
enable_warnings=False, # warnings occur via loading.merge_result
)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`_query.Query` object's Session.
Given an iterator returned by a :class:`_query.Query`
of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`_query.Query` - if these do not correspond,
unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
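A minimal sketch, where ``load_rows_from_cache()`` stands in for a
hypothetical external source of previously fetched rows and is not
part of the ORM::
q = session.query(User).filter(User.name.like('%ed%'))
rows = load_rows_from_cache(q)  # hypothetical helper
merged = q.merge_result(rows)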
For an example of how :meth:`_query.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`_query.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
"""
return loading.merge_result(self, iterator, load)
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
inner = (
self.enable_eagerloads(False)
.add_columns(sql.literal_column("1"))
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.statement.with_only_columns(1)
)
ezero = self._entity_from_pre_ent_zero()
if ezero is not None:
inner = inner.select_from(ezero)
return sql.exists(inner)
def count(self):
r"""Return a count of rows this the SQL formed by this :class:`Query`
would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
The above SQL returns a single row, which is the aggregate value
of the count function; the :meth:`_query.Query.count`
method then returns
that single integer value.
.. warning::
It is important to note that the value returned by
count() is **not the same as the number of ORM objects that this
Query would return from a method such as the .all() method**.
The :class:`_query.Query` object,
when asked to return full entities,
will **deduplicate entries based on primary key**, meaning if the
same primary key value would appear in the results more than once,
only one object of that primary key would be present. This does
not apply to a query that is against individual columns.
.. seealso::
:ref:`faq_query_deduplicating`
:ref:`orm_tutorial_query_returning`
For fine-grained control over specific columns to count, to skip the
usage of a subquery or to otherwise control the FROM clause, or to use
other aggregate functions, use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column("*"))
return self._from_self(col).enable_eagerloads(False).scalar()
def delete(self, synchronize_session="evaluate"):
r"""Perform a DELETE with an arbitrary WHERE clause.
Deletes rows matched by this query from the database.
E.g.::
sess.query(User).filter(User.age == 25).\
delete(synchronize_session=False)
sess.query(User).filter(User.age == 25).\
delete(synchronize_session='evaluate')
.. warning::
See the section :ref:`orm_expression_update_delete` for important
caveats and warnings, including limitations when using bulk UPDATE
and DELETE with mapper inheritance configurations.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. See the section
:ref:`orm_expression_update_delete` for a discussion of these
strategies.
:return: the count of rows matched as returned by the database's
"row count" feature.
.. seealso::
:ref:`orm_expression_update_delete`
"""
bulk_del = BulkDelete(self)
if self.dispatch.before_compile_delete:
for fn in self.dispatch.before_compile_delete:
new_query = fn(bulk_del.query, bulk_del)
if new_query is not None:
bulk_del.query = new_query
self = bulk_del.query
delete_ = sql.delete(*self._raw_columns)
delete_._where_criteria = self._where_criteria
result = self.session.execute(
delete_,
self._params,
execution_options={"synchronize_session": synchronize_session},
)
bulk_del.result = result
self.session.dispatch.after_bulk_delete(bulk_del)
result.close()
return result.rowcount
def update(self, values, synchronize_session="evaluate", update_args=None):
r"""Perform an UPDATE with an arbitrary WHERE clause.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\
update({User.age: User.age - 10}, synchronize_session=False)
sess.query(User).filter(User.age == 25).\
update({"age": User.age - 10}, synchronize_session='evaluate')
.. warning::
See the section :ref:`orm_expression_update_delete` for important
caveats and warnings, including limitations when using arbitrary
UPDATE and DELETE with mapper inheritance configurations.
:param values: a dictionary with attribute names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or sql expressions as values. If :ref:`parameter-ordered
mode <updates_order_parameters>` is desired, the values can be
passed as a list of 2-tuples;
this requires that the
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
flag is passed to the :paramref:`.Query.update.update_args` dictionary
as well.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. See the section
:ref:`orm_expression_update_delete` for a discussion of these
strategies.
:param update_args: Optional dictionary, if present will be passed
to the underlying :func:`_expression.update`
construct as the ``**kw`` for
the object. May be used to pass dialect-specific arguments such
as ``mysql_limit``, as well as other special arguments such as
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`.
:return: the count of rows matched as returned by the database's
"row count" feature.
.. seealso::
:ref:`orm_expression_update_delete`
"""
update_args = update_args or {}
bulk_ud = BulkUpdate(self, values, update_args)
if self.dispatch.before_compile_update:
for fn in self.dispatch.before_compile_update:
new_query = fn(bulk_ud.query, bulk_ud)
if new_query is not None:
bulk_ud.query = new_query
self = bulk_ud.query
upd = sql.update(*self._raw_columns)
ppo = update_args.pop("preserve_parameter_order", False)
if ppo:
upd = upd.ordered_values(*values)
else:
upd = upd.values(values)
if update_args:
upd = upd.with_dialect_options(**update_args)
upd._where_criteria = self._where_criteria
result = self.session.execute(
upd,
self._params,
execution_options={"synchronize_session": synchronize_session},
)
bulk_ud.result = result
self.session.dispatch.after_bulk_update(bulk_ud)
result.close()
return result.rowcount
def _compile_state(self, for_statement=False, **kw):
"""Create an out-of-compiler ORMCompileState object.
The ORMCompileState object is normally created directly as a result
of the SQLCompiler.process() method being handed a Select()
or FromStatement() object that uses the "orm" plugin. This method
provides a means of creating this ORMCompileState object directly
without using the compiler.
This method is used only for deprecated cases, which include
the .from_self() method for a Query that has multiple levels
of .from_self() in use, as well as the instances() method. It is
also used within the test suite to generate ORMCompileState objects
for test purposes.
"""
stmt = self._statement_20(for_statement=for_statement, **kw)
assert for_statement == stmt._compile_options._for_statement
# this chooses between ORMFromStatementCompileState and
# ORMSelectCompileState. We could also base this on
# query._statement is not None as we have the ORM Query here
# however this is the more general path.
compile_state_cls = ORMCompileState._get_plugin_class_for_plugin(
stmt, "orm"
)
return compile_state_cls.create_for_statement(stmt, None)
def _compile_context(self, for_statement=False):
compile_state = self._compile_state(for_statement=for_statement)
context = QueryContext(
compile_state,
compile_state.statement,
self._params,
self.session,
self.load_options,
)
return context
class FromStatement(GroupedElement, SelectBase, Executable):
"""Core construct that represents a load of ORM objects from a finished
select or text construct.
"""
__visit_name__ = "orm_from_statement"
_compile_options = ORMFromStatementCompileState.default_compile_options
_compile_state_factory = ORMFromStatementCompileState.create_for_statement
_for_update_arg = None
_traverse_internals = [
("_raw_columns", InternalTraversal.dp_clauseelement_list),
("element", InternalTraversal.dp_clauseelement),
] + Executable._executable_traverse_internals
_cache_key_traversal = _traverse_internals + [
("_compile_options", InternalTraversal.dp_has_cache_key)
]
def __init__(self, entities, element):
self._raw_columns = [
coercions.expect(
roles.ColumnsClauseRole,
ent,
apply_propagate_attrs=self,
post_inspect=True,
)
for ent in util.to_list(entities)
]
self.element = element
def get_label_style(self):
return self._label_style
def set_label_style(self, label_style):
return SelectStatementGrouping(
self.element.set_label_style(label_style)
)
@property
def _label_style(self):
return self.element._label_style
def _compiler_dispatch(self, compiler, **kw):
"""provide a fixed _compiler_dispatch method.
This is roughly similar to using the sqlalchemy.ext.compiler
``@compiles`` extension.
"""
compile_state = self._compile_state_factory(self, compiler, **kw)
toplevel = not compiler.stack
if toplevel:
compiler.compile_state = compile_state
return compiler.process(compile_state.statement, **kw)
def _ensure_disambiguated_names(self):
return self
def get_children(self, **kw):
for elem in itertools.chain.from_iterable(
element._from_objects for element in self._raw_columns
):
yield elem
for elem in super(FromStatement, self).get_children(**kw):
yield elem
@property
def _returning(self):
return self.element._returning if self.element.is_dml else None
@property
def _inline(self):
return self.element._inline if self.element.is_dml else None
class AliasOption(interfaces.LoaderOption):
@util.deprecated(
"1.4",
"The :class:`.AliasOption` is not necessary "
"for entities to be matched up to a query that is established "
"via :meth:`.Query.from_statement` and now does nothing.",
)
def __init__(self, alias):
r"""Return a :class:`.MapperOption` that will indicate to the
:class:`_query.Query`
that the main table has been aliased.
"""
inherit_cache = False
def process_compile_state(self, compile_state):
pass
class BulkUD(object):
"""State used for the orm.Query version of update() / delete().
This object is now specific to Query only.
"""
def __init__(self, query):
self.query = query.enable_eagerloads(False)
self._validate_query_state()
self.mapper = self.query._entity_from_pre_ent_zero()
def _validate_query_state(self):
for attr, methname, notset, op in (
("_limit_clause", "limit()", None, operator.is_),
("_offset_clause", "offset()", None, operator.is_),
("_order_by_clauses", "order_by()", (), operator.eq),
("_group_by_clauses", "group_by()", (), operator.eq),
("_distinct", "distinct()", False, operator.is_),
(
"_from_obj",
"join(), outerjoin(), select_from(), or from_self()",
(),
operator.eq,
),
(
"_legacy_setup_joins",
"join(), outerjoin(), select_from(), or from_self()",
(),
operator.eq,
),
):
if not op(getattr(self.query, attr), notset):
raise sa_exc.InvalidRequestError(
"Can't call Query.update() or Query.delete() "
"when %s has been called" % (methname,)
)
@property
def session(self):
return self.query.session
class BulkUpdate(BulkUD):
"""BulkUD which handles UPDATEs."""
def __init__(self, query, values, update_kwargs):
super(BulkUpdate, self).__init__(query)
self.values = values
self.update_kwargs = update_kwargs
class BulkDelete(BulkUD):
"""BulkUD which handles DELETEs."""
|
py | b412caf53f788aa59621c07cf5b766a8c2a478df | from .core.problem import Problem
|
py | b412cb8fd690e251bda1130715c769f7a0f4cdcc | from conf.db import host,user,port
import os
print(host)
# project root: two directory levels above this file, normalized to
# forward slashes
base_dir = str(os.path.dirname(os.path.dirname(__file__)))
base_dir = base_dir.replace('\\', '/')
file_path = base_dir + "/Config/DbConfig.ini"
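# A minimal sketch of how this path might be consumed; it assumes the
# .ini file exists and contains a standard INI section named "db"
# (neither is verified by this module):
#
# import configparser
# cfg = configparser.ConfigParser()
# cfg.read(file_path)
# db_host = cfg.get("db", "host", fallback=host)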
print(type(base_dir)) |
py | b412cc83133512116652f4538e527f25fcdf1df1 | #!/usr/bin/env python
#
# Copyright 2009 Todd Whiteman
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
pass
|
py | b412ccac0d98d14640e5811014917f51380b764d | # MIT License
# Copyright (c) 2018 Nathan Letwory , Joel Putnam
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
bl_info = {
"name": "Import Rhinoceros 3D",
"author": "jesterKing, Joel Putnam",
"version": (0, 0, 2),
"blender": (2, 80, 0),
"location": "File > Import > Rhinoceros 3D (.3dm)",
"description": "This addon lets you import Rhinoceros 3dm files",
"warning": "The importer doesn't handle all data in 3dm files yet",
"wiki_url": "https://github.com/jesterKing/import_3dm",
"category": "Import-Export",
}
import bpy
# ImportHelper is a helper class, defines filename and
# invoke() function which calls the file selector.
from bpy_extras.io_utils import ImportHelper
from bpy.props import StringProperty, BoolProperty, EnumProperty
from bpy.types import Operator
import rhino3dm as r3d
from .read3dm import read_3dm
class Import3dm(Operator, ImportHelper):
"""Import Rhinoceros 3D files (.3dm). Currently does render meshes only, more geometry and data to follow soon."""
bl_idname = "import_3dm.some_data" # important since its how bpy.ops.import_3dm.some_data is constructed
bl_label = "Import Rhinoceros 3D file"
# ImportHelper mixin class uses this
filename_ext = ".3dm"
filter_glob: StringProperty(
default="*.3dm",
options={'HIDDEN'},
maxlen=1024, # Max internal buffer length, longer would be clamped.
)
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
import_hidden: BoolProperty(
name="Import Hidden Geometry",
description="Import Hidden Geometry",
default=True,
)
# type: EnumProperty(
# name="Example Enum",
# description="Choose between two items",
# items=(
# ('OPT_A', "First Option", "Description one"),
# ('OPT_B', "Second Option", "Description two"),
# ),
# default='OPT_A',
# )
def execute(self, context):
return read_3dm(context, self.filepath, self.import_hidden)
# Only needed if you want to add into a dynamic menu
def menu_func_import(self, context):
self.layout.operator(Import3dm.bl_idname, text="Rhinoceros 3D (.3dm)")
def register():
bpy.utils.register_class(Import3dm)
bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
def unregister():
bpy.utils.unregister_class(Import3dm)
bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
if __name__ == "__main__":
register()
# test call
bpy.ops.import_3dm.some_data('INVOKE_DEFAULT')
|
py | b412ccd7add6b7ef5b083ce6d4177c9b19c95f25 | from multiprocessing import Process, process
from Simulation.Process.Commons import developerUploadCommand, coordinatorPORT, developerInitCommand, developerCheckRoundCommand, developerCheckModelCommand
import time
from fabric import Connection
# rm -f pythonfunc.zip;
# 7z a -tzip pythonfunc.zip *;
# HOST=$(curl --data-binary @pythonfunc.zip $master:$master_port/function | awk '{ print $3 }');
# curl --resolve $HOST:$master_port:$master http://$HOST:$master_port/init
import os
import shutil
class Developer(Process):
def __init__(self,projectDir,expNum,ip,connect_kwargs,developerType,hostIP):
super(Developer, self).__init__()
self.projectDir = projectDir
self.ip = ip
self.connect_kwargs = connect_kwargs
self.connection = Connection(ip,connect_kwargs=connect_kwargs)
self.experimentNumber = expNum
self.developerType =developerType
self.processId = ip[ip.find("linux-",2)+6:ip.find(".")]
self.developerFileDir = os.path.join("src","Simulation",f"developer_{self.developerType}")
self.developerDir = os.path.join(projectDir,"TempDeveloper",f"TempDeveloper_{self.processId}")
self.hostIP = hostIP
def run(self,):
print("Run Developer")
self.task()
time.sleep(2)
print('Run Check')
self.check()
def reconnect(self):
self.connection = Connection(self.ip,connect_kwargs=self.connect_kwargs)
def setup(self):
print("Setup Developer...")
def createStorage():
print("Clean Previous Developer File")
if os.path.exists(f"TempDeveloper/TempDeveloper_{self.processId}"):
shutil.rmtree(f"TempDeveloper/TempDeveloper_{self.processId}")
os.mkdir(f"TempDeveloper/TempDeveloper_{self.processId}")
def copyClientFile():
shutil.copytree(src=self.developerFileDir,dst=f"TempDeveloper/TempDeveloper_{self.processId}/pyFiles")
def zipClientFolder():
shutil.make_archive(f"TempDeveloper/TempDeveloper_{self.processId}/pythonfunc", 'zip', f"TempDeveloper/TempDeveloper_{self.processId}/pyFiles")
createStorage()
copyClientFile()
zipClientFolder()
print(self.developerDir)
self.reconnect()
with self.connection.cd(self.developerDir):
print('Entered')
command = developerUploadCommand.format(ip=self.hostIP,port=coordinatorPORT,experimentNum=self.experimentNumber,workerId = self.processId)
# command = 'pwd'
# print(command)
result = self.connection.run(command, hide=True)
msg = "Ran {0.command!r} on {0.connection.host}, got stdout:\n{0.stdout}"
print(msg.format(result))
self.functionId = result.stdout[result.stdout.find("=> ")+3:].strip()
print(self.functionId)
return self.functionId
def task(self,):
self.reconnect()
with self.connection.cd(self.developerDir):
command = developerInitCommand.format(ip=self.hostIP,port=coordinatorPORT,functionId = self.functionId,experimentNum=self.experimentNumber,workerId = self.processId)
# command = 'pwd'
print(command)
result = self.connection.run(command, hide=True)
msg = "Ran {0.command!r} on {0.connection.host}, got stdout:\n{0.stdout}"
print(msg.format(result))
# self.functionId = result.stdout[result.stdout.find("=> ")+3:].strip()
# print(self.functionId)
def check(self,):
def checkRound():
self.reconnect()
with self.connection.cd(self.developerDir):
command = developerCheckRoundCommand.format(ip=self.hostIP,port=coordinatorPORT,functionId = self.functionId,experimentNum=self.experimentNumber,workerId = self.processId)
# command = 'pwd'
# print(command)
result = self.connection.run(command, hide=True)
msg = "Ran {0.command!r} on {0.connection.host}, got stdout:\n{0.stdout}"
print(msg.format(result))
# self.functionId = result.stdout[result.stdout.find("=> ")+3:].strip()
def checkModel():
self.reconnect()
with self.connection.cd(self.developerDir):
command = developerCheckModelCommand.format(ip=self.hostIP,port=coordinatorPORT,functionId = self.functionId,experimentNum=self.experimentNumber,workerId = self.processId)
print(command)
result = self.connection.run(command,hide=True )
msg = "Ran {0.command!r} on {0.connection.host}, got stdout:\n{0.stdout}"
print(msg.format(result))
# self.functionId = result.stdout[result.stdout.find("=> ")+3:].strip()
# print(self.functionId)
checkRound()
checkModel()
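# A minimal usage sketch; the host string, credentials and paths below are
# placeholders, not values taken from this module:
#
# dev = Developer(projectDir="/home/user/project", expNum=1,
#                 ip="user@linux-01.example.org",
#                 connect_kwargs={"password": "secret"},
#                 developerType="A", hostIP="10.0.0.1")
# dev.setup()   # zips and uploads the developer code to the coordinator
# dev.start()   # Process.start() -> run(), which calls task() then check()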
|
py | b412cd84e5eae62298b7139309188332e8c87221 | """Serve files directly from the ContentsManager."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import mimetypes
import json
from base64 import decodebytes
from tornado import gen, web
from notebook.base.handlers import IPythonHandler
from notebook.utils import maybe_future
class FilesHandler(IPythonHandler):
"""serve files via ContentsManager
Normally used when ContentsManager is not a FileContentsManager.
FileContentsManager subclasses use AuthenticatedFilesHandler by default,
a subclass of StaticFileHandler.
"""
@property
def content_security_policy(self):
# In case we're serving HTML/SVG, confine any Javascript to a unique
# origin so it can't interact with the notebook server.
return super().content_security_policy + "; sandbox allow-scripts"
@web.authenticated
def head(self, path):
self.check_xsrf_cookie()
return self.get(path, include_body=False)
@web.authenticated
@gen.coroutine
def get(self, path, include_body=True):
# /files/ requests must originate from the same site
self.check_xsrf_cookie()
cm = self.contents_manager
if cm.is_hidden(path) and not cm.allow_hidden:
self.log.info("Refusing to serve hidden file, via 404 Error")
raise web.HTTPError(404)
path = path.strip('/')
if '/' in path:
_, name = path.rsplit('/', 1)
else:
name = path
model = yield maybe_future(cm.get(path, type='file', content=include_body))
if self.get_argument("download", False):
self.set_attachment_header(name)
# get mimetype from filename
if name.lower().endswith('.ipynb'):
self.set_header('Content-Type', 'application/x-ipynb+json')
else:
cur_mime = mimetypes.guess_type(name)[0]
if cur_mime == 'text/plain':
self.set_header('Content-Type', 'text/plain; charset=UTF-8')
elif cur_mime is not None:
self.set_header('Content-Type', cur_mime)
else:
if model['format'] == 'base64':
self.set_header('Content-Type', 'application/octet-stream')
else:
self.set_header('Content-Type', 'text/plain; charset=UTF-8')
if include_body:
if model['format'] == 'base64':
b64_bytes = model['content'].encode('ascii')
self.write(decodebytes(b64_bytes))
elif model['format'] == 'json':
self.write(json.dumps(model['content']))
else:
self.write(model['content'])
self.flush()
default_handlers = []
|
py | b412cd9ac43a3fdf9483659aeb4cb4abf8f9a7a8 | """
This plugin allows you to quote messages easily.
Usage
-----
.. glossary::
/quote
**Usage:** ``/quote <message>``
The message must exist. You can use autocompletion to get the message
you want to quote easily.
Example:
.. code-block:: none
/quote "Pouet"
If the message "Pouet" exists, it will be put in the input. If not you
will get a warning.
Options
-------
.. glossary::
:sorted:
before_quote
**Default value:** ``[empty]``
Text to insert before the quote. ``%(nick)s`` and ``%(time)s`` can
be used to insert the nick of the user who sent the message or the
time of the message.
after_quote
**Default value:** ``[empty]``
Text to insert after the quote. ``%(nick)s`` and ``%(time)s`` can
be used to insert the nick of the user who sent the message or the
time of the message.
"""
from poezio.core.structs import Completion
from poezio.plugin import BasePlugin
from poezio.xhtml import clean_text
from poezio import common
from poezio import tabs
import logging
log = logging.getLogger(__name__)
class Plugin(BasePlugin):
def init(self):
for _class in (tabs.MucTab, tabs.ConversationTab, tabs.PrivateTab):
self.api.add_tab_command(
_class,
'quote',
self.command_quote,
usage='<message>',
help='Quote the message you typed if it exists.',
short='Quote a message.',
completion=self.completion_quote)
def command_quote(self, args):
args = common.shell_split(args)
if len(args) == 1:
message = args[-1]
else:
return self.api.run_command('/help quote')
message = self.find_message(message)
if message:
before = self.config.get('before_quote', '') % {
'nick': message.nickname or '',
'time': message.str_time
}
after = self.config.get('after_quote', '') % {
'nick': message.nickname or '',
'time': message.str_time
}
self.core.insert_input_text(
'%(before)s%(quote)s%(after)s' % {
'before': before.replace('\\n', '\n').replace('[SP]', ' '),
'quote': clean_text(message.txt),
'after': after.replace('\\n', '\n').replace('[SP]', ' ')
})
else:
self.api.information('No message found', 'Warning')
def find_message(self, txt):
messages = self.api.get_conversation_messages()
if not messages:
return None
for message in messages[::-1]:
if clean_text(message.txt) == txt:
return message
return None
def completion_quote(self, the_input):
def message_match(msg):
return input_message.lower() in clean_text(msg.txt).lower()
messages = self.api.get_conversation_messages()
if not messages:
return
text = the_input.get_text()
args = common.shell_split(text)
if not text.endswith(' '):
input_message = args[-1]
messages = list(filter(message_match, messages))
elif len(args) > 1:
return False
return Completion(the_input.auto_completion,
[clean_text(msg.txt) for msg in messages[::-1]], '')
|
py | b412ce61c19c1ae2ea6d54ece0c63f417d3ddfcd | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import webkitpy.thirdparty.unittest2 as unittest
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.net.buildbot import BuildBot, Builder, Build
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
class BuilderTest(unittest.TestCase):
def _mock_test_result(self, testname):
return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
def _install_fetch_build(self, failure):
def _mock_fetch_build(build_number):
build = Build(
builder=self.builder,
build_number=build_number,
revision=build_number + 1000,
is_green=build_number < 4
)
return build
self.builder._fetch_build = _mock_fetch_build
def setUp(self):
self.buildbot = BuildBot()
self.builder = Builder(u"Test Builder \u2661", self.buildbot)
self._install_fetch_build(lambda build_number: ["test1", "test2"])
def test_latest_layout_test_results(self):
self.builder.fetch_layout_test_results = lambda results_url: LayoutTestResults(None)
self.builder.accumulated_results_url = lambda: "http://dummy_url.org"
self.assertTrue(self.builder.latest_layout_test_results())
def test_build_caching(self):
self.assertEqual(self.builder.build(10), self.builder.build(10))
def test_build_and_revision_for_filename(self):
expectations = {
"r47483 (1)/" : (47483, 1),
"r47483 (1).zip" : (47483, 1),
"random junk": None,
}
for filename, revision_and_build in expectations.items():
self.assertEqual(self.builder._revision_and_build_for_filename(filename), revision_and_build)
def test_file_info_list_to_revision_to_build_list(self):
file_info_list = [
{"filename": "r47483 (1)/"},
{"filename": "r47483 (1).zip"},
{"filename": "random junk"},
]
builds_and_revisions_list = [(47483, 1), (47483, 1)]
self.assertEqual(self.builder._file_info_list_to_revision_to_build_list(file_info_list), builds_and_revisions_list)
def test_fetch_build(self):
buildbot = BuildBot()
builder = Builder(u"Test Builder \u2661", buildbot)
def mock_fetch_build_dictionary(self, build_number):
build_dictionary = {
"sourceStamp": {
"revision": None, # revision=None means a trunk build started from the force-build button on the builder page.
},
"number": int(build_number),
# Intentionally missing the 'results' key, meaning it's a "pass" build.
}
return build_dictionary
buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
self.assertIsNotNone(builder._fetch_build(1))
class BuildBotTest(unittest.TestCase):
_example_one_box_status = '''
<table>
<tr>
<td class="box"><a href="builders/Windows%20Debug%20%28Tests%29">Windows Debug (Tests)</a></td>
<td align="center" class="LastBuild box success"><a href="builders/Windows%20Debug%20%28Tests%29/builds/3693">47380</a><br />build<br />successful</td>
<td align="center" class="Activity building">building<br />ETA in<br />~ 14 mins<br />at 13:40</td>
<tr>
<td class="box"><a href="builders/SnowLeopard%20Intel%20Release">SnowLeopard Intel Release</a></td>
<td class="LastBuild box" >no build</td>
<td align="center" class="Activity building">building<br />< 1 min</td>
<tr>
<td class="box"><a href="builders/Qt%20Linux%20Release">Qt Linux Release</a></td>
<td align="center" class="LastBuild box failure"><a href="builders/Qt%20Linux%20Release/builds/654">47383</a><br />failed<br />compile-webkit</td>
<td align="center" class="Activity idle">idle<br />3 pending</td>
<tr>
<td class="box"><a href="builders/Qt%20Windows%2032-bit%20Debug">Qt Windows 32-bit Debug</a></td>
<td align="center" class="LastBuild box failure"><a href="builders/Qt%20Windows%2032-bit%20Debug/builds/2090">60563</a><br />failed<br />failed<br />slave<br />lost</td>
<td align="center" class="Activity building">building<br />ETA in<br />~ 5 mins<br />at 08:25</td>
</table>
'''
_expected_example_one_box_parsings = [
{
'is_green': True,
'build_number' : 3693,
'name': u'Windows Debug (Tests)',
'built_revision': 47380,
'activity': 'building',
'pending_builds': 0,
},
{
'is_green': False,
'build_number' : None,
'name': u'SnowLeopard Intel Release',
'built_revision': None,
'activity': 'building',
'pending_builds': 0,
},
{
'is_green': False,
'build_number' : 654,
'name': u'Qt Linux Release',
'built_revision': 47383,
'activity': 'idle',
'pending_builds': 3,
},
{
'is_green': True,
'build_number' : 2090,
'name': u'Qt Windows 32-bit Debug',
'built_revision': 60563,
'activity': 'building',
'pending_builds': 0,
},
]
def test_status_parsing(self):
buildbot = BuildBot()
soup = BeautifulSoup(self._example_one_box_status)
status_table = soup.find("table")
input_rows = status_table.findAll('tr')
for x in range(len(input_rows)):
status_row = input_rows[x]
expected_parsing = self._expected_example_one_box_parsings[x]
builder = buildbot._parse_builder_status_from_row(status_row)
# Make sure we aren't parsing more or less than we expect
self.assertEqual(builder.keys(), expected_parsing.keys())
for key, expected_value in expected_parsing.items():
self.assertEqual(builder[key], expected_value, ("Builder %d parse failure for key: %s: Actual='%s' Expected='%s'" % (x, key, builder[key], expected_value)))
def test_builder_with_name(self):
buildbot = BuildBot()
builder = buildbot.builder_with_name("Test Builder")
self.assertEqual(builder.name(), "Test Builder")
self.assertEqual(builder.url(), "http://build.webkit.org/builders/Test%20Builder")
self.assertEqual(builder.url_encoded_name(), "Test%20Builder")
self.assertEqual(builder.results_url(), "http://build.webkit.org/results/Test%20Builder")
# Override _fetch_build_dictionary function to not touch the network.
def mock_fetch_build_dictionary(self, build_number):
build_dictionary = {
"sourceStamp": {
"revision" : 2 * build_number,
},
"number" : int(build_number),
"results" : build_number % 2, # 0 means pass
}
return build_dictionary
buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
build = builder.build(10)
self.assertEqual(build.builder(), builder)
self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/10")
self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r20%20%2810%29")
self.assertEqual(build.revision(), 20)
self.assertTrue(build.is_green())
build = build.previous_build()
self.assertEqual(build.builder(), builder)
self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/9")
self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r18%20%289%29")
self.assertEqual(build.revision(), 18)
self.assertFalse(build.is_green())
self.assertIsNone(builder.build(None))
_example_directory_listing = '''
<h1>Directory listing for /results/SnowLeopard Intel Leaks/</h1>
<table>
<tr class="alt">
<th>Filename</th>
<th>Size</th>
<th>Content type</th>
<th>Content encoding</th>
</tr>
<tr class="directory ">
<td><a href="r47483%20%281%29/"><b>r47483 (1)/</b></a></td>
<td><b></b></td>
<td><b>[Directory]</b></td>
<td><b></b></td>
</tr>
<tr class="file alt">
<td><a href="r47484%20%282%29.zip">r47484 (2).zip</a></td>
<td>89K</td>
<td>[application/zip]</td>
<td></td>
</tr>
'''
_expected_files = [
{
"filename" : "r47483 (1)/",
"size" : "",
"type" : "[Directory]",
"encoding" : "",
},
{
"filename" : "r47484 (2).zip",
"size" : "89K",
"type" : "[application/zip]",
"encoding" : "",
},
]
def test_parse_build_to_revision_map(self):
buildbot = BuildBot()
files = buildbot._parse_twisted_directory_listing(self._example_directory_listing)
self.assertEqual(self._expected_files, files)
_fake_builder_page = '''
<body>
<div class="content">
<h1>Some Builder</h1>
<p>(<a href="../waterfall?show=Some Builder">view in waterfall</a>)</p>
<div class="column">
<h2>Recent Builds:</h2>
<table class="info">
<tr>
<th>Time</th>
<th>Revision</th>
<th>Result</th> <th>Build #</th>
<th>Info</th>
</tr>
<tr class="alt">
<td>Jan 10 15:49</td>
<td><span class="revision" title="Revision 104643"><a href="http://trac.webkit.org/changeset/104643">104643</a></span></td>
<td class="success">failure</td> <td><a href=".../37604">#37604</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="">
<td>Jan 10 15:32</td>
<td><span class="revision" title="Revision 104636"><a href="http://trac.webkit.org/changeset/104636">104636</a></span></td>
<td class="success">failure</td> <td><a href=".../37603">#37603</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="alt">
<td>Jan 10 15:18</td>
<td><span class="revision" title="Revision 104635"><a href="http://trac.webkit.org/changeset/104635">104635</a></span></td>
<td class="success">success</td> <td><a href=".../37602">#37602</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="">
<td>Jan 10 14:51</td>
<td><span class="revision" title="Revision 104633"><a href="http://trac.webkit.org/changeset/104633">104633</a></span></td>
<td class="failure">failure</td> <td><a href=".../37601">#37601</a></td>
<td class="left">Failed compile-webkit</td>
</tr>
</table>
</body>'''
_fake_builder_page_without_success = '''
<body>
<table>
<tr class="alt">
<td>Jan 10 15:49</td>
<td><span class="revision" title="Revision 104643"><a href="http://trac.webkit.org/changeset/104643">104643</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="">
<td>Jan 10 15:32</td>
<td><span class="revision" title="Revision 104636"><a href="http://trac.webkit.org/changeset/104636">104636</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="alt">
<td>Jan 10 15:18</td>
<td><span class="revision" title="Revision 104635"><a href="http://trac.webkit.org/changeset/104635">104635</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="">
<td>Jan 10 11:58</td>
<td><span class="revision" title="Revision ??"><a href="http://trac.webkit.org/changeset/%3F%3F">??</a></span></td>
<td class="retry">retry</td>
</tr>
<tr class="">
<td>Jan 10 14:51</td>
<td><span class="revision" title="Revision 104633"><a href="http://trac.webkit.org/changeset/104633">104633</a></span></td>
<td class="failure">failure</td>
</tr>
</table>
</body>'''
def test_revisions_for_builder(self):
buildbot = BuildBot()
buildbot._fetch_builder_page = lambda builder: builder.page
builder_with_success = Builder('Some builder', None)
builder_with_success.page = self._fake_builder_page
self.assertEqual(buildbot._revisions_for_builder(builder_with_success), [(104643, False), (104636, False), (104635, True), (104633, False)])
builder_without_success = Builder('Some builder', None)
builder_without_success.page = self._fake_builder_page_without_success
self.assertEqual(buildbot._revisions_for_builder(builder_without_success), [(104643, False), (104636, False), (104635, False), (104633, False)])
def test_find_green_revision(self):
buildbot = BuildBot()
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, True), (3, False)],
'Builder 3': [(1, True), (3, True)],
}), 1)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, False), (3, True)],
'Builder 2': [(1, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), 3)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (2, True)],
'Builder 2': [(1, False), (2, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (2, True)],
'Builder 2': [(1, True), (2, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), 2)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, False), (2, True)],
'Builder 2': [(1, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, False), (2, True), (3, True), (4, True)],
'Builder 3': [(2, True), (4, True)],
}), 3)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, False), (2, True), (3, True), (4, False)],
'Builder 3': [(2, True), (4, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, False), (2, True), (3, True), (4, False)],
'Builder 3': [(2, True), (3, True), (4, True)],
}), 3)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (2, True)],
'Builder 2': [],
'Builder 3': [(1, True), (2, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, False), (5, True), (10, True), (12, False)],
'Builder 2': [(1, True), (3, False), (7, True), (9, True), (12, False)],
'Builder 3': [(1, True), (3, True), (7, True), (11, False), (12, True)],
}), 7)
def _fetch_build(self, build_number):
if build_number == 5:
return "correct build"
return "wrong build"
def _fetch_revision_to_build_map(self):
return {'r5': 5, 'r2': 2, 'r3': 3}
def test_latest_cached_build(self):
b = Builder('builder', BuildBot())
b._fetch_build = self._fetch_build
b._fetch_revision_to_build_map = self._fetch_revision_to_build_map
self.assertEqual("correct build", b.latest_cached_build())
def results_url(self):
return "some-url"
def test_results_zip_url(self):
b = Build(None, 123, 123, False)
b.results_url = self.results_url
self.assertEqual("some-url.zip", b.results_zip_url())
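# A minimal sketch (not the WebKit implementation, and the helper name below is ours)
# of the filename parsing exercised by test_build_and_revision_for_filename above:
# results directories and archives are named "r<revision> (<build>)" with an optional
# suffix, and anything else maps to None.
import re

def _revision_and_build_for_filename_sketch(filename):
    match = re.match(r"r(?P<revision>\d+) \((?P<build>\d+)\)", filename)
    if not match:
        return None
    return (int(match.group("revision")), int(match.group("build")))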
|
py | b412cf4bb22215e9137a7dd168681df9d418f652 |
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import pickle
import time
import cv2
import os
import pyfakewebcam
import sys
def run():
args = {"detector": "face_detection_model", "embedding_model": "openface_nn4.small2.v1.t7",
"recognizer": "output/recognizer.pickle", "le": "output/le.pickle", "confidence": 0.5,
}
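# Load the serialized face detector (Caffe SSD), the OpenFace embedding model (Torch),
# and the trained recognizer plus label encoder from the paths configured above.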
protoPath = os.path.sep.join([args["detector"], "deploy.prototxt"])
modelPath = os.path.sep.join([args["detector"],
"res10_300x300_ssd_iter_140000.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])
recognizer = pickle.loads(open(args["recognizer"], "rb").read())
le = pickle.loads(open(args["le"], "rb").read())
camera = pyfakewebcam.FakeWebcam('/dev/video1', 300, 300)
vs = VideoStream(src=0).start()
time.sleep(2.0)
verifications = {}
while True:
frame = vs.read()
frame = imutils.resize(frame, width=600)
(h, w) = frame.shape[:2]
imageBlob = cv2.dnn.blobFromImage(
cv2.resize(frame, (100, 100)), 1.0, (100, 100),
(104.0, 177.0, 123.0), swapRB=False, crop=False)
detector.setInput(imageBlob)
detections = detector.forward()
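# Loop over the detections and keep only faces above the configured confidence threshold.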
for i in range(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > args["confidence"]:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
face = frame[startY:endY, startX:endX]
(fH, fW) = face.shape[:2]
if fW < 20 or fH < 20:
continue
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,
(96, 96), (0, 0, 0), swapRB=True, crop=False)
embedder.setInput(faceBlob)
vec = embedder.forward()
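# Classify the face embedding and keep the most probable label.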
preds = recognizer.predict_proba(vec)[0]
j = np.argmax(preds)
proba = preds[j]
name = le.classes_[j]
if name in verifications:
verifications[name] += 1
else:
verifications[name] = 0
return name
|
py | b412cf5987d22d2308cc1746ddfc79c040ffe295 | """
Return the sum of the numbers in the array, except ignore sections of numbers starting with a 6 and extending to the next 7 (every 6 will be followed by at least one 7). Return 0 for no numbers.
sum67([1, 2, 2]) → 5
sum67([1, 2, 2, 6, 99, 99, 7]) → 5
sum67([1, 1, 6, 7, 2]) → 4
@author unobatbayar
This was much harder than it looks; excuse the rough code,
it was written in a hurry to finish everything quickly.
An improved version is coming soon.
"""
def sum67(nums):
sum = 0
canAdd = True
foundSix = False
for num in nums:
if num == 6:
canAdd = False
foundSix = True
elif num == 7:
canAdd = True
if foundSix == False:
sum = sum + num
else:
foundSix = False
else:
if canAdd == True:
sum = sum + num
return sum |
py | b412cf5b5d47bced080e751a58269a1c69d1efd0 | from qtpy import QtWidgets
from qtpy import QtCore
class IControlPanel(QtWidgets.QWidget):
"""
Base class for control panels. It defines no abstract methods, so GenericControlPanel uses it directly.
It includes widgets for modifying settings that are common to all renderers. Some of these
common widgets are disabled when the renderer lacks the required attribute (e.g. color_sets = None).
For settings that are specific to individual renderers, subclass this and create custom control panels.
"""
def __init__(self, renderer, name="ControlPanel", *args, **kwargs):
self._last_row = -1
super().__init__(*args, **kwargs)
self.setSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.MinimumExpanding)
self.setObjectName(name)
self.setLayout(QtWidgets.QGridLayout())
# self._renderer = None # Will be set in reset_widgets
self._init_widgets()
renderer.chan_states_changed.connect(self.reset_widgets)
self.reset_widgets(renderer)
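# The renderer passed in is expected to provide (see reset_widgets below):
#   - chan_states (a table with 'name' and 'vis' columns) and a chan_states_changed signal
#   - show_chan_labels, color_sets / color_set, bg_colors / bg_color
#   - lower_limit, upper_limit, highpass_cutoff
# plus the matching slots (chantree_itemChanged, labelvis_stateChanged,
# colors_currentTextChanged, background_currentTextChanged, and the *_valueChanged slots).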
def _init_widgets(self):
# Create grid of widgets for renderer/config settings
# This widget assumes the passed in renderer is a TimeSeriesRenderer,
# and thus it has specific attributes and slots.
row_ix = -1
# All renderers are multi-channel
# Channel tree widget:
row_ix += 1
_tree = QtWidgets.QTreeWidget(parent=self)
_tree.setObjectName("Chans_TreeWidget")
_tree.setHeaderHidden(True)
_tree.setFrameShape(QtWidgets.QFrame.NoFrame)
_tree.viewport().setAutoFillBackground(False)
tli = QtWidgets.QTreeWidgetItem(_tree)
tli.setText(0, "View Channels")
tli.setExpanded(False) # Start collapsed because list of channels may be very long.
_tree.addTopLevelItem(tli)
self.layout().addWidget(_tree, row_ix, 0, 1, 2)
# show names checkbox
row_ix += 1
self.layout().addWidget(QtWidgets.QLabel("Show Names"), row_ix, 0, 1, 1)
_checkbox = QtWidgets.QCheckBox()
_checkbox.setObjectName("ShowNames_CheckBox")
self.layout().addWidget(_checkbox, row_ix, 1, 1, 1)
# Colors ComboBox
row_ix += 1
self.layout().addWidget(QtWidgets.QLabel("Colors"), row_ix, 0, 1, 1)
_combo = QtWidgets.QComboBox()
_combo.setObjectName("Colors_ComboBox")
self.layout().itemAtPosition(row_ix, 0).widget().setVisible(False) # Hide label by default
_combo.setVisible(False)
self.layout().addWidget(_combo, row_ix, 1, 1, 1)
# Background ComboBox
row_ix += 1
self.layout().addWidget(QtWidgets.QLabel("Background"), row_ix, 0, 1, 1)
_combo = QtWidgets.QComboBox()
_combo.setObjectName("Background_ComboBox")
self.layout().itemAtPosition(row_ix, 0).widget().setVisible(False) # Hide label by default
_combo.setVisible(False)
self.layout().addWidget(_combo, row_ix, 1, 1, 1)
# Lower Limit SpinBox
row_ix += 1
self.layout().addWidget(QtWidgets.QLabel("Lower Limit"), row_ix, 0, 1, 1)
_spinbox = QtWidgets.QDoubleSpinBox()
_spinbox.setObjectName("LL_SpinBox")
_spinbox.setMinimum(-10000000.0)
_spinbox.setMaximum(10000000.0)
_spinbox.setSingleStep(1.0)
self.layout().addWidget(_spinbox, row_ix, 1, 1, 1)
# Upper Limit SpinBox
row_ix += 1
self.layout().addWidget(QtWidgets.QLabel("Upper Limit"), row_ix, 0, 1, 1)
_spinbox = QtWidgets.QDoubleSpinBox()
_spinbox.setObjectName("UL_SpinBox")
_spinbox.setMinimum(-10000000.0)
_spinbox.setMaximum(10000000.0)
_spinbox.setSingleStep(1.0)
self.layout().addWidget(_spinbox, row_ix, 1, 1, 1)
# Highpass Cutoff SpinBox
row_ix += 1
self.layout().addWidget(QtWidgets.QLabel("Highpass Cutoff"), row_ix, 0, 1, 1)
_spinbox = QtWidgets.QDoubleSpinBox()
_spinbox.setObjectName("HP_SpinBox")
_spinbox.setMinimum(0.0)
_spinbox.setMaximum(10000000.0)
_spinbox.setSingleStep(0.1)
self.layout().addWidget(_spinbox, row_ix, 1, 1, 1)
self._last_row = row_ix
def reset_widgets(self, renderer):
# self._renderer = renderer
_tree = self.findChild(QtWidgets.QTreeWidget, name="Chans_TreeWidget")
try:
_tree.itemChanged.disconnect()
except TypeError:
pass
if len(renderer.chan_states) > 0:
tli = _tree.topLevelItem(0)
_ = tli.takeChildren()
for label, vis in renderer.chan_states[['name', 'vis']].values:
chstate_item = QtWidgets.QTreeWidgetItem(tli)
chstate_item.setText(0, label)
chstate_item.setCheckState(0, QtCore.Qt.Checked if vis else QtCore.Qt.Unchecked)
_tree.itemChanged.connect(renderer.chantree_itemChanged)
# show names checkbox
_checkbox = self.findChild(QtWidgets.QCheckBox, name="ShowNames_CheckBox")
try:
_checkbox.stateChanged.disconnect()
except TypeError:
pass
_checkbox.setChecked(renderer.show_chan_labels)
_checkbox.stateChanged.connect(renderer.labelvis_stateChanged)
# Colors ComboBox
if renderer.color_sets is not None:
_combo = self.findChild(QtWidgets.QComboBox, name="Colors_ComboBox")
try:
_combo.currentTextChanged.disconnect()
except TypeError:
pass
_combo.setVisible(True)
row_ix = self.layout().getItemPosition(self.layout().indexOf(_combo))[0]
self.layout().itemAtPosition(row_ix, 0).widget().setVisible(True)
_items = renderer.color_sets
_combo.clear()
_combo.addItems(_items)
_combo.setCurrentText(renderer.color_set)
_combo.currentTextChanged.connect(renderer.colors_currentTextChanged)
# Background ComboBox
if renderer.bg_colors is not None:
_combo = self.findChild(QtWidgets.QComboBox, name="Background_ComboBox")
try:
_combo.currentTextChanged.disconnect()
except TypeError:
pass
_combo.setVisible(True)
row_ix = self.layout().getItemPosition(self.layout().indexOf(_combo))[0]
self.layout().itemAtPosition(row_ix, 0).widget().setVisible(True)
_combo.clear()
_combo.addItems(renderer.bg_colors)
_combo.setCurrentText(str(renderer.bg_color))
_combo.currentTextChanged.connect(renderer.background_currentTextChanged)
# Lower Limit SpinBox
_spinbox = self.findChild(QtWidgets.QDoubleSpinBox, name="LL_SpinBox")
try:
_spinbox.valueChanged.disconnect()
except TypeError:
pass
_spinbox.setValue(renderer.lower_limit)
_spinbox.valueChanged.connect(renderer.lower_limit_valueChanged)
# Upper Limit spinbox
_spinbox = self.findChild(QtWidgets.QDoubleSpinBox, name="UL_SpinBox")
try:
_spinbox.valueChanged.disconnect()
except TypeError:
pass
_spinbox.setValue(renderer.upper_limit)
_spinbox.valueChanged.connect(renderer.upper_limit_valueChanged)
# Highpass spinbox
_spinbox = self.findChild(QtWidgets.QDoubleSpinBox, name="HP_SpinBox")
try:
_spinbox.valueChanged.disconnect()
except TypeError:
pass
_spinbox.setValue(renderer.highpass_cutoff)
_spinbox.valueChanged.connect(renderer.highpass_cutoff_valueChanged)
|
py | b412cfe3f4e0f48b326aee1a9830840448ae1aa5 | from app.main import main
from flask import g, url_for, render_template, redirect, request
from flask_babel import lazy_gettext
from app import db
from app.models import Registrant, Clerk
from app.decorators import InSession
from app.services import SessionManager
from app.services.nvris_client import NVRISClient
from app.services.county_mailer import CountyMailer
from app.services.id_action_mailer import IdActionMailer
from app.services.steps import Step_AB_7
from app.main.forms import FormAB7, CountyPicker
from datetime import datetime
@main.route('/ab/affirmation', methods=["GET", "POST"])
@InSession
def ab7_affirmation():
reg = g.registrant
form = FormAB7()
clerk = reg.try_clerk()
county_picker = CountyPicker()
# if we don't have a signed AB form to affirm, redirect
if not reg.try_value('ab_forms', False):
if not reg.try_value('signature_string', False):
return redirect(url_for('main.index'))
else:
return redirect(url_for('main.ab6_preview_sign'))
ab_forms = reg.try_value('ab_forms')
if request.method == "POST" and form.validate_on_submit():
step = Step_AB_7(form.data)
if step.run():
reg.update(form.data)
reg.save(db.session)
mailer = CountyMailer(reg, clerk, 'ab_forms')
r = mailer.send()
# if there was no ID string defined, send the action-needed email
if not reg.ab_permanent and not reg.try_value('ab_identification'):
id_action_mailer = IdActionMailer(reg, clerk)
resp = id_action_mailer.send()
reg.update({'ab_id_action_email_sent': resp['MessageId']})
# any error gets a special page
for k in ['clerk', 'receipt']:
if k not in r or 'MessageId' not in r[k] or not r[k]['MessageId']:
# TODO log New Relic event
return render_template('email_error.html', clerk=clerk)
reg.update({'ab_forms_message_id': r['clerk']['MessageId']})
reg.ab_completed_at = datetime.utcnow()
reg.save(db.session)
session_manager = SessionManager(reg, step)
return redirect(session_manager.get_redirect_url())
return render_template('ab/affirmation.html', preview_imgs=ab_forms, form=form, clerk=clerk, county_picker=county_picker)
|
py | b412cfe6867ade81e050a32a998a411f5f70cb23 | #!/usr/bin/env python
# coding: utf-8
# Noise model selection on NANOGrav pulsars
import json, pickle, copy
import logging
import numpy as np
from enterprise_extensions.models import model_singlepsr_noise
from enterprise_extensions.hypermodel import HyperModel
from enterprise.signals import parameter, gp_signals, deterministic_signals
from enterprise.signals import signal_base
from enterprise_extensions import gp_kernels as gpk
from enterprise_extensions.blocks import chromatic_noise_block
from enterprise_extensions.blocks import common_red_noise_block, red_noise_block
import pta_sim.parse_sim as parse_sim
args = parse_sim.arguments()
logging.basicConfig(level=logging.WARNING)
with open(args.pickle, 'rb') as fin:
psr = pickle.load(fin)
with open(args.model_kwargs_path, 'r') as fin:
model_kwargs = json.load(fin)
# Add to exponential dips for J1713+0747
# Columns: model label, DM kernel, extra DM GP, chromatic GP, chromatic kernel, chromatic quad, chromatic index, GWB
model_labels = [['A', 'periodic', True, True, 'sq_exp', False, 4, False],
['B', 'periodic', True, True, 'sq_exp', False, 4, True],
]
ptas = {}
all_kwargs = {}
# Periodic GP kernel for DM
log10_sigma = parameter.Uniform(-10, -4.8)
log10_ell = parameter.Uniform(1, 2.4)
log10_p = parameter.Uniform(-2, -1)
log10_gam_p = parameter.Uniform(-2, 2)
dm_basis = gpk.linear_interp_basis_dm(dt=3*86400)
dm_prior = gpk.periodic_kernel(log10_sigma=log10_sigma,
log10_ell=log10_ell,
log10_gam_p=log10_gam_p,
log10_p=log10_p)
dmgp = gp_signals.BasisGP(dm_prior, dm_basis, name='dm_gp1')
# Periodic GP kernel for DM
log10_sigma2 = parameter.Uniform(-4.8, -3)
log10_ell2 = parameter.Uniform(2.4, 5)
log10_p2 = parameter.Uniform(-2, 2)
log10_gam_p2 = parameter.Uniform(-2, 2)
dm_basis2 = gpk.linear_interp_basis_dm(dt=3*86400)
dm_prior2 = gpk.periodic_kernel(log10_sigma=log10_sigma2,
log10_ell=log10_ell2,
log10_gam_p=log10_gam_p2,
log10_p=log10_p2)
dmgp2 = gp_signals.BasisGP(dm_prior2, dm_basis2, name='dm_gp2')
ch_log10_sigma = parameter.Uniform(-10, -3.5)
ch_log10_ell = parameter.Uniform(1, 6)
chm_basis = gpk.linear_interp_basis_chromatic(dt=3*86400, idx=4)
chm_prior = gpk.se_dm_kernel(log10_sigma=ch_log10_sigma, log10_ell=ch_log10_ell)
chromgp = gp_signals.BasisGP(chm_prior, chm_basis, name='chrom_gp')
#
# chromgp = chromatic_noise_block(nondiag_kernel='sq_exp')
@signal_base.function
def chromatic_quad(toas, freqs, quad_coeff=np.ones(3)*1e-10, idx=4):
"""
Deterministic chromatic quadratic waveform.
:param quad_coeff: log10 of the three quadratic coefficients (a, b, c)
:param idx: index of chromatic dependence
:return: quadratic in time, scaled by (1400 / freqs) ** idx, evaluated at each TOA [Ntoa]
"""
t0 = (toas.max() + toas.min()) / 2
a, b, c = 10**quad_coeff[0], 10**quad_coeff[1], 10**quad_coeff[2]
quad = (a*(toas-t0)**2 + b*(toas-t0) + c)* (1400/freqs) ** idx
return quad
quad_coeff = parameter.Uniform(-10, -4, size=3)
deter_chrom = chromatic_quad(quad_coeff=quad_coeff)
chrom_quad = deterministic_signals.Deterministic(deter_chrom,
name='deter_chrom_quad')
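# The deterministic term above adds a smooth quadratic-in-time trend scaled by
# (1400 / freq) ** idx; its three coefficients are sampled in log10 space via quad_coeff.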
for ii, ent in enumerate(model_labels):
if ent[2] and ent[5]:
extra = dmgp + dmgp2 + chromgp + chrom_quad
elif ent[2]:
extra = dmgp + dmgp2 + chromgp
elif ent[5]:
extra = dmgp + chromgp + chrom_quad
else:
extra = dmgp + chromgp
Tspan = 407576851.48121357
rn = red_noise_block(psd='powerlaw', prior='log-uniform',
Tspan=Tspan, components=30, gamma_val=None)
if ent[7]:
gw = common_red_noise_block(psd='powerlaw', prior='log-uniform',
Tspan=Tspan, components=5, gamma_val=4.3333)
sig = rn + gw
else:
sig = rn
extra += sig
new_kwargs = {'dm_nondiag_kernel':ent[1],
'dm_var':False,
'chrom_gp': ent[3],
'chrom_gp_kernel':'nondiag',
'chrom_idx':ent[6],
'chrom_kernel':ent[4],
'chrom_dt':14,
'red_var':False,
'dm_expdip':False,
'extra_sigs':extra,
}
kwargs = copy.deepcopy(model_kwargs['5'])
kwargs.update(new_kwargs)
ptas[ii] = model_singlepsr_noise(psr, **kwargs)
all_kwargs[ii] = kwargs
super_model = HyperModel(ptas)
sampler = super_model.setup_sampler(resume=True, outdir=args.outdir,
empirical_distr=args.emp_distr)
model_params = {}
for ky, pta in ptas.items():
model_params.update({str(ky): pta.param_names})
with open(args.outdir + '/model_params.json', 'w') as fout:
json.dump(model_params, fout, sort_keys=True,
indent=4, separators=(',', ': '))
kwargs_out = copy.deepcopy(all_kwargs)
kys = list(kwargs_out.keys())
kwargs_out[kys[0]]['extra_sigs'] = str('dm_gp + dm_gp2 + chrom_gp')
kwargs_out[kys[1]]['extra_sigs'] = str('dm_gp + dm_gp2 + chrom_gp + gwb')
# kwargs_out[kys[2]]['extra_sigs'] = str('chrom_quad')
# kwargs_out[kys[3]]['extra_sigs'] = str('dm_gp2 + chrom_quad')
with open(args.outdir + '/model_kwargs.json', 'w') as fout:
json.dump(kwargs_out, fout, sort_keys=True,
indent=4, separators=(',', ': '))
with open(args.outdir + '/model_labels.json', 'w') as fout:
json.dump(model_labels, fout, sort_keys=True,
indent=4, separators=(',', ': '))
# sampler for N steps
N = args.niter
x0 = super_model.initial_sample()
sampler.sample(x0, N, SCAMweight=30, AMweight=15, DEweight=50, burn=500000,
writeHotChains=args.writeHotChains,
hotChain=args.hot_chain)
|
py | b412d0b878f82efccc98b97d8e28bd1bdc2151aa | import os
import torch
import numpy as np
import numpy.random as rd
class ReplayBuffer:
def __init__(self, max_len, state_dim, action_dim, cd2_dim, if_use_per, gpu_id=0, state_type=torch.float32):
"""Experience Replay Buffer
Saves environment transitions in contiguous memory for high-performance training.
Trajectories are stored in order; the state tensor is kept separate from the other
fields (action, reward, mask, ...).
`int max_len` the maximum capacity of the ReplayBuffer (First In, First Out)
`int state_dim` the dimension of the state
`int action_dim` the dimension of the action (action_dim==1 for discrete actions)
`int cd2_dim` extra dimension appended to the state shape when state_dim is an int, giving (max_len, state_dim, cd2_dim, 1, 1)
`bool if_use_per` use Prioritized Experience Replay (helpful for sparse rewards)
`int gpu_id` place the buffer on this GPU, or on the CPU when CUDA is unavailable or gpu_id < 0
"""
self.now_len = 0
self.next_id = 0
self.if_full = False
self.max_len = max_len
self.data_type = torch.float32
self.action_dim = action_dim
self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
self.cd2_dim = cd2_dim
self.per_tree = BinarySearchTree(max_len) if if_use_per else None
other_dim = 1 + 1 + self.action_dim
self.buf_other = torch.empty((max_len, other_dim), dtype=torch.float32, device=self.device)
buf_state_shape = (max_len, state_dim, cd2_dim, 1, 1) if isinstance(state_dim, int) else (max_len, *state_dim)
self.buf_state = torch.empty(buf_state_shape, dtype=state_type, device=self.device)
def append_buffer(self, state, other): # CPU array to CPU array
self.buf_state[self.next_id] = state
self.buf_other[self.next_id] = other
if self.per_tree:
self.per_tree.update_id(self.next_id)
self.next_id += 1
if self.next_id >= self.max_len:
self.if_full = True
self.next_id = 0
def extend_buffer(self, state, other):
size = len(other)
next_idx = self.next_id + size
if self.per_tree:
self.per_tree.update_ids(data_ids=np.arange(self.next_id, next_idx) % self.max_len)
if next_idx > self.max_len:
self.buf_state[self.next_id:self.max_len] = state[:self.max_len - self.next_id]
self.buf_other[self.next_id:self.max_len] = other[:self.max_len - self.next_id]
self.if_full = True
next_idx = next_idx - self.max_len
self.buf_state[0:next_idx] = state[-next_idx:]
self.buf_other[0:next_idx] = other[-next_idx:]
else:
self.buf_state[self.next_id:next_idx] = state
self.buf_other[self.next_id:next_idx] = other
self.next_id = next_idx
def sample_batch(self, batch_size) -> tuple:
"""randomly sample a batch of data for training
:int batch_size: the number of samples in a batch for Stochastic Gradient Descent
:return torch.Tensor reward: reward.shape==(batch_size, 1)
:return torch.Tensor mask: mask.shape ==(batch_size, 1), mask = 0.0 if done else gamma
:return torch.Tensor action: action.shape==(batch_size, action_dim)
:return torch.Tensor state: state.shape ==(batch_size, state_dim)
:return torch.Tensor next_state: next_state.shape==(batch_size, state_dim)
"""
if self.per_tree:
beg = -self.max_len
end = (self.now_len - self.max_len) if (self.now_len < self.max_len) else None
indices, is_weights = self.per_tree.get_indices_is_weights(batch_size, beg, end)
r_m_a = self.buf_other[indices]
return (r_m_a[:, 0:1].type(torch.float32), # reward
r_m_a[:, 1:2].type(torch.float32), # mask
r_m_a[:, 2:].type(torch.float32), # action
self.buf_state[indices].type(torch.float32), # state
self.buf_state[indices + 1].type(torch.float32), # next state
torch.as_tensor(is_weights, dtype=torch.float32, device=self.device)) # important sampling weights
else:
indices = rd.randint(self.now_len - 1, size=batch_size)
r_m_a = self.buf_other[indices]
return (r_m_a[:, 0:1], # reward
r_m_a[:, 1:2], # mask
r_m_a[:, 2:], # action
self.buf_state[indices],
self.buf_state[indices + 1])
def sample_batch_one_step(self, batch_size) -> tuple:
if self.per_tree:
beg = -self.max_len
end = (self.now_len - self.max_len) if (self.now_len < self.max_len) else None
indices, is_weights = self.per_tree.get_indices_is_weights(batch_size, beg, end)
r_m_a = self.buf_other[indices]
return (r_m_a[:, 0:1].type(torch.float32), # reward
r_m_a[:, 2:].type(torch.float32), # action
self.buf_state[indices].type(torch.float32), # state
torch.as_tensor(is_weights, dtype=torch.float32, device=self.device)) # important sampling weights
else:
indices = rd.randint(self.now_len - 1, size=batch_size)
r_m_a = self.buf_other[indices]
return (r_m_a[:, 0:1], # reward
r_m_a[:, 2:], # action
self.buf_state[indices],)
def update_now_len(self):
"""update the a pointer `now_len`, which is the current data number of ReplayBuffer
"""
self.now_len = self.max_len if self.if_full else self.next_id
def print_state_norm(self, neg_avg=None, div_std=None): # non-essential
"""print the state norm information: state_avg, state_std
We do not recommend normalizing with running statistics of the state.
Instead, normalize the state directly using a historical average and standard deviation,
eg. `state = (state + self.neg_state_avg) * self.div_state_std` in `PreprocessEnv.step_norm()`
neg_avg = -states.mean()
div_std = 1/(states.std()+1e-5) or 6/(states.max()-states.min())
:array neg_avg: neg_avg.shape=(state_dim)
:array div_std: div_std.shape=(state_dim)
"""
max_sample_size = 2 ** 14
'''check if pass'''
state_shape = self.buf_state.shape
if len(state_shape) > 2 or state_shape[1] > 64:
print(f"| print_state_norm(): state_dim: {state_shape} is too large to print its norm. ")
return None
'''sample state'''
indices = np.arange(self.now_len)
rd.shuffle(indices)
indices = indices[:max_sample_size] # len(indices) = min(self.now_len, max_sample_size)
batch_state = self.buf_state[indices]
'''compute state norm'''
if isinstance(batch_state, torch.Tensor):
batch_state = batch_state.cpu().data.numpy()
assert isinstance(batch_state, np.ndarray)
if batch_state.shape[1] > 64:
print(f"| _print_norm(): state_dim: {batch_state.shape[1]:.0f} is too large to print its norm. ")
return None
if np.isnan(batch_state).any(): # 2020-12-12
batch_state = np.nan_to_num(batch_state) # nan to 0
ary_avg = batch_state.mean(axis=0)
ary_std = batch_state.std(axis=0)
fix_std = ((np.max(batch_state, axis=0) - np.min(batch_state, axis=0)) / 6 + ary_std) / 2
if neg_avg is not None: # norm transfer
ary_avg = ary_avg - neg_avg / div_std
ary_std = fix_std / div_std
print(f"print_state_norm: state_avg, state_std (fixed)")
print(f"avg = np.{repr(ary_avg).replace('=float32', '=np.float32')}")
print(f"std = np.{repr(ary_std).replace('=float32', '=np.float32')}")
def td_error_update(self, td_error):
self.per_tree.td_error_update(td_error)
def save_or_load_history(self, cwd, if_save, buffer_id=0): # [ElegantRL.2021.11.11]
save_path = f"{cwd}/buffer_{buffer_id}.npz"
if_load = None
if if_save:
self.update_now_len()
state_dim = self.buf_state.shape[1]
other_dim = self.buf_other.shape[1]
buf_state_data_type = np.float16 \
if self.buf_state.dtype in {torch.float16, torch.float32, torch.float64} \
else np.uint8
buf_state = np.empty((self.now_len, state_dim), dtype=buf_state_data_type)
buf_other = np.empty((self.now_len, other_dim), dtype=np.float16)
temp_len = self.now_len - self.next_id
buf_state[0:temp_len] = self.buf_state[self.next_id:self.now_len].cpu().numpy()
buf_other[0:temp_len] = self.buf_other[self.next_id:self.now_len].cpu().numpy()
buf_state[temp_len:] = self.buf_state[:self.next_id].detach().cpu().numpy()
buf_other[temp_len:] = self.buf_other[:self.next_id].detach().cpu().numpy()
np.savez_compressed(save_path, buf_state=buf_state, buf_other=buf_other)
print(f"| ReplayBuffer save in: {save_path}")
elif os.path.isfile(save_path):
buf_dict = np.load(save_path)
buf_state = buf_dict['buf_state']
buf_other = buf_dict['buf_other']
bs = 512
for i in range(0, buf_state.shape[0], bs):
tmp_state = torch.as_tensor(buf_state[i:i + bs], dtype=torch.float32, device=self.device)
tmp_other = torch.as_tensor(buf_other[i:i + bs], dtype=torch.float32, device=self.device)
self.extend_buffer(tmp_state, tmp_other)
self.update_now_len()
print(f"| ReplayBuffer load: {save_path}")
if_load = True
else:
# print(f"| ReplayBuffer FileNotFound: {save_path}")
if_load = False
return if_load
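# A minimal usage sketch with hypothetical dimensions (the helper name and the numbers
# are illustrative only, not part of the original module); `other` packs reward, mask
# and action along its last axis.
def _replay_buffer_usage_sketch():
    buffer = ReplayBuffer(max_len=2 ** 12, state_dim=8, action_dim=2, cd2_dim=3,
                          if_use_per=False, gpu_id=-1)
    states = torch.zeros((16, 8, 3, 1, 1))   # (batch, state_dim, cd2_dim, 1, 1)
    others = torch.zeros((16, 1 + 1 + 2))    # (batch, reward + mask + action)
    buffer.extend_buffer(states, others)
    buffer.update_now_len()
    reward, mask, action, state, next_state = buffer.sample_batch(batch_size=4)
    return reward.shape, state.shape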
class ReplayBufferMP:
def __init__(self, state_dim, action_dim, max_len, if_use_per, buffer_num, gpu_id):
"""Experience Replay Buffer for Multiple Processing
`int max_len` the total capacity across all workers; each internal ReplayBuffer holds max_len // buffer_num
`int buffer_num` the number of rollout workers (one internal ReplayBuffer per worker)
"""
self.now_len = 0
self.max_len = max_len
self.worker_num = buffer_num
buf_max_len = max_len // buffer_num
self.buffers = [ReplayBuffer(max_len=buf_max_len, state_dim=state_dim, action_dim=action_dim,
if_use_per=if_use_per, gpu_id=gpu_id)
for _ in range(buffer_num)]
def sample_batch(self, batch_size) -> list:
bs = batch_size // self.worker_num
list_items = [self.buffers[i].sample_batch(bs)
for i in range(self.worker_num)]
# list_items of reward, mask, action, state, next_state
# list_items of reward, mask, action, state, next_state, is_weights (PER)
list_items = list(map(list, zip(*list_items))) # 2D-list transpose
return [torch.cat(item, dim=0) for item in list_items]
def sample_batch_one_step(self, batch_size) -> list:
bs = batch_size // self.worker_num
list_items = [self.buffers[i].sample_batch_one_step(bs)
for i in range(self.worker_num)]
# list_items of reward, mask, action, state, next_state
# list_items of reward, mask, action, state, next_state, is_weights (PER)
list_items = list(map(list, zip(*list_items))) # 2D-list transpose
return [torch.cat(item, dim=0) for item in list_items]
def update_now_len(self):
self.now_len = 0
for buffer in self.buffers:
buffer.update_now_len()
self.now_len += buffer.now_len
def print_state_norm(self, neg_avg=None, div_std=None): # non-essential
# for buffer in self.l_buffer:
self.buffers[0].print_state_norm(neg_avg, div_std)
def td_error_update(self, td_error):
td_errors = td_error.view(self.worker_num, -1, 1)
for i in range(self.worker_num):
self.buffers[i].per_tree.td_error_update(td_errors[i])
def save_or_load_history(self, cwd, if_save):
for i in range(self.worker_num):
self.buffers[i].save_or_load_history(cwd, if_save, buffer_id=i)
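# SharedReplayBuffer below relies on a few helpers that are not defined in this file.
# The versions here are best-guess stand-ins reconstructed from how they are called;
# they are assumptions, not the original project's utility functions.
def _flatten(T, N, x):
    # Merge the time and rollout axes: (T, N, ...) -> (T * N, ...).
    return x.reshape(T * N, *x.shape[2:])

def _cast(x):
    # Reorder (T, N, M, dim) rollout arrays to (N, M, T, dim) and flatten the first three axes.
    return x.transpose(1, 2, 0, 3).reshape(-1, *x.shape[3:])

def get_shape_from_obs_space(obs_space):
    # Stand-in: return the observation shape of a gym-style space (or the object itself).
    return obs_space.shape if hasattr(obs_space, 'shape') else obs_space

def get_shape_from_act_space(act_space):
    # Stand-in: scalar action dimension used to size the action buffers.
    if act_space.__class__.__name__ == 'Discrete':
        return 1
    return act_space.shape[0]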
class SharedReplayBuffer(object):
"""
Buffer to store training data.
:param args: (argparse.Namespace) arguments containing relevant model, policy, and env information.
:param num_agents: (int) number of agents in the env.
:param obs_space: (gym.Space) observation space of agents.
:param cent_obs_space: (gym.Space) centralized observation space of agents.
:param act_space: (gym.Space) action space for agents.
"""
def __init__(self, args, num_agents, obs_space, cent_obs_space, act_space):
self.episode_length = args.episode_length
self.n_rollout_threads = args.n_rollout_threads
self.hidden_size = args.hidden_size
self.recurrent_N = args.recurrent_N
self.gamma = args.gamma
self.gae_lambda = args.gae_lambda
self._use_gae = args.use_gae
self._use_popart = args.use_popart
self._use_valuenorm = args.use_valuenorm
self._use_proper_time_limits = args.use_proper_time_limits
obs_shape = get_shape_from_obs_space(obs_space)
share_obs_shape = get_shape_from_obs_space(cent_obs_space)
if type(obs_shape[-1]) == list:
obs_shape = obs_shape[:1]
if type(share_obs_shape[-1]) == list:
share_obs_shape = share_obs_shape[:1]
self.share_obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *share_obs_shape),
dtype=np.float32)
self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *obs_shape), dtype=np.float32)
self.rnn_states = np.zeros(
(self.episode_length + 1, self.n_rollout_threads, num_agents, self.recurrent_N, self.hidden_size),
dtype=np.float32)
self.rnn_states_critic = np.zeros_like(self.rnn_states)
self.value_preds = np.zeros(
(self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
self.returns = np.zeros_like(self.value_preds)
if act_space.__class__.__name__ == 'Discrete':
self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, act_space.n),
dtype=np.float32)
else:
self.available_actions = None
act_shape = get_shape_from_act_space(act_space)
self.actions = np.zeros(
(self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
self.action_log_probs = np.zeros(
(self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
self.rewards = np.zeros(
(self.episode_length, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
self.bad_masks = np.ones_like(self.masks)
self.active_masks = np.ones_like(self.masks)
self.step = 0
def insert(self, share_obs, obs, rnn_states_actor, rnn_states_critic, actions, action_log_probs,
value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
"""
Insert data into the buffer.
:param share_obs: (argparse.Namespace) arguments containing relevant model, policy, and env information.
:param obs: (np.ndarray) local agent observations.
:param rnn_states_actor: (np.ndarray) RNN states for actor network.
:param rnn_states_critic: (np.ndarray) RNN states for critic network.
:param actions:(np.ndarray) actions taken by agents.
:param action_log_probs:(np.ndarray) log probs of actions taken by agents
:param value_preds: (np.ndarray) value function prediction at each step.
:param rewards: (np.ndarray) reward collected at each step.
:param masks: (np.ndarray) denotes whether the environment has terminated or not.
:param bad_masks: (np.ndarray) denotes whether a termination was a true terminal state or was caused by the episode limit.
:param active_masks: (np.ndarray) denotes whether an agent is active or dead in the env.
:param available_actions: (np.ndarray) actions available to each agent. If None, all actions are available.
"""
self.share_obs[self.step + 1] = share_obs.copy()
self.obs[self.step + 1] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states_actor.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step + 1] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step + 1] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
def chooseinsert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,
value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
"""
Insert data into the buffer. This insert function is used specifically for Hanabi, which is turn based.
:param share_obs: (argparse.Namespace) arguments containing relevant model, policy, and env information.
:param obs: (np.ndarray) local agent observations.
:param rnn_states_actor: (np.ndarray) RNN states for actor network.
:param rnn_states_critic: (np.ndarray) RNN states for critic network.
:param actions:(np.ndarray) actions taken by agents.
:param action_log_probs:(np.ndarray) log probs of actions taken by agents
:param value_preds: (np.ndarray) value function prediction at each step.
:param rewards: (np.ndarray) reward collected at each step.
:param masks: (np.ndarray) denotes whether the environment has terminated or not.
:param bad_masks: (np.ndarray) denotes whether a termination was a true terminal state or was caused by the episode limit
:param active_masks: (np.ndarray) denotes whether an agent is active or dead in the env.
:param available_actions: (np.ndarray) actions available to each agent. If None, all actions are available.
"""
self.share_obs[self.step] = share_obs.copy()
self.obs[self.step] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
def after_update(self):
"""Copy last timestep data to first index. Called after update to model."""
self.share_obs[0] = self.share_obs[-1].copy()
self.obs[0] = self.obs[-1].copy()
self.rnn_states[0] = self.rnn_states[-1].copy()
self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
self.masks[0] = self.masks[-1].copy()
self.bad_masks[0] = self.bad_masks[-1].copy()
self.active_masks[0] = self.active_masks[-1].copy()
if self.available_actions is not None:
self.available_actions[0] = self.available_actions[-1].copy()
def chooseafter_update(self):
"""Copy last timestep data to first index. This method is used for Hanabi."""
self.rnn_states[0] = self.rnn_states[-1].copy()
self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
self.masks[0] = self.masks[-1].copy()
self.bad_masks[0] = self.bad_masks[-1].copy()
def compute_returns(self, next_value, value_normalizer=None):
"""
Compute returns either as discounted sum of rewards, or using GAE.
:param next_value: (np.ndarray) value predictions for the step after the last episode step.
:param value_normalizer: (PopArt) If not None, PopArt value normalizer instance.
"""
if self._use_proper_time_limits:
if self._use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
# step + 1
delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(
self.value_preds[step + 1]) * self.masks[step + 1] \
- value_normalizer.denormalize(self.value_preds[step])
gae = delta + self.gamma * self.gae_lambda * gae * self.masks[step + 1]
gae = gae * self.bad_masks[step + 1]
self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
else:
delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - \
self.value_preds[step]
gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
gae = gae * self.bad_masks[step + 1]
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[
step]) * self.bad_masks[step + 1] \
+ (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(
self.value_preds[step])
else:
self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[
step]) * self.bad_masks[step + 1] \
+ (1 - self.bad_masks[step + 1]) * self.value_preds[step]
else:
if self._use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(
self.value_preds[step + 1]) * self.masks[step + 1] \
- value_normalizer.denormalize(self.value_preds[step])
gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
else:
delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - \
self.value_preds[step]
gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.shape[0])):
self.returns[step] = self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]
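# For reference, the GAE branch above (without value normalization) implements:
#   delta_t = r_t + gamma * V(s_{t+1}) * mask_{t+1} - V(s_t)
#   gae_t   = delta_t + gamma * gae_lambda * mask_{t+1} * gae_{t+1}
#   R_t     = gae_t + V(s_t)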
def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None):
"""
Yield training data for MLP policies.
:param advantages: (np.ndarray) advantage estimates.
:param num_mini_batch: (int) number of minibatches to split the batch into.
:param mini_batch_size: (int) number of samples in each minibatch.
"""
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads * episode_length * num_agents
if mini_batch_size is None:
assert batch_size >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) * number of agents ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(n_rollout_threads, episode_length, num_agents,
n_rollout_threads * episode_length * num_agents,
num_mini_batch))
mini_batch_size = batch_size // num_mini_batch
rand = torch.randperm(batch_size).numpy()
sampler = [rand[i * mini_batch_size:(i + 1) * mini_batch_size] for i in range(num_mini_batch)]
share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[3:])
obs = self.obs[:-1].reshape(-1, *self.obs.shape[3:])
rnn_states = self.rnn_states[:-1].reshape(-1, *self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic[:-1].reshape(-1, *self.rnn_states_critic.shape[3:])
actions = self.actions.reshape(-1, self.actions.shape[-1])
if self.available_actions is not None:
available_actions = self.available_actions[:-1].reshape(-1, self.available_actions.shape[-1])
value_preds = self.value_preds[:-1].reshape(-1, 1)
returns = self.returns[:-1].reshape(-1, 1)
masks = self.masks[:-1].reshape(-1, 1)
active_masks = self.active_masks[:-1].reshape(-1, 1)
action_log_probs = self.action_log_probs.reshape(-1, self.action_log_probs.shape[-1])
advantages = advantages.reshape(-1, 1)
for indices in sampler:
# obs size [T+1 N M Dim]-->[T N M Dim]-->[T*N*M,Dim]-->[index,Dim]
share_obs_batch = share_obs[indices]
obs_batch = obs[indices]
rnn_states_batch = rnn_states[indices]
rnn_states_critic_batch = rnn_states_critic[indices]
actions_batch = actions[indices]
if self.available_actions is not None:
available_actions_batch = available_actions[indices]
else:
available_actions_batch = None
value_preds_batch = value_preds[indices]
return_batch = returns[indices]
masks_batch = masks[indices]
active_masks_batch = active_masks[indices]
old_action_log_probs_batch = action_log_probs[indices]
if advantages is None:
adv_targ = None
else:
adv_targ = advantages[indices]
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\
value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\
adv_targ, available_actions_batch
def naive_recurrent_generator(self, advantages, num_mini_batch):
"""
Yield training data for non-chunked RNN training.
:param advantages: (np.ndarray) advantage estimates.
:param num_mini_batch: (int) number of minibatches to split the batch into.
"""
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads * num_agents
assert n_rollout_threads * num_agents >= num_mini_batch, (
"PPO requires the number of processes ({})* number of agents ({}) "
"to be greater than or equal to the number of "
"PPO mini batches ({}).".format(n_rollout_threads, num_agents, num_mini_batch))
num_envs_per_batch = batch_size // num_mini_batch
perm = torch.randperm(batch_size).numpy()
share_obs = self.share_obs.reshape(-1, batch_size, *self.share_obs.shape[3:])
obs = self.obs.reshape(-1, batch_size, *self.obs.shape[3:])
rnn_states = self.rnn_states.reshape(-1, batch_size, *self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic.reshape(-1, batch_size, *self.rnn_states_critic.shape[3:])
actions = self.actions.reshape(-1, batch_size, self.actions.shape[-1])
if self.available_actions is not None:
available_actions = self.available_actions.reshape(-1, batch_size, self.available_actions.shape[-1])
value_preds = self.value_preds.reshape(-1, batch_size, 1)
returns = self.returns.reshape(-1, batch_size, 1)
masks = self.masks.reshape(-1, batch_size, 1)
active_masks = self.active_masks.reshape(-1, batch_size, 1)
action_log_probs = self.action_log_probs.reshape(-1, batch_size, self.action_log_probs.shape[-1])
advantages = advantages.reshape(-1, batch_size, 1)
for start_ind in range(0, batch_size, num_envs_per_batch):
share_obs_batch = []
obs_batch = []
rnn_states_batch = []
rnn_states_critic_batch = []
actions_batch = []
available_actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
active_masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for offset in range(num_envs_per_batch):
ind = perm[start_ind + offset]
share_obs_batch.append(share_obs[:-1, ind])
obs_batch.append(obs[:-1, ind])
rnn_states_batch.append(rnn_states[0:1, ind])
rnn_states_critic_batch.append(rnn_states_critic[0:1, ind])
actions_batch.append(actions[:, ind])
if self.available_actions is not None:
available_actions_batch.append(available_actions[:-1, ind])
value_preds_batch.append(value_preds[:-1, ind])
return_batch.append(returns[:-1, ind])
masks_batch.append(masks[:-1, ind])
active_masks_batch.append(active_masks[:-1, ind])
old_action_log_probs_batch.append(action_log_probs[:, ind])
adv_targ.append(advantages[:, ind])
# [N[T, dim]]
T, N = self.episode_length, num_envs_per_batch
# These are all from_numpys of size (T, N, -1)
share_obs_batch = np.stack(share_obs_batch, 1)
obs_batch = np.stack(obs_batch, 1)
actions_batch = np.stack(actions_batch, 1)
if self.available_actions is not None:
available_actions_batch = np.stack(available_actions_batch, 1)
value_preds_batch = np.stack(value_preds_batch, 1)
return_batch = np.stack(return_batch, 1)
masks_batch = np.stack(masks_batch, 1)
active_masks_batch = np.stack(active_masks_batch, 1)
old_action_log_probs_batch = np.stack(old_action_log_probs_batch, 1)
adv_targ = np.stack(adv_targ, 1)
# States is just a (N, dim) from_numpy [N[1,dim]]
rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])
rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])
# Flatten the (T, N, ...) from_numpys to (T * N, ...)
share_obs_batch = _flatten(T, N, share_obs_batch)
obs_batch = _flatten(T, N, obs_batch)
actions_batch = _flatten(T, N, actions_batch)
if self.available_actions is not None:
available_actions_batch = _flatten(T, N, available_actions_batch)
else:
available_actions_batch = None
value_preds_batch = _flatten(T, N, value_preds_batch)
return_batch = _flatten(T, N, return_batch)
masks_batch = _flatten(T, N, masks_batch)
active_masks_batch = _flatten(T, N, active_masks_batch)
old_action_log_probs_batch = _flatten(T, N, old_action_log_probs_batch)
adv_targ = _flatten(T, N, adv_targ)
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\
value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\
adv_targ, available_actions_batch
def recurrent_generator(self, advantages, num_mini_batch, data_chunk_length):
"""
Yield training data for chunked RNN training.
:param advantages: (np.ndarray) advantage estimates.
:param num_mini_batch: (int) number of minibatches to split the batch into.
:param data_chunk_length: (int) length of sequence chunks with which to train RNN.
"""
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads * episode_length * num_agents
data_chunks = batch_size // data_chunk_length # [C=r*T*M/L]
mini_batch_size = data_chunks // num_mini_batch
rand = torch.randperm(data_chunks).numpy()
sampler = [rand[i * mini_batch_size:(i + 1) * mini_batch_size] for i in range(num_mini_batch)]
if len(self.share_obs.shape) > 4:
share_obs = self.share_obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.share_obs.shape[3:])
obs = self.obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.obs.shape[3:])
else:
share_obs = _cast(self.share_obs[:-1])
obs = _cast(self.obs[:-1])
actions = _cast(self.actions)
action_log_probs = _cast(self.action_log_probs)
advantages = _cast(advantages)
value_preds = _cast(self.value_preds[:-1])
returns = _cast(self.returns[:-1])
masks = _cast(self.masks[:-1])
active_masks = _cast(self.active_masks[:-1])
# rnn_states = _cast(self.rnn_states[:-1])
# rnn_states_critic = _cast(self.rnn_states_critic[:-1])
rnn_states = self.rnn_states[:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic[:-1].transpose(1, 2, 0, 3, 4).reshape(-1,
*self.rnn_states_critic.shape[
3:])
if self.available_actions is not None:
available_actions = _cast(self.available_actions[:-1])
for indices in sampler:
share_obs_batch = []
obs_batch = []
rnn_states_batch = []
rnn_states_critic_batch = []
actions_batch = []
available_actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
active_masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for index in indices:
ind = index * data_chunk_length
# size [T+1 N M Dim]-->[T N M Dim]-->[N,M,T,Dim]-->[N*M*T,Dim]-->[L,Dim]
share_obs_batch.append(share_obs[ind:ind + data_chunk_length])
obs_batch.append(obs[ind:ind + data_chunk_length])
actions_batch.append(actions[ind:ind + data_chunk_length])
if self.available_actions is not None:
available_actions_batch.append(available_actions[ind:ind + data_chunk_length])
value_preds_batch.append(value_preds[ind:ind + data_chunk_length])
return_batch.append(returns[ind:ind + data_chunk_length])
masks_batch.append(masks[ind:ind + data_chunk_length])
active_masks_batch.append(active_masks[ind:ind + data_chunk_length])
old_action_log_probs_batch.append(action_log_probs[ind:ind + data_chunk_length])
adv_targ.append(advantages[ind:ind + data_chunk_length])
# size [T+1 N M Dim]-->[T N M Dim]-->[N M T Dim]-->[N*M*T,Dim]-->[1,Dim]
rnn_states_batch.append(rnn_states[ind])
rnn_states_critic_batch.append(rnn_states_critic[ind])
L, N = data_chunk_length, mini_batch_size
# These are all numpy arrays of shape (L, N, Dim)
share_obs_batch = np.stack(share_obs_batch, axis=1)
obs_batch = np.stack(obs_batch, axis=1)
actions_batch = np.stack(actions_batch, axis=1)
if self.available_actions is not None:
available_actions_batch = np.stack(available_actions_batch, axis=1)
value_preds_batch = np.stack(value_preds_batch, axis=1)
return_batch = np.stack(return_batch, axis=1)
masks_batch = np.stack(masks_batch, axis=1)
active_masks_batch = np.stack(active_masks_batch, axis=1)
old_action_log_probs_batch = np.stack(old_action_log_probs_batch, axis=1)
adv_targ = np.stack(adv_targ, axis=1)
# The RNN states are just (N, -1) numpy arrays
rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])
rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])
# Flatten the (L, N, ...) arrays to (L * N, ...)
share_obs_batch = _flatten(L, N, share_obs_batch)
obs_batch = _flatten(L, N, obs_batch)
actions_batch = _flatten(L, N, actions_batch)
if self.available_actions is not None:
available_actions_batch = _flatten(L, N, available_actions_batch)
else:
available_actions_batch = None
value_preds_batch = _flatten(L, N, value_preds_batch)
return_batch = _flatten(L, N, return_batch)
masks_batch = _flatten(L, N, masks_batch)
active_masks_batch = _flatten(L, N, active_masks_batch)
old_action_log_probs_batch = _flatten(L, N, old_action_log_probs_batch)
adv_targ = _flatten(L, N, adv_targ)
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\
value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\
adv_targ, available_actions_batch
class BinarySearchTree:
"""Binary Search Tree for PER
Contributor: Github GyChou, Github mississippiu
Reference: https://github.com/kaixindelele/DRLib/tree/main/algos/pytorch/td3_sp
Reference: https://github.com/jaromiru/AI-blog/blob/master/SumTree.py
"""
def __init__(self, memo_len):
self.memo_len = memo_len # replay buffer len
self.prob_ary = np.zeros((memo_len - 1) + memo_len) # parent_nodes_num + leaf_nodes_num
self.max_len = len(self.prob_ary)
self.now_len = self.memo_len - 1 # pointer
self.indices = None
self.depth = int(np.log2(self.max_len))
# PER. Prioritized Experience Replay. Section 4
# alpha, beta = 0.7, 0.5 for rank-based variant
# alpha, beta = 0.6, 0.4 for proportional variant
self.per_alpha = 0.6 # alpha = (Uniform:0, Greedy:1)
self.per_beta = 0.4 # beta: importance-sampling exponent, annealed toward 1 (full bias correction)
def update_id(self, data_id, prob=10): # 10 is max_prob
tree_id = data_id + self.memo_len - 1
if self.now_len == tree_id:
self.now_len += 1
delta = prob - self.prob_ary[tree_id]
self.prob_ary[tree_id] = prob
while tree_id != 0: # propagate the change through tree
tree_id = (tree_id - 1) // 2 # faster than the recursive loop
self.prob_ary[tree_id] += delta
def update_ids(self, data_ids, prob=10): # 10 is max_prob
ids = data_ids + self.memo_len - 1
self.now_len += (ids >= self.now_len).sum()
upper_step = self.depth - 1
self.prob_ary[ids] = prob # here, ids means the indices of given children (maybe the right ones or left ones)
p_ids = (ids - 1) // 2
while upper_step: # propagate the change through tree
ids = p_ids * 2 + 1 # in this while loop, ids means the indices of the left children
self.prob_ary[p_ids] = self.prob_ary[ids] + self.prob_ary[ids + 1]
p_ids = (p_ids - 1) // 2
upper_step -= 1
self.prob_ary[0] = self.prob_ary[1] + self.prob_ary[2]
# because we take depth-1 upper steps, ps_tree[0] need to be updated alone
def get_leaf_id(self, v):
"""Tree structure and array storage:
Tree index:
0 -> storing priority sum
| |
1 2
| | | |
3 4 5 6 -> storing priority for transitions
Array type for storing: [0, 1, 2, 3, 4, 5, 6]
"""
parent_idx = 0
while True:
l_idx = 2 * parent_idx + 1 # the leaf's left node
r_idx = l_idx + 1 # the leaf's right node
if l_idx >= (len(self.prob_ary)): # reach bottom, end search
leaf_idx = parent_idx
break
else: # downward search, always search for a higher priority node
if v <= self.prob_ary[l_idx]:
parent_idx = l_idx
else:
v -= self.prob_ary[l_idx]
parent_idx = r_idx
return min(leaf_idx, self.now_len - 2) # leaf_idx
def get_indices_is_weights(self, batch_size, beg, end):
self.per_beta = min(1., self.per_beta + 0.001)
# get random values for searching indices with proportional prioritization
values = (rd.rand(batch_size) + np.arange(batch_size)) * (self.prob_ary[0] / batch_size)
# get proportional prioritization
leaf_ids = np.array([self.get_leaf_id(v) for v in values])
self.indices = leaf_ids - (self.memo_len - 1)
prob_ary = self.prob_ary[leaf_ids] / self.prob_ary[beg:end].min()
is_weights = np.power(prob_ary, -self.per_beta) # importance sampling weights
return self.indices, is_weights
def td_error_update(self, td_error): # td_error = (q-q).detach_().abs()
prob = td_error.squeeze().clamp(1e-6, 10).pow(self.per_alpha)
prob = prob.cpu().numpy()
self.update_ids(self.indices, prob)
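# A minimal usage sketch of the sum tree (illustrative only; it assumes `np` is numpy,
# `rd` is numpy.random, and that a torch tensor of TD errors is available, as the
# methods above already presume; the parameter choices below are hypothetical):
#
#   tree = BinarySearchTree(memo_len=1024)
#   tree.update_ids(np.arange(256))                  # insert 256 items at max priority
#   beg, end = tree.memo_len - 1, tree.now_len       # one reading: the filled leaf range
#   ids, is_weights = tree.get_indices_is_weights(batch_size=32, beg=beg, end=end)
#   # ...compute per-sample TD errors for `ids`, then refresh their priorities...
#   tree.td_error_update(td_error)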
|
py | b412d0d40844e0f65b04d2351e79233c77096589 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import pytest
import mxnet as mx
import numpy as np
import numpy.testing as npt
from mxfusion.components.variables.var_trans import PositiveTransformation, Logistic
@pytest.mark.usefixtures("set_seed")
class TestVariableTransformation(object):
"""
Tests the MXFusion.core.var_trans file for variable transformations
"""
def test_softplus(self):
v_orig = mx.nd.array([-10.], dtype=np.float64)
p = PositiveTransformation()
v_pos = p.transform(v_orig)
v_inv_trans = p.inverseTransform(v_pos)
assert v_orig.asnumpy()[0] < 0
assert v_pos.asnumpy()[0] > 0
assert v_inv_trans.asnumpy()[0] < 0
npt.assert_allclose(v_inv_trans.asnumpy()[0], v_orig.asnumpy()[0], rtol=1e-7, atol=1e-10)
@pytest.mark.parametrize("x, rtol, atol", [
(mx.nd.array([10], dtype=np.float64), 1e-7, 1e-10),
(mx.nd.array([1e-30], dtype=np.float64), 1e-7, 1e-10),
(mx.nd.array([5], dtype=np.float32), 1e-4, 1e-5),
(mx.nd.array([1e-6], dtype=np.float32), 1e-4, 1e-5)
])
def test_softplus_numerical(self, x, rtol, atol):
p = PositiveTransformation()
mf_pos = p.transform(x)
mf_inv = p.inverseTransform(mf_pos)
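# Reference computation (for clarity): softplus(x) = log(1 + exp(x)) and its
# inverse is log(exp(y) - 1), which PositiveTransformation is expected to match.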
np_pos = np.log1p(np.exp(x.asnumpy()))
np_inv = np.log(np.expm1(np_pos))
npt.assert_allclose(mf_pos.asnumpy(), np_pos, rtol=rtol, atol=atol)
npt.assert_allclose(mf_inv.asnumpy(), np_inv, rtol=rtol, atol=atol)
npt.assert_allclose(mf_inv.asnumpy(), x.asnumpy(), rtol=rtol, atol=atol)
@pytest.mark.parametrize("x, upper, lower, rtol, atol", [
(mx.nd.array([10], dtype=np.float64), 2, 20, 1e-7, 1e-10),
(mx.nd.array([1e-3], dtype=np.float64), 1e-6, 1e-2, 1e-7, 1e-10),
(mx.nd.array([1], dtype=np.float32), 1, 200000, 1e-4, 1e-5),
(mx.nd.array([5], dtype=np.float32), 2, 10000, 1e-4, 1e-5)
])
def test_logistic(self, x, upper, lower, rtol, atol):
transform = Logistic(upper, lower)
x_trans = transform.transform(x)
x_inversed = transform.inverseTransform(x_trans)
assert x_inversed.dtype == x.dtype
assert np.isclose(x.asnumpy(), x_inversed.asnumpy(), rtol=rtol, atol=atol)
|
py | b412d1370195caa99308a70cbafb5aaff5eb234f | from virgil_crypto._libs import LowLevelLibs
from ctypes import Structure, POINTER, c_int, c_size_t
from virgil_crypto.common._c_bridge import vsc_data_t, vsc_buffer_t
class vsce_phe_cipher_t(Structure):
pass
class VscePheCipher(object):
def __init__(self):
self._ll = LowLevelLibs()
self._lib_common = self._ll.common
self._lib_foundation = self._ll.foundation
self._lib = self._ll.phe
def vsce_phe_cipher_new(self):
vsce_phe_cipher_new = self._lib.vsce_phe_cipher_new
vsce_phe_cipher_new.argtypes = []
vsce_phe_cipher_new.restype = POINTER(vsce_phe_cipher_t)
return vsce_phe_cipher_new()
def vsce_phe_cipher_setup_defaults(self, cipher):
vsce_phe_cipher_setup_defaults = self._lib.vsce_phe_cipher_setup_defaults
vsce_phe_cipher_setup_defaults.argtypes = [POINTER(vsce_phe_cipher_t)]
vsce_phe_cipher_setup_defaults.restype = None
return vsce_phe_cipher_setup_defaults(cipher)
def vsce_phe_cipher_delete(self, phe_cipher):
vsce_phe_cipher_delete = self._lib.vsce_phe_cipher_delete
vsce_phe_cipher_delete.argtypes = [POINTER(vsce_phe_cipher_t)]
vsce_phe_cipher_delete.restype = None
return vsce_phe_cipher_delete(phe_cipher)
def vsce_phe_cipher_encrypt_len(self, cipher, data_len):
vsce_phe_cipher_encrypt_len = self._lib.vsce_phe_cipher_encrypt_len
vsce_phe_cipher_encrypt_len.argtypes = [POINTER(vsce_phe_cipher_t), c_size_t]
vsce_phe_cipher_encrypt_len.restype = c_size_t
return vsce_phe_cipher_encrypt_len(cipher, data_len)
def vsce_phe_cipher_encrypt(self, phe_cipher, data, account_key, encrypted_data):
vsce_phe_cipher_encrypt = self._lib.vsce_phe_cipher_encrypt
vsce_phe_cipher_encrypt.argtypes = [POINTER(vsce_phe_cipher_t), vsc_data_t, vsc_data_t, POINTER(vsc_buffer_t)]
vsce_phe_cipher_encrypt.restype = c_int
return vsce_phe_cipher_encrypt(phe_cipher, data, account_key, encrypted_data)
def vsce_phe_cipher_decrypt_len(self, cipher, encrypted_data_len):
vsce_phe_cipher_decrypt_len = self._lib.vsce_phe_cipher_decrypt_len
vsce_phe_cipher_decrypt_len.argtypes = [POINTER(vsce_phe_cipher_t), c_size_t]
vsce_phe_cipher_decrypt_len.restype = c_size_t
return vsce_phe_cipher_decrypt_len(cipher, encrypted_data_len)
def vsce_phe_cipher_decrypt(self, phe_cipher, encrypted_data, account_key, data):
vsce_phe_cipher_decrypt = self._lib.vsce_phe_cipher_decrypt
vsce_phe_cipher_decrypt.argtypes = [POINTER(vsce_phe_cipher_t), vsc_data_t, vsc_data_t, POINTER(vsc_buffer_t)]
vsce_phe_cipher_decrypt.restype = c_int
return vsce_phe_cipher_decrypt(phe_cipher, encrypted_data, account_key, data)
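# Note on usage (a descriptive inference from the signatures above, not from upstream docs):
# the *_len helpers report the required output size, so callers are expected to allocate a
# vsc_buffer_t of at least that capacity before invoking vsce_phe_cipher_encrypt/decrypt.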
|
py | b412d1e5a9d1e1bc4ae53d064ec6d2eb17c75efd | import sys
from ncc.data.constants import (
PAD,
SBT_LEFT_PARENTHESE,
SBT_RIGHT_PARENTHESE,
)
from ncc.data import tokenizer_funcs
from ..constants import (
RECURSION_DEPTH,
MAX_SUBTOKEN_LEN,
NODE_TMP,
)
from copy import deepcopy
# ignore ASTs whose size is too large; therefore the recursion limit is set to a relatively small number
sys.setrecursionlimit(RECURSION_DEPTH) # recursion depth
def child_value2child_only(ast):
"""node['value'] => node['children']"""
for idx, node in ast.items():
value = node.get('value', None)
if value:
node.pop('value')
node['children'] = [value]
return ast
def pad_leaf_nodes(ast, max_len=MAX_SUBTOKEN_LEN):
'''
pad leaf node's child into [XX, [XX, ...]]
split token and pad it with PAD_TOKEN till reach MAX_TOKEN_LIST_LEN
e.g. VariableName -> [VariableName, [Variable, Name, PAD_TOKEN, PAD_TOKEN, ...]]
'''
for idx, node in ast.items():
if len(node['children']) == 1 and isinstance(node['children'][0], str):
subtokens = tokenizer_funcs._space_dpu_sub_tokenizer(node['children'][0])[:max_len]
subtokens.extend([PAD] * (max_len - len(subtokens)))
node['children'].append(subtokens)
return ast
def ast2sbt(ast, idx):
'''
build structure-based traversal SBT tree
ref: Deep Code Comment Generation
'''
if len(ast[idx]['children']) == 2 and type(ast[idx]['children'][1]) == list:
token = ast[idx]['type'] + '_' + ast[idx]['children'][0]
seq = [SBT_LEFT_PARENTHESE, token, SBT_RIGHT_PARENTHESE, token]
else:
token = ast[idx]['type']
seq = [SBT_LEFT_PARENTHESE, token]
for child_idx in ast[idx]['children']:
seq += ast2sbt(ast, str(child_idx))
seq += [SBT_RIGHT_PARENTHESE, token]
return seq
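# A worked toy example (hypothetical AST; it assumes SBT_LEFT_PARENTHESE/SBT_RIGHT_PARENTHESE
# render as '(' and ')'); leaf nodes carry [raw_token, padded_subtoken_list] as children:
#
#   ast = {
#       '0': {'type': 'decl', 'parent': None, 'children': [1, 2]},
#       '1': {'type': 'name', 'parent': 0, 'children': ['foo', ['foo', PAD, ...]]},
#       '2': {'type': 'type', 'parent': 0, 'children': ['int', ['int', PAD, ...]]},
#   }
#   ast2sbt(ast, '0') ==
#   ['(', 'decl', '(', 'name_foo', ')', 'name_foo', '(', 'type_int', ')', 'type_int', ')', 'decl']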
def get_root(ast):
"""get root node index"""
for idx, node in ast.items():
if node['parent'] is None:
return idx
def delete_root_with_unichild(ast):
"""
delete root node with only a child
because in that case the head node might be Program/Function/Error while its single child is the code's actual AST
"""
for idx in sorted([idx for idx in ast.keys()], key=int):
if (ast[idx]['parent'] is None) and len(ast[idx]['children']) == 1:
child_idx = ast[idx]['children'][0]
ast[str(child_idx)]['parent'] = None
ast.pop(idx)
else:
break
return ast
def delete_nodes_with_unichild(ast):
'''
delete nodes with single child node
e.g. [1*NODEFIX1] -> [1*NODEFIX2] -> ['void'] => [1*NODEFIX1] -> ['void']
'''
def _dfs(idx):
node = ast[idx]
# get current node's children indices, if it's leaf node, ignore.
if not (len(node['children']) == 1 and isinstance(node['children'][0], str)):
child_ids = node['children']
else:
return # move to leaf node, return
# each ast tree generally is parsed from a method, so it has a "program" root node and a "method" node
# therefore, if current node is the root node with single child, we do not delete it
while (len(child_ids) == 1) and (node['parent'] is not None):
# update its parent's children
parent_node = ast[str(node['parent'])]
del_idx = parent_node['children'].index(int(idx))
parent_node['children'].pop(del_idx)
child_idx = child_ids[0]
# update its children's parent to its parent
ast[str(child_idx)]['parent'] = node['parent']
# update its parent's children
parent_node['children'].insert(del_idx, child_idx)
# delete itself
ast.pop(idx)
# update current info
idx = str(child_idx)
node = ast[idx]
# get current node's children indices, if it's leaf node, ignore.
if not (len(node['children']) == 1 and isinstance(node['children'][0], str)):
child_ids = node['children']
else:
return # move to leaf node, return
for idx in child_ids:
_dfs(str(idx))
idx = get_root(ast)
_dfs(idx)
return ast
def ast2bin_ast(ast):
'''ast tree -> binary ast tree'''
last_node_idx = sorted(ast.keys(), key=int)[-1]
def _dfs(idx):
node = ast[idx]
# get current node's children indices, if it's leaf node, ignore.
if not (len(node['children']) == 1 and isinstance(node['children'][0], str)):
child_ids = node['children']
else:
return # move to leaf node, return
if len(child_ids) > 2:
# add new node
nonlocal last_node_idx
last_node_idx = str(int(last_node_idx) + 1)
ast[last_node_idx] = {'type': NODE_TMP, 'parent': idx, 'children': child_ids[1:]}
# update node's children info
node['children'] = [child_ids[0], int(last_node_idx)]
# update other childen nodes' parent info
for child_idx in child_ids[1:]:
ast[str(child_idx)]['parent'] = last_node_idx
# update current node's children info
# get current node's children indices, if it's leaf node, ignore.
if not (len(node['children']) == 1 and isinstance(node['children'][0], str)):
child_ids = node['children']
else:
return # move to leaf node, return
for idx in child_ids:
_dfs(str(idx))
idx = get_root(ast)
_dfs(idx)
return ast
def reset_indices(ast):
'''rename ast tree's node indices with consecutive indices'''
if sorted(list(ast.keys())) == list(range(len(ast))):
return ast
# firstly, resort node index with a prefix "_", e.g. 0 => "_0"
_idx = 0
def _dfs(idx, _parent_idx):
nonlocal _idx
_new_idx, _idx = f'_{_idx}', _idx + 1 # update for next node
node = ast.pop(str(idx))
ast[_new_idx] = node
# update its parent's children
if node['parent'] is None:
pass # current node is root node, no need for update its children
else:
parent_node = ast[_parent_idx]
# update its index in its parent node
parent_node['children'][parent_node['children'].index(idx)] = _new_idx
# update parent index
node['parent'] = _parent_idx
if isinstance(node['children'][0], int): # non-leaf nodes, traverse its children nodes
# update its children nodes' parent
for child_idx in node['children']:
_dfs(child_idx, _parent_idx=_new_idx)
else:
return
root_idx = get_root(ast)
_dfs(root_idx, _parent_idx=None)
# recover name: from _* => *
node_ids = deepcopy(list(ast.keys()))
for idx in node_ids:
node = ast.pop(idx)
# update children index
if len(node['children']) > 1:
node['children'] = [int(child_idx[1:]) for child_idx in node['children']]
# update parent index
if node['parent'] == None:
pass
else:
node['parent'] = int(node['parent'][1:])
ast[int(idx[1:])] = node # _idx => idx
return ast
|
py | b412d2a36304482756320c40eea6d6f073b84e98 | import sys
meu_nome = "joao"
def hello(meu_nome):
print ("OLá",meu_nome)
return
print ("teste")
hello(meu_nome)
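# NOTE: the fragment below assumes that w, temperature, wind, tomorrow, status, time,
# date and temperatureC were defined earlier (e.g. by a pyowm weather query); they are
# not defined in this snippet.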
print ("Referência: ", w)
print ("")
print ("A temperatura em Barueri é de: ",temperature)
print ("")
#Quando eu passo o str(wind.get("speed"), eu pego o valor dentro da key do JSON
#:4 eu pego apenas 4 posições do valor
print ("Está ventando a ", str(wind.get("speed")*1.60934)[:4], " Km/h")
#print (len(wind))
#quando eu forneço um FOR com uma variável (nesse caso, o objeto do wind) ele retorna o nome da chave, e o wind.get(str(j)) me retorna o valor desse objeto
for j in wind:
print("item: ", j, " / ", wind.get(str(j)))
print ("")
print ("Amanhã: ", tomorrow)
#to_JSON() faz com que ele exiba todos os valores do JSON
print(w.to_JSON())
#Prints
print ("A temperatura em Barueri é de",str(temperature.get('temp')),"ºC")
print ("Está ventando a ", str(wind.get("speed")*1.60934)[:4], " Km/h")
print ("A temperatura em Barueri é de",str(temperature.get('temp')),"ºC")
print ("Está ventando a ", str(wind.get("speed")*1.60934)[:4], " Km/h")
print (status.title())
# Log
hoje = "%s" % (time.strftime("%Y_%m_%d"))
arquivo = open("log.%s.txt" % (date), "a")
if temperatureC != temperature:
arquivo.write("mudou")
temperatureC = temperature
arquivo.write("[%s]" % (hoje))
arquivo.write("[%s]" % (temperature.get('temp')))
arquivo.write("[%s]\n" % str(wind.get("speed")*1.60934)[:4])
arquivo.close()
from tkinter import *
class Application(Frame):
def say_hi(self):
print ("hi there, everyone!")
def createWidgets(self):
self.QUIT = Button(self)
self.QUIT["text"] = "QUIT"
self.QUIT["fg"] = "green"
self.QUIT["bg"] = "black"
self.QUIT["height"] = "10"
self.QUIT["width"] = "10"
self.QUIT["command"] = self.quit
self.QUIT.pack({"side": "left"})
self.hi_there = Button(self)
self.hi_there["text"] = "Hello",
self.hi_there["command"] = self.say_hi
self.hi_there.pack({"side": "left"})
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack()
self.createWidgets()
root = Tk()
app = Application(master=root)
app.mainloop()
root.destroy()
#tkinter
from tkinter import *
root = Tk()
var = StringVar()
label = Label( root, textvariable=var, relief=RAISED )
var.set("Hey!? How are you doing?")
label.pack()
root.mainloop()
|
py | b412d3533d15a0ec9764bb14184bc51c9bf18be0 | #
#
#
#
#
# base URL for all Twitch API v3 request
BASE_URL = 'https://api.twitch.tv/kraken/'
|
py | b412d425a8fec0611d294c0391561be3dd9fd872 | def load(h):
return ({'abbr': 'avg', 'code': 0, 'title': 'Average'},
{'abbr': 'accum', 'code': 1, 'title': 'Accumulation'},
{'abbr': 'max', 'code': 2, 'title': 'Maximum'},
{'abbr': 'min', 'code': 3, 'title': 'Minimum'},
{'abbr': 'diff',
'code': 4,
'title': 'Difference',
'units': 'value at the end of time range minus value at the beginning'},
{'abbr': 'rms', 'code': 5, 'title': 'Root mean square'},
{'abbr': 'sd', 'code': 6, 'title': 'Standard deviation'},
{'abbr': 'cov',
'code': 7,
'title': 'Covariance',
'units': 'temporal variance'},
{'abbr': 8,
'code': 8,
'title': 'Difference',
'units': 'value at the start of time range minus value at the end'},
{'abbr': 'ratio', 'code': 9, 'title': 'Ratio'},
{'abbr': 'missing', 'code': 255, 'title': 'Missing'})
|
py | b412d4b441a9270584b981e2e6e3fe74a658d03e | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .azure_workload_sap_hana_restore_request_py3 import AzureWorkloadSAPHanaRestoreRequest
class AzureWorkloadSAPHanaPointInTimeRestoreRequest(AzureWorkloadSAPHanaRestoreRequest):
"""AzureWorkload SAP Hana -specific restore. Specifically for PointInTime/Log
restore.
All required parameters must be populated in order to send to Azure.
:param object_type: Required. Constant filled by server.
:type object_type: str
:param target_info: Details of target database
:type target_info:
~azure.mgmt.recoveryservicesbackup.models.TargetRestoreInfo
:param recovery_type: OLR/ALR, RestoreDisks is invalid option. Possible
values include: 'Invalid', 'OriginalLocation', 'AlternateLocation',
'RestoreDisks'
:type recovery_type: str or
~azure.mgmt.recoveryservicesbackup.models.RecoveryType
:param source_resource_id: Fully qualified ARM ID of the VM on which
workload that was running is being recovered.
:type source_resource_id: str
:param property_bag: Workload specific property bag.
:type property_bag: dict[str, str]
:param point_in_time: PointInTime value
:type point_in_time: datetime
"""
_validation = {
'object_type': {'required': True},
}
_attribute_map = {
'object_type': {'key': 'objectType', 'type': 'str'},
'target_info': {'key': 'targetInfo', 'type': 'TargetRestoreInfo'},
'recovery_type': {'key': 'recoveryType', 'type': 'str'},
'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'},
'property_bag': {'key': 'propertyBag', 'type': '{str}'},
'point_in_time': {'key': 'pointInTime', 'type': 'iso-8601'},
}
def __init__(self, *, target_info=None, recovery_type=None, source_resource_id: str=None, property_bag=None, point_in_time=None, **kwargs) -> None:
super(AzureWorkloadSAPHanaPointInTimeRestoreRequest, self).__init__(target_info=target_info, recovery_type=recovery_type, source_resource_id=source_resource_id, property_bag=property_bag, **kwargs)
self.point_in_time = point_in_time
self.object_type = 'AzureWorkloadSAPHanaPointInTimeRestoreRequest'
|
py | b412d62ec4bbb72f67d6903af3dfd17fce644db0 |
# coding: utf-8
# In[1]:
from jove.SystemImports import *
from jove.TransitionSelectors import *
from jove.DotBashers import chk_consistent_pda
# # Pushdown Automata (PDA)
#
# ## Basic Definitions
#
# Pushdown Automata are structures
#
# $(Q, Sigma, Gamma, Delta, q0, z0, F)$
#
# where
#
# * $Q$ : Finite non-empty set of states
#
# * $Sigma$ : Finite non-empty input alphabet
#
# * $Gamma$ : Finite non-empty stack alphabet (usually subsumes Sigma)
#
# * $Delta$ : A transition function
#
# and $Delta$'s signature is
#
# $(Q \times (Sigma \cup \{\varepsilon\}) \times (Gamma\cup\{\varepsilon\})) \rightarrow (Q \times Gamma^*)$
#
# ## Example
#
# We model Delta as a mapping of this form
#
# (q, a, b) -> { (q1,G1s), (q2,G2s), ... }
#
# where
# a gets read
# b gets popped, if non-empty
# Gis gets pushed
# qi becomes the next state
#
# * q0 : Starting state
#
# * z0 : Initial stack's lone contents
#
# - prevents an "accept by
# empty stack" PDA from accepting as soon as it is
# switched on
#
# * F : Finite, possibly empty set of final states
#
# We will define acceptance by final state _or_ empty stack, as will be detailed in this sequel.
#
# ## Instantaneous Description
#
# An instantaneous description (ID) of a PDA is a triple (p, aI, bS).
#
# Now, ID (p, aI, bS) evolves to an ID (q, I, GS)
#
# written
#
# (p, aI, bS) $\vdash$ (q, I, GS)
#
#
# if Delta(p,a,b) contains (q,G)
#
# A PDA accepts by final state if its ID is of the form (p, "", S)
# where p in F.
#
# That is, the input is fully consumed
# and control resides within F. Note that S is arbitrary.
#
# A PDA accepts by empty stack if its ID is of the form (p, "", "")
# at any point (for any p).
#
# ## Design Details of a PDA
#
# To __prevent__ a PDA P whose acceptance is defined via an empty stack
# from accepting "as soon as it is turned on", we put in an
# initial stack letter denoted by P["z0"].
#
# * As of now, P["z0"] is the hash mark, #
#
# - It does not matter what this character it is
#
# - With markdowns, the initial stack contents is always #
#
# * Note that this is only to help-out the user. The user may decide to start with
# an empty stack, which is fine.
#
# * Our preferred initial stack symbol is "z" (lower-case z).
#
#
# # Our coding decisions wrt acceptance
#
# In our coding,
#
# * For PDA, we will require there to be an initial stack symbol
#
# * We will permit acceptance either by final state or empty stack (this will be a
# parameter given to the run_pda function)
#
# * We will require that a PDA always pop something from the stack (but allow zero or more things to be pushed). This way ("zero or more"), emptying the stack becomes possible.
#
# * When we encounter an ID for which acceptance has been noted, that ID will still be expanded if there are moves leading out of it.
#
# # Routines to run PDA
#
# We now devise a routine to run a PDA according to either the "accept by final state" criterion or "accept by empty stack" criterion. We call these "ACCEPT_F" and "ACCEPT_S" with the default being ACCEPT_F. The main difference is that the "final" configurations are collected differently.
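# As a concrete illustration (a hand-written sketch: the field names simply follow the
# accesses made by the code below, and in practice Jove PDAs are usually built with its
# markdown parser rather than by hand), a PDA accepting {a^n b^n : n >= 0} by empty
# stack could look like this:
#
#   anbn = {"Q": {"q"},
#           "Sigma": {"a", "b"},
#           "Gamma": {"a", "z"},
#           "Delta": {("q", "a", "z"): {("q", "az")},   # push an 'a' above z
#                     ("q", "a", "a"): {("q", "aa")},   # keep counting a's
#                     ("q", "b", "a"): {("q", "")},     # match a b against an a
#                     ("q", "",  "z"): {("q", "")}},    # drain z once the input is done
#           "q0": "q", "z0": "z", "F": set()}
#
#   explore_pda("aabb", anbn, acceptance="ACCEPT_S")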
# In[2]:
def explore_pda(inp, P, acceptance = 'ACCEPT_F', STKMAX=0, chatty=False):
"""A handy routine to print the result of run_pda plus making
future extensions to explore run-results.
"""
chk_consistent_pda(P)
(term, final, visited) = run_pda(inp, P, acceptance, STKMAX=STKMAX,
chatty=chatty)
if (final == []):
print("String " + inp + " rejected by your PDA :-(")
print("Visited states are:")
print(visited)
else:
print("String " + inp + " accepted by your PDA in " +
str(len(final)) + " ways :-) ")
print("Here are the ways: ")
for fin_path in final:
(fin, path) = fin_path
print("Final state ", fin)
print("Reached as follows:")
for p in path:
print("-> ", p)
print("-> ", fin, ".")
# In[3]:
def run_pda(str, P, acceptance = 'ACCEPT_F', STKMAX=0, chatty=False):
"""Helper for explore_pda
---
Input: An initial string str.
A PDA P
The acceptance criterion (default is "by final state"
encoded as ACCEPT_F. The alternative is ACCEPT_S
that stands for "acceptance by empty stack").
Output: (l_term_id_path, l_final_id_path, s_visited_id)
Thus, an external routine can probe and determine
* terminal IDs
* acceptance configurations
* visited IDs
"""
chk_consistent_pda(P)
init_id = (P["q0"], str, P["z0"]) # Initial ID
init_l_id_path = [(init_id, [])] # [(Initial ID, empty path)]
s_visited_id = set({}) # Nothing visited yet
(l_surv,
l_term,
l_final) = classify_l_id_path(init_l_id_path, s_visited_id, P, acceptance,
STKMAX=STKMAX)
rslt = h_run_pda(l_id_path = l_surv,
l_term_id_path = l_term,
l_final_id_path = l_final,
s_visited_id = s_visited_id,
pda = P,
acceptance = acceptance, # Acceptance criterion
STKMAX = STKMAX
)
(terminal_id_path, final_id_path, visited_ids) = rslt
if chatty:
print("terminal_id_path = ", terminal_id_path)
print("final_id_path = ", final_id_path)
print("visited_ids = ", visited_ids)
return rslt
# In[4]:
def classify_l_id_path(l_id_path, s_visited_id, P, acceptance, STKMAX):
"""Helper for run_pda
---
Given a list l_id_path of id_path pairs, a list s_visited_id
of visited IDs, a PDA P, and the acceptance criterion, classify
the contents of id_path into survivors, terminals, and finals.
"""
#print("---")
#print("classify_l_id_path >> ")
#print("l_id_path = ", l_id_path)
#print("s_visited_id = ", s_visited_id)
surv_pool = list(map(survivor_id(s_visited_id, P, STKMAX=STKMAX), l_id_path))
term_pool = list(map(term_id(s_visited_id, P, STKMAX=STKMAX), l_id_path))
final_pool = list(map(final_id(P, acceptance), l_id_path))
l_surv = list(map(lambda x: x[1],
filter(lambda x: x[0]=="surv",
surv_pool)))
l_term = list(map(lambda x: x[1],
filter(lambda x: x[0]=="term",
term_pool)))
l_final = list(map(lambda x: x[1],
filter(lambda x: x[0]=="final",
final_pool)))
#print("classify_l_id_path << ")
#print("l_surv = ", l_surv)
#print("l_term = ", l_term)
#print("l_final = ", l_final)
#print("---")
return (l_surv, l_term, l_final)
# In[5]:
def h_run_pda(l_id_path, l_term_id_path, l_final_id_path, s_visited_id,
pda, acceptance, STKMAX):
"""Helper for run_pda
---
Input: A list of id_path, all of which are surviving i.e. not
"term" or terminal. This invariant is maintained.
A list of terminal id_path (terminal in that there is
no point pushing on them; stuck or loopy).
A list of final id_path: whenever we meet the
acceptance condition, we record that configuration;
A list of visited id. This will help determine if
terminal or not. Detects looping as well.
A PDA.
Output: (l_term_id_path, l_final_id_path, s_visited_id)
Thus, an external routine can probe and determine
* terminal IDs
* acceptance configurations
* visited IDs
"""
while (l_id_path != []):
id_path0 = l_id_path[0]
(id0,path0) = id_path0 # separate out the id and path
# First, record the current id0 in s_visited_id
s_visited_id = {id0} | s_visited_id
# Then obtain (ID, path) pairs generated by
# taking all possible one-step moves out of id0.
# We also record the extension of path0 in each such
# reached new ID.
nl_id_path0 = step_pda(id0, path0, pda)
if nl_id_path0 == []:
# Nothing gen by firing id0; recurse on rest
l_id_path = l_id_path[1:]
else:
# Classify the progenies of id0 in nl_id_path0
(l_surv,
l_term,
l_final) = classify_l_id_path(nl_id_path0, s_visited_id, pda, acceptance, STKMAX)
l_id_path = l_id_path[1:] + l_surv
l_term_id_path = l_term_id_path + l_term
l_final_id_path = l_final_id_path + l_final
return (l_term_id_path, l_final_id_path, s_visited_id)
# In[6]:
def interpret_w_eps(q_inp_stk, pda):
"""Helper for step_pda
---
Produce the most liberal interpretation of q_inp_stk for pda
i.e. in (q, inp_str, stk_str), we can ignore inp_str or stk_str.
E.g. if inp_str is "ab", we can consider it to be "" or "a".
The rest of the string will then be "ab" or "b" respectively.
This is done if a move in Delta can process that option.
"""
(q, inp_str, stk_str) = q_inp_stk
inp_interps = cvt_str_to_sym(inp_str) # Diverse interpretations of input
stk_interps = cvt_str_to_sym(stk_str) # and stack strings.
# A list of the form [ ((if, ir), (sf, sr)), ... ] pairs where
# ifst is the first of the input and sfst is the first of the stack
# irst is the rest of the input and srst is the rest of the stack
i_s_interps = list(product(inp_interps, stk_interps))
pda_delta = pda["Delta"]
key_list = list(pda_delta.keys())
# Form a dictionary i_s_interp_dict of { i_s_interp : delta-codom-pt-set }
i_s_interp_dict = dict({})
for i_s_interp in i_s_interps:
# Each i_s_interp is ((ifst, irst), (sfst, srst))
(inp_interp, stk_interp) = i_s_interp
(ifst, irst) = inp_interp
(sfst, srst) = stk_interp
# Now form all possible transitions under each interpretation
key_sought = (q, ifst, sfst)
if key_sought in key_list:
# Transition as per that, recording the irst, srst also
i_s_interp_dict.update({i_s_interp : pda_delta[key_sought]})
return i_s_interp_dict
# In[7]:
def step_pda(q_inp_stk, path, pda):
"""Inputs: An ID q_inp_stk = (q, inp_str, stk_str)
A path reaching this ID. path is a list
of Delta's domain triples via which the firings occurred.
A pda (Q, Sigma, Gamma, Delta, q0, z0, F)
Output: Let inp_sym and stk_sym be the symbols in the input/stack.
In case (q,inp_sym, stk_sym) is not in the domain of pda's
TRel, return [], i.e. empty list.
Else return the list [ (q_inp_stk_i, path_i), ... ]
where ID q_inp_stk_i can be reached via path_i,
and path_i is obtained by extending path
with the domain triple that fired.
For instance, if path_list is [p1,p2,p3] and the
transition at domain point (q,c,s) fired from (q,inp,stk),
and the codomain has its third entry as (q3,inp3,stk3),
then q_inp_stk_i will be (q3,inp3,stk3)
and path_i will be [p1,p2,p3, (q,c,s)].
"""
i_s_interp_dict = interpret_w_eps(q_inp_stk, pda)
nxt_id_path_l = []
extpath = path + [ q_inp_stk ]
for i_s_interp_item in i_s_interp_dict.items():
(((ifst, irst), # extract input fst,rst
(sfst, srst)), # and stack fst,rst
codom_set # and codom_set
) = i_s_interp_item
for codom_pt in codom_set:
(nxt_st, str_psh) = codom_pt
nxt_id_path_l += [((nxt_st, irst, str_psh+srst),
extpath)]
return nxt_id_path_l
# In[8]:
def survivor_id(s_visited_id, pda, STKMAX):
"""Helper for classify_l_id_path
---
Classify s_visited_id using is_surv_id to tag
its entries 'surv' or 'not_surv'.
"""
return (lambda id_path:
(("surv", id_path)
if is_surv_id(id_path, s_visited_id, pda, STKMAX=STKMAX)
else ("not_surv", id_path)))
def term_id(s_visited_id, pda, STKMAX):
"""Helper for classify_l_id_path
---
Classify s_visited_id using is_term_id to tag
its entries 'term' or 'not_term'.
"""
return (lambda id_path:
(("term", id_path)
if is_term_id(id_path, s_visited_id, pda, STKMAX=STKMAX)
else ("not_term", id_path)))
def final_id(pda, acceptance):
"""Helper for classify_l_id_path
---
Classify s_visited_id using is_final_id to tag
its entries 'final' or 'not_final'.
"""
return (lambda id_path:
(("final", id_path)
if is_final_id(id_path, pda, acceptance)
else ("not_final", id_path)))
# In[9]:
def cvt_str_to_sym(str):
"""Helper for interpret_w_eps
---
Given a string, interpret it in all possible ways and return a set of pairs
of (first, rest). E.g. "ab" interpreted as ("", "ab") as well as ("a", "b").
However, "" interpreted only as ("", "").
"""
if str == "":
return [("", "")]
else:
return [("", str), (str[0], str[1:])]
# In[10]:
def is_surv_id(id_path, s_visited_id, pda, STKMAX):
"""Helper for survivor_id
---
If there is any move out of the id of id_path,
and the id is not subsumed by s_visited_id,
then it is "surv"; else not.
"""
#print("--is_surv_id--")
(id, path) = id_path
S = subsumed(id, s_visited_id, STKMAX=STKMAX)
#print("not S = ", (not S))
return (not S)
from functools import reduce
def subsumed(id, s_visited_id, STKMAX):
"""Helper for is_term_id and is_surv_id
---
If id is (q,in_str,stk_str)
and exists a member (q1,in_str1,stk_str1) in s_visited_id
then subsumed is True if q==q1, in_str==in_str1
and stk_str1 starts with stk_str.
This "starts with" test models stk_str being on top of the stack.
"""
#print(" ~~~")
#print(" subsumed >>>")
#print("id",id)
#print("s_visited_id",s_visited_id)
(q, inp_str, stk_str) = id
for (q1, inp_str1, stk_str1) in s_visited_id:
if ((q==q1) and
(inp_str == inp_str1) and
(stk_str.startswith(stk_str1)
or
(len(stk_str) - len(stk_str1)) >= STKMAX)):
return True
return False
def is_term_id(id_path, s_visited_id, pda, STKMAX):
"""Helper for term_id
---
If the id of id_path is subsumed by s_visited_id,
then it is "term"; else not.
"""
#print("--is_term_id--")
(id, path) = id_path
#print("id = ", id)
#print("s_visited_id = ", s_visited_id)
S = subsumed(id, s_visited_id, STKMAX=STKMAX)
#print("subsumed(..) = ", S)
return S
def is_final_id(id_path, pda, acceptance):
"""Helper for final_id
---
If the id of id_path meets the acceptance criterion
that is passed in, then it is "final"; else not.
"""
(id, path) = id_path
(q, inp_str, stk_str) = id
if (acceptance == "ACCEPT_F"):
return (inp_str=="" and q in pda["F"])
else:
assert(acceptance == "ACCEPT_S")
return (inp_str=="" and stk_str=="")
# Now for the functions in this file
# In[11]:
print('''You may use any of these help commands:
help(explore_pda)
help(run_pda)
help(classify_l_id_path)
help(h_run_pda)
help(interpret_w_eps)
help(step_pda)
help(survivor_id)
help(term_id)
help(final_id)
help(cvt_str_to_sym)
help(is_surv_id)
help(subsumed)
help(is_term_id)
help(is_final_id)
''')
|
py | b412d667a3b3debafb31483ab423150cd7a0cbbd | from .protocol import Response
from .sessions import Session
from .typing import Any, List, Callable
import mdpopups
import sublime
class MessageRequestHandler():
def __init__(self, view: sublime.View, session: Session, request_id: Any, params: dict, source: str) -> None:
self.session = session
self.request_id = request_id
self.request_sent = False
self.view = view
self.actions = params.get("actions", [])
self.titles = list(action.get("title") for action in self.actions)
self.message = params.get('message', '')
self.message_type = params.get('type', 4)
self.source = source
def _send_user_choice(self, href: int = -1) -> None:
if not self.request_sent:
self.request_sent = True
self.view.hide_popup()
# when noop; nothing was selected e.g. the user pressed escape
param = None
index = int(href)
if index != -1:
param = self.actions[index]
response = Response(self.request_id, param)
self.session.send_response(response)
def show(self) -> None:
show_notification(
self.view,
self.source,
self.message_type,
self.message,
self.titles,
self._send_user_choice,
self._send_user_choice
)
def message_content(source: str, message_type: int, message: str, titles: List[str]) -> str:
formatted = []
icons = {
1: '❗',
2: '⚠️',
3: 'ℹ️',
4: '📝'
}
icon = icons.get(message_type, '')
formatted.append("<h2>{}</h2>".format(source))
formatted.append("<p class='message'>{} {}</p>".format(icon, message))
buttons = []
for idx, title in enumerate(titles):
buttons.append("<a href='{}'>{}</a>".format(idx, title))
formatted.append("<p class='actions'>" + " ".join(buttons) + "</p>")
return "".join(formatted)
def show_notification(view: sublime.View, source: str, message_type: int, message: str, titles: List[str],
on_navigate: Callable, on_hide: Callable) -> None:
stylesheet = sublime.load_resource("Packages/LSP/notification.css")
contents = message_content(source, message_type, message, titles)
mdpopups.show_popup(
view,
contents,
css=stylesheet,
md=False,
location=-1,
wrapper_class='notification',
max_width=800,
max_height=800,
on_navigate=on_navigate,
on_hide=on_hide
)
|
py | b412d68acc41d3a8abfb0e786f76055b2a896ae6 | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import re
import sys
import os
import warnings
try:
from collections.abc import Mapping
from collections.abc import MutableMapping
except ImportError:
from collections import Mapping
from collections import MutableMapping
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
py27 = sys.version_info[0] < 3
py35 = sys.version_info[0] == 3 and sys.version_info[1] <= 5
win = os.name == "nt"
# to silence flake8 F821 errors
if py27:
unicode = eval("unicode")
else:
unicode = None
def exception_suffix(filename):
line = ""
if hasattr(sys, "_getframe"):
line = "#L" + str(sys._getframe(1).f_lineno)
filename = filename.replace("\\", "/")
filename = "/src/awkward/" + filename.split("awkward/")[1]
return (
"\n\n(https://github.com/scikit-hep/awkward-1.0/blob/"
+ ak.__version__
+ filename
+ line
+ ")"
)
def deprecate(exception, version, date=None):
if ak.deprecations_as_errors:
raise exception
else:
if date is None:
date = ""
else:
date = " (target date: " + date + ")"
message = """In version {0}{1}, this will be an error.
(Set ak.deprecations_as_errors = True to get a stack trace now.)
{2}: {3}""".format(
version, date, type(exception).__name__, str(exception)
)
warnings.warn(message, DeprecationWarning)
virtualtypes = (ak.layout.VirtualArray,)
unknowntypes = (ak.layout.EmptyArray,)
indexedtypes = (
ak.layout.IndexedArray32,
ak.layout.IndexedArrayU32,
ak.layout.IndexedArray64,
)
uniontypes = (
ak.layout.UnionArray8_32,
ak.layout.UnionArray8_U32,
ak.layout.UnionArray8_64,
)
indexedoptiontypes = (
ak.layout.IndexedOptionArray32,
ak.layout.IndexedOptionArray64,
)
optiontypes = (
ak.layout.IndexedOptionArray32,
ak.layout.IndexedOptionArray64,
ak.layout.ByteMaskedArray,
ak.layout.BitMaskedArray,
ak.layout.UnmaskedArray,
)
listtypes = (
ak.layout.RegularArray,
ak.layout.ListArray32,
ak.layout.ListArrayU32,
ak.layout.ListArray64,
ak.layout.ListOffsetArray32,
ak.layout.ListOffsetArrayU32,
ak.layout.ListOffsetArray64,
)
recordtypes = (ak.layout.RecordArray,)
class Behavior(Mapping):
def __init__(self, defaults, overrides):
self.defaults = defaults
if overrides is None:
self.overrides = {}
else:
self.overrides = overrides
def __getitem__(self, where):
try:
return self.overrides[where]
except KeyError:
try:
return self.defaults[where]
except KeyError:
return None
def items(self):
for n, x in self.overrides.items():
yield n, x
for n, x in self.defaults.items():
if n not in self.overrides:
yield n, x
def __iter__(self):
for n, x in self.items():
yield n
def __len__(self):
return len(set(self.defaults) | set(self.overrides))
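# For example (illustrative): Behavior({"x": 1, "y": 2}, {"y": 20})["y"] is 20,
# ["x"] falls back to the default 1, and a missing key yields None rather than
# raising KeyError; len() counts the union of both key sets.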
def arrayclass(layout, behavior):
layout = ak.partition.first(layout)
behavior = Behavior(ak.behavior, behavior)
arr = layout.parameter("__array__")
if isinstance(arr, str) or (py27 and isinstance(arr, unicode)):
cls = behavior[arr]
if isinstance(cls, type) and issubclass(cls, ak.highlevel.Array):
return cls
rec = layout.parameter("__record__")
if isinstance(rec, str) or (py27 and isinstance(rec, unicode)):
cls = behavior[".", rec]
if isinstance(cls, type) and issubclass(cls, ak.highlevel.Array):
return cls
deeprec = layout.purelist_parameter("__record__")
if isinstance(deeprec, str) or (py27 and isinstance(deeprec, unicode)):
cls = behavior["*", deeprec]
if isinstance(cls, type) and issubclass(cls, ak.highlevel.Array):
return cls
return ak.highlevel.Array
def custom_broadcast(layout, behavior):
layout = ak.partition.first(layout)
behavior = Behavior(ak.behavior, behavior)
custom = layout.parameter("__array__")
if not (isinstance(custom, str) or (py27 and isinstance(custom, unicode))):
custom = layout.parameter("__record__")
if not (isinstance(custom, str) or (py27 and isinstance(custom, unicode))):
custom = layout.purelist_parameter("__record__")
if isinstance(custom, str) or (py27 and isinstance(custom, unicode)):
for key, fcn in behavior.items():
if (
isinstance(key, tuple)
and len(key) == 2
and key[0] == "__broadcast__"
and key[1] == custom
):
return fcn
return None
def numba_array_typer(layouttype, behavior):
behavior = Behavior(ak.behavior, behavior)
arr = layouttype.parameters.get("__array__")
if isinstance(arr, str) or (py27 and isinstance(arr, unicode)):
typer = behavior["__numba_typer__", arr]
if callable(typer):
return typer
rec = layouttype.parameters.get("__record__")
if isinstance(rec, str) or (py27 and isinstance(rec, unicode)):
typer = behavior["__numba_typer__", ".", rec]
if callable(typer):
return typer
deeprec = layouttype.parameters.get("__record__")
if isinstance(deeprec, str) or (py27 and isinstance(deeprec, unicode)):
typer = behavior["__numba_typer__", "*", deeprec]
if callable(typer):
return typer
return None
def numba_array_lower(layouttype, behavior):
behavior = Behavior(ak.behavior, behavior)
arr = layouttype.parameters.get("__array__")
if isinstance(arr, str) or (py27 and isinstance(arr, unicode)):
lower = behavior["__numba_lower__", arr]
if callable(lower):
return lower
rec = layouttype.parameters.get("__record__")
if isinstance(rec, str) or (py27 and isinstance(rec, unicode)):
lower = behavior["__numba_lower__", ".", rec]
if callable(lower):
return lower
deeprec = layouttype.parameters.get("__record__")
if isinstance(deeprec, str) or (py27 and isinstance(deeprec, unicode)):
lower = behavior["__numba_lower__", "*", deeprec]
if callable(lower):
return lower
return None
def recordclass(layout, behavior):
layout = ak.partition.first(layout)
behavior = Behavior(ak.behavior, behavior)
rec = layout.parameter("__record__")
if isinstance(rec, str) or (py27 and isinstance(rec, unicode)):
cls = behavior[rec]
if isinstance(cls, type) and issubclass(cls, ak.highlevel.Record):
return cls
return ak.highlevel.Record
def typestrs(behavior):
behavior = Behavior(ak.behavior, behavior)
out = {}
for key, typestr in behavior.items():
if (
isinstance(key, tuple)
and len(key) == 2
and key[0] == "__typestr__"
and (isinstance(key[1], str) or (py27 and isinstance(key[1], unicode)))
and (isinstance(typestr, str) or (py27 and isinstance(typestr, unicode)))
):
out[key[1]] = typestr
return out
def numba_record_typer(layouttype, behavior):
behavior = Behavior(ak.behavior, behavior)
rec = layouttype.parameters.get("__record__")
if isinstance(rec, str) or (py27 and isinstance(rec, unicode)):
typer = behavior["__numba_typer__", rec]
if callable(typer):
return typer
return None
def numba_record_lower(layouttype, behavior):
behavior = Behavior(ak.behavior, behavior)
rec = layouttype.parameters.get("__record__")
if isinstance(rec, str) or (py27 and isinstance(rec, unicode)):
lower = behavior["__numba_lower__", rec]
if callable(lower):
return lower
return None
def overload(behavior, signature):
if not any(s is None for s in signature):
behavior = Behavior(ak.behavior, behavior)
for key, custom in behavior.items():
if (
isinstance(key, tuple)
and len(key) == len(signature)
and key[0] == signature[0]
and all(
k == s
or (
isinstance(k, type) and isinstance(s, type) and issubclass(s, k)
)
for k, s in zip(key[1:], signature[1:])
)
):
return custom
def numba_attrs(layouttype, behavior):
behavior = Behavior(ak.behavior, behavior)
rec = layouttype.parameters.get("__record__")
if isinstance(rec, str) or (py27 and isinstance(rec, unicode)):
for key, typer in behavior.items():
if (
isinstance(key, tuple)
and len(key) == 3
and key[0] == "__numba_typer__"
and key[1] == rec
):
lower = behavior["__numba_lower__", key[1], key[2]]
yield key[2], typer, lower
def numba_methods(layouttype, behavior):
behavior = Behavior(ak.behavior, behavior)
rec = layouttype.parameters.get("__record__")
if isinstance(rec, str) or (py27 and isinstance(rec, unicode)):
for key, typer in behavior.items():
if (
isinstance(key, tuple)
and len(key) == 4
and key[0] == "__numba_typer__"
and key[1] == rec
and key[3] == ()
):
lower = behavior["__numba_lower__", key[1], key[2], ()]
yield key[2], typer, lower
def numba_unaryops(unaryop, left, behavior):
behavior = Behavior(ak.behavior, behavior)
done = False
if isinstance(left, ak._connect._numba.layout.ContentType):
left = left.parameters.get("__record__")
if not (isinstance(left, str) or (py27 and isinstance(left, unicode))):
done = True
if not done:
for key, typer in behavior.items():
if (
isinstance(key, tuple)
and len(key) == 3
and key[0] == "__numba_typer__"
and key[1] == unaryop
and key[2] == left
):
lower = behavior["__numba_lower__", key[1], key[2]]
yield typer, lower
def numba_binops(binop, left, right, behavior):
behavior = Behavior(ak.behavior, behavior)
done = False
if isinstance(left, ak._connect._numba.layout.ContentType):
left = left.parameters.get("__record__")
if not (isinstance(left, str) or (py27 and isinstance(left, unicode))):
done = True
if isinstance(right, ak._connect._numba.layout.ContentType):
right = right.parameters.get("__record__")
if not isinstance(right, str) and not (py27 and isinstance(right, unicode)):
done = True
if not done:
for key, typer in behavior.items():
if (
isinstance(key, tuple)
and len(key) == 4
and key[0] == "__numba_typer__"
and key[1] == left
and key[2] == binop
and key[3] == right
):
lower = behavior["__numba_lower__", key[1], key[2], key[3]]
yield typer, lower
def behaviorof(*arrays):
behavior = None
for x in arrays[::-1]:
if (
isinstance(
x,
(ak.highlevel.Array, ak.highlevel.Record, ak.highlevel.ArrayBuilder,),
)
and x.behavior is not None
):
if behavior is None:
behavior = dict(x.behavior)
else:
behavior.update(x.behavior)
return behavior
def wrap(content, behavior):
if isinstance(content, (ak.layout.Content, ak.partition.PartitionedArray)):
return ak.highlevel.Array(content, behavior=behavior, kernels=None)
elif isinstance(content, ak.layout.Record):
return ak.highlevel.Record(content, behavior=behavior, kernels=None)
else:
return content
def extra(args, kwargs, defaults):
out = []
for i in range(len(defaults)):
name, default = defaults[i]
if i < len(args):
out.append(args[i])
elif name in kwargs:
out.append(kwargs[name])
else:
out.append(default)
return out
def key2index(keys, key):
if keys is None:
attempt = None
else:
try:
attempt = keys.index(key)
except ValueError:
attempt = None
if attempt is None:
m = key2index._pattern.match(key)
if m is not None:
attempt = m.group(0)
if attempt is None:
raise ValueError(
"key {0} not found in record".format(repr(key)) + exception_suffix(__file__)
)
else:
return attempt
key2index._pattern = re.compile(r"^[1-9][0-9]*$")
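# completely_flatten recursively strips list/option/record (and partition/virtual)
# structure from a layout and collects the underlying NumPy-like leaf arrays into a tuple.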
def completely_flatten(array):
if isinstance(array, ak.partition.PartitionedArray):
out = []
for partition in array.partitions:
for outi in completely_flatten(partition):
out.append(outi)
return tuple(out)
elif isinstance(array, virtualtypes):
return completely_flatten(array.array)
elif isinstance(array, unknowntypes):
return (ak.nplike.of(array).array([], dtype=np.bool_),)
elif isinstance(array, indexedtypes):
return completely_flatten(array.project())
elif isinstance(array, uniontypes):
out = []
for i in range(array.numcontents):
out.append(completely_flatten(array.project(i)))
return tuple(out)
elif isinstance(array, optiontypes):
return completely_flatten(array.project())
elif isinstance(array, listtypes):
return completely_flatten(array.flatten(axis=1))
elif isinstance(array, recordtypes):
out = []
for i in range(array.numfields):
out.extend(completely_flatten(array.field(i)))
return tuple(out)
elif isinstance(array, ak.layout.NumpyArray):
return (ak.nplike.of(array).asarray(array),)
else:
raise RuntimeError(
"cannot completely flatten: {0}".format(type(array))
+ exception_suffix(__file__)
)
def broadcast_and_apply( # noqa: C901
inputs,
getfunction,
behavior,
allow_records=True,
pass_depth=True,
pass_user=False,
user=None,
left_broadcast=True,
right_broadcast=True,
numpy_to_regular=False,
):
def checklength(inputs):
length = len(inputs[0])
for x in inputs[1:]:
if len(x) != length:
raise ValueError(
"cannot broadcast {0} of length {1} with {2} of "
"length {3}".format(
type(inputs[0]).__name__, length, type(x).__name__, len(x)
)
+ exception_suffix(__file__)
)
def all_same_offsets(nplike, inputs):
offsets = None
for x in inputs:
if isinstance(
x,
(
ak.layout.ListOffsetArray32,
ak.layout.ListOffsetArrayU32,
ak.layout.ListOffsetArray64,
),
):
if offsets is None:
offsets = nplike.asarray(x.offsets)
elif not nplike.array_equal(offsets, nplike.asarray(x.offsets)):
return False
elif isinstance(
x,
(ak.layout.ListArray32, ak.layout.ListArrayU32, ak.layout.ListArray64,),
):
starts = nplike.asarray(x.starts)
stops = nplike.asarray(x.stops)
if not nplike.array_equal(starts[1:], stops[:-1]):
return False
if offsets is None:
offsets = nplike.empty(len(starts) + 1, dtype=starts.dtype)
if len(offsets) == 1:
offsets[0] = 0
else:
offsets[:-1] = starts
offsets[-1] = stops[-1]
elif not nplike.array_equal(offsets[:-1], starts) or (
len(stops) != 0 and offsets[-1] != stops[-1]
):
return False
elif isinstance(x, ak.layout.RegularArray):
my_offsets = nplike.arange(0, len(x.content), x.size)
if offsets is None:
offsets = my_offsets
elif not nplike.array_equal(offsets, my_offsets):
return False
elif isinstance(x, ak.layout.Content):
return False
else:
return True
def apply(inputs, depth, user):
nplike = ak.nplike.of(*inputs)
if numpy_to_regular:
inputs = [
x.toRegularArray() if isinstance(x, ak.layout.NumpyArray) else x
for x in inputs
]
# handle implicit right-broadcasting (i.e. NumPy-like)
if right_broadcast and any(isinstance(x, listtypes) for x in inputs):
maxdepth = max(
x.purelist_depth for x in inputs if isinstance(x, ak.layout.Content)
)
if maxdepth > 0 and all(
x.purelist_isregular for x in inputs if isinstance(x, ak.layout.Content)
):
nextinputs = []
for obj in inputs:
if isinstance(obj, ak.layout.Content):
while obj.purelist_depth < maxdepth:
obj = ak.layout.RegularArray(obj, 1)
nextinputs.append(obj)
if any(x is not y for x, y in zip(inputs, nextinputs)):
return apply(nextinputs, depth, user)
# now all lengths must agree
checklength([x for x in inputs if isinstance(x, ak.layout.Content)])
args = ()
if pass_depth:
args = args + (depth,)
if pass_user:
args = args + (user,)
custom = getfunction(inputs, *args)
if callable(custom):
return custom()
else:
user = custom
# the rest of this is one switch statement
if any(isinstance(x, virtualtypes) for x in inputs):
nextinputs = []
for x in inputs:
if isinstance(x, virtualtypes):
nextinputs.append(x.array)
else:
nextinputs.append(x)
return apply(nextinputs, depth, user)
elif any(isinstance(x, unknowntypes) for x in inputs):
nextinputs = []
for x in inputs:
if isinstance(x, unknowntypes):
nextinputs.append(
ak.layout.NumpyArray(nplike.array([], dtype=np.bool_))
)
else:
nextinputs.append(x)
return apply(nextinputs, depth, user)
elif any(isinstance(x, ak.layout.NumpyArray) and x.ndim > 1 for x in inputs):
nextinputs = []
for x in inputs:
if isinstance(x, ak.layout.NumpyArray) and x.ndim > 1:
nextinputs.append(x.toRegularArray())
else:
nextinputs.append(x)
return apply(nextinputs, depth, user)
elif any(isinstance(x, indexedtypes) for x in inputs):
nextinputs = []
for x in inputs:
if isinstance(x, indexedtypes):
nextinputs.append(x.project())
else:
nextinputs.append(x)
return apply(nextinputs, depth, user)
elif any(isinstance(x, uniontypes) for x in inputs):
tagslist = []
length = None
for x in inputs:
if isinstance(x, uniontypes):
tagslist.append(nplike.asarray(x.tags))
if length is None:
length = len(tagslist[-1])
elif length != len(tagslist[-1]):
raise ValueError(
"cannot broadcast UnionArray of length {0} "
"with UnionArray of length {1}".format(
length, len(tagslist[-1])
)
+ exception_suffix(__file__)
)
combos = nplike.stack(tagslist, axis=-1)
combos = combos.view(
[(str(i), combos.dtype) for i in range(len(tagslist))]
).reshape(length)
tags = nplike.empty(length, dtype=np.int8)
index = nplike.empty(length, dtype=np.int64)
outcontents = []
for tag, combo in enumerate(nplike.unique(combos)):
mask = combos == combo
tags[mask] = tag
index[mask] = nplike.arange(nplike.count_nonzero(mask))
nextinputs = []
numoutputs = None
i = 0
for x in inputs:
if isinstance(x, uniontypes):
nextinputs.append(x[mask].project(combo[str(i)]))
i += 1
elif isinstance(x, ak.layout.Content):
nextinputs.append(x[mask])
else:
nextinputs.append(x)
outcontents.append(apply(nextinputs, depth, user))
assert isinstance(outcontents[-1], tuple)
if numoutputs is not None:
assert numoutputs == len(outcontents[-1])
numoutputs = len(outcontents[-1])
tags = ak.layout.Index8(tags)
index = ak.layout.Index64(index)
return tuple(
ak.layout.UnionArray8_64(
tags, index, [x[i] for x in outcontents]
).simplify()
for i in range(numoutputs)
)
elif any(isinstance(x, optiontypes) for x in inputs):
mask = None
for x in inputs:
if isinstance(x, optiontypes):
m = nplike.asarray(x.bytemask()).view(np.bool_)
if mask is None:
mask = m
else:
nplike.bitwise_or(mask, m, out=mask)
nextmask = ak.layout.Index8(mask.view(np.int8))
index = nplike.full(len(mask), -1, dtype=np.int64)
index[~mask] = nplike.arange(
len(mask) - nplike.count_nonzero(mask), dtype=np.int64
)
index = ak.layout.Index64(index)
if any(not isinstance(x, optiontypes) for x in inputs):
nextindex = nplike.arange(len(mask), dtype=np.int64)
nextindex[mask] = -1
nextindex = ak.layout.Index64(nextindex)
nextinputs = []
for x in inputs:
if isinstance(x, optiontypes):
nextinputs.append(x.project(nextmask))
elif isinstance(x, ak.layout.Content):
nextinputs.append(
ak.layout.IndexedOptionArray64(nextindex, x).project(nextmask)
)
else:
nextinputs.append(x)
outcontent = apply(nextinputs, depth, user)
assert isinstance(outcontent, tuple)
return tuple(
ak.layout.IndexedOptionArray64(index, x).simplify() for x in outcontent
)
elif any(isinstance(x, listtypes) for x in inputs):
if all(
isinstance(x, ak.layout.RegularArray) or not isinstance(x, listtypes)
for x in inputs
):
maxsize = max(
[x.size for x in inputs if isinstance(x, ak.layout.RegularArray)]
)
for x in inputs:
if isinstance(x, ak.layout.RegularArray):
if maxsize > 1 and x.size == 1:
tmpindex = ak.layout.Index64(
nplike.repeat(
nplike.arange(len(x), dtype=np.int64), maxsize
)
)
nextinputs = []
for x in inputs:
if isinstance(x, ak.layout.RegularArray):
if maxsize > 1 and x.size == 1:
nextinputs.append(
ak.layout.IndexedArray64(
tmpindex, x.content[: len(x) * x.size]
).project()
)
elif x.size == maxsize:
nextinputs.append(x.content[: len(x) * x.size])
else:
raise ValueError(
"cannot broadcast RegularArray of size "
"{0} with RegularArray of size {1}".format(
x.size, maxsize
)
+ exception_suffix(__file__)
)
else:
nextinputs.append(x)
outcontent = apply(nextinputs, depth + 1, user)
assert isinstance(outcontent, tuple)
return tuple(ak.layout.RegularArray(x, maxsize) for x in outcontent)
elif not all_same_offsets(nplike, inputs):
fcns = [
custom_broadcast(x, behavior)
if isinstance(x, ak.layout.Content)
else None
for x in inputs
]
first, secondround = None, False
for x, fcn in zip(inputs, fcns):
if (
isinstance(x, listtypes)
and not isinstance(x, ak.layout.RegularArray)
and fcn is None
):
first = x
break
if first is None:
secondround = True
for x in inputs:
if isinstance(x, listtypes) and not isinstance(
x, ak.layout.RegularArray
):
first = x
break
offsets = first.compact_offsets64(True)
nextinputs = []
for x, fcn in zip(inputs, fcns):
if callable(fcn) and not secondround:
nextinputs.append(fcn(x, offsets))
elif isinstance(x, listtypes):
nextinputs.append(x.broadcast_tooffsets64(offsets).content)
# handle implicit left-broadcasting (unlike NumPy)
elif left_broadcast and isinstance(x, ak.layout.Content):
nextinputs.append(
ak.layout.RegularArray(x, 1)
.broadcast_tooffsets64(offsets)
.content
)
else:
nextinputs.append(x)
outcontent = apply(nextinputs, depth + 1, user)
assert isinstance(outcontent, tuple)
return tuple(
ak.layout.ListOffsetArray64(offsets, x) for x in outcontent
)
else:
lencontent, offsets, starts, stops = None, None, None, None
nextinputs = []
for x in inputs:
if isinstance(
x,
(
ak.layout.ListOffsetArray32,
ak.layout.ListOffsetArrayU32,
ak.layout.ListOffsetArray64,
),
):
offsets = x.offsets
lencontent = offsets[-1]
nextinputs.append(x.content[:lencontent])
elif isinstance(
x,
(
ak.layout.ListArray32,
ak.layout.ListArrayU32,
ak.layout.ListArray64,
),
):
starts, stops = x.starts, x.stops
if len(starts) == 0 or len(stops) == 0:
nextinputs.append(x.content[:0])
else:
lencontent = nplike.max(stops)
nextinputs.append(x.content[:lencontent])
else:
nextinputs.append(x)
outcontent = apply(nextinputs, depth + 1, user)
if isinstance(offsets, ak.layout.Index32):
return tuple(
ak.layout.ListOffsetArray32(offsets, x) for x in outcontent
)
elif isinstance(offsets, ak.layout.IndexU32):
return tuple(
ak.layout.ListOffsetArrayU32(offsets, x) for x in outcontent
)
elif isinstance(offsets, ak.layout.Index64):
return tuple(
ak.layout.ListOffsetArray64(offsets, x) for x in outcontent
)
elif isinstance(starts, ak.layout.Index32):
return tuple(
ak.layout.ListArray32(starts, stops, x) for x in outcontent
)
elif isinstance(starts, ak.layout.IndexU32):
return tuple(
ak.layout.ListArrayU32(starts, stops, x) for x in outcontent
)
elif isinstance(starts, ak.layout.Index64):
return tuple(
ak.layout.ListArray64(starts, stops, x) for x in outcontent
)
else:
raise AssertionError(
"unexpected offsets, starts: {0} {1}".format(
type(offsets), type(starts)
)
+ exception_suffix(__file__)
)
        # RecordArrays: require matching keys and lengths, then broadcast field by field
        elif any(isinstance(x, recordtypes) for x in inputs):
if not allow_records:
raise ValueError(
"cannot broadcast: {0}".format(
", ".join(repr(type(x)) for x in inputs)
)
+ exception_suffix(__file__)
)
keys = None
length = None
istuple = True
for x in inputs:
if isinstance(x, recordtypes):
if keys is None:
keys = x.keys()
elif set(keys) != set(x.keys()):
raise ValueError(
"cannot broadcast records because keys don't "
"match:\n {0}\n {1}".format(
", ".join(sorted(keys)), ", ".join(sorted(x.keys()))
)
+ exception_suffix(__file__)
)
if length is None:
length = len(x)
elif length != len(x):
raise ValueError(
"cannot broadcast RecordArray of length {0} "
"with RecordArray of length {1}".format(length, len(x))
+ exception_suffix(__file__)
)
if not x.istuple:
istuple = False
outcontents = []
numoutputs = None
for key in keys:
outcontents.append(
apply(
[
x if not isinstance(x, recordtypes) else x[key]
for x in inputs
],
depth,
user,
)
)
assert isinstance(outcontents[-1], tuple)
if numoutputs is not None:
assert numoutputs == len(outcontents[-1])
numoutputs = len(outcontents[-1])
return tuple(
ak.layout.RecordArray(
[x[i] for x in outcontents], None if istuple else keys, length
)
for i in range(numoutputs)
)
else:
raise ValueError(
"cannot broadcast: {0}".format(", ".join(repr(type(x)) for x in inputs))
+ exception_suffix(__file__)
)
    # entry point: PartitionedArrays are either flattened to a single Content or processed
    # partition by partition; everything else is packed, broadcast at depth 0, and unpacked
    if any(isinstance(x, ak.partition.PartitionedArray) for x in inputs):
purelist_isregular = True
purelist_depths = set()
for x in inputs:
if isinstance(x, (ak.layout.Content, ak.partition.PartitionedArray)):
if not x.purelist_isregular:
purelist_isregular = False
break
purelist_depths.add(x.purelist_depth)
if purelist_isregular and len(purelist_depths) > 1:
nextinputs = []
for x in inputs:
if isinstance(x, ak.partition.PartitionedArray):
nextinputs.append(x.toContent())
else:
nextinputs.append(x)
isscalar = []
out = apply(broadcast_pack(nextinputs, isscalar), 0, None)
assert isinstance(out, tuple)
return tuple(broadcast_unpack(x, isscalar) for x in out)
else:
sample = None
for x in inputs:
if isinstance(x, ak.partition.PartitionedArray):
sample = x
break
nextinputs = ak.partition.partition_as(sample, inputs)
outputs = []
for part_inputs in ak.partition.iterate(sample.numpartitions, nextinputs):
isscalar = []
part = apply(broadcast_pack(part_inputs, isscalar), 0, None)
assert isinstance(part, tuple)
outputs.append(tuple(broadcast_unpack(x, isscalar) for x in part))
out = ()
for i in range(len(part)):
out = out + (
ak.partition.IrregularlyPartitionedArray([x[i] for x in outputs]),
)
return out
else:
isscalar = []
out = apply(broadcast_pack(inputs, isscalar), 0, user)
assert isinstance(out, tuple)
return tuple(broadcast_unpack(x, isscalar) for x in out)
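# broadcast_pack wraps every Content input in a length-1 outer RegularArray (and tiles a
# scalar Record to maxlen copies) so the depth-0 pass above sees a uniform structure;
# broadcast_unpack strips that artificial outer dimension again, returning a bare element
# when all of the inputs were scalars.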
def broadcast_pack(inputs, isscalar):
maxlen = -1
for x in inputs:
if isinstance(x, ak.layout.Content):
maxlen = max(maxlen, len(x))
if maxlen < 0:
maxlen = 1
nextinputs = []
for x in inputs:
if isinstance(x, ak.layout.Record):
index = ak.nplike.of(*inputs).full(maxlen, x.at, dtype=np.int64)
nextinputs.append(ak.layout.RegularArray(x.array[index], maxlen))
isscalar.append(True)
elif isinstance(x, ak.layout.Content):
nextinputs.append(ak.layout.RegularArray(x, len(x)))
isscalar.append(False)
else:
nextinputs.append(x)
isscalar.append(True)
return nextinputs
def broadcast_unpack(x, isscalar):
if all(isscalar):
if len(x) == 0:
return x.getitem_nothing().getitem_nothing()
else:
return x[0][0]
else:
if len(x) == 0:
return x.getitem_nothing()
else:
return x[0]
def recursively_apply(
layout,
getfunction,
pass_depth=True,
pass_user=False,
user=None,
keep_parameters=True,
numpy_to_regular=False,
):
def apply(layout, depth, user):
if numpy_to_regular and isinstance(layout, ak.layout.NumpyArray):
layout = layout.toRegularArray()
args = ()
if pass_depth:
args = args + (depth,)
if pass_user:
args = args + (user,)
custom = getfunction(layout, *args)
if callable(custom):
return custom()
else:
user = custom
# the rest of this is one switch statement
if isinstance(layout, ak.partition.PartitionedArray):
return ak.partition.IrregularlyPartitionedArray(
[apply(x, depth, user) for x in layout.partitions]
)
elif isinstance(layout, ak.layout.NumpyArray):
if keep_parameters:
return layout
else:
return ak.layout.NumpyArray(
ak.nplike.of(layout).asarray(layout), layout.identities, None
)
elif isinstance(layout, ak.layout.EmptyArray):
if keep_parameters:
return layout
else:
return ak.layout.EmptyArray(layout.identities, None)
elif isinstance(layout, ak.layout.RegularArray):
return ak.layout.RegularArray(
apply(layout.content, depth + 1, user),
layout.size,
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.ListArray32):
return ak.layout.ListArray32(
layout.starts,
layout.stops,
apply(layout.content, depth + 1, user),
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.ListArrayU32):
return ak.layout.ListArrayU32(
layout.starts,
layout.stops,
apply(layout.content, depth + 1, user),
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.ListArray64):
return ak.layout.ListArray64(
layout.starts,
layout.stops,
apply(layout.content, depth + 1, user),
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.ListOffsetArray32):
return ak.layout.ListOffsetArray32(
layout.offsets,
apply(layout.content, depth + 1, user),
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.ListOffsetArrayU32):
return ak.layout.ListOffsetArrayU32(
layout.offsets,
apply(layout.content, depth + 1, user),
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.ListOffsetArray64):
return ak.layout.ListOffsetArray64(
layout.offsets,
apply(layout.content, depth + 1, user),
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.IndexedArray32):
return ak.layout.IndexedArray32(
layout.index,
apply(layout.content, depth, user),
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.IndexedArrayU32):
return ak.layout.IndexedArrayU32(
layout.index,
apply(layout.content, depth, user),
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.IndexedArray64):
return ak.layout.IndexedArray64(
layout.index,
apply(layout.content, depth, user),
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.IndexedOptionArray32):
return ak.layout.IndexedOptionArray32(
layout.index,
apply(layout.content, depth, user),
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.IndexedOptionArray64):
return ak.layout.IndexedOptionArray64(
layout.index,
apply(layout.content, depth, user),
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.ByteMaskedArray):
return ak.layout.ByteMaskedArray(
layout.mask,
apply(layout.content, depth, user),
layout.valid_when,
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.BitMaskedArray):
return ak.layout.BitMaskedArray(
layout.mask,
apply(layout.content, depth, user),
layout.valid_when,
len(layout),
layout.lsb_order,
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.UnmaskedArray):
return ak.layout.UnmaskedArray(
apply(layout.content, depth, user),
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.RecordArray):
return ak.layout.RecordArray(
[apply(x, depth, user) for x in layout.contents],
layout.recordlookup,
len(layout),
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.Record):
            return ak.layout.Record(apply(layout.array, depth, user), layout.at)
elif isinstance(layout, ak.layout.UnionArray8_32):
return ak.layout.UnionArray8_32(
layout.tags,
layout.index,
[apply(x, depth, user) for x in layout.contents],
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.UnionArray8_U32):
return ak.layout.UnionArray8_U32(
layout.tags,
layout.index,
[apply(x, depth, user) for x in layout.contents],
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.UnionArray8_64):
return ak.layout.UnionArray8_64(
layout.tags,
layout.index,
[apply(x, depth, user) for x in layout.contents],
layout.identities,
layout.parameters if keep_parameters else None,
)
elif isinstance(layout, ak.layout.VirtualArray):
return apply(layout.array, depth, user)
else:
raise AssertionError(
"unrecognized Content type: {0}".format(type(layout))
+ exception_suffix(__file__)
)
return apply(layout, 1, user)
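# Illustrative getfunction sketch (not part of this module): with the default arguments,
# getfunction receives (layout, depth); returning a callable replaces that node with the
# callable's result, while returning None lets recursively_apply keep descending.
#
#     def double_leaves(layout, depth):
#         if isinstance(layout, ak.layout.NumpyArray):
#             return lambda: ak.layout.NumpyArray(ak.nplike.of(layout).asarray(layout) * 2)
#         return None
#
#     doubled = recursively_apply(layout, double_leaves)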
def recursive_walk(layout, apply, args=(), depth=1, materialize=False):
apply(layout, depth, *args)
if isinstance(layout, ak.partition.PartitionedArray):
for x in layout.partitions:
recursive_walk(x, apply, args, depth, materialize)
elif isinstance(layout, ak.layout.NumpyArray):
pass
elif isinstance(layout, ak.layout.EmptyArray):
pass
elif isinstance(
layout,
(
ak.layout.RegularArray,
ak.layout.ListArray32,
ak.layout.ListArrayU32,
ak.layout.ListArray64,
ak.layout.ListOffsetArray32,
ak.layout.ListOffsetArrayU32,
ak.layout.ListOffsetArray64,
),
):
recursive_walk(layout.content, apply, args, depth + 1, materialize)
elif isinstance(
layout,
(
ak.layout.IndexedArray32,
ak.layout.IndexedArrayU32,
ak.layout.IndexedArray64,
ak.layout.IndexedOptionArray32,
ak.layout.IndexedOptionArray64,
ak.layout.ByteMaskedArray,
ak.layout.BitMaskedArray,
ak.layout.UnmaskedArray,
),
):
recursive_walk(layout.content, apply, args, depth, materialize)
elif isinstance(
layout,
(
ak.layout.RecordArray,
ak.layout.UnionArray8_32,
ak.layout.UnionArray8_U32,
ak.layout.UnionArray8_64,
),
):
for x in layout.contents:
recursive_walk(x, apply, args, depth, materialize)
elif isinstance(layout, ak.layout.Record):
recursive_walk(layout.array, apply, args, depth, materialize)
elif isinstance(layout, ak.layout.VirtualArray):
if materialize:
recursive_walk(layout.array, apply, args, depth, materialize)
else:
raise AssertionError(
"unrecognized Content type: {0}".format(type(layout))
+ exception_suffix(__file__)
)
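# Example walk (illustrative): count how many nodes of each Content type appear in a layout.
# The visitor is called as apply(layout, depth, *args), so a two-argument callable suffices.
#
#     from collections import Counter
#     counts = Counter()
#     recursive_walk(layout, lambda node, depth: counts.update([type(node).__name__]))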
def find_caches(layout):
if isinstance(layout, ak.partition.PartitionedArray):
seen = set()
mutablemappings = []
for partition in layout.partitions:
for cache in partition.caches:
x = cache.mutablemapping
if id(x) not in seen:
seen.add(id(x))
mutablemappings.append(x)
else:
mutablemappings = []
for cache in layout.caches:
x = cache.mutablemapping
for y in mutablemappings:
if x is y:
break
else:
mutablemappings.append(x)
return tuple(mutablemappings)
def highlevel_type(layout, behavior, isarray):
if isarray:
return ak.types.ArrayType(layout.type(typestrs(behavior)), len(layout))
else:
return layout.type(typestrs(behavior))
_is_identifier = re.compile(r"^[A-Za-z_][A-Za-z_0-9]*$")
def minimally_touching_string(limit_length, layout, behavior):
if isinstance(layout, ak.layout.Record):
layout = layout.array[layout.at : layout.at + 1]
if len(layout) == 0:
return "[]"
def forward(x, space, brackets=True, wrap=True, stop=None):
done = False
if wrap and isinstance(x, (ak.layout.Content, ak.partition.PartitionedArray)):
cls = arrayclass(x, behavior)
if cls is not ak.highlevel.Array:
y = cls(x, behavior=behavior)
if "__repr__" in type(y).__dict__:
yield space + repr(y)
done = True
if wrap and isinstance(x, ak.layout.Record):
cls = recordclass(x, behavior)
if cls is not ak.highlevel.Record:
y = cls(x, behavior=behavior)
if "__repr__" in type(y).__dict__:
yield space + repr(y)
done = True
if not done:
if isinstance(x, (ak.layout.Content, ak.partition.PartitionedArray)):
if brackets:
yield space + "["
sp = ""
for i in range(len(x) if stop is None else stop):
for token in forward(x[i], sp):
yield token
sp = ", "
if brackets:
yield "]"
elif isinstance(x, ak.layout.Record) and x.istuple:
yield space + "("
sp = ""
for i in range(x.numfields):
key = sp
for token in forward(x[str(i)], ""):
yield key + token
key = ""
sp = ", "
yield ")"
elif isinstance(x, ak.layout.Record):
yield space + "{"
sp = ""
for k in x.keys():
if _is_identifier.match(k) is None:
kk = repr(k)
if kk.startswith("u"):
kk = kk[1:]
else:
kk = k
key = sp + kk + ": "
for token in forward(x[k], ""):
yield key + token
key = ""
sp = ", "
yield "}"
elif isinstance(x, (float, np.floating)):
yield space + "{0:.3g}".format(x)
else:
yield space + repr(x)
def backward(x, space, brackets=True, wrap=True, stop=-1):
done = False
if wrap and isinstance(x, (ak.layout.Content, ak.partition.PartitionedArray)):
cls = arrayclass(x, behavior)
if cls is not ak.highlevel.Array:
y = cls(x, behavior=behavior)
if "__repr__" in type(y).__dict__:
yield repr(y) + space
done = True
if wrap and isinstance(x, ak.layout.Record):
cls = recordclass(x, behavior)
if cls is not ak.highlevel.Record:
y = cls(x, behavior=behavior)
if "__repr__" in type(y).__dict__:
yield repr(y) + space
done = True
if not done:
if isinstance(x, (ak.layout.Content, ak.partition.PartitionedArray)):
if brackets:
yield "]" + space
sp = ""
for i in range(len(x) - 1, stop, -1):
for token in backward(x[i], sp):
yield token
sp = ", "
if brackets:
yield "["
elif isinstance(x, ak.layout.Record) and x.istuple:
yield ")" + space
for i in range(x.numfields - 1, -1, -1):
last = None
for token in backward(x[str(i)], ""):
if last is not None:
yield last
last = token
if last is not None:
yield last
if i != 0:
yield ", "
yield "("
elif isinstance(x, ak.layout.Record):
yield "}" + space
keys = x.keys()
for i in range(len(keys) - 1, -1, -1):
last = None
for token in backward(x[keys[i]], ""):
if last is not None:
yield last
last = token
if _is_identifier.match(keys[i]) is None:
kk = repr(keys[i])
if kk.startswith("u"):
kk = kk[1:]
else:
kk = keys[i]
if last is not None:
yield kk + ": " + last
if i != 0:
yield ", "
yield "{"
elif isinstance(x, (float, np.floating)):
yield "{0:.3g}".format(x) + space
else:
yield repr(x) + space
def forever(iterable):
for token in iterable:
yield token
while True:
yield None
halfway = len(layout) // 2
left, right = ["["], ["]"]
leftlen, rightlen = 1, 1
leftgen = forever(forward(layout, "", brackets=False, wrap=False, stop=halfway))
rightgen = forever(
backward(layout, "", brackets=False, wrap=False, stop=halfway - 1)
)
while True:
lft = next(leftgen)
rgt = next(rightgen)
if lft is not None:
if (
leftlen
+ rightlen
+ len(lft)
+ (2 if lft is None and rgt is None else 6)
> limit_length
):
break
left.append(lft)
leftlen += len(lft)
if rgt is not None:
if (
leftlen
+ rightlen
+ len(rgt)
+ (2 if lft is None and rgt is None else 6)
> limit_length
):
break
right.append(rgt)
rightlen += len(rgt)
if lft is None and rgt is None:
break
while len(left) > 1 and (
left[-1] == "["
or left[-1] == ", ["
or left[-1] == "{"
or left[-1] == ", {"
or left[-1] == ", "
):
left.pop()
lft = ""
while len(right) > 1 and (
right[-1] == "]"
or right[-1] == "], "
or right[-1] == "}"
or right[-1] == "}, "
or right[-1] == ", "
):
right.pop()
rgt = ""
if lft is None and rgt is None:
if left == ["["]:
return "[" + "".join(reversed(right)).lstrip(" ")
else:
return (
"".join(left).rstrip(" ") + ", " + "".join(reversed(right)).lstrip(" ")
)
else:
if left == ["["] and right == ["]"]:
return "[...]"
elif left == ["["]:
return "[... " + "".join(reversed(right)).lstrip(" ")
else:
return (
"".join(left).rstrip(" ")
+ ", ... "
+ "".join(reversed(right)).lstrip(" ")
)
class MappingProxy(MutableMapping):
"""
A type suitable for use with layout.ArrayCache.
This can be used to wrap plain dict instances if need be,
since they are not able to be weak referenced.
"""
@classmethod
def maybe_wrap(cls, mapping):
if type(mapping) is dict:
return cls(mapping)
return mapping
def __init__(self, base):
self.base = base
def __repr__(self):
return repr(self.base)
def __getitem__(self, key):
return self.base[key]
def __setitem__(self, key, value):
self.base[key] = value
def __delitem__(self, key):
del self.base[key]
def __iter__(self):
return iter(self.base)
def __len__(self):
return len(self.base)
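# Usage sketch (illustrative): wrap a plain dict so it can be weak-referenced, e.g. before
# handing it to an ak.layout.ArrayCache; the key and value names below are placeholders.
#
#     storage = MappingProxy.maybe_wrap({})    # plain dict -> MappingProxy; others pass through
#     storage["partition-0"] = some_layout     # reads and writes reach the wrapped dict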
def make_union(tags, index, contents, identities, parameters):
if isinstance(index, ak.layout.Index32):
return ak.layout.UnionArray8_32(tags, index, contents, identities, parameters)
elif isinstance(index, ak.layout.IndexU32):
return ak.layout.UnionArray8_U32(tags, index, contents, identities, parameters)
elif isinstance(index, ak.layout.Index64):
return ak.layout.UnionArray8_64(tags, index, contents, identities, parameters)
else:
raise AssertionError(index)
def union_to_record(unionarray, anonymous):
nplike = ak.nplike.of(unionarray)
contents = []
for layout in unionarray.contents:
if isinstance(layout, virtualtypes):
contents.append(layout.array)
elif isinstance(layout, indexedtypes):
contents.append(layout.project())
elif isinstance(layout, uniontypes):
contents.append(union_to_record(layout, anonymous))
elif isinstance(layout, optiontypes):
contents.append(
ak.operations.structure.fill_none(layout, np.nan, highlevel=False)
)
else:
contents.append(layout)
if not any(isinstance(x, ak.layout.RecordArray) for x in contents):
return make_union(
unionarray.tags,
unionarray.index,
contents,
unionarray.identities,
unionarray.parameters,
)
else:
seen = set()
all_names = []
for layout in contents:
if isinstance(layout, ak.layout.RecordArray):
for key in layout.keys():
if key not in seen:
seen.add(key)
all_names.append(key)
else:
if anonymous not in seen:
seen.add(anonymous)
all_names.append(anonymous)
missingarray = ak.layout.IndexedOptionArray64(
ak.layout.Index64(nplike.full(len(unionarray), -1, dtype=np.int64)),
ak.layout.EmptyArray(),
)
all_fields = []
for name in all_names:
union_contents = []
for layout in contents:
if isinstance(layout, ak.layout.RecordArray):
for key in layout.keys():
if name == key:
union_contents.append(layout.field(key))
break
else:
union_contents.append(missingarray)
else:
if name == anonymous:
union_contents.append(layout)
else:
union_contents.append(missingarray)
all_fields.append(
make_union(
unionarray.tags,
unionarray.index,
union_contents,
unionarray.identities,
unionarray.parameters,
).simplify()
)
return ak.layout.RecordArray(all_fields, all_names, len(unionarray))
|
py | b412d68dc455124b2091cae837c1f1d24047f9f9 | import numpy as np
import pandas as pd
from keras import models, layers, Input
import matplotlib.pyplot as plt
import keras
################################################ Data process
#################### standardization
path = "./features.xlsx"
out = pd.read_excel(path,index_col=0)
Max_out = np.max(out.values)
Min_out = np.min(out.values)
range_out = Max_out - Min_out
out_standard = out / range_out
print(Max_out,Min_out)
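# Note: dividing by range_out rescales by the global (max - min) spread but does not subtract
# Min_out, so the series keeps its original offset; predictions can be mapped back to ridership
# counts by multiplying with range_out.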
#################### hyperparameter
intervals = out.shape[0] # 6640 days # 12intervals
begin = 13 # start from 14th
interval_batch = 35 # batch_size
n = 118 # number of stations
pre_sr = 12
#################### samples and labels
labels = []
samples_sr = []
for i in range(begin*interval_batch,intervals):
    label = out_standard['坪洲'].values[i]  # target station: 坪洲 (Pingzhou)
sample_sr = out_standard.values[i-pre_sr:i]
samples_sr.append(sample_sr)
labels.append(label)
num_samples = len(labels)
samples_sr = np.reshape(np.array(samples_sr),(num_samples,118,pre_sr))
labels = np.array(labels)
print(samples_sr.shape)
print(labels.shape)
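# Caveat worth double-checking: each sample out_standard.values[i-pre_sr:i] has shape
# (pre_sr, n) = (12, 118), and np.reshape to (num_samples, 118, 12) reinterprets the flat
# buffer rather than swapping the time and station axes; an explicit transpose would be
# np.transpose(np.array(samples_sr), (0, 2, 1)).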
#################### adjacency matrix
path ="./near matrix.xlsx"
dataset= pd.read_excel(path,index_col=0).values
adjacencys_near = []
for i in range(num_samples):
adjacencys_near.append(dataset)
adjacencys_near = np.array(adjacencys_near)
print(adjacencys_near.shape)
path ="./middle matrix.xlsx"
dataset= pd.read_excel(path,index_col=0).values
adjacencys_middle = []
for i in range(num_samples):
adjacencys_middle.append(dataset)
adjacencys_middle = np.array(adjacencys_middle)
print(adjacencys_middle.shape)
path ="./distant matrix.xlsx"
dataset= pd.read_excel(path,index_col=0).values
adjacencys_distant = []
for i in range(num_samples):
adjacencys_distant.append(dataset)
adjacencys_distant = np.array(adjacencys_distant)
print(adjacencys_distant.shape)
#################### train and test split
x_train_sr = samples_sr[:4000]
adjacency_train_near = adjacencys_near[:4000]
adjacency_train_middle = adjacencys_middle[:4000]
adjacency_train_distant = adjacencys_distant[:4000]
y_train = labels[:4000]
x_test_sr = samples_sr[4000:]
adjacency_test_near = adjacencys_near[4000:]
adjacency_test_middle = adjacencys_middle[4000:]
adjacency_test_distant = adjacencys_distant[4000:]
y_test = labels[4000:]
print(x_train_sr.shape,y_train.shape,adjacency_train_near.shape,adjacency_train_middle.shape,adjacency_train_distant.shape)
print(x_test_sr.shape,y_test.shape,adjacency_test_near.shape,adjacency_test_middle.shape,adjacency_test_distant.shape)
plt.figure(figsize=(20,10))
plt.plot(y_test,'r')
################################################ Model: Multi-STGCnet-SR
# input
features = Input(shape=(n,pre_sr))
adjacency_near = Input(shape=(n,n))
adjacency_middle = Input(shape=(n,n))
adjacency_distant = Input(shape=(n,n))
# near
# GCN layer
output_near_start = layers.Dot(axes=1)([adjacency_near, features])
output = layers.Dense(n,activation='relu')(output_near_start)
# GCN layer
output = layers.Dot(axes=1)([adjacency_near, output])
output = layers.Dense(n,activation='relu')(output)
output = layers.Permute((2,1))(output)
output = layers.LSTM(32,return_sequences=True)(output)
output = layers.LSTM(12,kernel_initializer='random_normal')(output)
output_near_end = layers.Dense(1,activation='relu',kernel_initializer='random_normal')(output)
# middle
# GCN layer
output_middle_start = layers.Dot(axes=1)([adjacency_middle, features])
output = layers.Dense(n,activation='relu')(output_middle_start)
# GCN layer
output = layers.Dot(axes=1)([adjacency_middle, output])
output = layers.Dense(n,activation='relu')(output)
output = layers.Permute((2,1))(output)
output = layers.LSTM(32,return_sequences=True)(output)
output = layers.LSTM(12,kernel_initializer='random_normal')(output)
output_middle_end = layers.Dense(1,activation='relu',kernel_initializer='random_normal')(output)
# distant
# GCN layer
output_distant_start = layers.Dot(axes=1)([adjacency_distant, features])
output = layers.Dense(n,activation='relu')(output_distant_start)
# GCN layer
output = layers.Dot(axes=1)([adjacency_distant, output])
output = layers.Dense(n,activation='relu')(output)
output = layers.Permute((2,1))(output)
output = layers.LSTM(32,return_sequences=True)(output)
output = layers.LSTM(12,kernel_initializer='random_normal')(output)
output_distant_end = layers.Dense(1,activation='relu',kernel_initializer='random_normal')(output)
# merge
merge = layers.concatenate([output_near_end, output_middle_end ,output_distant_end],axis=-1)
temp = layers.Dense(35,activation='relu',kernel_initializer='random_normal')(merge)
output_end = layers.Dense(1,activation='relu',kernel_initializer='random_normal')(temp)
# model
model = models.Model(inputs=[features,adjacency_near,adjacency_middle,adjacency_distant],outputs=[output_end])
model.summary()
model.compile(optimizer='rmsprop',loss='mae',metrics=['mae','mse','mape'])
callbacks_list = [
keras.callbacks.ModelCheckpoint(filepath='SR.h5',
monitor='val_loss',
save_best_only=True,)
]
################################################ Model training
epochs = 1000
H = model.fit([x_train_sr,adjacency_train_near,adjacency_train_middle,adjacency_train_distant], y_train,callbacks=callbacks_list,batch_size=interval_batch,epochs=epochs,validation_data=([x_test_sr,adjacency_test_near,adjacency_test_middle,adjacency_test_distant],y_test))
################################################ Loss
train_loss = H.history['loss']
test_loss = H.history['val_loss']
iterations = [i for i in range(epochs)]
plt.plot(iterations, train_loss,'b-',label='train_mae')
plt.plot(iterations, test_loss,'r-',label='test_mae')
plt.legend()
plt.title('Train_mae VS Test_mae')
################################################ predict
path="./SR.h5"
model_best = models.load_model(path)
predict_best = model_best.predict([x_test_sr,adjacency_test_near,adjacency_test_middle,adjacency_test_distant])
plt.figure(figsize=(20,10))
plt.plot(y_test,'r')
plt.plot(predict_best,'g')
|
py | b412d73545e11b027adb89e579c254a9dd26c2a8 | """Training file for SMP-CAIL2020-Argmine.
Author: Tsinghuaboy [email protected]
Usage:
python -m torch.distributed.launch train.py \
--config_file 'config/bert_config.json'
CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch train.py \
--config_file 'config/rnn_config.json'
"""
from typing import Dict
import argparse
import json
import os
from copy import deepcopy
from types import SimpleNamespace
import torch
from torch import nn
from torch.optim import Adam
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers.optimization import (
AdamW, get_linear_schedule_with_warmup, get_constant_schedule)
from data import Data
from evaluate import evaluate, calculate_accuracy_f1, get_labels_from_file
from model import BertForClassification,RnnForSentencePairClassification
from utils import get_csv_logger, get_path
from vocab import build_vocab
MODEL_MAP = {
'bert': BertForClassification,
'rnn': RnnForSentencePairClassification
}
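# A minimal config sketch matching the fields read below (the values are placeholders; the
# real settings live in config/bert_config.json and config/rnn_config.json):
#
#     {
#         "model_type": "bert", "experiment_name": "argmine-bert",
#         "lr": 2e-5, "num_epoch": 8, "batch_size": 16,
#         "num_warmup_steps": 100, "gradient_accumulation_steps": 2,
#         "max_grad_norm": 1.0, "max_seq_len": 512,
#         "model_path": "model", "log_path": "log",
#         "train_file_path": "data/train.csv", "valid_file_path": "data/valid.csv",
#         "all_train_file_path": "data/train.csv", "trained_weight": ""
#     }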
class Trainer:
"""Trainer for SMP-CAIL2020-Argmine.
"""
def __init__(self,
model, data_loader: Dict[str, DataLoader], device, config):
"""Initialize trainer with model, data, device, and config.
Initialize optimizer, scheduler, criterion.
Args:
model: model to be evaluated
data_loader: dict of torch.utils.data.DataLoader
device: torch.device('cuda') or torch.device('cpu')
config:
config.experiment_name: experiment name
config.model_type: 'bert' or 'rnn'
config.lr: learning rate for optimizer
config.num_epoch: epoch number
config.num_warmup_steps: warm-up steps number
config.gradient_accumulation_steps: gradient accumulation steps
config.max_grad_norm: max gradient norm
"""
self.model = model
self.device = device
self.config = config
self.data_loader = data_loader
        self.config.num_training_steps = config.num_epoch * (
            len(data_loader['train']) // config.batch_size)
        # note: len(data_loader['train']) already counts batches, so the extra division by
        # batch_size makes this an underestimate of the true number of optimizer steps
self.optimizer = self._get_optimizer()
self.scheduler = self._get_scheduler()
self.criterion = nn.BCEWithLogitsLoss(reduction='mean')
def _get_optimizer(self):
"""Get optimizer for different models.
Returns:
optimizer
"""
if self.config.model_type == 'bert':
no_decay = ['bias', 'gamma', 'beta']
optimizer_parameters = [
{'params': [p for n, p in self.model.named_parameters()
if not any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.01},
{'params': [p for n, p in self.model.named_parameters()
if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0}]
optimizer = AdamW(
optimizer_parameters,
lr=self.config.lr,
betas=(0.9, 0.999),
weight_decay=1e-8,
correct_bias=False)
else: # rnn
optimizer = Adam(self.model.parameters(), lr=self.config.lr)
return optimizer
def _get_scheduler(self):
"""Get scheduler for different models.
Returns:
scheduler
"""
if self.config.model_type == 'bert':
scheduler = get_linear_schedule_with_warmup(
self.optimizer,
num_warmup_steps=self.config.num_warmup_steps,
num_training_steps=self.config.num_training_steps)
else: # rnn
scheduler = get_constant_schedule(self.optimizer)
return scheduler
def _evaluate_for_train_valid(self):
"""Evaluate model on train and valid set and get acc and f1 score.
Returns:
train_acc, train_f1, valid_acc, valid_f1
"""
train_predictions = evaluate(
model=self.model, data_loader=self.data_loader['valid_train'],
device=self.device)
valid_predictions = evaluate(
model=self.model, data_loader=self.data_loader['valid_valid'],
device=self.device)
train_answers = get_labels_from_file(self.config.train_file_path)
valid_answers = get_labels_from_file(self.config.valid_file_path)
train_acc, train_f1 = calculate_accuracy_f1(
train_answers, train_predictions)
valid_acc, valid_f1 = calculate_accuracy_f1(
valid_answers, valid_predictions)
return train_acc, train_f1, valid_acc, valid_f1
    def get_score(self, answerid_list, paraid_list):
        # Note: this helper references self.tokenizer, which __init__ never sets, and builds
        # summary_dict without returning it; it is not called anywhere in this file.
token_list = []
for line in answerid_list:
tokens = self.tokenizer.decode(line)
token_list.append(tokens)
# # 4. Write answers to file
summary_dict = dict(zip(set(paraid_list), [""] * len(set(paraid_list))))
for i in range(len(paraid_list)):
summary_dict[paraid_list[i]] += token_list[i]
def _epoch_evaluate_update_description_log(
self, tqdm_obj, logger, epoch):
"""Evaluate model and update logs for epoch.
Args:
tqdm_obj: tqdm/trange object with description to be updated
logger: logging.logger
epoch: int
Return:
train_acc, train_f1, valid_acc, valid_f1
"""
# Evaluate model for train and valid set
results = self._evaluate_for_train_valid()
train_acc, train_f1, valid_acc, valid_f1 = results
# Update tqdm description for command line
tqdm_obj.set_description(
'Epoch: {:d}, train_acc: {:.6f}, train_f1: {:.6f}, '
'valid_acc: {:.6f}, valid_f1: {:.6f}, '.format(
epoch, train_acc, train_f1, valid_acc, valid_f1))
# Logging
logger.info(','.join([str(epoch)] + [str(s) for s in results]))
return train_acc, train_f1, valid_acc, valid_f1
def save_model(self, filename):
"""Save model to file.
Args:
filename: file name
"""
torch.save(self.model.state_dict(), filename)
def train(self):
"""Train model on train set and evaluate on train and valid set.
Returns:
state dict of the best model with highest valid f1 score
"""
epoch_logger = get_csv_logger(
os.path.join(self.config.log_path,
self.config.experiment_name + '-epoch.csv'),
title='epoch,train_acc,train_f1,valid_acc,valid_f1')
step_logger = get_csv_logger(
os.path.join(self.config.log_path,
self.config.experiment_name + '-step.csv'),
title='step,loss')
trange_obj = trange(self.config.num_epoch, desc='Epoch', ncols=120)
# self._epoch_evaluate_update_description_log(
# tqdm_obj=trange_obj, logger=epoch_logger, epoch=0)
best_model_state_dict, best_train_f1, global_step = None, 0, 0
for epoch, _ in enumerate(trange_obj):
self.model.train()
tqdm_obj = tqdm(self.data_loader['train'], ncols=80)
for step, batch in enumerate(tqdm_obj):
batch = tuple(t.to(self.device) for t in batch)
logits = self.model(*batch[:-1]) # the last one is label
loss = self.criterion(logits, batch[-1])
# if self.config.gradient_accumulation_steps > 1:
# loss = loss / self.config.gradient_accumulation_steps
# self.optimizer.zero_grad()
loss.backward()
if (step + 1) % self.config.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.config.max_grad_norm)
                    # Gradient accumulation: run several backward passes before the optimizer
                    # updates the parameters (i.e. before optimizer.step()), letting gradients
                    # accumulate in parameter.grad; the accumulated gradient then drives a
                    # single parameter update.
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
global_step += 1
tqdm_obj.set_description('loss: {:.6f}'.format(loss.item()))
step_logger.info(str(global_step) + ',' + str(loss.item()))
# if epoch >= 2:
# results = self._epoch_evaluate_update_description_log(
# tqdm_obj=trange_obj, logger=epoch_logger, epoch=epoch + 1)
self.save_model(os.path.join(
self.config.model_path, self.config.experiment_name,
self.config.model_type + '-' + str(epoch + 1) + '.bin'))
# if results[-3] > best_train_f1:
# best_model_state_dict = deepcopy(self.model.state_dict())
# best_train_f1 = results[-3]
return best_model_state_dict
def main(config_file='config/bert_config.json'):
"""Main method for training.
Args:
config_file: in config dir
"""
# 0. Load config and mkdir
with open(config_file) as fin:
config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
get_path(os.path.join(config.model_path, config.experiment_name))
get_path(config.log_path)
if config.model_type == 'rnn': # build vocab for rnn
build_vocab(file_in=config.all_train_file_path,
file_out=os.path.join(config.model_path, 'vocab.txt'))
# 1. Load data
data = Data(vocab_file=os.path.join(config.model_path, 'vocab.txt'),
max_seq_len=config.max_seq_len,
model_type=config.model_type, config=config)
datasets = data.load_train_and_valid_files(
train_file=config.train_file_path,
valid_file=config.valid_file_path)
train_set, valid_set_train, valid_set_valid = datasets
if torch.cuda.is_available():
device = torch.device('cuda')
# device = torch.device('cpu')
# torch.distributed.init_process_group(backend="nccl")
# sampler_train = DistributedSampler(train_set)
sampler_train = RandomSampler(train_set)
else:
device = torch.device('cpu')
sampler_train = RandomSampler(train_set)
data_loader = {
'train': DataLoader(
train_set, sampler=sampler_train, batch_size=config.batch_size),
'valid_train': DataLoader(
valid_set_train, batch_size=config.batch_size, shuffle=False),
'valid_valid': DataLoader(
valid_set_valid, batch_size=config.batch_size, shuffle=False)}
# 2. Build model
model = MODEL_MAP[config.model_type](config)
#load model states.
if config.trained_weight:
model.load_state_dict(torch.load(config.trained_weight))
model.to(device)
if torch.cuda.is_available():
model = model
# model = torch.nn.parallel.DistributedDataParallel(
# model, find_unused_parameters=True)
# 3. Train
trainer = Trainer(model=model, data_loader=data_loader,
device=device, config=config)
best_model_state_dict = trainer.train()
# 4. Save model
torch.save(best_model_state_dict,
os.path.join(config.model_path, 'model.bin'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-c', '--config_file', default='config/bert_config.json',
help='model config file')
parser.add_argument(
'--local_rank', default=0,
help='used for distributed parallel')
args = parser.parse_args()
main(args.config_file)
|
py | b412d7c37cf59859187601d9cc20ed4f8c898f6b | import pytest
from .env import H2Conf
class TestStore:
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env):
H2Conf(env).add_vhost_cgi().install()
assert env.apache_restart() == 0
# let the hecho.py CGI echo chars < 0x20 in field name
# for almost all such characters, the stream gets aborted with a h2 error and
# there will be no http status, cr and lf are handled special
def test_h2_200_01(self, env):
url = env.mkurl("https", "cgi", "/hecho.py")
for x in range(1, 32):
r = env.curl_post_data(url, "name=x%%%02xx&value=yz" % x)
if x in [10]:
assert 0 == r.exit_code, "unexpected exit code for char 0x%02x" % x
assert 500 == r.response["status"], "unexpected status for char 0x%02x" % x
elif x in [13]:
assert 0 == r.exit_code, "unexpected exit code for char 0x%02x" % x
assert 200 == r.response["status"], "unexpected status for char 0x%02x" % x
else:
assert 0 != r.exit_code, "unexpected exit code for char 0x%02x" % x
# let the hecho.py CGI echo chars < 0x20 in field value
# for almost all such characters, the stream gets aborted with a h2 error and
# there will be no http status, cr and lf are handled special
def test_h2_200_02(self, env):
url = env.mkurl("https", "cgi", "/hecho.py")
for x in range(1, 32):
if 9 != x:
r = env.curl_post_data(url, "name=x&value=y%%%02x" % x)
if x in [10, 13]:
assert 0 == r.exit_code, "unexpected exit code for char 0x%02x" % x
assert 200 == r.response["status"], "unexpected status for char 0x%02x" % x
else:
assert 0 != r.exit_code, "unexpected exit code for char 0x%02x" % x
# let the hecho.py CGI echo 0x10 and 0x7f in field name and value
def test_h2_200_03(self, env):
url = env.mkurl("https", "cgi", "/hecho.py")
for h in ["10", "7f"]:
r = env.curl_post_data(url, "name=x%%%s&value=yz" % h)
assert 0 != r.exit_code
r = env.curl_post_data(url, "name=x&value=y%%%sz" % h)
assert 0 != r.exit_code
# test header field lengths check, LimitRequestLine (default 8190)
def test_h2_200_10(self, env):
url = env.mkurl("https", "cgi", "/")
val = "1234567890" # 10 chars
for i in range(3): # make a 10000 char string
val = "%s%s%s%s%s%s%s%s%s%s" % (val, val, val, val, val, val, val, val, val, val)
# LimitRequestLine 8190 ok, one more char -> 431
r = env.curl_get(url, options=["-H", "x: %s" % (val[:8187])])
assert 200 == r.response["status"]
r = env.curl_get(url, options=["-H", "x: %sx" % (val[:8188])])
assert 431 == r.response["status"]
# same with field name
r = env.curl_get(url, options=["-H", "y%s: 1" % (val[:8186])])
assert 200 == r.response["status"]
r = env.curl_get(url, options=["-H", "y%s: 1" % (val[:8188])])
assert 431 == r.response["status"]
# test header field lengths check, LimitRequestFieldSize (default 8190)
def test_h2_200_11(self, env):
url = env.mkurl("https", "cgi", "/")
val = "1234567890" # 10 chars
for i in range(3): # make a 10000 char string
val = "%s%s%s%s%s%s%s%s%s%s" % (val, val, val, val, val, val, val, val, val, val)
# LimitRequestFieldSize 8190 ok, one more char -> 400 in HTTP/1.1
        # (we send 4000+4185 since they are concatenated by ", " and start with "x: ")
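        # worked out: len("x: ") + 4000 + len(", ") + 4185 = 8190 bytes, exactly the default
        # limit; the 4189/4191 variants below reach 8194/8196 bytes and are rejected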
r = env.curl_get(url, options=["-H", "x: %s" % (val[:4000]), "-H", "x: %s" % (val[:4185])])
assert 200 == r.response["status"]
r = env.curl_get(url, options=["--http1.1", "-H", "x: %s" % (val[:4000]), "-H", "x: %s" % (val[:4189])])
assert 400 == r.response["status"]
r = env.curl_get(url, options=["-H", "x: %s" % (val[:4000]), "-H", "x: %s" % (val[:4191])])
assert 431 == r.response["status"]
# test header field count, LimitRequestFields (default 100)
    # see #201: several headers with same name are merged and count only once
def test_h2_200_12(self, env):
url = env.mkurl("https", "cgi", "/")
opt = []
for i in range(98): # curl sends 2 headers itself (user-agent and accept)
opt += ["-H", "x: 1"]
r = env.curl_get(url, options=opt)
assert 200 == r.response["status"]
r = env.curl_get(url, options=(opt + ["-H", "y: 2"]))
assert 200 == r.response["status"]
# test header field count, LimitRequestFields (default 100)
# different header names count each
def test_h2_200_13(self, env):
url = env.mkurl("https", "cgi", "/")
opt = []
for i in range(98): # curl sends 2 headers itself (user-agent and accept)
opt += ["-H", "x{0}: 1".format(i)]
r = env.curl_get(url, options=opt)
assert 200 == r.response["status"]
r = env.curl_get(url, options=(opt + ["-H", "y: 2"]))
assert 431 == r.response["status"]
# test "LimitRequestFields 0" setting, see #200
def test_h2_200_14(self, env):
conf = H2Conf(env)
conf.add("""
LimitRequestFields 20
""")
conf.add_vhost_cgi()
conf.install()
assert env.apache_restart() == 0
url = env.mkurl("https", "cgi", "/")
opt = []
for i in range(21):
opt += ["-H", "x{0}: 1".format(i)]
r = env.curl_get(url, options=opt)
assert 431 == r.response["status"]
conf = H2Conf(env)
conf.add("""
LimitRequestFields 0
""")
conf.add_vhost_cgi()
conf.install()
assert env.apache_restart() == 0
url = env.mkurl("https", "cgi", "/")
opt = []
for i in range(100):
opt += ["-H", "x{0}: 1".format(i)]
r = env.curl_get(url, options=opt)
assert 200 == r.response["status"]
# the uri limits
def test_h2_200_15(self, env):
conf = H2Conf(env)
conf.add("""
LimitRequestLine 48
""")
conf.add_vhost_cgi()
conf.install()
assert env.apache_restart() == 0
url = env.mkurl("https", "cgi", "/")
r = env.curl_get(url)
assert 200 == r.response["status"]
url = env.mkurl("https", "cgi", "/" + (48*"x"))
r = env.curl_get(url)
assert 414 == r.response["status"]
# nghttp sends the :method: header first (so far)
# trigger a too long request line on it
# the stream will RST and we get no response
url = env.mkurl("https", "cgi", "/")
opt = ["-H:method: {0}".format(100*"x")]
r = env.nghttp().get(url, options=opt)
assert r.exit_code == 0, r
assert not r.response
# invalid chars in method
def test_h2_200_16(self, env):
conf = H2Conf(env)
conf.add_vhost_cgi()
conf.install()
assert env.apache_restart() == 0
url = env.mkurl("https", "cgi", "/hello.py")
opt = ["-H:method: GET /hello.py"]
r = env.nghttp().get(url, options=opt)
assert r.exit_code == 0, r
# nghttp version >= 1.45.0 check pseudo headers and RST streams,
# which means we see no response.
if r.response is not None:
assert r.response["status"] == 400
url = env.mkurl("https", "cgi", "/proxy/hello.py")
r = env.nghttp().get(url, options=opt)
assert r.exit_code == 0, r
if r.response is not None:
assert r.response["status"] == 400
|
py | b412d7ca27a2ba608a8ccae93d57b04d9f522d08 | import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import args_file
import json
from utils import Logger
from models.build_model import build_model
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
best_acc1 = 0
def main():
args = args_file.parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if not os.path.exists(args.result_path):
os.makedirs(args.result_path)
if not args.evaluate:
if args.resume:
with open(os.path.join(args.result_path, 'resume_args.json'), 'a') as f:
json.dump(vars(args), f)
else:
with open(os.path.join(args.result_path, 'args.json'), 'w') as f:
json.dump(vars(args), f)
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
'''
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
'''
model = build_model(args)
print(model)
print(args)
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int(args.workers / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
nesterov=args.nesterov)
if args.lr_scheduler == 'MultiStepLR':
print("using MultiStepLR with steps: ", args.lr_steps)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.lr_steps, gamma=args.lr_reduce_factor)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
if args.lr_scheduler == 'MultiStepLR':
print("usingMultiStepLR with steps: ", args.lr_steps)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.lr_steps,
gamma=args.lr_reduce_factor,
last_epoch=(checkpoint['epoch'] - 1))
print("last_epoch: ", scheduler.last_epoch)
if args.start_epoch == 0:
args.start_epoch = checkpoint['epoch']
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_trans = [
transforms.RandomResizedCrop(args.train_crop_size), # transforms.RandomResizedCrop(224)
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose(train_trans))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(args.val_resize),#transforms.Resize(256)
transforms.CenterCrop(args.val_crop_size),#transforms.CenterCrop(224)
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
if args.resume:
mode = 'a'
else:
mode = 'w'
train_logger = Logger(os.path.join(args.result_path, 'train.log'),
['epoch', 'loss', 'acc1', 'acc5', 'lr'], mode=mode)
val_logger = Logger(os.path.join(args.result_path, 'val.log'), ['epoch', 'loss', 'acc1', 'acc5'], mode=mode)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
if args.lr_scheduler == 'MultiStepLR':
scheduler.step()
else:
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train_acc1, train_acc5, train_loss = \
train(train_loader, model, criterion, optimizer, epoch, args)
train_logger.log({
'epoch': epoch+1,
'loss': '{:.4f}'.format(train_loss),
'acc1': '{:.2f}'.format(train_acc1.item()),
'acc5': '{:.2f}'.format(train_acc5.item()),
'lr': '{:.6f}'.format(optimizer.param_groups[0]['lr'])
})
# evaluate on validation set
val_acc1, val_acc5, val_loss = validate(val_loader, model, criterion, args)
val_logger.log({
'epoch': epoch+1,
'loss': '{:.4f}'.format(val_loss),
'acc1': '{:.2f}'.format(val_acc1.item()),
'acc5': '{:.2f}'.format(val_acc5.item())
})
# remember best acc@1 and save checkpoint
is_best = val_acc1 > best_acc1
best_acc1 = max(val_acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'val_acc1': val_acc1,
'optimizer': optimizer.state_dict(),
}, is_best, args.result_path + '/')
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1,
top5, prefix="Epoch: [{}]".format(epoch+1))
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.print(i)
return top1.avg, top5.avg, losses.avg
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.print(i)
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg, top5.avg, losses.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename + 'checkpoint.pth.tar')
if is_best:
shutil.copyfile(filename + 'checkpoint.pth.tar', filename + 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, *meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def print(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
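# Example: with args.lr = 0.1 the schedule above trains epochs 0-29 at 0.1, epochs 30-59 at
# 0.01, and epochs 60-89 at 0.001.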
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
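# Example: accuracy(output, target, topk=(1, 5)) returns [top1, top5] as one-element tensors
# holding percentages in [0, 100], which feed the Acc@1 / Acc@5 meters above.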
if __name__ == '__main__':
main() |
py | b412d82c4d9e3c6c8e342fc4b9e285fedc364f29 | #def main():
# print('MCA')
#main()
#print("test")
#def display(a):
#    print('my name',a)
def main():
print('mca')
if __name__ == '__main__':
main() |
py | b412d8aa69db84d3b12293b8e93533be103623bd | """
.. codeauthor:: David Zwicker <[email protected]>
"""
import itertools
import numpy as np
import pytest
from pde import ScalarField, UnitGrid
from pde.grids.boundaries.axes import Boundaries
from pde.grids.boundaries.axis import BoundaryPair, BoundaryPeriodic, get_boundary_axis
def test_boundaries():
""" test setting boundaries for multiple systems """
b = ["periodic", "value", {"type": "derivative", "value": 1}]
for bx, by in itertools.product(b, b):
g = UnitGrid([2, 2], periodic=[b == "periodic" for b in (bx, by)])
bcs = Boundaries.from_data(g, [bx, by])
bc_x = get_boundary_axis(g, 0, bx)
bc_y = get_boundary_axis(g, 1, by)
assert bcs.grid.num_axes == 2
assert bcs[0] == bc_x
assert bcs[1] == bc_y
assert bcs == Boundaries.from_data(g, [bc_x, bc_y])
if bx == by:
assert bcs == Boundaries.from_data(g, bx)
bc2 = bcs.copy()
assert bcs == bc2
assert bcs is not bc2
b1 = Boundaries.from_data(UnitGrid([2, 2]), "natural")
b2 = Boundaries.from_data(UnitGrid([3, 3]), "natural")
assert b1 != b2
def test_boundary_specifications():
""" test different ways of specifying boundary conditions """
g = UnitGrid([2])
bc1 = Boundaries.from_data(
g, [{"type": "derivative", "value": 0}, {"type": "value", "value": 0}]
)
assert bc1 == Boundaries.from_data(g, [{"type": "derivative"}, {"type": "value"}])
assert bc1 == Boundaries.from_data(g, [{"derivative": 0}, {"value": 0}])
assert bc1 == Boundaries.from_data(g, ["neumann", "dirichlet"])
def test_mixed_boundary_condition():
""" test limiting cases of the mixed boundary condition """
g = UnitGrid([2])
d = np.random.random(2)
g1 = g.get_operator("gradient", bc=[{"mixed": 0}, {"mixed": np.inf}])
g2 = g.get_operator("gradient", bc=["derivative", "value"])
np.testing.assert_allclose(g1(d), g2(d))
@pytest.mark.parametrize(
"cond,is_value",
[
("natural", False),
("auto_periodic_neumann", False),
("auto_periodic_dirichlet", True),
],
)
def test_natural_boundary_conditions(cond, is_value):
""" test special automatic boundary conditions """
g = UnitGrid([2, 2], periodic=[True, False])
for bc in [
Boundaries.from_data(g, cond),
Boundaries.from_data(g, ["periodic", cond]),
]:
assert isinstance(bc[0], BoundaryPeriodic)
if is_value:
assert bc[1] == BoundaryPair.from_data(g, 1, "value")
else:
assert bc[1] == BoundaryPair.from_data(g, 1, "derivative")
def test_special_cases():
""" test some special boundary conditions """
g = UnitGrid([5])
s = ScalarField(g, np.arange(5))
for bc in ["extrapolate", {"curvature": 0}]:
np.testing.assert_allclose(s.laplace(bc).data, 0)
def test_bc_values():
""" test setting the values of boundary conditions """
g = UnitGrid([5])
bc = g.get_boundary_conditions([{"value": 2}, {"derivative": 3}])
assert bc[0].low.value == 2 and bc[0].high.value == 3
bc.scale_value(5)
assert bc[0].low.value == 10 and bc[0].high.value == 15
bc.set_value(7)
assert bc[0].low.value == bc[0].high.value == 7
|
py | b412da4bc8b76285ce4b548a6c43f312759e039d | from unittest import TestCase
import jax.numpy as jnp
import tensorflow.keras as tfk
import elegy
from elegy.testing_utils import transform_and_run
class BinaryCrossentropyTest(TestCase):
@transform_and_run
def test_basic(self):
# Input: true (y_true) and predicted (y_pred) tensors
y_true = jnp.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = jnp.array([[0.6, 0.4], [0.4, 0.6]])
# Standard BCE, considering prediction tensor as probabilities
bce = elegy.metrics.BinaryCrossentropy()
result = bce(
y_true=y_true,
y_pred=y_pred,
)
assert jnp.isclose(result, 0.815, rtol=0.01)
# Standard BCE, considering prediction tensor as logits
y_logits = jnp.log(y_pred) - jnp.log(1 - y_pred)
bce = elegy.metrics.BinaryCrossentropy(from_logits=True)
result_from_logits = bce(y_true, y_logits)
assert jnp.isclose(result_from_logits, 0.815, rtol=0.01)
assert jnp.isclose(result_from_logits, result, rtol=0.01)
# BCE using sample_weight
bce = elegy.metrics.BinaryCrossentropy()
result = bce(y_true, y_pred, sample_weight=jnp.array([1.0, 0.0]))
assert jnp.isclose(result, 0.916, rtol=0.01)
@transform_and_run
def test_compatibility(self):
# Input: true (y_true) and predicted (y_pred) tensors
y_true = jnp.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = jnp.array([[0.6, 0.4], [0.4, 0.6]])
# Standard BCE, considering prediction tensor as probabilities
bce_elegy = elegy.metrics.BinaryCrossentropy()
bce_tfk = tfk.metrics.BinaryCrossentropy()
assert jnp.isclose(
bce_elegy(y_true, y_pred), bce_tfk(y_true, y_pred), rtol=0.0001
)
# Standard BCE, considering prediction tensor as logits
y_logits = jnp.log(y_pred) - jnp.log(1 - y_pred)
bce_elegy = elegy.metrics.BinaryCrossentropy(from_logits=True)
bce_tfk = tfk.metrics.BinaryCrossentropy(from_logits=True)
assert jnp.isclose(
bce_elegy(y_true, y_logits), bce_tfk(y_true, y_logits), rtol=0.0001
)
# BCE using sample_weight
bce_elegy = elegy.metrics.BinaryCrossentropy()
bce_tfk = tfk.metrics.BinaryCrossentropy()
assert jnp.isclose(
bce_elegy(y_true, y_pred, sample_weight=jnp.array([1, 0])),
bce_tfk(y_true, y_pred, sample_weight=jnp.array([1, 0])),
rtol=0.0001,
)
|
py | b412da63f9958eeb043d3284f96309864b45fd55 |
import sys
if sys.platform == 'cli':
import clr
clr.AddReference('integrate')
from scipy__integrate___odepack import *
|
py | b412dc6d42cdcb5a4eff3808677bbb99f8802e97 | from unittest.mock import patch
from nose.tools import assert_equal
from pyecharts import options as opts
from pyecharts.charts import Sankey
@patch("pyecharts.render.engine.write_utf8_html_file")
def test_sankey_base(fake_writer):
nodes = [{"name": "category1"}, {"name": "category2"}, {"name": "category3"}]
links = [
{"source": "category1", "target": "category2", "value": 10},
{"source": "category2", "target": "category3", "value": 15},
]
c = Sankey().add(
"sankey",
nodes,
links,
linestyle_opt=opts.LineStyleOpts(opacity=0.2, curve=0.5, color="source"),
label_opts=opts.LabelOpts(position="right"),
)
c.render()
_, content = fake_writer.call_args[0]
assert_equal(c.theme, "white")
assert_equal(c.renderer, "canvas")
|
py | b412dece92fe5470f60e4bd3e7157eea762d275a | import pandas as pd
import numpy as np
from torch.utils.data import Dataset
import os
from PIL import Image
class CXRDataset(Dataset):
def __init__(
self,
path_to_images,
fold,
transform=None,
sample=0,
finding="any",
starter_images=False):
self.transform = transform
self.path_to_images = path_to_images
self.df = pd.read_csv("nih_labels_modified.csv")
self.df = self.df[self.df['fold'] == fold]
if(starter_images):
starter_images = pd.read_csv("starter_images.csv")
            self.df = pd.merge(left=self.df, right=starter_images, how="inner", on="Image Index")
# can limit to sample, useful for testing
# if fold == "train" or fold =="val": sample=500
if(sample > 0 and sample < len(self.df)):
self.df = self.df.sample(sample)
if not finding == "any": # can filter for positive findings of the kind described; useful for evaluation
if finding in self.df.columns:
if len(self.df[self.df[finding] == 1]) > 0:
self.df = self.df[self.df[finding] == 1]
else:
print("No positive cases exist for "+LABEL+", returning all unfiltered cases")
else:
print("cannot filter on finding " + finding +
" as not in data - please check spelling")
self.df = self.df.set_index("Image Index")
self.PRED_LABEL = [
'Atelectasis',
'Cardiomegaly',
'Effusion',
'Infiltration',
'Mass',
'Nodule',
'Pneumonia',
'Pneumothorax',
'Consolidation',
'Edema',
'Emphysema',
'Fibrosis',
'Pleural_Thickening',
'Hernia',
'Covid']
RESULT_PATH = "results/"
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
image = Image.open(
os.path.join(
self.path_to_images,
self.df.index[idx]))
image = image.convert('RGB')
label = np.zeros(len(self.PRED_LABEL), dtype=int)
for i in range(0, len(self.PRED_LABEL)):
# can leave zero if zero, else make one
if(self.df[self.PRED_LABEL[i].strip()].iloc[idx].astype('int') > 0):
label[i] = self.df[self.PRED_LABEL[i].strip()
].iloc[idx].astype('int')
if self.transform:
image = self.transform(image)
        return (image, label, self.df.index[idx])
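# Usage sketch (paths, CSV files and the transform are illustrative assumptions):
#   from torchvision import transforms
#   dataset = CXRDataset(path_to_images="images/", fold="train",
#                        transform=transforms.ToTensor())
#   image, label, image_index = dataset[0]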
|
py | b412e034a38b16eaaae0861574862bfb72f0d70f | #!/usr/bin/env python3
"""
Author: Diego Maldonado (@dmnunez1993)
Maintainer: Emiliano Borghi (@eborghi10)
"""
import os
import subprocess
from subprocess import PIPE
KNOWN_NVIDIA_ERRORS = [
("Failed to initialize NVML: Driver/library version mismatch"),
("NVIDIA-SMI has failed because it couldn't communicate with the NVIDIA driver. "
"Make sure that the latest NVIDIA driver is installed and running."),
]
def is_nvidia():
try:
        everything, _ = subprocess.Popen(["nvidia-smi", "--query-gpu=driver_version", "--format=noheader,csv"],
                                         stdout=PIPE, stderr=PIPE).communicate()
        # communicate() returns bytes; decode before comparing against the known error strings.
        return everything.decode().strip() not in KNOWN_NVIDIA_ERRORS
    except OSError:
        return False
def run_command(command):
subprocess.call(command, shell=True)
def get_repo_root():
return subprocess.check_output('git rev-parse --show-toplevel'.split()).strip().decode()
def get_uid():
return os.getuid()
def get_user():
return os.getlogin()
def create_directory(directory):
run_command("mkdir -p {}".format(directory))
run_command("sudo chown {0}:{0} {1}".format(get_user(), directory))
|
py | b412e0bd81e96e819eff4579aa796c1484822f8b | """
Redshift connector module.
Extends BaseConnector.
"""
from typing import Dict
import sqlalchemy
from dbt_sugar.core.connectors.base import BaseConnector
class RedshiftConnector(BaseConnector):
"""
Connection class for Redshift databases.
Extends BaseConnector.
"""
def __init__(
self,
connection_params: Dict[str, str],
) -> None:
"""
Creates the URL and the Engine for future connections.
Args:
connection_params (Dict[str, str]): Dict containing database connection
parameters and credentials.
"""
self.connection_url = sqlalchemy.engine.url.URL(
drivername="redshift+psycopg2",
host=connection_params.get("host", str()),
username=connection_params.get("user", str()),
password=connection_params.get("password", str()),
database=connection_params.get("database", str()),
port=connection_params.get("port", str()),
)
self.engine = sqlalchemy.create_engine(
self.connection_url, connect_args={"sslmode": "prefer"}
)
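# Usage sketch (connection values are placeholders, not real credentials):
#   params = {"host": "cluster.example.redshift.amazonaws.com", "user": "analyst",
#             "password": "********", "database": "warehouse", "port": "5439"}
#   connector = RedshiftConnector(params)
#   # connector.engine is a regular SQLAlchemy engine built from `connection_url`.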
|
py | b412e0d7c5f214180d3b058b40ee007c8467925a | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SnapshotArgs', 'Snapshot']
@pulumi.input_type
class SnapshotArgs:
def __init__(__self__, *,
creation_data: pulumi.Input['CreationDataArgs'],
resource_group_name: pulumi.Input[str],
disk_size_gb: Optional[pulumi.Input[int]] = None,
encryption_settings: Optional[pulumi.Input['EncryptionSettingsArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
os_type: Optional[pulumi.Input['OperatingSystemTypes']] = None,
sku: Optional[pulumi.Input['DiskSkuArgs']] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Snapshot resource.
:param pulumi.Input['CreationDataArgs'] creation_data: Disk source information. CreationData information cannot be changed after the disk has been created.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[int] disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
:param pulumi.Input['EncryptionSettingsArgs'] encryption_settings: Encryption settings for disk or snapshot
:param pulumi.Input[str] location: Resource location
:param pulumi.Input['OperatingSystemTypes'] os_type: The Operating System type.
:param pulumi.Input['DiskSkuArgs'] sku: The disks and snapshots sku name. Can be Standard_LRS or Premium_LRS.
:param pulumi.Input[str] snapshot_name: The name of the snapshot that is being created. The name can't be changed after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
pulumi.set(__self__, "creation_data", creation_data)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if disk_size_gb is not None:
pulumi.set(__self__, "disk_size_gb", disk_size_gb)
if encryption_settings is not None:
pulumi.set(__self__, "encryption_settings", encryption_settings)
if location is not None:
pulumi.set(__self__, "location", location)
if os_type is not None:
pulumi.set(__self__, "os_type", os_type)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if snapshot_name is not None:
pulumi.set(__self__, "snapshot_name", snapshot_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="creationData")
def creation_data(self) -> pulumi.Input['CreationDataArgs']:
"""
Disk source information. CreationData information cannot be changed after the disk has been created.
"""
return pulumi.get(self, "creation_data")
@creation_data.setter
def creation_data(self, value: pulumi.Input['CreationDataArgs']):
pulumi.set(self, "creation_data", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="diskSizeGB")
def disk_size_gb(self) -> Optional[pulumi.Input[int]]:
"""
If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
"""
return pulumi.get(self, "disk_size_gb")
@disk_size_gb.setter
def disk_size_gb(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "disk_size_gb", value)
@property
@pulumi.getter(name="encryptionSettings")
def encryption_settings(self) -> Optional[pulumi.Input['EncryptionSettingsArgs']]:
"""
Encryption settings for disk or snapshot
"""
return pulumi.get(self, "encryption_settings")
@encryption_settings.setter
def encryption_settings(self, value: Optional[pulumi.Input['EncryptionSettingsArgs']]):
pulumi.set(self, "encryption_settings", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="osType")
def os_type(self) -> Optional[pulumi.Input['OperatingSystemTypes']]:
"""
The Operating System type.
"""
return pulumi.get(self, "os_type")
@os_type.setter
def os_type(self, value: Optional[pulumi.Input['OperatingSystemTypes']]):
pulumi.set(self, "os_type", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['DiskSkuArgs']]:
"""
The disks and snapshots sku name. Can be Standard_LRS or Premium_LRS.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['DiskSkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter(name="snapshotName")
def snapshot_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the snapshot that is being created. The name can't be changed after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters.
"""
return pulumi.get(self, "snapshot_name")
@snapshot_name.setter
def snapshot_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Snapshot(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
creation_data: Optional[pulumi.Input[pulumi.InputType['CreationDataArgs']]] = None,
disk_size_gb: Optional[pulumi.Input[int]] = None,
encryption_settings: Optional[pulumi.Input[pulumi.InputType['EncryptionSettingsArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
os_type: Optional[pulumi.Input['OperatingSystemTypes']] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['DiskSkuArgs']]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Snapshot resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['CreationDataArgs']] creation_data: Disk source information. CreationData information cannot be changed after the disk has been created.
:param pulumi.Input[int] disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
:param pulumi.Input[pulumi.InputType['EncryptionSettingsArgs']] encryption_settings: Encryption settings for disk or snapshot
:param pulumi.Input[str] location: Resource location
:param pulumi.Input['OperatingSystemTypes'] os_type: The Operating System type.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[pulumi.InputType['DiskSkuArgs']] sku: The disks and snapshots sku name. Can be Standard_LRS or Premium_LRS.
:param pulumi.Input[str] snapshot_name: The name of the snapshot that is being created. The name can't be changed after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SnapshotArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Snapshot resource.
:param str resource_name: The name of the resource.
:param SnapshotArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SnapshotArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
creation_data: Optional[pulumi.Input[pulumi.InputType['CreationDataArgs']]] = None,
disk_size_gb: Optional[pulumi.Input[int]] = None,
encryption_settings: Optional[pulumi.Input[pulumi.InputType['EncryptionSettingsArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
os_type: Optional[pulumi.Input['OperatingSystemTypes']] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['DiskSkuArgs']]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SnapshotArgs.__new__(SnapshotArgs)
if creation_data is None and not opts.urn:
raise TypeError("Missing required property 'creation_data'")
__props__.__dict__["creation_data"] = creation_data
__props__.__dict__["disk_size_gb"] = disk_size_gb
__props__.__dict__["encryption_settings"] = encryption_settings
__props__.__dict__["location"] = location
__props__.__dict__["os_type"] = os_type
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["sku"] = sku
__props__.__dict__["snapshot_name"] = snapshot_name
__props__.__dict__["tags"] = tags
__props__.__dict__["managed_by"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["time_created"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute/v20170330:Snapshot"), pulumi.Alias(type_="azure-native:compute:Snapshot"), pulumi.Alias(type_="azure-nextgen:compute:Snapshot"), pulumi.Alias(type_="azure-native:compute/v20160430preview:Snapshot"), pulumi.Alias(type_="azure-nextgen:compute/v20160430preview:Snapshot"), pulumi.Alias(type_="azure-native:compute/v20180401:Snapshot"), pulumi.Alias(type_="azure-nextgen:compute/v20180401:Snapshot"), pulumi.Alias(type_="azure-native:compute/v20180601:Snapshot"), pulumi.Alias(type_="azure-nextgen:compute/v20180601:Snapshot"), pulumi.Alias(type_="azure-native:compute/v20180930:Snapshot"), pulumi.Alias(type_="azure-nextgen:compute/v20180930:Snapshot"), pulumi.Alias(type_="azure-native:compute/v20190301:Snapshot"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:Snapshot"), pulumi.Alias(type_="azure-native:compute/v20190701:Snapshot"), pulumi.Alias(type_="azure-nextgen:compute/v20190701:Snapshot"), pulumi.Alias(type_="azure-native:compute/v20191101:Snapshot"), pulumi.Alias(type_="azure-nextgen:compute/v20191101:Snapshot"), pulumi.Alias(type_="azure-native:compute/v20200501:Snapshot"), pulumi.Alias(type_="azure-nextgen:compute/v20200501:Snapshot"), pulumi.Alias(type_="azure-native:compute/v20200630:Snapshot"), pulumi.Alias(type_="azure-nextgen:compute/v20200630:Snapshot"), pulumi.Alias(type_="azure-native:compute/v20200930:Snapshot"), pulumi.Alias(type_="azure-nextgen:compute/v20200930:Snapshot"), pulumi.Alias(type_="azure-native:compute/v20201201:Snapshot"), pulumi.Alias(type_="azure-nextgen:compute/v20201201:Snapshot"), pulumi.Alias(type_="azure-native:compute/v20210401:Snapshot"), pulumi.Alias(type_="azure-nextgen:compute/v20210401:Snapshot")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Snapshot, __self__).__init__(
'azure-native:compute/v20170330:Snapshot',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Snapshot':
"""
Get an existing Snapshot resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SnapshotArgs.__new__(SnapshotArgs)
__props__.__dict__["creation_data"] = None
__props__.__dict__["disk_size_gb"] = None
__props__.__dict__["encryption_settings"] = None
__props__.__dict__["location"] = None
__props__.__dict__["managed_by"] = None
__props__.__dict__["name"] = None
__props__.__dict__["os_type"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["time_created"] = None
__props__.__dict__["type"] = None
return Snapshot(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="creationData")
def creation_data(self) -> pulumi.Output['outputs.CreationDataResponse']:
"""
Disk source information. CreationData information cannot be changed after the disk has been created.
"""
return pulumi.get(self, "creation_data")
@property
@pulumi.getter(name="diskSizeGB")
def disk_size_gb(self) -> pulumi.Output[Optional[int]]:
"""
If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
"""
return pulumi.get(self, "disk_size_gb")
@property
@pulumi.getter(name="encryptionSettings")
def encryption_settings(self) -> pulumi.Output[Optional['outputs.EncryptionSettingsResponse']]:
"""
Encryption settings for disk or snapshot
"""
return pulumi.get(self, "encryption_settings")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedBy")
def managed_by(self) -> pulumi.Output[str]:
"""
Unused. Always Null.
"""
return pulumi.get(self, "managed_by")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="osType")
def os_type(self) -> pulumi.Output[Optional[str]]:
"""
The Operating System type.
"""
return pulumi.get(self, "os_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The disk provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.DiskSkuResponse']]:
"""
The disks and snapshots sku name. Can be Standard_LRS or Premium_LRS.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> pulumi.Output[str]:
"""
The time when the disk was created.
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
|
py | b412e13fc8fc5fffe106f78a3ff45561dc27c9f5 | #!/usr/bin/python
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for looking up symbolic debugging information.
The information can include symbol names, offsets, and source locations.
"""
import glob
import itertools
import os
import re
import subprocess
import zipfile
CHROME_SRC = os.path.join(os.path.realpath(os.path.dirname(__file__)),
os.pardir, os.pardir, os.pardir, os.pardir)
ANDROID_BUILD_TOP = CHROME_SRC
SYMBOLS_DIR = CHROME_SRC
CHROME_SYMBOLS_DIR = CHROME_SRC
ARCH = "arm"
TOOLCHAIN_INFO = None
def Uname():
"""'uname' for constructing prebuilt/<...> and out/host/<...> paths."""
uname = os.uname()[0]
if uname == "Darwin":
proc = os.uname()[-1]
if proc == "i386" or proc == "x86_64":
return "darwin-x86"
return "darwin-ppc"
if uname == "Linux":
return "linux-x86"
return uname
def ToolPath(tool, toolchain_info=None):
"""Return a full qualified path to the specified tool"""
# ToolPath looks for the tools in the completely incorrect directory.
# This looks in the checked in android_tools.
if ARCH == "arm":
toolchain_source = "arm-linux-androideabi-4.9"
toolchain_prefix = "arm-linux-androideabi"
ndk = "ndk"
elif ARCH == "arm64":
toolchain_source = "aarch64-linux-android-4.9"
toolchain_prefix = "aarch64-linux-android"
ndk = "ndk"
elif ARCH == "x86":
toolchain_source = "x86-4.9"
toolchain_prefix = "i686-linux-android"
ndk = "ndk"
elif ARCH == "x86_64" or ARCH == "x64":
toolchain_source = "x86_64-4.9"
toolchain_prefix = "x86_64-linux-android"
ndk = "ndk"
elif ARCH == "mips":
toolchain_source = "mipsel-linux-android-4.9"
toolchain_prefix = "mipsel-linux-android"
ndk = "ndk"
else:
raise Exception("Could not find tool chain")
toolchain_subdir = (
"third_party/android_tools/%s/toolchains/%s/prebuilt/linux-x86_64/bin" %
(ndk, toolchain_source))
return os.path.join(CHROME_SRC,
toolchain_subdir,
toolchain_prefix + "-" + tool)
def FindToolchain():
"""Look for the latest available toolchain
Args:
None
Returns:
A pair of strings containing toolchain label and target prefix.
"""
global TOOLCHAIN_INFO
if TOOLCHAIN_INFO is not None:
return TOOLCHAIN_INFO
## Known toolchains, newer ones in the front.
gcc_version = "4.9"
if ARCH == "arm64":
known_toolchains = [
("aarch64-linux-android-" + gcc_version, "aarch64", "aarch64-linux-android")
]
elif ARCH == "arm":
known_toolchains = [
("arm-linux-androideabi-" + gcc_version, "arm", "arm-linux-androideabi")
]
elif ARCH =="x86":
known_toolchains = [
("x86-" + gcc_version, "x86", "i686-linux-android")
]
elif ARCH =="x86_64" or ARCH =="x64":
known_toolchains = [
("x86_64-" + gcc_version, "x86_64", "x86_64-linux-android")
]
elif ARCH == "mips":
known_toolchains = [
("mipsel-linux-android-" + gcc_version, "mips", "mipsel-linux-android")
]
else:
known_toolchains = []
# Look for addr2line to check for valid toolchain path.
for (label, platform, target) in known_toolchains:
toolchain_info = (label, platform, target);
if os.path.exists(ToolPath("addr2line", toolchain_info)):
TOOLCHAIN_INFO = toolchain_info
print "Using toolchain from :" + ToolPath("", TOOLCHAIN_INFO)
return toolchain_info
raise Exception("Could not find tool chain")
def GetAapt():
"""Returns the path to aapt.
Args:
None
Returns:
the pathname of the 'aapt' executable.
"""
sdk_home = os.path.join('third_party', 'android_tools', 'sdk')
sdk_home = os.environ.get('SDK_HOME', sdk_home)
aapt_exe = glob.glob(os.path.join(sdk_home, 'build-tools', '*', 'aapt'))
if not aapt_exe:
return None
return sorted(aapt_exe, key=os.path.getmtime, reverse=True)[0]
def ApkMatchPackageName(aapt, apk_path, package_name):
"""Returns true the APK's package name matches package_name.
Args:
aapt: pathname for the 'aapt' executable.
apk_path: pathname of the APK file.
package_name: package name to match.
Returns:
True if the package name matches or aapt is None, False otherwise.
"""
if not aapt:
# Allow false positives
return True
aapt_output = subprocess.check_output(
[aapt, 'dump', 'badging', apk_path]).split('\n')
package_name_re = re.compile(r'package: .*name=\'(\S*)\'')
for line in aapt_output:
match = package_name_re.match(line)
if match:
return package_name == match.group(1)
return False
def PathListJoin(prefix_list, suffix_list):
"""Returns each prefix in prefix_list joined with each suffix in suffix list.
Args:
prefix_list: list of path prefixes.
suffix_list: list of path suffixes.
Returns:
List of paths each of which joins a prefix with a suffix.
"""
return [
os.path.join(prefix, suffix)
for prefix in prefix_list for suffix in suffix_list ]
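# Example (illustrative): PathListJoin(['out/Debug', 'out/Release'], ['lib'])
# returns ['out/Debug/lib', 'out/Release/lib'].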
def GetCandidates(dirs, filepart, candidate_fun):
"""Returns a list of candidate filenames.
Args:
dirs: a list of the directory part of the pathname.
filepart: the file part of the pathname.
candidate_fun: a function to apply to each candidate, returns a list.
Returns:
A list of candidate files ordered by modification time, newest first.
"""
out_dir = os.environ.get('CHROMIUM_OUT_DIR', 'out')
out_dir = os.path.join(CHROME_SYMBOLS_DIR, out_dir)
buildtype = os.environ.get('BUILDTYPE')
if buildtype:
buildtype_list = [ buildtype ]
else:
buildtype_list = [ 'Debug', 'Release' ]
candidates = PathListJoin([out_dir], buildtype_list) + [CHROME_SYMBOLS_DIR]
candidates = PathListJoin(candidates, dirs)
candidates = PathListJoin(candidates, [filepart])
candidates = list(
itertools.chain.from_iterable(map(candidate_fun, candidates)))
candidates = sorted(candidates, key=os.path.getmtime, reverse=True)
return candidates
def GetCandidateApks():
"""Returns a list of APKs which could contain the library.
Args:
None
Returns:
list of APK filename which could contain the library.
"""
return GetCandidates(['apks'], '*.apk', glob.glob)
def GetCrazyLib(apk_filename):
"""Returns the name of the first crazy library from this APK.
Args:
apk_filename: name of an APK file.
Returns:
Name of the first library which would be crazy loaded from this APK.
"""
zip_file = zipfile.ZipFile(apk_filename, 'r')
for filename in zip_file.namelist():
match = re.match('lib/[^/]*/crazy.(lib.*[.]so)', filename)
if match:
return match.group(1)
def GetMatchingApks(device_apk_name):
"""Find any APKs which match the package indicated by the device_apk_name.
Args:
device_apk_name: name of the APK on the device.
Returns:
A list of APK filenames which could contain the desired library.
"""
match = re.match('(.*)-[0-9]+[.]apk$', device_apk_name)
if not match:
return None
package_name = match.group(1)
return filter(
lambda candidate_apk:
ApkMatchPackageName(GetAapt(), candidate_apk, package_name),
GetCandidateApks())
def MapDeviceApkToLibrary(device_apk_name):
"""Provide a library name which corresponds with device_apk_name.
Args:
device_apk_name: name of the APK on the device.
Returns:
Name of the library which corresponds to that APK.
"""
matching_apks = GetMatchingApks(device_apk_name)
for matching_apk in matching_apks:
crazy_lib = GetCrazyLib(matching_apk)
if crazy_lib:
return crazy_lib
def GetCandidateLibraries(library_name):
"""Returns a list of candidate library filenames.
Args:
library_name: basename of the library to match.
Returns:
A list of matching library filenames for library_name.
"""
return GetCandidates(
['lib', 'lib.target'], library_name,
lambda filename: filter(os.path.exists, [filename]))
def TranslateLibPath(lib):
# SymbolInformation(lib, addr) receives lib as the path from symbols
# root to the symbols file. This needs to be translated to point to the
# correct .so path. If the user doesn't explicitly specify which directory to
# use, then use the most recently updated one in one of the known directories.
# If the .so is not found somewhere in CHROME_SYMBOLS_DIR, leave it
# untranslated in case it is an Android symbol in SYMBOLS_DIR.
library_name = os.path.basename(lib)
# The filename in the stack trace maybe an APK name rather than a library
# name. This happens when the library was loaded directly from inside the
# APK. If this is the case we try to figure out the library name by looking
# for a matching APK file and finding the name of the library in contains.
# The name of the APK file on the device is of the form
# <package_name>-<number>.apk. The APK file on the host may have any name
# so we look at the APK badging to see if the package name matches.
if re.search('-[0-9]+[.]apk$', library_name):
mapping = MapDeviceApkToLibrary(library_name)
if mapping:
library_name = mapping
candidate_libraries = GetCandidateLibraries(library_name)
if not candidate_libraries:
return lib
library_path = os.path.relpath(candidate_libraries[0], SYMBOLS_DIR)
return '/' + library_path
def SymbolInformation(lib, addr, get_detailed_info):
"""Look up symbol information about an address.
Args:
lib: library (or executable) pathname containing symbols
addr: string hexidecimal address
Returns:
A list of the form [(source_symbol, source_location,
object_symbol_with_offset)].
If the function has been inlined then the list may contain
more than one element with the symbols for the most deeply
nested inlined location appearing first. The list is
always non-empty, even if no information is available.
Usually you want to display the source_location and
object_symbol_with_offset from the last element in the list.
"""
lib = TranslateLibPath(lib)
info = SymbolInformationForSet(lib, set([addr]), get_detailed_info)
return (info and info.get(addr)) or [(None, None, None)]
def SymbolInformationForSet(lib, unique_addrs, get_detailed_info):
"""Look up symbol information for a set of addresses from the given library.
Args:
lib: library (or executable) pathname containing symbols
unique_addrs: set of hexidecimal addresses
Returns:
A dictionary of the form {addr: [(source_symbol, source_location,
object_symbol_with_offset)]} where each address has a list of
associated symbols and locations. The list is always non-empty.
If the function has been inlined then the list may contain
more than one element with the symbols for the most deeply
nested inlined location appearing first. The list is
always non-empty, even if no information is available.
Usually you want to display the source_location and
object_symbol_with_offset from the last element in the list.
"""
if not lib:
return None
addr_to_line = CallAddr2LineForSet(lib, unique_addrs)
if not addr_to_line:
return None
if get_detailed_info:
addr_to_objdump = CallObjdumpForSet(lib, unique_addrs)
if not addr_to_objdump:
return None
else:
addr_to_objdump = dict((addr, ("", 0)) for addr in unique_addrs)
result = {}
for addr in unique_addrs:
source_info = addr_to_line.get(addr)
if not source_info:
source_info = [(None, None)]
if addr in addr_to_objdump:
(object_symbol, object_offset) = addr_to_objdump.get(addr)
object_symbol_with_offset = FormatSymbolWithOffset(object_symbol,
object_offset)
else:
object_symbol_with_offset = None
result[addr] = [(source_symbol, source_location, object_symbol_with_offset)
for (source_symbol, source_location) in source_info]
return result
class MemoizedForSet(object):
def __init__(self, fn):
self.fn = fn
self.cache = {}
def __call__(self, lib, unique_addrs):
lib_cache = self.cache.setdefault(lib, {})
no_cache = filter(lambda x: x not in lib_cache, unique_addrs)
if no_cache:
lib_cache.update((k, None) for k in no_cache)
result = self.fn(lib, no_cache)
if result:
lib_cache.update(result)
return dict((k, lib_cache[k]) for k in unique_addrs if lib_cache[k])
@MemoizedForSet
def CallAddr2LineForSet(lib, unique_addrs):
"""Look up line and symbol information for a set of addresses.
Args:
lib: library (or executable) pathname containing symbols
unique_addrs: set of string hexidecimal addresses look up.
Returns:
A dictionary of the form {addr: [(symbol, file:line)]} where
each address has a list of associated symbols and locations
or an empty list if no symbol information was found.
If the function has been inlined then the list may contain
more than one element with the symbols for the most deeply
nested inlined location appearing first.
"""
if not lib:
return None
symbols = SYMBOLS_DIR + lib
if not os.path.isfile(symbols):
return None
(label, platform, target) = FindToolchain()
cmd = [ToolPath("addr2line"), "--functions", "--inlines",
"--demangle", "--exe=" + symbols]
child = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
result = {}
addrs = sorted(unique_addrs)
for addr in addrs:
child.stdin.write("0x%s\n" % addr)
child.stdin.flush()
records = []
first = True
while True:
symbol = child.stdout.readline().strip()
if symbol == "??":
symbol = None
location = child.stdout.readline().strip()
if location == "??:0":
location = None
if symbol is None and location is None:
break
records.append((symbol, location))
if first:
# Write a blank line as a sentinel so we know when to stop
# reading inlines from the output.
# The blank line will cause addr2line to emit "??\n??:0\n".
child.stdin.write("\n")
first = False
result[addr] = records
child.stdin.close()
child.stdout.close()
return result
def StripPC(addr):
"""Strips the Thumb bit a program counter address when appropriate.
Args:
addr: the program counter address
Returns:
The stripped program counter address.
"""
global ARCH
if ARCH == "arm":
return addr & ~1
return addr
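# Example (illustrative): on ARM, bit 0 only marks Thumb mode, so
# StripPC(0x177b3) == 0x177b2; for other architectures the address is unchanged.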
@MemoizedForSet
def CallObjdumpForSet(lib, unique_addrs):
"""Use objdump to find out the names of the containing functions.
Args:
lib: library (or executable) pathname containing symbols
unique_addrs: set of string hexidecimal addresses to find the functions for.
Returns:
A dictionary of the form {addr: (string symbol, offset)}.
"""
if not lib:
return None
symbols = SYMBOLS_DIR + lib
if not os.path.exists(symbols):
return None
result = {}
# Function lines look like:
# 000177b0 <android::IBinder::~IBinder()+0x2c>:
# We pull out the address and function first. Then we check for an optional
# offset. This is tricky due to functions that look like "operator+(..)+0x2c"
func_regexp = re.compile("(^[a-f0-9]*) \<(.*)\>:$")
offset_regexp = re.compile("(.*)\+0x([a-f0-9]*)")
# A disassembly line looks like:
# 177b2: b510 push {r4, lr}
  asm_regexp = re.compile("(^[ a-f0-9]*):[ a-f0-9]*.*$")
for target_addr in unique_addrs:
start_addr_dec = str(StripPC(int(target_addr, 16)))
stop_addr_dec = str(StripPC(int(target_addr, 16)) + 8)
cmd = [ToolPath("objdump"),
"--section=.text",
"--demangle",
"--disassemble",
"--start-address=" + start_addr_dec,
"--stop-address=" + stop_addr_dec,
symbols]
current_symbol = None # The current function symbol in the disassembly.
current_symbol_addr = 0 # The address of the current function.
stream = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
for line in stream:
# Is it a function line like:
# 000177b0 <android::IBinder::~IBinder()>:
components = func_regexp.match(line)
if components:
# This is a new function, so record the current function and its address.
current_symbol_addr = int(components.group(1), 16)
current_symbol = components.group(2)
# Does it have an optional offset like: "foo(..)+0x2c"?
components = offset_regexp.match(current_symbol)
if components:
current_symbol = components.group(1)
offset = components.group(2)
if offset:
current_symbol_addr -= int(offset, 16)
# Is it an disassembly line like:
# 177b2: b510 push {r4, lr}
components = asm_regexp.match(line)
if components:
addr = components.group(1)
i_addr = int(addr, 16)
i_target = StripPC(int(target_addr, 16))
if i_addr == i_target:
result[target_addr] = (current_symbol, i_target - current_symbol_addr)
stream.close()
return result
def CallCppFilt(mangled_symbol):
cmd = [ToolPath("c++filt")]
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
process.stdin.write(mangled_symbol)
process.stdin.write("\n")
process.stdin.close()
demangled_symbol = process.stdout.readline().strip()
process.stdout.close()
return demangled_symbol
def FormatSymbolWithOffset(symbol, offset):
if offset == 0:
return symbol
return "%s+%d" % (symbol, offset)
|
py | b412e1d77f59ae8785fc128202020b478df7af44 | import factory
from ..models import User
class UserFactory(factory.django.DjangoModelFactory):
username = factory.Sequence(lambda n: 'user-{0}'.format(n))
email = factory.Sequence(lambda n: 'user-{0}@example.com'.format(n))
password = factory.PostGenerationMethodCall('set_password', 'password')
class Meta:
model = User
django_get_or_create = ('username', )
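# Usage sketch (requires a configured Django test database):
#   user = UserFactory()                   # e.g. username 'user-0', password 'password'
#   named = UserFactory(username='alice')  # keyword overrides replace the sequence value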
|
py | b412e21cedc58bcde94cf299a15b9caf47a4e8f2 | # -*- coding: utf-8 -*-
"""This module contains basic data cleaning functions"""
from typing import Union
import pandas as pd
def drop_duplicated_indices(df: Union[pd.Series, pd.DataFrame]) -> Union[pd.Series, pd.DataFrame]:
"""If one concatenates dataframes there might be duplicated
indices. This can lead to problems, e.g., in interpolation steps.
One easy solution can be to just drop the duplicated row
Args:
df (Union[pd.Series, pd.DataFrame]): Input data
Returns:
Union[pd.Series, pd.DataFrame]: Data without duplicated indices
"""
return df[~df.index.duplicated()]
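# Usage sketch (illustrative):
#   s = pd.Series([1, 2, 3], index=[0, 0, 1])
#   drop_duplicated_indices(s)   # keeps the first row per index -> values [1, 3]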
|
py | b412e4194529084120e20ae7e275ed82b0c47a69 | from submission import Submission
class MathieuSubmission(Submission):
def run(self, s):
steps = list(map(int, s.split("\n")))
n = len(steps)
i = 0
step_nb = 0
while 0 <= i < n:
new_i = steps[i] + i
            if steps[i] > 2:
                steps[i] -= 1
            else:
                steps[i] += 1
step_nb += 1
i = new_i
return step_nb
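# Worked example (the puzzle's sample input, shown for illustration):
#   MathieuSubmission().run("0\n3\n0\n1\n-3")  # -> 10 steps before leaving the list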
|
py | b412e44735d7cb84063f24c064b68d2995accbab | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowPartitionEndMessageResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'topic': 'str',
'partition': 'int',
'message_offset': 'int',
'timestamp': 'int'
}
attribute_map = {
'topic': 'topic',
'partition': 'partition',
'message_offset': 'message_offset',
'timestamp': 'timestamp'
}
def __init__(self, topic=None, partition=None, message_offset=None, timestamp=None):
"""ShowPartitionEndMessageResponse - a model defined in huaweicloud sdk"""
super().__init__()
self._topic = None
self._partition = None
self._message_offset = None
self._timestamp = None
self.discriminator = None
if topic is not None:
self.topic = topic
if partition is not None:
self.partition = partition
if message_offset is not None:
self.message_offset = message_offset
if timestamp is not None:
self.timestamp = timestamp
@property
def topic(self):
"""Gets the topic of this ShowPartitionEndMessageResponse.
Topic名称。
:return: The topic of this ShowPartitionEndMessageResponse.
:rtype: str
"""
return self._topic
@topic.setter
def topic(self, topic):
"""Sets the topic of this ShowPartitionEndMessageResponse.
Topic名称。
:param topic: The topic of this ShowPartitionEndMessageResponse.
:type: str
"""
self._topic = topic
@property
def partition(self):
"""Gets the partition of this ShowPartitionEndMessageResponse.
分区编号。
:return: The partition of this ShowPartitionEndMessageResponse.
:rtype: int
"""
return self._partition
@partition.setter
def partition(self, partition):
"""Sets the partition of this ShowPartitionEndMessageResponse.
分区编号。
:param partition: The partition of this ShowPartitionEndMessageResponse.
:type: int
"""
self._partition = partition
@property
def message_offset(self):
"""Gets the message_offset of this ShowPartitionEndMessageResponse.
最新消息位置。
:return: The message_offset of this ShowPartitionEndMessageResponse.
:rtype: int
"""
return self._message_offset
@message_offset.setter
def message_offset(self, message_offset):
"""Sets the message_offset of this ShowPartitionEndMessageResponse.
最新消息位置。
:param message_offset: The message_offset of this ShowPartitionEndMessageResponse.
:type: int
"""
self._message_offset = message_offset
@property
def timestamp(self):
"""Gets the timestamp of this ShowPartitionEndMessageResponse.
最新消息时间戳。
:return: The timestamp of this ShowPartitionEndMessageResponse.
:rtype: int
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""Sets the timestamp of this ShowPartitionEndMessageResponse.
最新消息时间戳。
:param timestamp: The timestamp of this ShowPartitionEndMessageResponse.
:type: int
"""
self._timestamp = timestamp
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowPartitionEndMessageResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | b412e4548d9c09c6ffb80512e8df01197b5fcf4b | """
This module helps to navigate inside json file.
"""
import json
# import pprint
def read_file(path: str):
'''
This function reads json file.
'''
json_file = open(path, encoding="utf-8")
return json.load(json_file)
def navigate(data):
'''
This function navigates in json file via command line.
'''
if isinstance(data, list):
print("♥ ♥ ♥ ♥ ♥")
print(f"Enter the value from indexes in range 0 to {len(data) - 1}")
# print("♥ ♥ ♥ ♥ ♥")
while True:
print("♥ ♥ ♥ ♥ ♥")
print(f"Available indexes {[ind for ind in range(len(data) - 1)]}")
print("♥ ♥ ♥ ♥ ♥")
user_key = input("Make your choice: ")
if user_key in [str(ind) for ind in range(len(data) - 1)]:
return navigate(data[int(user_key)])
else:
continue
return navigate(data[int(user_key)])
if isinstance(data, dict):
keys_base = list(data.keys())
print("♥ ♥ ♥ ♥ ♥")
print("Enter the value from keys:")
while True:
print("♥ ♥ ♥ ♥ ♥")
for key in keys_base:
print(f"- {key}")
print("♥ ♥ ♥ ♥ ♥")
user_key = input("Make your choice: ")
if user_key in keys_base:
return navigate(data[user_key])
else:
continue
    # base case: a scalar value was reached
    return data
if __name__ == "__main__":
json_data = read_file("example_2.json")
print(navigate(json_data))
# print()
print("♥ ♥ ♥ ♥ ♥")
print('Thanks for using this program :)')
print("♥ ♥ ♥ ♥ ♥")
|
py | b412e4d10f47c81097051e2606ea86ee08b389fb | """
tests.unit.utils.markers.test_skip_if_binaries_missing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the "skip_if_binaries_missing" marker helper
"""
import saltfactories.utils.markers as markers
def test_single_existing():
assert markers.skip_if_binaries_missing(["python"]) is None
def test_multiple_existing():
assert markers.skip_if_binaries_missing(["python", "pip"]) is None
def test_single_non_existing_with_message():
reason = markers.skip_if_binaries_missing(["python9"], message="Dam!")
assert reason is not None
assert reason == "Dam! The 'python9' binary was not found"
def test_multiple_one_missing():
reason = markers.skip_if_binaries_missing(["python", "pip9"])
assert reason is not None
assert reason == "The 'pip9' binary was not found"
def test_multiple_all_missing():
reason = markers.skip_if_binaries_missing(["python9", "pip9"])
assert reason is not None
assert reason == "The 'python9' binary was not found"
def test_multiple_one_missing_check_all_false():
reason = markers.skip_if_binaries_missing(["python", "pip9"], check_all=False)
# We should get no message back because the python binary is found
assert reason is None, reason
reason = markers.skip_if_binaries_missing(["python9", "pip"], check_all=False)
# We should get no message back because the pip binary is found
assert reason is None, reason
def test_multiple_one_missing_check_all_false_with_message():
reason = markers.skip_if_binaries_missing(["python", "pip9"], message="Dam!", check_all=False)
# We should get no message back because the python binary is found
assert reason is None
def test_multiple_missing_check_all_false():
reason = markers.skip_if_binaries_missing(["python9", "pip9"], check_all=False)
assert reason is not None
assert reason == "None of the following binaries was found: python9, pip9"
def test_multiple_missing_check_all_false_with_message():
reason = markers.skip_if_binaries_missing(["python9", "pip9"], message="Dam!", check_all=False)
assert reason is not None
assert reason == "Dam! None of the following binaries was found: python9, pip9"
|
py | b412e6354fe826705863a23cfd3d863583025fcb | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class InboundSecurityRuleOperations(object):
"""InboundSecurityRuleOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
rule_collection_name, # type: str
parameters, # type: "_models.InboundSecurityRule"
**kwargs # type: Any
):
# type: (...) -> "_models.InboundSecurityRule"
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundSecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'ruleCollectionName': self._serialize.url("rule_collection_name", rule_collection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'InboundSecurityRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('InboundSecurityRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('InboundSecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/inboundSecurityRules/{ruleCollectionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
rule_collection_name, # type: str
parameters, # type: "_models.InboundSecurityRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.InboundSecurityRule"]
"""Creates or updates the specified Network Virtual Appliance Inbound Security Rules.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of the Network Virtual Appliance.
:type network_virtual_appliance_name: str
:param rule_collection_name: The name of security rule collection.
:type rule_collection_name: str
:param parameters: Parameters supplied to the create or update Network Virtual Appliance
Inbound Security Rules operation.
:type parameters: ~azure.mgmt.network.v2020_11_01.models.InboundSecurityRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either InboundSecurityRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_11_01.models.InboundSecurityRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundSecurityRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_virtual_appliance_name=network_virtual_appliance_name,
rule_collection_name=rule_collection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('InboundSecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'ruleCollectionName': self._serialize.url("rule_collection_name", rule_collection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
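        # Pick the long-running-operation poller: ARMPolling by default, NoPolling when polling=False, or a caller-supplied PollingMethod.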
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/inboundSecurityRules/{ruleCollectionName}'} # type: ignore
|
py | b412e69a12ad8cbbf6f022f4c2c7a49a5e364945 | import time
import os
import struct
import sys
import numpy
import scipy
file_path = os.path.dirname(os.path.abspath(__file__))
pyspace_path = file_path[:file_path.rfind('pySPACE')-1]
if not pyspace_path in sys.path:
sys.path.append(pyspace_path)
import pySPACE
from pySPACE.missions.support.WindowerInterface import AbstractStreamReader
header_mrk = "Brain Vision Data Exchange Marker File, Version 1.0\n\
; Data written by the recorder script.\n\
[Common Infos]\n\
Codepage=UTF-8\n\
DataFile=%s\n\
\n\
[Marker Infos]\n\
; Each entry: Mk<Marker number>=<Type>,<Description>,<Position in data points>,\n\
; <Size in data points>, <Channel number (0 = marker is related to all channels)>\n\
; Fields are delimited by commas, some fields might be omitted (empty).\n\
; Commas in type or description text are coded as \"\\1\".\n\
Mk1=New Segment,,1,1,0,%s\n"
header_hdr = "Brain Vision Data Exchange Header File Version 1.0\n\
; Data written by the recorder script.\n\
\n\
[Common Infos]\n\
Codepage=UTF-8\n\
DataFile=%s\n\
MarkerFile=%s\n\
DataFormat=BINARY\n\
; Data orientation: MULTIPLEXED=ch1,pt1, ch2,pt1 ...\n\
DataOrientation=MULTIPLEXED\n\
NumberOfChannels=%d\n\
; Sampling interval in microseconds\n\
SamplingInterval=%d\n\
\n\
[Binary Infos]\n\
BinaryFormat=INT_16\n\
\n\
[Channel Infos]\n\
; Each entry: Ch<Channel number>=<Name>,<Reference channel name>,\n\
; <Resolution in \"Unit\">,<Unit>, Future extensions..\n\
; Fields are delimited by commas, some fields might be omitted (empty).\n\
; Commas in channel names are coded as \"\\1\".\n"
class Recorder(object):
def __init__(self, client=None, folder=None, subject=None,
task="pySPACE", online=False, **kwargs):
super(Recorder, self).__init__(**kwargs)
if folder is None:
folder = pySPACE.configuration.storage
self.folder = folder
self.subject = str(subject)
self.task = task
self.date = time.strftime("%Y%m%d") # append %H%M%S for time
self.set_no = self.find_next_set()
if not os.path.exists(self.folder):
os.makedirs(self.folder)
assert os.path.exists(self.folder), "Could not create Directory!"
self.eeg_filename = str("%s_r_%s_%s_%sSet%d.eeg" %
(self.date, self.subject, self.task,
"_online_" if online else "", self.set_no))
self.vhdr_filename = str("%s_r_%s_%s_%sSet%d.vhdr" %
(self.date, self.subject, self.task,
"_online_" if online else "", self.set_no))
self.vmrk_filename = str("%s_r_%s_%s_%sSet%d.vmrk" %
(self.date, self.subject, self.task,
"_online_" if online else "", self.set_no))
self.eeg = open(os.path.join(self.folder, self.eeg_filename), "w")
self.vhdr = open(os.path.join(self.folder, self.vhdr_filename), "w")
self.vmrk = open(os.path.join(self.folder, self.vmrk_filename), "w")
self.client = client
if client is not None:
self.set_eeg_client(client)
self.datapoint = 0
self.markerno = 2
def set_eeg_client(self, client):
if not isinstance(client, AbstractStreamReader):
raise IOError, "No AbstractStreamReader compatible data-source!"
self.client = client
self.write_header()
self.client.regcallback(self.write_data)
def has_client(self):
return (self.client is not None)
def find_next_set(self):
set_no = 1
while True:
filename = str("%s_r_%s_%s_Set%d.eeg" %
(self.date, self.subject, self.task, set_no))
abs_filename = os.path.join(self.folder, filename)
if not os.path.isfile(abs_filename):
break
set_no += 1
return set_no
def write_header(self):
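        # Write the BrainVision header (.vhdr) and marker (.vmrk) preambles; SamplingInterval in the header is given in microseconds.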
self.vhdr.write(header_hdr % (self.eeg_filename, self.vmrk_filename,
self.client.nChannels, 1000000/self.client.dSamplingInterval))
for i in range(self.client.nChannels):
self.vhdr.write(str("Ch%d=%s,,100,nV\n" % (i+1,self.client.channelNames[i])))
self.vhdr.flush()
self.vmrk.write(header_mrk % (self.eeg_filename, time.strftime("%Y%m%d%H%M%S")))
self.vmrk.flush()
def write_data(self, data, marker):
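        # Pack samples as 16-bit integers in multiplexed order (all channels per data point), matching DataOrientation=MULTIPLEXED
        # in the header; the transpose assumes arrays arrive as (channels, samples).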
if isinstance(data, numpy.ndarray):
data_ = data.astype(numpy.int16)
data_ = scipy.transpose(data_)
buf = struct.pack("%dh"%len(data.flatten()), *data_.flatten())
else:
buf = struct.pack('%dh'%len(data), *data)
self.eeg.write(buf)
self.eeg.flush()
for i,m in enumerate(marker):
if m != -1:
self.vmrk.write(str("Mk%d=Stimulus,%s,%d,1,0\n" %
(self.markerno,self.client.markerNames[m],self.datapoint+i)))
self.markerno += 1
self.datapoint += len(marker)
self.vmrk.flush()
|
py | b412e85688be6ee9ad81e4f86b053c97551b0778 | #!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The BitsCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitscoind with different proxy configuration.
Test plan:
- Start bitscoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitscoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitscoinds that connect to them
- Manipulate the bitscoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitsCoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitsCoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitscoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitscoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitscoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitscoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
|
py | b412e8595c874f08263b85aa2a071208a1131378 | import logging
import os
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
def start(bot, update):
update.effective_message.reply_text("Hi!")
def echo(bot, update):
update.effective_message.reply_text(update.effective_message.text)
def error(bot, update, error):
logger.warning('Update "%s" caused error "%s"', update, error)
if __name__ == "__main__":
    # Set these variables to the appropriate values
TOKEN = '545193892:AAF-i-kxjJBeEiVXL1PokHCCEGNnQ1sOXFo'
NAME = 'shayantt' # Same FQDN used when generating SSL Cert
PORT = 8443
print("SSS");
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
# Set up the Updater
updater = Updater(TOKEN)
dp = updater.dispatcher
# Add handlers
dp.add_handler(CommandHandler('start', start))
dp.add_handler(MessageHandler(Filters.text, echo))
dp.add_error_handler(error)
updater.start_polling()
# Start the webhook
#updater.start_webhook(listen="0.0.0.0",
# port=int(PORT),
# url_path=TOKEN)
#updater.bot.setWebhook("https://{}.herokuapp.com/{}".format(NAME, TOKEN))
#print("SSS");
#updater.idle()
|
py | b412e8f469bce1c487e2ef993430f8ee68b74ed3 | import unittest
from consecutive_prime_sums import *
class ConsecutivePrimeSumsTest(unittest.TestCase):
def test_consecutive_prime_sums(self):
self.assertEqual(consecutive_prime_sum_less_than(100), 41)
self.assertEqual(consecutive_prime_sum_less_than(1000), 953)
self.assertEqual(consecutive_prime_sum_less_than(10000), 9521)
self.assertEqual(consecutive_prime_sum_less_than(100000), 92951)
@unittest.skip("A little bit slow to run all the time")
def test_project_euler(self):
self.assertEqual(consecutive_prime_sum_less_than(1000000), 997651)
if __name__ == '__main__':
unittest.main()
|
py | b412e92c5ce11a5cbc4c299854235042eb9699c1 | # Created by Martin Strohalm, Thermo Fisher Scientific
# import modules
import xml.etree.cElementTree as etree
from .converter import register, ValueConverter
@register("ED0FB1D9-4E07-47E1-B96C-4013B9AFE534")
class MassSpectrumConverter(ValueConverter):
"""
The pyeds.MassSpectrumConverter is used to convert mass spectrum data from
original binary format into pyeds.MassSpectrum.
"""
def Convert(self, value):
"""
Converts binary spectrum data.
Args:
value: pyeds.Binary
Binary data as stored in result file.
Returns:
pyeds.MassSpectrum or None
Parsed spectrum.
"""
# check value
if not value:
return None
# parse data
return MassSpectrumParser().parse(value.Unzip())
class MassSpectrumParser(object):
"""
The pyeds.MassSpectrumParser is used to parse mass spectrum data from
original binary format into pyeds.MassSpectrum.
"""
def parse(self, xml):
"""
Parses given pattern XML.
Args:
xml: str
Spectrum XML.
Returns:
pyeds.MassSpectrum
Mass spectrum.
"""
# parse XML
tree = etree.fromstring(xml)
# retrieve spectrum header
header_elm = tree.find('Header')
header = self._retrieve_header(header_elm)
# retrieve scan event
event_elm = tree.find('ScanEvent')
event = self._retrieve_event(event_elm)
# retrieve precursor info
precursor_elm = tree.find('PrecursorInfo')
precursor = self._retrieve_precursor(precursor_elm)
# retrieve centroids data
peaks_elm = tree.find('PeakCentroids')
centroids = self._retrieve_centroids(peaks_elm)
# retrieve profile data
points_elm = tree.find('ProfilePoints')
profile = self._retrieve_profile(points_elm)
# free memory
tree.clear()
# create spectrum
spectrum = MassSpectrum()
spectrum.Header = header
spectrum.Event = event
spectrum.Precursor = precursor
spectrum.Centroids = centroids
spectrum.Profile = profile
return spectrum
def _retrieve_header(self, header_elm):
"""Retrieves spectrum header."""
# init header
header = ScanHeader()
# get header data
if header_elm is not None:
elm = header_elm.find('SpectrumID')
if elm is not None and elm.text:
header.SpectrumID = int(elm.text)
elm = header_elm.find('InstrumentName')
if elm is not None:
header.InstrumentName = elm.text
elm = header_elm.find('DataType')
if elm is not None:
header.DataType = elm.text
elm = header_elm.find('LowPosition')
if elm is not None and elm.text:
header.LowPosition = float(elm.text)
elm = header_elm.find('HighPosition')
if elm is not None and elm.text:
header.HighPosition = float(elm.text)
elm = header_elm.find('BasePeakPosition')
if elm is not None and elm.text:
header.BasePeakPosition = float(elm.text)
elm = header_elm.find('BasePeakIntensity')
if elm is not None and elm.text:
header.BasePeakIntensity = float(elm.text)
elm = header_elm.find('TotalIntensity')
if elm is not None and elm.text:
header.TotalIntensity = float(elm.text)
# retrieve identifiers
identifiers_elm = header_elm.find('SpectrumIdentifiers')
            if identifiers_elm is not None:
for identifier_elm in identifiers_elm.iter('SpectrumIdentifier'):
identifier = ScanIdentifier()
attr = identifier_elm.get('FileID', None)
if attr is not None and attr != "-1" and attr != "":
identifier.FileID = int(attr)
attr = identifier_elm.get('ScanNumber', None)
if attr is not None and attr != "":
identifier.ScanNumber = int(attr)
attr = identifier_elm.get('MasterScanNumber', None)
if attr is not None and attr != "-1" and attr != "":
identifier.MasterScanNumber = int(attr)
attr = identifier_elm.get('RetentionTime', None)
if attr is not None and attr != "":
identifier.RetentionTime = float(attr)
# add to header
header.SpectrumIdentifiers.append(identifier)
return header
def _retrieve_event(self, event_elm):
"""Retrieves event data."""
# init event
event = ScanEvent()
# get scan event data
if event_elm is not None:
elm = event_elm.find('ActivationTypes')
if elm is not None:
event.ActivationTypes = elm.text
energies_elm = event_elm.find('ActivationEnergies')
if energies_elm is not None:
event.ActivationEnergies = []
for elm in energies_elm.iter('double'):
event.ActivationEnergies.append(float(elm.text))
elm = event_elm.find('CompensationVoltage')
if elm is not None and elm.text:
event.CompensationVoltage = float(elm.text)
elm = event_elm.find('IonizationSource')
if elm is not None and elm.text:
event.IonizationSource = elm.text
elm = event_elm.find('IsMultiplexed')
if elm is not None:
event.IsMultiplexed = elm.text == 'true'
elm = event_elm.find('IsolationMass')
if elm is not None and elm.text:
event.IsolationMass = float(elm.text)
elm = event_elm.find('IsolationWidth')
if elm is not None and elm.text:
event.IsolationWidth = float(elm.text)
elm = event_elm.find('IsolationOffset')
if elm is not None and elm.text:
event.IsolationOffset = float(elm.text)
elm = event_elm.find('MassAnalyzer')
if elm is not None:
event.MassAnalyzer = elm.text
elm = event_elm.find('MSOrder')
if elm is not None:
event.MSOrder = elm.text
elm = event_elm.find('Polarity')
if elm is not None:
event.Polarity = elm.text
elm = event_elm.find('ResolutionAtMass200')
if elm is not None and elm.text:
event.ResolutionAtMass200 = int(elm.text)
elm = event_elm.find('ScanRate')
if elm is not None:
event.ScanRate = elm.text
elm = event_elm.find('ScanType')
if elm is not None:
event.ScanType = elm.text
return event
def _retrieve_precursor(self, precursor_elm):
"""Retrieves precursor data."""
# init precursor
precursor = PrecursorInfo()
# get precursor data
if precursor_elm is not None:
attr = precursor_elm.get('Charge', None)
if attr is not None and attr != "":
precursor.Charge = int(attr)
attr = precursor_elm.get('Intensity', None)
if attr is not None and attr != "":
precursor.Intensity = float(attr)
attr = precursor_elm.get('InstrumentDeterminedCharge', None)
if attr is not None and attr != "":
precursor.InstrumentDeterminedCharge = int(attr)
attr = precursor_elm.get('InstrumentDeterminedMonoisotopicMass', None)
if attr is not None and attr != "":
precursor.InstrumentDeterminedMonoisotopicMass = float(attr)
attr = precursor_elm.get('IonInjectTime', None)
if attr is not None and attr != "":
precursor.IonInjectTime = float(attr)
attr = precursor_elm.get('IsolationMass', None)
if attr is not None and attr != "":
precursor.IsolationMass = float(attr)
attr = precursor_elm.get('IsolationOffset', None)
if attr is not None and attr != "":
precursor.IsolationOffset = float(attr)
attr = precursor_elm.get('IsolationWidth', None)
if attr is not None and attr != "":
precursor.IsolationWidth = float(attr)
attr = precursor_elm.get('PercentIsolationInterference', None)
if attr is not None and attr != "":
precursor.PrecursorInterference = float(attr)
attr = precursor_elm.get('PrecursorMassOrigin', None)
if attr is not None and attr != "":
precursor.PrecursorMassOrigin = str(attr)
attr = precursor_elm.get('Resolution', None)
if attr is not None and attr != "":
precursor.Resolution = int(attr)
attr = precursor_elm.get('SignalToNoise', None)
if attr is not None and attr != "":
precursor.SignalToNoise = float(attr)
attr = precursor_elm.get('SinglyChargedMass', None)
if attr is not None and attr != "":
precursor.SinglyChargedMass = float(attr)
attr = precursor_elm.get('SpectrumNumber', None)
if attr is not None and attr != "":
precursor.SpectrumNumber = int(attr)
# get spectrum header
header_elm = precursor_elm.find('SpectrumHeader')
precursor.Header = self._retrieve_header(header_elm)
# get scan event
event_elm = precursor_elm.find('ScanEvent')
precursor.Event = self._retrieve_event(event_elm)
# get mono centroids
peaks_elm = precursor_elm.find('MonoisotopicPeakCentroids')
precursor.MonoisotopicPeakCentroids = self._retrieve_centroids(peaks_elm)
# get measured centroids
peaks_elm = precursor_elm.find('MeasuredMonoisotopicPeakCentroids')
precursor.MeasuredMonoisotopicPeakCentroids = self._retrieve_centroids(peaks_elm)
# get cluster centroids
peaks_elm = precursor_elm.find('IsotopeClusterPeakCentroids')
precursor.IsotopeClusterPeakCentroids = self._retrieve_centroids(peaks_elm)
return precursor
def _retrieve_centroids(self, peaks_elm):
"""Retrieves centroids data."""
# init centroids
centroids = []
# retrieve centroids
if peaks_elm is not None:
for peak_elm in peaks_elm.iter('Peak'):
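                # Peak attributes: X = m/z, Y = intensity, Z = charge, SN = signal-to-noise, R = resolution.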
centroid = Centroid()
centroid.MZ = float(peak_elm.get('X', 0))
centroid.Intensity = float(peak_elm.get('Y', 0))
                charge = peak_elm.get('Z', None)
                centroid.Charge = int(charge) if charge is not None else None
                sn = peak_elm.get('SN', None)
                centroid.SN = float(sn) if sn is not None else None
                resolution = peak_elm.get('R', None)
                centroid.Resolution = float(resolution) if resolution is not None else None
centroids.append(centroid)
return tuple(centroids)
def _retrieve_profile(self, points_elm):
"""Retrieves profile data."""
# init profile
profile = []
# retrieve profile
if points_elm is not None:
for point_elm in points_elm.iter('Pt'):
mz = float(point_elm.get('X', 0))
ai = float(point_elm.get('Y', 0))
profile.append((mz, ai))
return tuple(profile)
class MassSpectrum(object):
"""
The pyeds.MassSpectrum is used to hold information about mass spectrum.
Attributes:
Header: pyeds.ScanHeader
Contains the spectrum header information.
Event: pyeds.ScanEvent
Contains the scan event information.
Precursor: pyeds.PrecursorInfo
Contains the precursor information.
Centroids: (pyeds.Centroid,)
Collection of spectrum centroids.
Profile: ((float, float),)
Collection of profile points as ((mz, intensity),)
"""
def __init__(self):
"""Initializes a new instance of MassSpectrum."""
self.Header = None
self.Event = None
self.Precursor = None
self.Centroids = None
self.Profile = None
def __str__(self):
"""Gets standard string representation."""
return "%s %s" % (self.Header, self.Event)
def __repr__(self):
"""Gets debug string representation."""
return "%s(%s)" % (self.__class__.__name__, self.__str__())
def __getattr__(self, name):
"""Tries to get unknown attribute from header or event."""
if self.Header is not None and hasattr(self.Header, name):
return getattr(self.Header, name)
if self.Event is not None and hasattr(self.Event, name):
return getattr(self.Event, name)
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
class ScanHeader(object):
"""
The pyeds.ScanHeader is used to hold information from mass spectrum header.
"""
def __init__(self):
"""Initializes a new instance of ScanHeader."""
self.BasePeakIntensity = None
self.BasePeakPosition = None
self.DataType = None
self.HighPosition = None
self.InstrumentName = None
self.LowPosition = None
self.SpectrumID = None
self.SpectrumIdentifiers = []
self.TotalIntensity = None
def __str__(self):
"""Gets standard string representation."""
data = "#%s" % self.ScanNumber
if self.MasterScanNumber is not None:
data += "-#%s" % self.MasterScanNumber
if self.RetentionTime is not None:
rts = self.RetentionTime
if isinstance(rts, float):
rts = [rts]
rts = ", ".join("%.3f" % rt for rt in rts if rt is not None)
data += " RT:%s min" % rts
if self.LowPosition is not None and self.HighPosition is not None:
data += " [%.4f-%.4f]" % (self.LowPosition, self.HighPosition)
return data
def __repr__(self):
"""Gets debug string representation."""
return "%s(%s)" % (self.__class__.__name__, self.__str__())
@property
def FileID(self):
"""Gets FileID (if single value, otherwise None)."""
values = set(i.FileID for i in self.SpectrumIdentifiers)
return values.pop() if len(values) == 1 else None
@property
def ScanNumber(self):
"""Gets scan number (if single value, otherwise None)."""
values = self.ScanNumbers
return values[0] if len(values) == 1 else None
@property
def ScanNumbers(self):
"""Gets scan numbers."""
return tuple(s.ScanNumber for s in self.SpectrumIdentifiers)
@property
def MasterScanNumber(self):
"""Gets master scan number (if single value, otherwise None)."""
values = self.MasterScanNumbers
return values[0] if len(values) == 1 else None
@property
def MasterScanNumbers(self):
"""Gets master scan numbers."""
return tuple(s.MasterScanNumber for s in self.SpectrumIdentifiers)
@property
def RetentionTime(self):
"""Gets retention time (center if multiple values)."""
values = self.RetentionTimes
return 0.5*(min(values) + max(values)) if len(values) else None
@property
def RetentionTimes(self):
"""Gets retention times."""
return tuple(s.RetentionTime for s in self.SpectrumIdentifiers)
class ScanIdentifier(object):
"""
The pyeds.ScanIdentifier is used to hold information from mass spectrum
identifier.
"""
def __init__(self):
"""Initializes a new instance of ScanIdentifier."""
self.FileID = None
self.MasterScanNumber = None
self.RetentionTime = None
self.ScanNumber = None
def __str__(self):
"""Gets standard string representation."""
data = "#%s" % self.ScanNumber
if self.MasterScanNumber is not None:
data += "-#%s" % self.MasterScanNumber
if self.RetentionTime is not None:
data += " RT:%.3f min" % self.RetentionTime
if self.FileID is not None:
data += " FileID:%s" % self.FileID
return data
def __repr__(self):
"""Gets debug string representation."""
return "%s(%s)" % (self.__class__.__name__, self.__str__())
class ScanEvent(object):
"""
The pyeds.ScanEvent is used to hold information from scan event.
"""
def __init__(self):
"""Initializes a new instance of ScanEvent."""
self.ActivationEnergies = None
self.ActivationTypes = None
self.CompensationVoltage = None
self.IonizationSource = None
self.IsMultiplexed = None
self.IsolationMass = None
self.IsolationWidth = None
self.IsolationOffset = None
self.MassAnalyzer = None
self.MSOrder = None
self.Polarity = None
self.ResolutionAtMass200 = None
self.ScanRate = None
self.ScanType = None
def __str__(self):
"""Gets standard string representation."""
data = ""
if self.MSOrder is not None:
data += "%s" % self.MSOrder
if self.Polarity == 'Positive':
data += " (+)"
elif self.Polarity == 'Negative':
data += " (-)"
if self.MassAnalyzer is not None:
data += " %s" % self.MassAnalyzer
if self.ActivationTypes is not None:
data += " %s" % self.ActivationTypes
if self.ActivationEnergies is not None:
items = ("%.2f" % x for x in self.ActivationEnergies)
data += ":"+",".join(items)
if self.IsolationMass is not None:
data += " P:%.4f" % self.IsolationMass
if self.IsolationWidth is not None:
data += " [%.4f-%.4f]" % (self.IsolationLowMass, self.IsolationHighMass)
if self.CompensationVoltage is not None:
data += " CV:%s" % self.CompensationVoltage
return data
def __repr__(self):
"""Gets debug string representation."""
return "%s(%s)" % (self.__class__.__name__, self.__str__())
@property
def IsolationLowMass(self):
"""Gets low isolation mass."""
if self.IsolationMass is None or self.IsolationWidth is None:
return None
return self.IsolationMass - .5*self.IsolationWidth + (self.IsolationOffset or 0)
@property
def IsolationHighMass(self):
"""Gets high isolation mass."""
if self.IsolationMass is None or self.IsolationWidth is None:
return None
return self.IsolationMass + .5*self.IsolationWidth + (self.IsolationOffset or 0)
class PrecursorInfo(object):
"""
The pyeds.PrecursorInfo is used to hold information from precursor info.
"""
def __init__(self):
"""Initializes a new instance of PrecursorInfo."""
self.Header = None
self.Event = None
self.Charge = None
self.Intensity = None
self.InstrumentDeterminedCharge = None
self.InstrumentDeterminedMonoisotopicMass = None
self.IonInjectTime = None
self.IsolationMass = None
self.IsolationOffset = None
self.IsolationWidth = None
self.PercentIsolationInterference = None
self.PrecursorMassOrigin = None
self.Resolution = None
self.SignalToNoise = None
self.SinglyChargedMass = None
self.SpectrumNumber = None
self.IsotopeClusterPeakCentroids = []
self.MonoisotopicPeakCentroids = []
self.MeasuredMonoisotopicPeakCentroids = []
def __str__(self):
"""Gets standard string representation."""
data = "P"
if self.IsolationMass is not None:
data += " %.4f" % self.IsolationMass
if self.Charge is not None:
data += " (%+d)" % self.Charge
if self.IsolationWidth is not None and self.IsolationMass is not None:
data += " [%.4f-%.4f]" % (self.IsolationLowMass, self.IsolationHighMass)
return data
def __repr__(self):
"""Gets debug string representation."""
return "%s(%s)" % (self.__class__.__name__, self.__str__())
def __getattr__(self, name):
"""Tries to get unknown attribute from header or event."""
if self.Header is not None and hasattr(self.Header, name):
return getattr(self.Header, name)
if self.Event is not None and hasattr(self.Event, name):
return getattr(self.Event, name)
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
@property
def IsolationLowMass(self):
"""Gets low isolation mass."""
if self.IsolationMass is None or self.IsolationWidth is None:
return None
return self.IsolationMass - .5*self.IsolationWidth + (self.IsolationOffset or 0)
@property
def IsolationHighMass(self):
"""Gets high isolation mass."""
if self.IsolationMass is None or self.IsolationWidth is None:
return None
return self.IsolationMass + .5*self.IsolationWidth + (self.IsolationOffset or 0)
class Centroid(object):
"""
The pyeds.Centroid is used to hold information about a single mass centroid.
Attributes:
MZ: float
Mass-to-charge ratio.
Intensity: float
Absolute intensity.
SN: float or None
Signal-to-noise ratio.
Charge: int or None
Instrument assigned charge.
Resolution: int or None
Profile peak resolution.
"""
def __init__(self):
"""Initializes a new instance of Centroid."""
self.MZ = None
self.Intensity = None
self.SN = None
self.Charge = None
self.Resolution = None
def __str__(self):
"""Gets standard string representation."""
data = "MZ:%f Int:%f" % (self.MZ, self.Intensity)
if self.SN is not None:
data += " SN:%d" % self.SN
if self.Charge is not None:
data += " Z:%d" % self.Charge
if self.Resolution is not None:
data += " Res:%d" % self.Resolution
return data
def __repr__(self):
"""Gets debug string representation."""
return "%s(%s)" % (self.__class__.__name__, self.__str__())
|
py | b412e9c5c52db6524eaa515536ff2e87cca9fe5c | import sys
import numpy as np
from PyQt5 import QtWidgets
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QDialog, QPushButton, QWidget, QHBoxLayout, QApplication, QLabel, QVBoxLayout, QGridLayout
from PyQt5.uic import loadUi
import cv2
imageT = None
import windowSize
class mywin(QtWidgets.QDialog):
def __init__(self):
super(mywin, self).__init__()
# loadUi('img.ui',self)
self.desktop = QApplication.desktop()
self.screenRect = self.desktop.screenGeometry()
self.height = self.screenRect.height()
self.width = self.screenRect.width()
self.setWindowTitle("PyScrcpy")
print("pc屏幕分辨率为", (self.height, self.width))
minAxis = min(self.width, self.height) * 0.9 // 2 * 2
minAxis = int(minAxis)
self.resize(minAxis * 9 // 16, minAxis)
layout = QHBoxLayout()
frameLayout = QHBoxLayout()
buttonLayout = QVBoxLayout()
        self.all_btn = all_btn = QPushButton()
        all_btn.setText("One-click start")
        self.start_btn = start_btn = QPushButton()
        start_btn.setText("Start server")
        self.androit_btn = androit_btn = QPushButton()
        androit_btn.setText("Start android")
        self.qrShow_btn = qrShow_btn = QPushButton()
        qrShow_btn.setText("Show QR code")
# self.load_btn.clicked.connect(self.loadimage)
# self.
buttonLayout.addWidget(all_btn)
buttonLayout.addWidget(start_btn)
buttonLayout.addWidget(androit_btn)
buttonLayout.addWidget(qrShow_btn)
buttonLayout.addStretch()
self.img_label = QLabel()
# self.img_label.setMinimumWidth(720)
frameLayout.addWidget(self.img_label)
layout.addLayout(buttonLayout)
layout.addLayout(frameLayout)
self.setLayout(layout)
# self.setLayout(wLayout)
def loadimage(self):
global imageT
# image = cv2.cvtColor(imageT, cv2.COLOR_BGR2RGB)
self.qimg = QImage(imageT.data, imageT.shape[1], imageT.shape[0], QImage.Format_RGB888)
# QPixmap.loadFromData()
# self.qimg = self.qimg.rgbSwapped()
self.img_label.setPixmap(QPixmap.fromImage(self.qimg))
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
window = mywin()
window.show()
window.setWindowTitle("window")
# window.setGeometry(100,100,400,300)
sys.exit(app.exec_())
|
py | b412ea9ccae5cc479a4344f697266ff86de47c72 | from typing import List
import numpy as np
from reader import TrainData
class Perceptron:
def __init__(self, size: int, h: float = 0.75) -> None:
self.h = h
self.weights = self._get_weights(size)
    def train(self, train_data: List[TrainData], target_number: int, nu: float = 0.1, epochs: int = 10) -> None:
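        # Online perceptron learning: threshold activation, error = target - output, weight update w_i += nu * error * x_i per sample.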
for epoch in range(epochs):
for item in train_data:
target = 1 if target_number == item.number else 0
sum_local = self.__get_sum_for_weight(item.data)
res = self.__get_activation_function(sum_local)
error = self.__get_error(target, res)
self.__normalize_weights(item.data, nu, error)
def predict(self, test_data: TrainData):
sum_local = self.__get_sum_for_weight(test_data.data)
return self.__get_activation_function(sum_local)
@staticmethod
def _get_weights(size: int) -> np.array:
return np.random.uniform(-1, 1, size)
def __get_activation_function(self, sum_local: float) -> float:
return 1 if sum_local > self.h else 0
def __get_sum_for_weight(self, data: List[float]) -> float:
net_y = 0
length = len(data)
if length != len(self.weights):
            raise ValueError("input length %d does not match weight vector length %d" % (length, len(self.weights)))
for i in range(length):
net_y += data[i] * self.weights[i]
return net_y
@staticmethod
def __get_error(target: int, res: float) -> float:
return target - res
def __normalize_weights(self, data: List[float], nu: float, error: float) -> None:
for i in range(len(self.weights)):
self.weights[i] += error * nu * data[i]
|
py | b412eb87a7da809080782181368b14490903f7d3 | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=5, create_symlink=False)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=133,
dataset_joints=133,
dataset_channel=[
list(range(133)),
],
inference_channel=list(range(133)))
# model settings
model = dict(
type='TopDown',
pretrained='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w48-8ef0771d.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(48, 96)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(48, 96, 192)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(48, 96, 192, 384))),
),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=48,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='unbiased',
shift_heatmap=True,
modulate_kernel=11))
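# 192x256 input with 48x64 heatmaps (1/4 resolution); one output channel per COCO-WholeBody keypoint (133).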
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='../../../../dataset/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2, unbiased_encoding=True),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = '../../../../dataset/coco'
data = dict(
samples_per_gpu=32,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=256),
test_dataloader=dict(samples_per_gpu=256),
train=dict(
type='TopDownCocoWholeBodyDataset',
ann_file=f'{data_root}/annotations/coco_wholebody_train_v1.0.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoWholeBodyDataset',
ann_file=f'{data_root}/annotations/coco_wholebody_val_v1.0.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoWholeBodyDataset',
ann_file=f'{data_root}/annotations/coco_wholebody_val_v1.0.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
|
py | b412ebb3c2153495913f8615e592cf060297dbc1 | # pylint: disable=no-member,unused-argument,protected-access,redefined-outer-name
import pytest
from botocore.exceptions import ClientError
from django_dynamic_fixture import G
from tests.fixtures import api_client, boto_client, zone # noqa: F401
from tests.utils import strip_ns_and_soa, get_test_record
from zinc import models as m
@pytest.mark.django_db
def test_create_zone(api_client, boto_client):
root = 'example.com.presslabs.com.'
resp = api_client.post(
'/zones',
data={
'root': root,
}
)
assert resp.status_code == 201, resp.data
assert resp.data['root'] == root
_id = resp.data['id']
assert list(m.Zone.objects.all().values_list('id', 'root')) == [(_id, root)]
@pytest.mark.django_db
def test_create_zone_passing_wrong_params(api_client, boto_client):
resp = api_client.post(
'/zones',
data={
'id': 'asd',
'root': 'asdasd'
}
)
assert resp.status_code == 400, resp.data
assert resp.data['root'] == ['Invalid root domain']
@pytest.mark.django_db
def test_list_zones(api_client, boto_client):
zones = [G(m.Zone, root='1.test-zinc.com.', route53_id=None),
G(m.Zone, root='2.test-zinc.com.', route53_id=None)]
response = api_client.get('/zones')
assert [result['url'] for result in response.data] == [
"http://testserver/zones/{}".format(zone.id) for zone in zones]
assert ([(zone.id, zone.root, zone.dirty, zone.r53_zone.id) for zone in zones] ==
[(zone['id'], zone['root'], zone['dirty'], zone['route53_id'])
for zone in response.data])
@pytest.mark.django_db
def test_detail_zone(api_client, zone):
response = api_client.get(
'/zones/%s' % zone.id,
)
assert strip_ns_and_soa(response.data['records']) == [
get_test_record(zone)
]
assert response.data['route53_id'] == zone.route53_id
assert response.data['dirty'] is False
@pytest.mark.django_db
def test_delete_a_zone(api_client, zone, boto_client):
response = api_client.delete(
'/zones/%s' % zone.id
)
with pytest.raises(ClientError) as excp_info:
boto_client.get_hosted_zone(Id=zone.route53_id)
assert excp_info.value.response['Error']['Code'] == 'NoSuchHostedZone'
assert m.Zone.objects.filter(pk=zone.pk).count() == 0
assert not response.data
@pytest.mark.django_db
def test_policy_record_create_more_values(api_client, zone):
response = api_client.post(
'/zones/%s/records' % zone.id,
data={
'name': '@',
'type': 'CNAME',
'ttl': 300,
'values': ['test1.com', 'test2.com']
}
)
assert response.status_code == 400
assert response.data == {
'values': [
'Only one value can be specified for CNAME records.'
]
}
@pytest.mark.django_db
def test_create_zone_no_fqdn(api_client, boto_client):
root = 'presslabs.com'
resp = api_client.post(
'/zones',
data={
'root': root,
}
)
root += '.'
assert resp.status_code == 201, resp.data
assert resp.data['root'] == root
_id = resp.data['id']
assert list(m.Zone.objects.all().values_list('id', 'root')) == [(_id, root)]
|
py | b412ed4331fa8ea50febd02ef2a53abd4e3f766f | # Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
from nova import test
from nova.virt.hyperv import ioutils
class IOThreadTestCase(test.NoDBTestCase):
_FAKE_SRC = r'fake_source_file'
_FAKE_DEST = r'fake_dest_file'
_FAKE_MAX_BYTES = 1
def setUp(self):
self._iothread = ioutils.IOThread(
self._FAKE_SRC, self._FAKE_DEST, self._FAKE_MAX_BYTES)
super(IOThreadTestCase, self).setUp()
@mock.patch('__builtin__.open')
@mock.patch('os.rename')
@mock.patch('os.path.exists')
@mock.patch('os.remove')
def test_copy(self, fake_remove, fake_exists, fake_rename, fake_open):
fake_data = 'a'
fake_src = mock.Mock()
fake_dest = mock.Mock()
fake_src.read.return_value = fake_data
fake_dest.tell.return_value = 0
fake_exists.return_value = True
mock_context_manager = mock.MagicMock()
fake_open.return_value = mock_context_manager
mock_context_manager.__enter__.side_effect = [fake_src, fake_dest]
self._iothread._stopped.isSet = mock.Mock(side_effect=[False, True])
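        # isSet() yields False then True, so the copy loop is expected to run exactly one iteration before stopping.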
self._iothread._copy()
fake_dest.seek.assert_called_once_with(0, os.SEEK_END)
fake_dest.write.assert_called_once_with(fake_data)
fake_dest.close.assert_called_once_with()
fake_rename.assert_called_once_with(
self._iothread._dest, self._iothread._dest_archive)
fake_remove.assert_called_once_with(
self._iothread._dest_archive)
self.assertEqual(3, fake_open.call_count)
|
py | b412edbc1d4c5301a9811934c3e91bfa04c81ea3 | import sqlflow_models
def test_answer():
assert sqlflow_models.__version__ == sqlflow_models._version.__version__
|
py | b412edc3a91ecd956c438e0d2d9296d68f9b46bc | import html
from telethon.tl.functions.photos import GetUserPhotosRequest
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import MessageEntityMentionName
from telethon.utils import get_input_location
from . import *
@Andencento.on(andencento_cmd(pattern="getpic ?(.*)", outgoing=True))
@Andencento.on(sudo_cmd(pattern="getpic ?(.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
await eor(event, "Getting profile photo..")
replied_user, error_i_a = await get_full_user(event)
if replied_user is None:
await edit_or_reply(event, str(error_i_a))
return False
replied_user_profile_photos = await bot(
GetUserPhotosRequest(
user_id=replied_user.user.id, offset=42, max_id=0, limit=80
)
)
replied_user_profile_photos_count = "NaN"
try:
replied_user_profile_photos_count = replied_user_profile_photos.count
except AttributeError:
pass
user_id = replied_user.user.id
# some people have weird HTML in their names
first_name = html.escape(replied_user.user.first_name)
# https://stackoverflow.com/a/5072031/4723940
# some Deleted Accounts do not have first_name
if first_name is not None:
# some weird people (like me) have more than 4096 characters in their
# names
first_name = first_name.replace("\u2060", "")
# inspired by https://telegram.dog/afsaI181
user_bio = replied_user.about
if user_bio is not None:
user_bio = html.escape(replied_user.about)
common_chats = replied_user.common_chats_count
try:
dc_id, location = get_input_location(replied_user.profile_photo)
except Exception as e:
dc_id = "Need a Profile Picture to check **this**"
str(e)
caption = """Profile Pics (◠‿◕)
Person: <a href='tg://user?id={}'>{}</a>
""".format(
        user_id,
        first_name,
user_bio,
dc_id,
replied_user_profile_photos_count,
replied_user.user.restricted,
replied_user.user.verified,
replied_user.user.bot,
common_chats,
)
message_id_to_reply = event.message.reply_to_msg_id
if not message_id_to_reply:
message_id_to_reply = event.message.id
await bot.send_message(
event.chat_id,
caption,
reply_to=message_id_to_reply,
parse_mode="HTML",
file=replied_user.profile_photo,
force_document=False,
silent=True,
)
await event.delete()
async def get_full_user(event):
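    # Resolve the target user from a replied/forwarded message, a mention entity, a username or numeric ID argument, or the private chat itself.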
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
if previous_message.forward:
replied_user = await event.client(
GetFullUserRequest(
previous_message.forward.sender_id
or previous_message.forward.channel_id
)
)
return replied_user, None
else:
replied_user = await event.client(
GetFullUserRequest(previous_message.sender_id)
)
return replied_user, None
else:
input_str = None
try:
input_str = event.pattern_match.group(1)
except IndexError as e:
return None, e
if event.message.entities is not None:
mention_entity = event.message.entities
probable_user_mention_entity = mention_entity[0]
if isinstance(probable_user_mention_entity, MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
replied_user = await event.client(GetFullUserRequest(user_id))
return replied_user, None
else:
try:
user_object = await event.client.get_entity(input_str)
user_id = user_object.id
replied_user = await event.client(GetFullUserRequest(user_id))
return replied_user, None
except Exception as e:
return None, e
elif event.is_private:
try:
user_id = event.chat_id
replied_user = await event.client(GetFullUserRequest(user_id))
return replied_user, None
except Exception as e:
return None, e
else:
try:
user_object = await event.client.get_entity(int(input_str))
user_id = user_object.id
replied_user = await event.client(GetFullUserRequest(user_id))
return replied_user, None
except Exception as e:
return None, e
name = "Profile Photos"
@Andencento.on(andencento_cmd(pattern="poto ?(.*)", outgoing=True))
@Andencento.on(sudo_cmd(pattern="poto ?(.*)", allow_sudo=True))
async def potocmd(event):
uid = "".join(event.raw_text.split(maxsplit=1)[1:])
user = await event.get_reply_message()
chat = event.input_chat
if user:
photos = await event.client.get_profile_photos(user.sender)
u = True
else:
photos = await event.client.get_profile_photos(chat)
u = False
if uid.strip() == "":
uid = 1
if int(uid) <= (len(photos)):
send_photos = await event.client.download_media(photos[uid - 1])
await event.client.send_file(event.chat_id, send_photos)
else:
await eod(event, "No photo found of this NIBBA. Now u Die!")
await asyncio.sleep(2)
return
elif uid.strip() == "all":
if len(photos) > 0:
await event.client.send_file(event.chat_id, photos)
else:
try:
if u is True:
photo = await event.client.download_profile_photo(user.sender)
else:
photo = await event.client.download_profile_photo(event.input_chat)
await event.client.send_file(event.chat_id, photo)
            except Exception:
await eod(event, "**This user has no photos!**")
return
else:
try:
uid = int(uid)
if uid <= 0:
await eod(event, "```number Invalid!``` **Are you komedy Me ?**")
return
except BaseException:
await eod(event, "Are you komedy me ?")
return
if int(uid) <= (len(photos)):
send_photos = await event.client.download_media(photos[uid - 1])
await event.client.send_file(event.chat_id, send_photos)
else:
await eod(event, "No photo found of this NIBBA. Now u Die!")
await asyncio.sleep(2)
return
CmdHelp("getpfp").add_command(
"poto",
"<all> / <desired pp number>",
"Reply to user to get his/her profile pic. Use .poto <number> to get desired profile pic else use .poto all to get all profile pic(s). If you dont reply to a user then it gets group pics.",
).add_command(
"getpic",
"<reply> <username>",
"Gets the user's 1st profile pic. But this time with a caption. Try it yourself...",
).add_info(
"Steal Profile Pictures."
).add_warning(
"✅ Harmless Module."
).add()
|
py | b412ee3e9b6946388deb2003fbfca8e373e46c56 | from .images import get_punks
from .metadata import PunkMetadata, PunkType, Acccessory, get_all_metadata |
py | b412ef124064b41749322dd30f7ea55df234ac10 | # coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import importlib
import locale
import os
import warnings
class PantsLoader(object):
"""Loads and executes entrypoints."""
ENTRYPOINT_ENV_VAR = 'PANTS_ENTRYPOINT'
DEFAULT_ENTRYPOINT = 'pants.bin.pants_exe:main'
class InvalidLocaleError(Exception):
"""Raised when a valid locale can't be found."""
@staticmethod
def setup_warnings():
# We want to present warnings to the user, set this up before importing any of our own code,
# to ensure all deprecation warnings are seen, including module deprecations.
# The "default" action displays a warning for a particular file and line number exactly once.
# See https://docs.python.org/2/library/warnings.html#the-warnings-filter for the complete list.
warnings.simplefilter('default', DeprecationWarning)
@classmethod
def ensure_locale(cls):
# Sanity check for locale, See https://github.com/pantsbuild/pants/issues/2465.
# This check is done early to give good feedback to user on how to fix the problem. Other
# libraries called by Pants may fail with more obscure errors.
try:
locale.getlocale()[1] or locale.getdefaultlocale()[1]
except Exception as e:
raise cls.InvalidLocaleError(
'{}: {}\n'
' Could not get a valid locale. Check LC_* and LANG environment settings.\n'
' Example for US English:\n'
' LC_ALL=en_US.UTF-8\n'
' LANG=en_US.UTF-8'.format(type(e).__name__, e)
)
@staticmethod
def determine_entrypoint(env_var, default):
return os.environ.pop(env_var, default)
@staticmethod
def load_and_execute(entrypoint):
assert ':' in entrypoint, 'ERROR: entrypoint must be of the form `module.path:callable`'
module_path, func_name = entrypoint.split(':', 1)
module = importlib.import_module(module_path)
entrypoint_main = getattr(module, func_name)
assert callable(entrypoint_main), 'ERROR: entrypoint `{}` is not callable'.format(entrypoint)
entrypoint_main()
@classmethod
def run(cls):
cls.setup_warnings()
cls.ensure_locale()
entrypoint = cls.determine_entrypoint(cls.ENTRYPOINT_ENV_VAR, cls.DEFAULT_ENTRYPOINT)
cls.load_and_execute(entrypoint)
def main():
PantsLoader.run()
if __name__ == '__main__':
main()
|
py | b412ef22bfdb85e41a1064c2506c14df0698d675 | import sys
def part1(lines):
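    # Part 1: a passphrase is valid when it contains no duplicate words; sorting makes duplicates adjacent.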
valid_phrases = 0;
for row in lines:
words = row.split(' ')
words.sort()
valid = True
for i in range(len(words)-1):
if words[i] == words[i+1]:
valid = False
break
if valid:
valid_phrases += 1
return valid_phrases
def part2(lines):
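    # Part 2: a passphrase is valid when no two words are anagrams of each other (identical letters once sorted).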
valid_phrases = 0;
for row in lines:
words = row.split(' ')
words.sort()
valid = True
for i in range(len(words)):
for j in range(i+1, len(words)):
word1list = list(words[i])
word2list = list(words[j])
word1list.sort()
word2list.sort()
if word1list == word2list:
valid = False
break
if not valid:
break
if valid:
valid_phrases += 1
return valid_phrases
if __name__ == "__main__":
lines = [line.strip() for line in sys.stdin.readlines()]
    print(part1(lines))
    print(part2(lines))
|
py | b412ef3ee2860bf110718f28724063bef43aee7d | # -*- coding: utf-8 -*-
"""Public section, including homepage and signup."""
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import login_required, login_user, logout_user
from salary.extensions import login_manager
from salary.public.forms import LoginForm
from salary.user.forms import RegisterForm
from salary.user.models import User
from salary.utils import flash_errors
blueprint = Blueprint('public', __name__, static_folder='../static')
@login_manager.user_loader
def load_user(user_id):
"""Load user by ID."""
return User.get_by_id(int(user_id))
@blueprint.route('/', methods=['GET', 'POST'])
def home():
"""Home page."""
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash('You are logged in.', 'success')
redirect_url = request.args.get('next') or url_for('user.members')
return redirect(redirect_url)
else:
flash_errors(form)
return render_template('public/home.html', form=form)
@blueprint.route('/logout/')
@login_required
def logout():
"""Logout."""
logout_user()
flash('You are logged out.', 'info')
return redirect(url_for('public.home'))
@blueprint.route('/register/', methods=['GET', 'POST'])
def register():
"""Register new user."""
form = RegisterForm(request.form)
if form.validate_on_submit():
User.create(employee=form.employee.data, username=form.username.data,
password=form.password.data, dept_id=form.dept_id.data, active=True)
flash('Thank you for registering. You can now log in.', 'success')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template('public/register.html', form=form)
@blueprint.route('/about/')
def about():
"""About page."""
form = LoginForm(request.form)
return render_template('public/about.html', form=form)
|
py | b412ef4c490137fddb97253f91491b8c902d58fc | import sympy
import sys
import unittest
import sophus
import functools
class So3:
""" 3 dimensional group of orthogonal matrices with determinant 1 """
def __init__(self, q):
""" internally represented by a unit quaternion q """
self.q = q
@staticmethod
def exp(v):
""" exponential map """
theta_sq = sophus.squared_norm(v)
theta = sympy.sqrt(theta_sq)
return So3(
sophus.Quaternion(
sympy.cos(0.5 * theta),
sympy.sin(0.5 * theta) / theta * v))
def log(self):
""" logarithmic map"""
n = sympy.sqrt(sophus.squared_norm(self.q.vec))
return 2 * sympy.atan(n / self.q.real) / n * self.q.vec
def __repr__(self):
return "So3:" + repr(self.q)
@staticmethod
def hat(o):
return sympy.Matrix([[0, -o[2], o[1]],
[o[2], 0, -o[0]],
[-o[1], o[0], 0]])
"""vee-operator
It takes the 3x3-matrix representation ``Omega`` and maps it to the
corresponding vector representation of Lie algebra.
This is the inverse of the hat-operator, see above.
Precondition: ``Omega`` must have the following structure:
| 0 -c b |
| c 0 -a |
| -b a 0 |
"""
@staticmethod
def vee(Omega):
v = sophus.Vector3(Omega.row(2).col(1), Omega.row(0).col(2), Omega.row(1).col(0))
return v
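    # Editor's illustrative sketch (not part of the original library): hat and
    # vee are inverse maps, so the symbolic round trip below is expected to
    # reproduce the input vector.
    @staticmethod
    def _hat_vee_roundtrip_example():
        """Hypothetical helper demonstrating that vee() inverts hat()."""
        a, b, c = sympy.symbols('a b c', real=True)
        v = sophus.Vector3(a, b, c)
        return So3.vee(So3.hat(v))  # expected to equal v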
def matrix(self):
""" returns matrix representation """
return sympy.Matrix([[
1 - 2 * self.q.vec[1]**2 - 2 * self.q.vec[2]**2,
2 * self.q.vec[0] * self.q.vec[1] -
2 * self.q.vec[2] * self.q[3],
2 * self.q.vec[0] * self.q.vec[2] +
2 * self.q.vec[1] * self.q[3]
], [
2 * self.q.vec[0] * self.q.vec[1] +
2 * self.q.vec[2] * self.q[3],
1 - 2 * self.q.vec[0]**2 - 2 * self.q.vec[2]**2,
2 * self.q.vec[1] * self.q.vec[2] -
2 * self.q.vec[0] * self.q[3]
], [
2 * self.q.vec[0] * self.q.vec[2] -
2 * self.q.vec[1] * self.q[3],
2 * self.q.vec[1] * self.q.vec[2] +
2 * self.q.vec[0] * self.q[3],
1 - 2 * self.q.vec[0]**2 - 2 * self.q.vec[1]**2
]])
def __mul__(self, right):
""" left-multiplication
either rotation concatenation or point-transform """
if isinstance(right, sympy.Matrix):
assert right.shape == (3, 1), right.shape
return (self.q * sophus.Quaternion(0, right) * self.q.conj()).vec
elif isinstance(right, So3):
return So3(self.q * right.q)
assert False, "unsupported type: {0}".format(type(right))
def __getitem__(self, key):
return self.q[key]
@staticmethod
def calc_Dx_exp_x(x):
return sympy.Matrix(4, 3, lambda r, c:
sympy.diff(So3.exp(x)[r], x[c]))
@staticmethod
def Dx_exp_x_at_0():
return sympy.Matrix([[0.5, 0.0, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.0]])
@staticmethod
def calc_Dx_exp_x_at_0(x):
return So3.calc_Dx_exp_x(x).subs(x[0], 0).subs(x[1], 0).limit(x[2], 0)
def calc_Dx_this_mul_exp_x_at_0(self, x):
return sympy.Matrix(4, 3, lambda r, c:
sympy.diff((self * So3.exp(x))[r], x[c]))\
.subs(x[0], 0).subs(x[1], 0).limit(x[2], 0)
def calc_Dx_exp_x_mul_this_at_0(self, x):
return sympy.Matrix(3, 4, lambda r, c:
sympy.diff((self * So3.exp(x))[c], x[r, 0]))\
.subs(x[0], 0).subs(x[1], 0).limit(x[2], 0)
@staticmethod
def Dxi_x_matrix(x, i):
if i == 0:
return sympy.Matrix([[0, 2 * x[1], 2 * x[2]],
[2 * x[1], -4 * x[0], -2 * x[3]],
[2 * x[2], 2 * x[3], -4 * x[0]]])
if i == 1:
return sympy.Matrix([[-4 * x[1], 2 * x[0], 2 * x[3]],
[2 * x[0], 0, 2 * x[2]],
[-2 * x[3], 2 * x[2], -4 * x[1]]])
if i == 2:
return sympy.Matrix([[-4 * x[2], -2 * x[3], 2 * x[0]],
[2 * x[3], -4 * x[2], 2 * x[1]],
[2 * x[0], 2 * x[1], 0]])
if i == 3:
return sympy.Matrix([[0, -2 * x[2], 2 * x[1]],
[2 * x[2], 0, -2 * x[0]],
[-2 * x[1], 2 * x[0], 0]])
@staticmethod
def calc_Dxi_x_matrix(x, i):
return sympy.Matrix(3, 3, lambda r, c:
sympy.diff(x.matrix()[r, c], x[i]))
@staticmethod
def Dxi_exp_x_matrix(x, i):
R = So3.exp(x)
Dx_exp_x = So3.calc_Dx_exp_x(x)
l = [Dx_exp_x[j, i] * So3.Dxi_x_matrix(R, j) for j in [0, 1, 2, 3]]
return functools.reduce((lambda a, b: a + b), l)
@staticmethod
def calc_Dxi_exp_x_matrix(x, i):
return sympy.Matrix(3, 3, lambda r, c:
sympy.diff(So3.exp(x).matrix()[r, c], x[i]))
@staticmethod
def Dxi_exp_x_matrix_at_0(i):
v = sophus.ZeroVector3()
v[i] = 1
return So3.hat(v)
@staticmethod
def calc_Dxi_exp_x_matrix_at_0(x, i):
return sympy.Matrix(3, 3, lambda r, c:
sympy.diff(So3.exp(x).matrix()[r, c], x[i])
).subs(x[0], 0).subs(x[1], 0).limit(x[2], 0)
class TestSo3(unittest.TestCase):
def setUp(self):
omega0, omega1, omega2 = sympy.symbols(
'omega[0], omega[1], omega[2]', real=True)
x, v0, v1, v2 = sympy.symbols('q.w() q.x() q.y() q.z()', real=True)
p0, p1, p2 = sympy.symbols('p0 p1 p2', real=True)
v = sophus.Vector3(v0, v1, v2)
self.omega = sophus.Vector3(omega0, omega1, omega2)
self.a = So3(sophus.Quaternion(x, v))
self.p = sophus.Vector3(p0, p1, p2)
def test_exp_log(self):
for o in [sophus.Vector3(0., 1, 0.5),
sophus.Vector3(0.1, 0.1, 0.1),
sophus.Vector3(0.01, 0.2, 0.03)]:
w = So3.exp(o).log()
for i in range(0, 3):
self.assertAlmostEqual(o[i], w[i])
def test_matrix(self):
R_foo_bar = So3.exp(self.omega)
Rmat_foo_bar = R_foo_bar.matrix()
point_bar = self.p
p1_foo = R_foo_bar * point_bar
p2_foo = Rmat_foo_bar * point_bar
self.assertEqual(sympy.simplify(p1_foo - p2_foo),
sophus.ZeroVector3())
def test_derivatives(self):
self.assertEqual(sympy.simplify(So3.calc_Dx_exp_x_at_0(self.omega) -
So3.Dx_exp_x_at_0()),
sympy.Matrix.zeros(4, 3))
for i in [0, 1, 2, 3]:
self.assertEqual(sympy.simplify(So3.calc_Dxi_x_matrix(self.a, i) -
So3.Dxi_x_matrix(self.a, i)),
sympy.Matrix.zeros(3, 3))
for i in [0, 1, 2]:
self.assertEqual(sympy.simplify(
So3.Dxi_exp_x_matrix(self.omega, i) -
So3.calc_Dxi_exp_x_matrix(self.omega, i)),
sympy.Matrix.zeros(3, 3))
self.assertEqual(sympy.simplify(
So3.Dxi_exp_x_matrix_at_0(i) -
So3.calc_Dxi_exp_x_matrix_at_0(self.omega, i)),
sympy.Matrix.zeros(3, 3))
def test_codegen(self):
stream = sophus.cse_codegen(So3.calc_Dx_exp_x(self.omega))
filename = "cpp_gencode/So3_Dx_exp_x.cpp"
# set to true to generate codegen files
if False:
file = open(filename, "w")
for line in stream:
file.write(line)
file.close()
else:
file = open(filename, "r")
file_lines = file.readlines()
for i, line in enumerate(stream):
self.assertEqual(line, file_lines[i])
file.close()
        stream.close()
stream = sophus.cse_codegen(
self.a.calc_Dx_this_mul_exp_x_at_0(self.omega))
filename = "cpp_gencode/So3_Dx_this_mul_exp_x_at_0.cpp"
# set to true to generate codegen files
if False:
file = open(filename, "w")
for line in stream:
file.write(line)
file.close()
else:
file = open(filename, "r")
file_lines = file.readlines()
for i, line in enumerate(stream):
self.assertEqual(line, file_lines[i])
file.close()
        stream.close()
if __name__ == '__main__':
unittest.main()
|
py | b412ef6d176ebd62a6304212a9634cba9b3609fc | from __future__ import unicode_literals
try:
from backports import configparser
except ImportError: # pragma: no cover
import configparser
import re
class EfficientParsingError(configparser.ParsingError):
def append(self, lineno, line):
"""
Rather than inefficiently add all the lines in the file
to the error message like the CPython code from 1998.
We just `return` because we will catch and `pass`
the exception in `high_entropy_strings.py` anyway.
"""
return
configparser.ParsingError = EfficientParsingError
class IniFileParser(object):
_comment_regex = re.compile(r'\s*[;#]')
def __init__(self, file, add_header=False, exclude_lines_regex=None):
"""
:type file: file object
:type add_header: bool
:param add_header: whether or not to add a top-level [global] header.
:type exclude_lines_regex: regex object
:param exclude_lines_regex: optional regex for ignored lines.
"""
self.parser = configparser.ConfigParser()
try:
# python2.7 compatible
self.parser.optionxform = unicode
except NameError: # pragma: no cover
# python3 compatible
self.parser.optionxform = str
self.exclude_lines_regex = exclude_lines_regex
content = file.read()
if add_header:
# This supports environment variables, or other files that look
# like config files, without a section header.
content = '[global]\n' + content
try:
# python2.7 compatible
self.parser.read_string(unicode(content))
except NameError: # pragma: no cover
# python3 compatible
self.parser.read_string(content)
# Hacky way to keep track of line location
file.seek(0)
self.lines = [line.strip() for line in file.readlines()]
self.line_offset = 0
def iterator(self):
if not self.parser.sections():
# To prevent cases where it's not an ini file, but the parser
# helpfully attempts to parse everything to a DEFAULT section,
# when not explicitly provided.
raise configparser.Error
for section_name, _ in self.parser.items():
for key, values in self.parser.items(section_name):
for value, offset in self._get_value_and_line_offset(
key,
values,
):
yield value, offset
def _get_value_and_line_offset(self, key, values):
"""Returns the index of the location of key, value pair in lines.
:type key: str
:param key: key, in config file.
:type values: str
:param values: values for key, in config file. This is plural,
because you can have multiple values per key. e.g.
>>> key =
... value1
... value2
        (Matches are located by scanning ``self.lines``, the lines-so-far in the file.)
:rtype: list(tuple)
"""
values_list = self._construct_values_list(values)
if not values_list:
return []
current_value_list_index = 0
output = []
lines_modified = False
for index, line in enumerate(self.lines):
# Check ignored lines before checking values, because
# you can write comments *after* the value.
if not line.strip() or self._comment_regex.match(line):
continue
if (
self.exclude_lines_regex and
self.exclude_lines_regex.search(line)
):
continue
if current_value_list_index == 0:
first_line_regex = re.compile(r'^\s*{}[ :=]+{}'.format(
re.escape(key),
re.escape(values_list[current_value_list_index]),
))
if first_line_regex.match(line):
output.append((
values_list[current_value_list_index],
self.line_offset + index + 1,
))
current_value_list_index += 1
continue
if current_value_list_index == len(values_list):
if index == 0:
index = 1 # don't want to count the same line again
self.line_offset += index
self.lines = self.lines[index:]
lines_modified = True
break
else:
output.append((
values_list[current_value_list_index],
self.line_offset + index + 1,
))
current_value_list_index += 1
if not lines_modified:
# No more lines left, if loop was not explicitly left.
self.lines = []
return output
@staticmethod
def _construct_values_list(values):
"""
This values_list is a strange construction, because of ini format.
We need to extract the values with the following supported format:
>>> key = value0
... value1
...
... # comment line here
... value2
given that normally, either value0 is supplied, or (value1, value2),
but still allowing for all three at once.
Furthermore, with the configparser, we will get a list of values,
and intermediate blank lines, but no comments. This means that we can't
merely use the count of values' items to heuristically "skip ahead" lines,
because we still have to manually parse through this.
Therefore, we construct the values_list in the following fashion:
1. Keep the first value (in the example, this is `value0`)
2. For all other values, ignore blank lines.
Then, we can parse through, and look for values only.
"""
lines = values.splitlines()
values_list = lines[:1]
values_list.extend(filter(None, lines[1:]))
return values_list
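# Editor's note: a minimal worked example of the helper above; the values
# string is hypothetical. configparser returns multi-line values with
# intermediate blank lines, and the helper keeps the first line while
# filtering out the blanks.
def _construct_values_list_example():
    """Editor's illustrative sketch, not part of the original module."""
    values = 'value0\nvalue1\n\nvalue2'
    return IniFileParser._construct_values_list(values)  # ['value0', 'value1', 'value2']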
|
py | b412ef909bcc60bf8dd4951ada6aa62c53a815dc | #!/usr/bin/env python3
import pacmanBFS
import escape
import pygame
block_size = 50
class Game :
def __init__(self, time, filename = 'pacmanMatrix'):
fp = open(filename,'r')
data = fp.readlines()
self.matrix = []
self.row = len(data)
self.col = None
self.score = 0
self.ghost = ()
self.time = time
self.over = False
self.win = False
self.pac = ()
self.fruit = ()
self.fruitOrg = ()
i = 0
for line in data:
self.col = len(line) - 1
self.matrix.append([])
j = 0
for j in range(len(line) - 1):
self.matrix[i].append(' ')
if line[j] == 'p':
self.pac = (i,j)
elif line[j] == 'g':
self.ghost += ((i,j),)
elif line[j] == '#':
self.matrix[i][j] = '#'
elif line[j] == 'f':
#self.matrix[i][j] = 'f'
self.fruit += ((i,j),)
i += 1
self.fruitOrg = self.fruit
def display(self):
s = ''
for i in range(self.row):
for j in range(self.col):
s += self.matrix[i][j]
s += '\n'
print(s)
def valid(self, i, j):
if i >= 0 and i < self.row and j >= 0 and j < self.col:
if self.matrix[i][j] != '#':
return True
return False
return False
def eat(self):
fruit = ()
ghost = ()
win = False
game_over = False
for i,j in self.fruit:
if (i,j) == self.pac:
self.time = 40
else:
fruit += ((i,j),)
self.fruit = fruit
for i,j in self.ghost:
if (i,j) == self.pac:
if self.time == 0:
win = False
game_over = True
ghost += ((i,j),)
else:
ghost += ((i,j),)
self.ghost = ghost
if len(self.ghost) == 0:
win = True
game_over = True
self.over = game_over
self.win = win
def lose(self):
        if self.win:
print('You Won')
print('Your time = ', self.score)
else:
print('Game Over')
class window:
def __init__(self, row, col):
pygame.init()
self.background = (33, 33, 33)
self.clock = pygame.time.Clock()
self.screen = pygame.display.set_mode((col * block_size, row * block_size))
pygame.display.set_caption('Pacman')
self.store = {}
# Left Pacman
image = pygame.image.load('Images/pac.jpg')
image = pygame.transform.scale(image, (block_size, block_size))
self.store['pac_left1'] = image
image = pygame.image.load('Images/pac1.jpg')
image = pygame.transform.scale(image, (block_size, block_size))
self.store['pac_left2'] = image
# Right Pacman
image = pygame.image.load('Images/pac_right.jpg')
image = pygame.transform.scale(image, (block_size, block_size))
self.store['pac_right1'] = image
image = pygame.image.load('Images/pac1_right.jpg')
image = pygame.transform.scale(image, (block_size, block_size))
self.store['pac_right2'] = image
# Fruit
image = pygame.image.load('Images/fruit.jpg')
image = pygame.transform.scale(image, (block_size, block_size))
self.store['fruit'] = image
# Obstacle
image = pygame.image.load('Images/block.jpg')
image = pygame.transform.scale(image, (block_size, block_size))
self.store['block'] = image
# Ghost
image = pygame.image.load('Images/red_ghost.jpg')
image = pygame.transform.scale(image, (block_size, block_size))
self.store['red_ghost'] = image
image = pygame.image.load('Images/cold_ghost.jpg')
image = pygame.transform.scale(image, (block_size, block_size))
self.store['cold_ghost'] = image
# Game Over
image = pygame.image.load('Images/game_over.jpg')
#image = pygame.transform.scale(image, (block_size, block_size))
self.store['game_over'] = image
# Win
image = pygame.image.load('Images/win.jpg')
#image = pygame.transform.scale(image, (block_size, block_size))
self.store['win'] = image
# start screen
image = pygame.image.load('Images/pacman.jpeg')
image = pygame.transform.scale(image, (col * block_size, row * block_size))
self.store['start'] = image
def display(self, matrix, time, direct, even, pac, ghost, fruit):
self.screen.fill(self.background)
row = len(matrix)
col = len(matrix[0])
for i in range(row):
for j in range(col):
if matrix[i][j] == ' ':
continue
if matrix[i][j] == '#':
self.screen.blit(self.store['block'], (block_size * j, block_size * i))
#else :
# self.screen.blit(self.store['fruit'], (block_size * j, block_size * i))
for i, j in fruit:
coord = (block_size * j, block_size * i)
self.screen.blit(self.store['fruit'], coord)
# Ghost and Pacman
coord = (block_size * pac[1], block_size * pac[0])
if direct == 0:
if even:
self.screen.blit(self.store['pac_left1'], coord)
else:
self.screen.blit(self.store['pac_left2'], coord)
else:
if even:
self.screen.blit(self.store['pac_right1'], coord)
else:
self.screen.blit(self.store['pac_right2'], coord)
for i, j in ghost:
coord = (block_size * j, block_size * i)
if time > 8 or time % 2:
self.screen.blit(self.store['cold_ghost'], coord)
else:
self.screen.blit(self.store['red_ghost'], coord)
'''
move = 0 -> up
move = 1 -> down
move = 2 -> left
move = 3 -> right
'''
def main() :
G = Game(40)
Win = window(G.row, G.col)
game = True
direct = 0
move = -1
delay = 2
r, c = G.pac
even = 0
Win.screen.blit(Win.store['start'], (0, 0))
pygame.display.flip()
Win.clock.tick(0.5)
while game:
#Win.display(G.matrix, G.time, direct, even, G.pac, G.ghost)
for event in pygame.event.get():
if event.type == pygame.QUIT:
game = False
pressed = pygame.key.get_pressed()
if pressed[pygame.K_UP] and G.valid(G.pac[0] - 1, G.pac[1]):
move = 0
elif pressed[pygame.K_DOWN] and G.valid(G.pac[0] + 1, G.pac[1]):
move = 1
elif pressed[pygame.K_LEFT] and G.valid(G.pac[0], G.pac[1] - 1):
move = 2
direct = 0
elif pressed[pygame.K_RIGHT] and G.valid(G.pac[0], G.pac[1] + 1):
move = 3
direct = 1
if move == 0 and G.valid(G.pac[0] - 1, G.pac[1]):
r = max(r - 1, 1)
elif move == 1 and G.valid(G.pac[0] + 1,G.pac[1]):
r = min(r + 1, G.row - 2)
elif move == 2 and G.valid(G.pac[0], G.pac[1] - 1):
c = max(c - 1, 1)
elif move == 3 and G.valid(G.pac[0], G.pac[1] + 1):
c = min(c + 1,G.col - 2)
G.matrix[G.pac[0]][G.pac[1]] = ' '
for i,j in G.ghost:
G.matrix[i][j] = ' '
G.pac = (r, c)
G.eat()
if G.over == True:
Win.display(G.matrix, G.time, direct, even, G.pac, G.ghost, G.fruit)
break
if delay % 2:
if G.time:
G.ghost = escape.BFS(G.matrix, G.ghost, G.row, G.col, G.pac)
else:
G.ghost = pacmanBFS.BFS(G.matrix, G.ghost, G.row, G.col, G.pac)
delay = (delay + 1) % 2
even = (even + 1) % 2
Win.display(G.matrix, G.time, direct, even, G.pac, G.ghost, G.fruit)
G.eat()
if G.over == True:
Win.display(G.matrix, G.time, direct, even, G.pac, G.ghost, G.fruit)
break
if len(G.fruit) == 0:
G.fruit = G.fruitOrg
G.score += 1
G.time = max(0,G.time - 1)
Win.clock.tick(6)
pygame.display.flip()
if G.win == False:
Win.screen.blit(Win.store['game_over'], ((G.col // 2 - 2) * block_size, (G.row // 2 - 1) * block_size))
else:
Win.screen.blit(Win.store['win'], ((G.col // 2 - 2) * block_size, (G.row // 2 - 1) * block_size))
pygame.display.flip()
Win.clock.tick(1)
if __name__ == '__main__':
main()
|
py | b412f0a7533a969e4949994e712e792f61ec679f | import fcntl
import os
import pytest
from crl.interactivesessions.shells.remotemodules.servercomm import ServerComm
@pytest.fixture
def infd(tmpdir):
infile = tmpdir.join('infile')
infile.write('content-of-infile')
with open(str(infile)) as f:
yield f.fileno()
@pytest.fixture
def outfile(tmpdir):
outfile = tmpdir.join('outfile')
with open(str(outfile), 'w') as f:
yield f
def test_servercomm_blocking_states(infd, outfile):
s = ServerComm(infd, outfile)
infl = fcntl.fcntl(s.infd, fcntl.F_GETFL)
assert infl & os.O_NONBLOCK
outfl = fcntl.fcntl(s.outfile.fileno(), fcntl.F_GETFL)
assert not outfl & os.O_NONBLOCK
|
py | b412f106db13dc555b25e09574e8eda20c30a8a4 | import setuptools
with open("README.md") as fp:
long_description = fp.read()
setuptools.setup(
name="lab07",
version="0.0.1",
description="An empty CDK Python app",
long_description=long_description,
long_description_content_type="text/markdown",
author="author",
package_dir={"": "lab07"},
packages=setuptools.find_packages(where="lab07"),
install_requires=[
"aws-cdk.core==1.34.1",
],
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Code Generators",
"Topic :: Utilities",
"Typing :: Typed",
],
)
|
py | b412f13800868c49c672479069d47d860309e313 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Rackspace
# flake8: noqa
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenStack Swift Python client binding.
"""
from .client import *
# At setup.py time, we haven't installed anything yet, so there
# is nothing that is able to set this version property. Squelching
# that exception here should be fine- if there are problems with
# pkg_resources in a real install, that will manifest itself as
# an error still
try:
from swiftclient import version
__version__ = version.version_string
except Exception:
pass
|
py | b412f382d3b4ec09d0f62e5f6c08d1fcc8dde9f0 | from __future__ import print_function
from types import MethodType
import torch
import torch.nn as nn
import models
from utils.metric import accuracy, AverageMeter, Timer
class NormalNN(nn.Module):
'''
Normal Neural Network with SGD for classification
'''
def __init__(self, agent_config):
'''
:param agent_config (dict): lr=float,momentum=float,weight_decay=float,
schedule=[int], # The last number in the list is the end of epoch
model_type=str,model_name=str,out_dim={task:dim},model_weights=str
force_single_head=bool
print_freq=int
gpuid=[int]
'''
super(NormalNN, self).__init__()
self.log = print if agent_config['print_freq'] > 0 else lambda \
*args: None # Use a void function to replace the print
self.config = agent_config
# If out_dim is a dict, there is a list of tasks. The model will have a head for each task.
self.multihead = True if len(
self.config['out_dim']) > 1 else False # A convenience flag to indicate multi-head/task
self.model = self.create_model()
self.criterion_fn = nn.CrossEntropyLoss()
if agent_config['gpuid'][0] >= 0:
self.cuda()
self.gpu = True
else:
self.gpu = False
self.init_optimizer()
self.reset_optimizer = False
self.valid_out_dim = 'ALL' # Default: 'ALL' means all output nodes are active
        # Set an integer here for the incremental class scenario
def init_optimizer(self):
optimizer_arg = {'params': self.model.parameters(),
'lr': self.config['lr'],
'weight_decay': self.config['weight_decay']}
if self.config['optimizer'] in ['SGD', 'RMSprop']:
optimizer_arg['momentum'] = self.config['momentum']
elif self.config['optimizer'] in ['Rprop']:
optimizer_arg.pop('weight_decay')
elif self.config['optimizer'] == 'amsgrad':
optimizer_arg['amsgrad'] = True
self.config['optimizer'] = 'Adam'
self.optimizer = torch.optim.__dict__[self.config['optimizer']](**optimizer_arg)
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=self.config['schedule'],
gamma=0.1)
def create_model(self):
cfg = self.config
# Define the backbone (MLP, LeNet, VGG, ResNet ... etc) of model
model = models.__dict__[cfg['model_type']].__dict__[cfg['model_name']]()
# Apply network surgery to the backbone
# Create the heads for tasks (It can be single task or multi-task)
try:
n_feat = model.last.in_features
except AttributeError:
n_feat = model.fc.in_features
# The output of the model will be a dict: {task_name1:output1, task_name2:output2 ...}
# For a single-headed model the output will be {'All':output}
model.last = nn.ModuleDict()
for task, out_dim in cfg['out_dim'].items():
model.last[task] = nn.Linear(n_feat, out_dim)
# Redefine the task-dependent function
def new_logits(self, x):
outputs = {}
for task, func in self.last.items():
outputs[task] = func(x)
return outputs
# Replace the task-dependent function
model.logits = MethodType(new_logits, model)
# Load pre-trained weights
if cfg['model_weights'] is not None:
print('=> Load model weights:', cfg['model_weights'])
model_state = torch.load(cfg['model_weights'],
map_location=lambda storage, loc: storage) # Load to CPU.
model.load_state_dict(model_state)
print('=> Load Done')
return model
def forward(self, x):
return self.model.forward(x)
def predict(self, inputs):
self.model.eval()
out = self.forward(inputs)
for t in out.keys():
out[t] = out[t].detach()
return out
def validation(self, dataloader, no_task=False):
# This function doesn't distinguish tasks.
if no_task:
print("using no task ")
batch_timer = Timer()
acc = AverageMeter()
batch_timer.tic()
orig_mode = self.training
self.eval()
for i, (input, target, task) in enumerate(dataloader):
if self.gpu:
with torch.no_grad():
input = input.cuda()
target = target.cuda()
output = self.predict(input)
# Summarize the performance of all tasks, or 1 task, depends on dataloader.
# Calculated by total number of data.
if no_task:
acc = accumulate_acc(output, target, task, acc, guess_task=True)
else:
acc = accumulate_acc(output, target, task, acc)
self.train(orig_mode)
self.log(' * Val Acc {acc.avg:.3f}, Total time {time:.2f}'
.format(acc=acc, time=batch_timer.toc()))
return acc.avg
def criterion(self, preds, targets, tasks, **kwargs):
# The inputs and targets could come from single task or a mix of tasks
# The network always makes the predictions with all its heads
# The criterion will match the head and task to calculate the loss.
if self.multihead:
loss = 0
for t, t_preds in preds.items():
inds = [i for i in range(len(tasks)) if tasks[i] == t] # The index of inputs that matched specific task
if len(inds) > 0:
t_preds = t_preds[inds]
t_target = targets[inds]
loss += self.criterion_fn(t_preds, t_target) * len(inds) # restore the loss from average
loss /= len(targets) # Average the total loss by the mini-batch size
else:
pred = preds['All']
if isinstance(self.valid_out_dim,
int): # (Not 'ALL') Mask out the outputs of unseen classes for incremental class scenario
pred = preds['All'][:, :self.valid_out_dim]
loss = self.criterion_fn(pred, targets)
return loss
def update_model(self, inputs, targets, tasks):
out = self.forward(inputs)
loss = self.criterion(out, targets, tasks)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss.detach(), out
def learn_batch(self, train_loader, num_batches, val_loader=None):
"""this is the learning loop within a dataloader"""
if self.reset_optimizer: # Reset optimizer before learning each task
self.log('Optimizer is reset!')
self.init_optimizer()
for epoch in range(self.config['schedule'][-1]):
data_timer = Timer()
batch_timer = Timer()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acc = AverageMeter()
# Config the model and optimizer
self.log('Epoch:{0}'.format(epoch))
self.model.train()
for param_group in self.optimizer.param_groups:
self.log('LR:', param_group['lr'])
# Learning with mini-batch
data_timer.tic()
batch_timer.tic()
self.log('Itr\t\tTime\t\t Data\t\t Loss\t\tAcc')
for i, (input, target, task) in zip(range(num_batches), train_loader):
data_time.update(data_timer.toc()) # measure data loading time
if self.gpu:
input = input.cuda()
target = target.cuda()
loss, output = self.update_model(input, target, task)
input = input.detach()
target = target.detach()
# measure accuracy and record loss
acc = accumulate_acc(output, target, task, acc)
losses.update(loss, input.size(0))
batch_time.update(batch_timer.toc()) # measure elapsed time
data_timer.toc()
if ((self.config['print_freq'] > 0) and (i % self.config['print_freq'] == 0)) or (i + 1) == num_batches:
self.log('[{0}/{1}]\t'
'{batch_time.val:.4f} ({batch_time.avg:.4f})\t'
'{data_time.val:.4f} ({data_time.avg:.4f})\t'
'{loss.val:.3f} ({loss.avg:.3f})\t'
'{acc.val:.2f} ({acc.avg:.2f})'.format(
i, num_batches, batch_time=batch_time,
data_time=data_time, loss=losses, acc=acc))
self.log(' * Train Acc {acc.avg:.3f}'.format(acc=acc))
# Evaluate the performance of current task
            if val_loader is not None:
self.validation(val_loader)
self.scheduler.step()
def learn_stream(self, data, label):
assert False, 'No implementation yet'
def add_valid_output_dim(self, dim=0):
# This function is kind of ad-hoc, but it is the simplest way to support incremental class learning
self.log('Incremental class: Old valid output dimension:', self.valid_out_dim)
if self.valid_out_dim == 'ALL':
self.valid_out_dim = 0 # Initialize it with zero
self.valid_out_dim += dim
self.log('Incremental class: New Valid output dimension:', self.valid_out_dim)
return self.valid_out_dim
def count_parameter(self):
return sum(p.numel() for p in self.model.parameters())
def save_model(self, filename):
model_state = self.model.state_dict()
if isinstance(self.model, torch.nn.DataParallel):
# Get rid of 'module' before the name of states
model_state = self.model.module.state_dict()
for key in model_state.keys(): # Always save it to cpu
model_state[key] = model_state[key].cpu()
print('=> Saving model to:', filename)
torch.save(model_state, filename + '.pth')
print('=> Save Done')
def cuda(self):
torch.cuda.set_device(self.config['gpuid'][0])
self.model = self.model.cuda()
self.criterion_fn = self.criterion_fn.cuda()
# Multi-GPU
if len(self.config['gpuid']) > 1:
self.model = torch.nn.DataParallel(self.model, device_ids=self.config['gpuid'],
output_device=self.config['gpuid'][0])
return self
def _guess_task(output):
task_names = tuple(output.keys())
task = []
for group_predictions in zip(*output.values()):
max_index = int(torch.argmax(torch.Tensor([x.max() for x in group_predictions])))
task.append(task_names[max_index])
return task
def _str_equal(string_list1, string_list2):
total = len(string_list1)
right = 0
for l1, l2 in zip(string_list1, string_list2):
if l1 == l2:
right += 1
return float(right / total)
def accumulate_acc(output, target, task, meter, guess_task=False):
def update_meter(output, target, task, meter):
for t, t_out in output.items():
inds = [i for i in range(len(task)) if task[i] == t] # The index of inputs that matched specific task
if len(inds) > 0:
t_out = t_out[inds]
t_target = target[inds]
meter.update(accuracy(t_out, t_target), len(inds))
if 'All' in output.keys(): # Single-headed model
meter.update(accuracy(output['All'], target), len(target))
else: # outputs from multi-headed (multi-task) model
if not guess_task:
update_meter(output, target, task, meter)
else: # you must guess the task
new_task = _guess_task(output)
print(f"guess task acc: {_str_equal(new_task, task):.2f}")
update_meter(output, target, new_task, meter)
return meter
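# Editor's sketch: a hypothetical agent_config for NormalNN, assembled from the
# keys read in __init__/init_optimizer above. The model/optimizer names below
# are assumptions for illustration only.
def _example_agent_config():
    return {
        'lr': 0.01,
        'momentum': 0.9,
        'weight_decay': 0.0,
        'schedule': [2],          # last entry is the final epoch
        'optimizer': 'SGD',
        'model_type': 'mlp',      # hypothetical key in models.__dict__
        'model_name': 'MLP100',   # hypothetical key in models.mlp.__dict__
        'out_dim': {'All': 10},   # single-headed model
        'model_weights': None,
        'print_freq': 100,
        'gpuid': [-1],            # CPU only
    }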
|
py | b412f45122f8b9957bc5f0187248f8960192634a | import numpy
# FIXME
IntType = numpy.dtype(numpy.int32)
RealType = numpy.dtype(numpy.float64)
ScalarType = numpy.dtype(numpy.float64)
|
py | b412f6970364252469fc40e35e51edbe88fc193a | """
Database Connector class that provides a context manager for database connections
"""
import logging
from typing import Dict, Any, List, Optional
from sqlalchemy import create_engine, engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker, Session
from sqlalchemy.sql import text
from .config import DatabaseConnectionConfig
BASE_MODEL = declarative_base()
class DatabaseConnection:
"""
Connects to a given database, returning a session in a context manager, and runs a given query
"""
connections = {}
def __init__(self, db_connection_config: DatabaseConnectionConfig):
self.db_uri = db_connection_config.db_uri
self.db_connection_config = db_connection_config
self.db_session: Optional[Session] = None
if db_connection_config.db_uri.startswith('sqlite'):
self.connection_engine: engine.Engine = create_engine(self.db_uri)
else:
self.connection_engine: engine.Engine = create_engine(
self.db_uri,
pool_size=self.db_connection_config.pool_size,
max_overflow=self.db_connection_config.pool_size + 5,
pool_timeout=self.db_connection_config.pool_timeout,
)
DatabaseConnection.connections[self.db_uri] = self
def __enter__(self):
"""
Creates a scoped session for the database that is thread-safe for reads
"""
session = sessionmaker(
bind=self.connection_engine,
autocommit=self.db_connection_config.autocommit,
autoflush=self.db_connection_config.autocommit,
expire_on_commit=False,
)
self.db_session = scoped_session(session)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
This runs at the end of the 'with' statement as a way of cleaning up
"""
if not self.db_connection_config.autocommit:
# Attempt to commit any changes
self.db_session.commit()
self.db_session.close()
def close(self):
"""
Closes the db connection engine
"""
try:
self.connection_engine.dispose()
logging.info(f'Successfully closed connection {self.db_uri}')
except Exception as exp:
logging.warning(str(exp))
def execute_sql(self, sql: str, params: Dict[str, Any] = {}) -> engine.ResultProxy:
"""
Executes the sql passed as a parameter
"""
return self.db_session.execute(text(sql), params=params)
@classmethod
def get_db_connection(cls, db_connection_config: DatabaseConnectionConfig):
"""
Returns an instance of this class that has the given db_uri or if none, it creates a new one
"""
db_connection = cls.connections.get(db_connection_config.db_uri, None)
if db_connection is None:
db_connection = cls(db_connection_config=db_connection_config)
return db_connection
@classmethod
def close_all_connections(cls):
"""
Closes all connections
"""
for _, db_connection in cls.connections.items():
db_connection.close()
@classmethod
def open_connections(cls, db_configs: List[DatabaseConnectionConfig]):
"""
Opens database connections if they are not open
"""
for db_config in db_configs:
cls.get_db_connection(db_connection_config=db_config)
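# Editor's sketch: hypothetical usage of the context manager above. The exact
# fields of DatabaseConnectionConfig come from .config and are assumed here;
# the query is illustrative only.
def _example_select_one(db_config: DatabaseConnectionConfig):
    # Reuses (or opens) the pooled connection for db_config.db_uri, runs a
    # trivial statement, and lets __exit__ commit and close the scoped session.
    with DatabaseConnection.get_db_connection(db_config) as connection:
        return connection.execute_sql('SELECT 1 AS one').fetchall()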
|
py | b412f74be8bb7dac34066079d3a515ead1cc1f72 | @Setup
def setup(ctx):
ctx["declare"] = True;
@Token("T_STRING")
def T_STRING(string, ctx):
    if ctx["declare"] == False:
ctx["target"] = string;
return string;
@Token("T_PRODUCER")
def T_PRODUCER(prod, ctx): return prod;
@Token("T_SPLIT")
def T_SPLIT(split, ctx):
ctx["declare"] = False;
return split;
@Pattern(["T_STRING", "T_PRODUCER", "T_STRING"])
def S_SUB(string1, prod, string2, ctx):
return "replacements[\"" + string1 + "\"] = " + string2 + ";";
@Pattern(["S_SUB"])
def S_DEF_SINGLE(sub, ctx): return sub;
@Pattern(["S_SUB", "S_DEF"])
def S_DEF_RECURSE(sub, sdef, ctx):
return sub + "\n" + sdef;
|
py | b412f75b24f0e43a4fab9a13b960f37104fdfde3 | import os
import keras
import sys
import time
import numpy as np
from keras.models import Sequential
from keras.layers import Activation, GlobalAveragePooling2D
from keras.layers.core import Dense, Dropout, Flatten
from keras.optimizers import Adam, SGD
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import *
from keras.utils import Sequence
import cv2
import matplotlib.pyplot as plt
from pyIGTLink import pyIGTLink
# Parameters
image_size = 128
# Check command line arguments
if len(sys.argv) < 2:
print("Usage: {} WEIGHTS_FILE".format(sys.argv[0]))
sys.exit()
weights_file_name = sys.argv[1]
print("Loading weights from: {}".format(weights_file_name))
# Building the model. Should be the same as the weights to be loaded.
model = Sequential()
model.add(Conv2D(16, (3, 3), padding='same', activation='relu', input_shape=(image_size, image_size, 1)))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(.2))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(.2))
model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(.4))
model.add(Dense(2, activation='sigmoid'))
model.load_weights(weights_file_name)
print("Server starting...")
client = pyIGTLink.PyIGTLinkClient(host="127.0.0.1")
client.start()
print("Server running...")
try:
image_squeezed = np.zeros([image_size, image_size]).astype(np.uint8)
image_padded = np.zeros([1, image_size, image_size, 1]).astype(np.uint8)
image_input = np.zeros([1, image_size, image_size, 1]).astype(np.uint8)
cv2.circle(image_input[0,:,:,0], (10,10), 15, 255, 5)
cv2.imshow("image", image_input[0,:,:,0])
cv2.waitKey(10)
while True:
messages = client.get_latest_messages()
if len(messages) > 0:
for message in messages:
if message._type == "IMAGE":
image = message._image
image = np.flip(image, 1)
image_squeezed = np.squeeze(image)
image_padded[0,:,:,0] = cv2.resize(image_squeezed, (image_size, image_size)).astype(np.uint8)
image_input = image_padded / 255.0
prediction = model.predict(image_input).tolist()
print("Predicted center line: " + str(prediction[0]))
cx = int(image_size*prediction[0][0])
cy = int(image_size*prediction[0][1])
cv2.circle(image_input[0,:,:,0], (cx, cy), 2, 255, thickness=1)
cv2.imshow("image", image_input[0,:,:,0])
cv2.waitKey(10)
client.send_message(pyIGTLink.StringMessage(str(prediction[0]), device_name=message._device_name+"Predicted"))
time.sleep(0.1)
except KeyboardInterrupt:
pass
client.stop()
|
py | b412f78c722a5b71d2e72bdf67d6c7b986e1cdc1 | from django.contrib import admin
from .models import *
class PerConfig(admin.ModelAdmin):
list_display = ['title','url','group','action']
# admin.site.register(UserInfo)
admin.site.register(Role)
admin.site.register(Permission,PerConfig)
admin.site.register(PermissionGroup)
|
py | b412f8105a21cdcea1e9500f110cdec3e1789228 | import urllib.request
import csv
import io
from COVID19Map.PublicFun.sql import TCountryMappingRepository
from COVID19Map.PublicFun.model import TCountryMapping
def getCSVFile(filePath: str):
try:
res = urllib.request.urlopen(filePath)
context = res.read().decode("utf-8")
return csv.reader(io.StringIO(context))
except IOError as ioerror:
print(ioerror)
except Exception as e:
print(e)
def saveCountryMapping(csvFile):
try:
countryMappingList = []
firstline = True
for line in csvFile:
if firstline:
firstline = False
continue
temp = TCountryMapping.TCountryMapping()
temp.UID = line[0]
temp.iso2 = line[1]
temp.iso3 = line[2]
temp.code3 = line[3]
temp.FIPS = line[4]
temp.Admin2 = line[5]
temp.ProvinceState = line[6]
temp.CountryRegion = line[7]
temp.PointLat = line[8]
temp.PointLong = line[9]
temp.CombinedKey = line[10]
temp.Population = line[11]
countryMappingList.append(temp)
TCountryMappingRepository.TCountryMappingListInsert(countryMappingList)
    except Exception:
        pass
if __name__ == "__main__":
    globalfile = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
countryMappingFileUrl = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/UID_ISO_FIPS_LookUp_Table.csv"
csvFile = getCSVFile(countryMappingFileUrl)
saveCountryMapping(csvFile)
|
py | b412f8950ed4b4316fc08486c596a26c1b6e7716 | #
# Copyright 2021 Mobvista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import inspect
import pickle
import cloudpickle
from cloudpickle import PYPY
import torch
def _patch_lookup_module_and_qualname():
_orig_whichmodule = cloudpickle.cloudpickle._whichmodule
_orig_lookup_module_and_qualname = cloudpickle.cloudpickle._lookup_module_and_qualname
def patched_lookup_module_and_qualname(obj, name=None):
if name is None:
name = getattr(obj, '__qualname__', None)
if name is None:
name = getattr(obj, '__name__', None)
module_name = _orig_whichmodule(obj, name)
if module_name is None:
return None
if module_name.startswith('unusual_prefix_'):
return None
return _orig_lookup_module_and_qualname(obj, name)
cloudpickle.cloudpickle._lookup_module_and_qualname = patched_lookup_module_and_qualname
def _patch_getsourcelines():
_orig_getsourcelines = inspect.getsourcelines
def patched_getsourcelines(obj):
if not hasattr(obj, '_SourcePatchingPickler__filename'):
return _orig_getsourcelines(obj)
sourcelines = getattr(obj, '_SourcePatchingPickler__sourcelines')
file_lineno = getattr(obj, '_SourcePatchingPickler__file_lineno')
sourcelines = list(sourcelines)
return sourcelines, file_lineno
inspect.getsourcelines = patched_getsourcelines
class SourcePatchingPickler(cloudpickle.CloudPickler):
def _patch_source(self, module_class):
if module_class.__module__.startswith('torch.nn.'):
return
if module_class.__module__.startswith('mindalpha.nn.'):
return
forward_method = module_class.forward
if hasattr(forward_method, '_SourcePatchingPickler__filename'):
return
filename = inspect.getsourcefile(forward_method)
sourcelines, file_lineno = inspect.getsourcelines(forward_method)
sourcelines = tuple(sourcelines)
setattr(forward_method, '_SourcePatchingPickler__filename', filename)
setattr(forward_method, '_SourcePatchingPickler__sourcelines', sourcelines)
setattr(forward_method, '_SourcePatchingPickler__file_lineno', file_lineno)
if pickle.HIGHEST_PROTOCOL >= 5 and not PYPY:
def reducer_override(self, obj):
if isinstance(obj, type) and issubclass(obj, torch.nn.Module):
self._patch_source(obj)
return super().reducer_override(obj)
else:
def save(self, obj, save_persistent_id=True):
if isinstance(obj, type) and issubclass(obj, torch.nn.Module):
self._patch_source(obj)
super().save(obj, save_persistent_id=save_persistent_id)
if pickle.HIGHEST_PROTOCOL >= 5 and not PYPY:
def dump(obj, file, protocol=None, buffer_callback=None):
SourcePatchingPickler(
file, protocol=protocol, buffer_callback=buffer_callback
).dump(obj)
def dumps(obj, protocol=None, buffer_callback=None):
with io.BytesIO() as file:
cp = SourcePatchingPickler(
file, protocol=protocol, buffer_callback=buffer_callback
)
cp.dump(obj)
return file.getvalue()
else:
def dump(obj, file, protocol=None):
SourcePatchingPickler(file, protocol=protocol).dump(obj)
def dumps(obj, protocol=None):
with io.BytesIO() as file:
cp = SourcePatchingPickler(file, protocol=protocol)
cp.dump(obj)
return file.getvalue()
load, loads = pickle.load, pickle.loads
Pickler = SourcePatchingPickler
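# Editor's note (illustrative, not part of the original module): the pickler is
# meant to be used through the dump/dumps/load/loads helpers above, e.g. with a
# hypothetical torch.nn.Module subclass:
#
#     payload = dumps(MyModule)     # SourcePatchingPickler stashes the source
#                                   # of MyModule.forward on the function first
#     MyModule2 = loads(payload)    # cloudpickle rebuilds the class by value
#
# so that the patched inspect.getsourcelines() can still recover forward()'s
# source after the class has been shipped to another process.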
|
py | b412f89cd11ac9f26baea1e110c439f69985f23c | import httpx
import pytest
from demo_project.api.dependencies import azure_scheme
from demo_project.core.config import settings
from demo_project.main import app
from tests.utils import build_openid_keys, keys_url, openid_config_url, openid_configuration
from fastapi_azure_auth import MultiTenantAzureAuthorizationCodeBearer
@pytest.fixture
def multi_tenant_app():
async def issuer_fetcher(tid):
tids = {'intility_tenant_id': 'https://login.microsoftonline.com/intility_tenant/v2.0'}
return tids[tid]
azure_scheme_overrides = MultiTenantAzureAuthorizationCodeBearer(
app_client_id=settings.APP_CLIENT_ID,
scopes={
f'api://{settings.APP_CLIENT_ID}/user_impersonation': 'User impersonation',
},
validate_iss=True,
iss_callable=issuer_fetcher,
)
app.dependency_overrides[azure_scheme] = azure_scheme_overrides
yield azure_scheme
@pytest.fixture
def mock_openid(respx_mock):
respx_mock.get(openid_config_url(version=2, multi_tenant=True)).respond(json=openid_configuration(version=2))
yield
@pytest.fixture
def mock_openid_and_keys(respx_mock, mock_openid):
respx_mock.get(keys_url(version=2)).respond(json=build_openid_keys())
yield
@pytest.fixture
def mock_openid_and_empty_keys(respx_mock, mock_openid):
respx_mock.get(keys_url(version=2)).respond(json=build_openid_keys(empty_keys=True))
yield
@pytest.fixture
def mock_openid_ok_then_empty(respx_mock, mock_openid):
keys_route = respx_mock.get(keys_url(version=2))
keys_route.side_effect = [
httpx.Response(json=build_openid_keys(), status_code=200),
httpx.Response(json=build_openid_keys(empty_keys=True), status_code=200),
]
openid_route = respx_mock.get(openid_config_url(version=2, multi_tenant=True))
openid_route.side_effect = [
httpx.Response(json=openid_configuration(version=2), status_code=200),
httpx.Response(json=openid_configuration(version=2), status_code=200),
]
yield
@pytest.fixture
def mock_openid_and_no_valid_keys(respx_mock, mock_openid):
respx_mock.get(keys_url(version=2)).respond(json=build_openid_keys(no_valid_keys=True))
yield
|
py | b412fa5a52ed83f84d6a214ee2f04aa4a22b830c | from ... import types as sqltypes
class JSON(sqltypes.JSON):
"""SQLite JSON type.
SQLite supports JSON as of version 3.9 through its JSON1_ extension. Note
that JSON1_ is a
`loadable extension <https://www.sqlite.org/loadext.html>`_ and as such
may not be available, or may require run-time loading.
:class:`_sqlite.JSON` is used automatically whenever the base
:class:`_types.JSON` datatype is used against a SQLite backend.
.. seealso::
:class:`_types.JSON` - main documentation for the generic
cross-platform JSON datatype.
The :class:`_sqlite.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`_types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function wrapped in the ``JSON_QUOTE`` function at the database level.
Extracted values are quoted in order to ensure that the results are
always JSON string values.
.. versionadded:: 1.3
.. _JSON1: https://www.sqlite.org/json1.html
"""
# Note: these objects currently match exactly those of MySQL, however since
# these are not generalizable to all JSON implementations, remain separately
# implemented for each dialect.
class _FormatTypeMixin:
def _format_value(self, value):
raise NotImplementedError()
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
def _format_value(self, value):
if isinstance(value, int):
value = "$[%s]" % value
else:
value = '$."%s"' % value
return value
class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
def _format_value(self, value):
return "$%s" % (
"".join(
[
"[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
for elem in value
]
)
)
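# Editor's note: a worked illustration of the two helpers above (not part of
# SQLAlchemy itself). Given instances of each type, _format_value renders the
# JSON path that is handed to SQLite's JSON_EXTRACT:
#
#     JSONIndexType()._format_value(0)        -> '$[0]'
#     JSONIndexType()._format_value('key')    -> '$."key"'
#     JSONPathType()._format_value(['a', 0])  -> '$."a"[0]'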
|
py | b412fa6f1456745a05da07e85b167ec6006aa450 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for trackable object SavedModel save."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.lib.io import file_io
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import compat
class _ModelWithOptimizer(util.Checkpoint):
def __init__(self):
self.dense = core.Dense(1)
self.optimizer = adam.Adam(0.01)
@def_function.function(
input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32),
tensor_spec.TensorSpec([None], dtypes.float32)))
def call(self, x, y):
with backprop.GradientTape() as tape:
loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.)
trainable_variables = self.dense.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
return {"loss": loss}
def _import_and_infer(
save_dir, inputs,
signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):
"""Import a SavedModel into a TF 1.x-style graph and run `signature_key`."""
graph = ops.Graph()
with graph.as_default(), session_lib.Session() as session:
model = loader.load(session, [tag_constants.SERVING], save_dir)
signature = model.signature_def[signature_key]
assert set(inputs.keys()) == set(signature.inputs.keys())
feed_dict = {}
for arg_name in inputs.keys():
feed_dict[graph.get_tensor_by_name(signature.inputs[arg_name].name)] = (
inputs[arg_name])
output_dict = {}
for output_name, output_tensor_info in signature.outputs.items():
output_dict[output_name] = graph.get_tensor_by_name(
output_tensor_info.name)
return session.run(output_dict, feed_dict=feed_dict)
class SaveTest(test.TestCase):
def test_method_save_signature(self):
root = tracking.AutoTrackable()
root.f = def_function.function(
lambda x: 2. * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root.f(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir, root.f)
self.assertEqual(
{"output_0": 2.},
_import_and_infer(save_dir, {"x": 1.}))
def test_method_save_concrete(self):
root = tracking.AutoTrackable()
root.f = def_function.function(
lambda z: {"out": 2. * z})
root.f(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(
root,
save_dir,
{"non_default_key": root.f.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))})
self.assertEqual(
{"out": 2.},
_import_and_infer(
save_dir, {"z": 1.}, signature_key="non_default_key"))
def test_unbuilt_model_does_not_prevent_saving(self):
root = util.Checkpoint(model=sequential.Sequential([core.Dense(2)]))
save.save(root, os.path.join(self.get_temp_dir(), "saved_model"))
def test_version_information_included(self):
root = tracking.AutoTrackable()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir)
saved_model_proto = loader_impl.parse_saved_model(save_dir)
self.assertEqual(
versions.__version__,
saved_model_proto.meta_graphs[0].meta_info_def.tensorflow_version)
self.assertEqual(
versions.__git_version__,
saved_model_proto.meta_graphs[0].meta_info_def.tensorflow_git_version)
def test_non_concrete_error(self):
root = tracking.AutoTrackable()
root.f = def_function.function(lambda x: 2. * x)
root.f(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegexp(
ValueError, "Expected a TensorFlow function"):
save.save(root, save_dir, root.f)
def test_captures_unreachable_variable(self):
root = tracking.AutoTrackable()
unreachable_variable = variables.Variable([5.0, 2.0])
root.reachable_variable = variables.Variable([1.0, 3.0])
@def_function.function
def increase_variable(x):
return 2 * unreachable_variable * x + root.reachable_variable
root.f = increase_variable
self.assertAllEqual([101.0, 83.0],
root.f(constant_op.constant([10.0, 20.0])).numpy())
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegexp(KeyError, "not reachable from root"):
save.save(root, save_dir)
def test_nested_inputs(self):
root = tracking.AutoTrackable()
root.f = def_function.function(
lambda x: 2. * x[0],
input_signature=([tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32)],))
root.f([constant_op.constant(1.), constant_op.constant(1.)])
def test_nested_outputs(self):
root = tracking.AutoTrackable()
root.f = def_function.function(lambda x: (2. * x, (3. * x, 4. * x)))
root.f(constant_op.constant(1.))
to_save = root.f.get_concrete_function(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegexp(
ValueError, "non-flat outputs"):
save.save(root, save_dir, to_save)
def test_nested_dict_outputs(self):
root = util.Checkpoint(
f=def_function.function(
lambda x: {"a": 2. * x, "b": (3. * x, 4. * x)}))
root.f(constant_op.constant(1.))
to_save = root.f.get_concrete_function(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegexp(
ValueError, "dictionary containing non-Tensor value"):
save.save(root, save_dir, to_save)
def test_variable(self):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(
lambda x: root.v1 * root.v2 * x)
root.f(constant_op.constant(1.))
to_save = root.f.get_concrete_function(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir, to_save)
self.assertAllEqual({"output_0": 12.},
_import_and_infer(save_dir, {"x": 2.}))
def test_optimizer(self):
x = constant_op.constant([[3., 4.]])
y = constant_op.constant([2.])
model = _ModelWithOptimizer()
first_loss = model.call(x, y)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(model, save_dir, model.call)
second_loss = model.call(x, y)
self.assertNotEqual(first_loss, second_loss)
self.assertAllClose(
second_loss,
_import_and_infer(save_dir, {"x": [[3., 4.]], "y": [2.]}))
def test_single_method_default_signature(self):
model = _ModelWithOptimizer()
x = constant_op.constant([[3., 4.]])
y = constant_op.constant([2.])
model.call(x, y)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(model, save_dir)
self.assertIn("loss",
_import_and_infer(save_dir,
{"x": [[3., 4.]], "y": [2.]}))
def test_single_function_default_signature(self):
model = tracking.AutoTrackable()
model.f = def_function.function(lambda: 3., input_signature=())
model.f()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(model, save_dir)
self.assertAllClose({"output_0": 3.},
_import_and_infer(save_dir, {}))
def test_single_function_no_signature(self):
model = tracking.AutoTrackable()
model.f = def_function.function(lambda: 3.)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(model, save_dir)
def test_find_default_save_function(self):
class ObjWithDefaultSignature(util.Checkpoint):
@def_function.function(input_signature=[tensor_spec.TensorSpec(
shape=None, dtype=dtypes.float32)])
def _default_save_signature(self, x):
return x + x + 1
obj = ObjWithDefaultSignature()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(obj, save_dir)
self.assertAllClose(
{"output_0": 7.}, _import_and_infer(save_dir, {"x": 3.}))
def test_docstring(self):
class Adder(module.Module):
@def_function.function(input_signature=[tensor_spec.TensorSpec(
shape=None, dtype=dtypes.float32)])
def add(self, x):
return x + x + 1.
to_save = Adder()
to_save.add(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(to_save, save_dir)
self.assertAllClose({"output_0": 7.},
_import_and_infer(save_dir, {"x": 3.}))
def test_datastructures(self):
class HasDatastructures(util.Checkpoint):
def __init__(self):
self.a = [1.]
self.a.append(variables.Variable(2.))
self.b = {"a": variables.Variable(3.)}
@def_function.function(input_signature=[tensor_spec.TensorSpec(
shape=None, dtype=dtypes.float32)])
def add(self, x):
return x + math_ops.add_n(self.a) + self.b["a"]
to_save = HasDatastructures()
to_save.add(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(to_save, save_dir)
self.assertAllClose({"output_0": 10.},
_import_and_infer(save_dir, {"x": 4.}))
def test_default_attr_stripping(self):
class Complex(util.Checkpoint):
@def_function.function(input_signature=[])
def __call__(self):
return math_ops.complex(
constant_op.constant(1.),
constant_op.constant(2.),
name="complex")
to_save = Complex()
to_save()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(to_save, save_dir)
graph = ops.Graph()
with graph.as_default(), self.session(graph) as session:
loader.load(session, [tag_constants.SERVING], save_dir)
func, = [f for name, f in graph._functions.items() if "call" in name]
complex_node, = [
node for node in func.definition.node_def if node.op == "Complex"]
self.assertNotIn("T", complex_node.attr)
self.assertNotIn("Tout", complex_node.attr)
def test_signature_attribute_reserved(self):
root = util.Checkpoint(signatures=variables.Variable(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegexp(ValueError, "del obj.signatures"):
save.save(root, save_dir)
del root.signatures
save.save(root, save_dir)
def test_function_with_captured_dataset(self):
if test_util.is_gpu_available():
self.skipTest("Currently broken when a GPU is available.")
class HasDataset(module.Module):
def __init__(self):
super(HasDataset, self).__init__()
self.dataset = (
dataset_ops.Dataset.range(5)
.map(lambda x: x ** 2))
@def_function.function
def __call__(self, x):
current_sum = array_ops.zeros([], dtype=dtypes.int64)
for element in self.dataset:
current_sum += x * element
return current_sum
root = HasDataset()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(
root, save_dir,
signatures=root.__call__.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.int64)))
self.assertAllClose({"output_0": 3 * (1 + 4 + 9 + 16)},
_import_and_infer(save_dir, {"x": 3}))
class AssetTests(test.TestCase):
def setUp(self):
super(AssetTests, self).setUp()
self._vocab_path = os.path.join(self.get_temp_dir(), "vocab.txt")
with open(self._vocab_path, "w") as f:
f.write("alpha\nbeta\ngamma\n")
def test_asset_path_returned(self):
root = tracking.AutoTrackable()
root.path = tracking.TrackableAsset(self._vocab_path)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
root.get_asset = def_function.function(lambda: root.path.asset_path)
save.save(root, save_dir, signatures=root.get_asset.get_concrete_function())
second_dir = os.path.join(self.get_temp_dir(), "second_dir")
file_io.rename(save_dir, second_dir)
imported_path = _import_and_infer(second_dir, {})["output_0"]
self.assertIn(compat.as_str_any(second_dir),
compat.as_str_any(imported_path))
def test_table(self):
initializer = lookup_ops.TextFileInitializer(
self._vocab_path,
key_dtype=dtypes.string,
key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
value_dtype=dtypes.int64,
value_index=lookup_ops.TextFileIndex.LINE_NUMBER)
root = util.Checkpoint(table=lookup_ops.HashTable(
initializer, default_value=-1))
root.table_user = def_function.function(
root.table.lookup,
input_signature=[tensor_spec.TensorSpec(None, dtypes.string)])
self.assertEqual(
2,
self.evaluate(root.table_user(constant_op.constant("gamma"))))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir)
file_io.delete_file(self._vocab_path)
self.assertAllClose(
{"output_0": [2, 0]},
_import_and_infer(save_dir, {"keys": ["gamma", "alpha"]}))
second_dir = os.path.join(self.get_temp_dir(), "second_dir")
# Asset paths should track the location the SavedModel is loaded from.
file_io.rename(save_dir, second_dir)
self.assertAllClose(
{"output_0": [2, 1]},
_import_and_infer(second_dir, {"keys": ["gamma", "beta"]}))
def test_unused_asset(self):
root = tracking.AutoTrackable()
root.f = def_function.function(
lambda x: 2. * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root.asset = tracking.TrackableAsset(self._vocab_path)
export_dir = os.path.join(self.get_temp_dir(), "save_dir")
save.save(root, export_dir)
self.assertAllClose(
{"output_0": [0.2]},
_import_and_infer(export_dir, {"x": [0.1]}))
def test_sensible_function_building_exception(self):
root = util.Checkpoint(v=variables.Variable(2.))
root.f = def_function.function(
lambda x: 2. * root.v,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
export_dir = os.path.join(self.get_temp_dir(), "save_dir")
@def_function.function
def _calls_save():
save.save(root, export_dir)
with self.assertRaisesRegexp(AssertionError, "tf.function"):
_calls_save()
class _ModelWithOptimizerUsingDefun(util.Checkpoint):
def __init__(self):
self.dense = core.Dense(1)
self.optimizer = adam.Adam(0.01)
# Using defun due to control flow v2 cycles, b/121159261. def_function uses
# conds to gate variable initialization and so triggers cond reference cycles,
# but the thing being wrapped here does not use cond itself.
@function.defun(
input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32),
tensor_spec.TensorSpec([None], dtypes.float32)),
)
def call(self, x, y):
with backprop.GradientTape() as tape:
loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.)
trainable_variables = self.dense.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
return {"loss": loss}
class MemoryTests(test.TestCase):
def setUp(self):
self._model = _ModelWithOptimizerUsingDefun()
@test_util.assert_no_garbage_created
def test_no_reference_cycles(self):
x = constant_op.constant([[3., 4.]])
y = constant_op.constant([2.])
self._model.call(x, y)
if sys.version_info[0] < 3:
# TODO(allenl): debug reference cycles in Python 2.x
self.skipTest("This test only works in Python 3+. Reference cycles are "
"created in older Python versions.")
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(self._model, save_dir, self._model.call)
if __name__ == "__main__":
test.main()
|
py | b412faf751e0f54fa2d18eb1a931f4090951de75 | import sys
import re
from multiprocessing.connection import Listener
from kazoo.client import KazooClient
from kazoo.client import KazooState
from data_api import DataAPI
class DataApiListener():
def __init__(self, pipe_address, zoo_address):
# Requests pipe
self.listener = Listener(pipe_address)
# Kazoo client
self.kz_client = KazooClient(hosts=zoo_address)
self.kz_client.start()
# Requesting API
self.__api = DataAPI(self.kz_client)
# Flag to disconnect
self.__keep_running = True
def run(self):
"""Run a daemon, accepting requests for the API.
This daemon will run until stop() is called on the
        DataApiListener instance.
"""
while self.__keep_running:
new_c = self.listener.accept()
# Check there is data to read
if not new_c.poll(0.2):
# Close after 0.2 seconds idle
new_c.close()
continue
            # Read the request payload sent by the client
c_data = new_c.recv()
            # Dispatch the request to the API
result = self.__parse_request(c_data)
# Return result of operation
new_c.send(result)
# Close connection
new_c.close()
# Close socket
self.listener.close()
# End Kazoo connection
self.kz_client.stop()
self.kz_client.close()
def stop(self):
"""Stop further connections to this client."""
self.__keep_running = False
def __parse_request(self, req):
"""Parse an incoming request.
Returns a dictionary containing the following items:
res = {
code : <integer>,
data : <string, dict>
}
"""
result = {'code' : 0, 'data' : ''}
if self.kz_client.state is not KazooState.CONNECTED:
result['code'] = -2
result['data'] = "Server unavailable"
return result
if type(req) is not dict:
result['code'] = -1
result['data'] = "Invalid data format"
return result
try:
result['data'] = self.__run_request(req)
except Exception as e:
result['code'] = -3
result['data'] = str(e)
return result
def __run_request(self, req):
result = ''
op = req.get('operation', None)
data = req.get('data', None)
if op == "create":
result = self.__api.create_tournament(
name = data['name'],
modality = data['modality'],
password = data['password'],
players = data['players']
)
elif op == "update":
result = self.__api.update_tournament(
tournament_id = data['id'],
version = data['version'],
classification = data['classification'],
password = data['password']
)
elif op == "delete":
result = self.__api.delete_tournament(
tournament_id = data['id'],
password = data['password']
)
elif op == "get":
result = self.__api.get_tournament(
tournament_id = data['id']
)
elif op == "get_list":
result = self.__api.get_tournament_list()
elif op == "status":
result = {
'status' : self.kz_client.state,
'address' : self.kz_client.hosts
}
elif op == "setpath":
new_path = data['path']
# Match filesystem format "/path/to/some_where/else"
if re.compile("/([\d\w_]+/?)*").match(new_path):
self.__api.set_data_path(data['path'])
result = data['path']
else:
raise Exception("Malformed path")
elif op == "dummy":
result = 'OK'
else:
raise Exception("Operation " + op + " is invalid.")
return result
if __name__ == '__main__':
if len(sys.argv) != 3:
print("Wrong argument count!")
print("python3 {0} <socket_port> <zookeeper_ip:port>".format(
sys.argv[0]))
else:
pipe_address = ("localhost", int(sys.argv[1]))
zoo_address = sys.argv[2]
listener = DataApiListener(pipe_address, zoo_address)
listener.run()
|
py | b412fb24367de732566ec094764a363b17ab894b | x=5.9
x_int=int(x)
print(x_int)
result=x-x_int
print(result)
print(int(result*10))
|
py | b412fb390dc5e8ac583aab32a8abbc1cc5688405 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.dialogs import DialogTurnResult, ComponentDialog, DialogContext
from botbuilder.core import BotFrameworkAdapter
from botbuilder.schema import ActivityTypes
class LogoutDialog(ComponentDialog):
def __init__(self, dialog_id: str, connection_name: str):
super(LogoutDialog, self).__init__(dialog_id)
self.connection_name = connection_name
async def on_begin_dialog(
self, inner_dc: DialogContext, options: object
) -> DialogTurnResult:
result = await self._interrupt(inner_dc)
if result:
return result
return await super().on_begin_dialog(inner_dc, options)
async def on_continue_dialog(self, inner_dc: DialogContext) -> DialogTurnResult:
result = await self._interrupt(inner_dc)
if result:
return result
return await super().on_continue_dialog(inner_dc)
async def _interrupt(self, inner_dc: DialogContext):
if inner_dc.context.activity.type == ActivityTypes.message:
text = inner_dc.context.activity.text.lower()
if text == "logout":
bot_adapter: BotFrameworkAdapter = inner_dc.context.adapter
await bot_adapter.sign_out_user(inner_dc.context, self.connection_name)
await inner_dc.context.send_activity("You have been signed out.")
return await inner_dc.cancel_all_dialogs()
|
py | b412fb4ff49c645e85db64199af44b4dbab33202 | import micropyro as mp
# read the database
database = mp.ReadDatabase.from_csv('database_example.csv')
# read the experimental matrix
exp_matrix = mp.ReadExperimentTable.from_csv("experimental_matrix.csv", header=0, use_is=True)
exp_matrix.compute_is_amount(concentration=0.03)
# get the blob dfs
dfs = []
temperatures = []
for row_name, row in exp_matrix.df.iterrows():
# print(row_name)
blob_file = mp.read_blob_file(f'{row_name}.cdf_img01_Blob_Table.csv')
mp.perform_matching_database(blob_df=blob_file, database_df=database.df, extra_columns=['group'])
blob_file = mp.compute_yields_is(experiment_df_row=row, blob_df=blob_file,
internal_standard_name='fluoranthene')
temperatures.append(row['temperature'])
# print(blob_file['yield mrf'].nlargest(5))
dfs.append(blob_file)
fig, ax = mp.compare_yields(dfs, compounds=['phenol', 'p-cresol', "benzene"], x_axis = temperatures)
ax.set_xlabel('Reactor Temperature, C')
ax.set_ylabel('Yield, %')
fig.savefig('comparison.pdf') |
py | b412fba11bcbf32bed64fcccf826f3efc5eb7d01 | #!/usr/bin/env python3
# pylint: disable=too-few-public-methods, missing-docstring
from __future__ import division, print_function
import argparse
import datetime
import json
import logging
import os
import re
import sqlite3
import subprocess
import sys
import time
__version__ = '0.9.2.dev'
LOG = logging.getLogger('slurm2sql')
LOG.setLevel(logging.DEBUG)
if sys.version_info[0] >= 3:
logging.lastResort.setLevel(logging.INFO)
else:
ch = logging.lastResort = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
LOG.addHandler(ch)
#
# First, many converter functions/classes which convert strings to
# useful values.
#
# Single converter functions: transform one column to sqlite value
# stored as that same column.
def nullint(x):
"""int or None"""
return int(x) if x else None
def nullstr_strip(x):
"""str or None"""
return str(x).strip() if x else None
def unixtime(x):
"""Timestamp in local time, converted to unixtime"""
if not x: return None
if x == 'Unknown': return None
return time.mktime(time.strptime(x, '%Y-%m-%dT%H:%M:%S'))
def datetime_timestamp(dt):
"""Convert a datetime object to unixtime
- Only needed because we support python 2"""
if hasattr(dt, 'timestamp'): # python3
return dt.timestamp()
return time.mktime(dt.timetuple())
def slurmtime(x):
"""Parse slurm time of format [dd-[hh:]]mm:ss"""
if not x: return None
    # Handle 'UNLIMITED', 'Partition_Limit' in the 'timelimit' field
if x in {'Partition_Limit', 'UNLIMITED'}:
return None
seconds = 0
    # The anchor differs depending on whether '-' is present: with '-' the format is [dd]-hh[:mm[:ss]];
    # without it the value is read as mm[:ss], or hh:mm:ss when three fields are given.
if '-' in x:
days, time_ = x.split('-', 1)
seconds += int(days) * 24 * 3600
hms = time_.split(':')
if len(hms) >= 1: seconds += 3600 * int(hms[0]) # hour
if len(hms) >= 2: seconds += 60 * float(hms[1]) # min
if len(hms) >= 3: seconds += int(hms[2]) # sec
else:
time_ = x
hms = time_.split(':')
        # If only a single number is given, it is interpreted as minutes
if len(hms) >= 3: seconds += 3600 * int(hms[-3]) # hour
if len(hms) >= 2: seconds += float(hms[-1]) # sec
if len(hms) >= 1: seconds += 60 * int(hms[-2] if len(hms)>=2 else hms[-1]) # min
return seconds
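# Illustrative values for the parsing above (not executed):
#   slurmtime('1-02:03:04') -> 93784.0  (1 day + 2h 3m 4s)
#   slurmtime('02:30')      -> 150.0    (2 minutes 30 seconds)
#   slurmtime('15')         -> 900      (a bare number is minutes)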
def slurm_timestamp(x):
"""Convert a datetime to the Slurm format of timestamp
"""
if not isinstance(x, datetime.datetime):
x = datetime.datetime.fromtimestamp(x - 5)
return x.strftime('%Y-%m-%dT%H:%M:%S')
def str_unknown(x):
if x == 'Unknown': return None
return x
def slurmmem(x):
"""Memory, removing 'n' or 'c' at end, in KB"""
if not x: return None
x = x.strip('nc')
return float_bytes(x)
# Converting kibi/mibi, etc units to numbers
def unit_value_binary(unit):
"""Convert a unit to its value, e.g. 'K'-->1024, 'M'-->1048576"""
if unit is None: unit = '_'
return 2**(10*'_kmgtpezy'.index(unit.lower()))
def unit_value_metric(unit):
"""Convert a unit to its value, e.g. 'K'-->1000, 'M'-->1000000"""
if unit is None: unit = '_'
return 1000**('_kmgtpezy'.index(unit.lower()))
def float_bytes(x, convert=float):
"""Convert a float with unit (K,M, etc) to value"""
if not x: return None
unit = x[-1].lower()
if unit in 'kmgtpezy':
return convert(x[:-1]) * unit_value_binary(unit)
return convert(x)
def int_bytes(x):
return float_bytes(x, convert=lambda x: int(float(x)))
def float_metric(x, convert=float):
"""Convert a float with unit (K,M, etc) to value"""
if not x: return None
unit = x[-1].lower()
if unit in 'kmgtpezy':
return convert(x[:-1]) * unit_value_metric(unit)
return convert(x)
def int_metric(x):
return float_metric(x, convert=lambda x: int(float(x)))
# Row converter functions which need *all* values to convert. Classes
# with one method, 'calc', which takes the whole row (a dict) and
# converts it to a sqlite value. The only point of the class is to be
# able to easily distinguish the function from the column functions
# above.
class linefunc(object):
"""Base class for all converter functions"""
linefunc = True
# Submit, start, and end times as unixtimes
class slurmDefaultTime(linefunc):
@staticmethod
def calc(row):
"""Latest active time.
All jobs in sacct are already started, so this is either current
time or end time.
"""
if row['End'] != 'Unknown':
return row['End']
if row['Start'] != 'Unknown':
# Currently running, return current time since it's constantly updated.
return time.strftime("%Y-%m-%dT%H:%M:%S")
# Return submit time, since there is nothing else.
return row['Submit']
class slurmDefaultTimeTS(linefunc):
@staticmethod
def calc(row):
"""Lastest active time (see above), unixtime."""
return unixtime(slurmDefaultTime.calc(row))
class slurmSubmitTS(linefunc):
@staticmethod
def calc(row):
return unixtime(row['Submit'])
class slurmStartTS(linefunc):
@staticmethod
def calc(row):
return unixtime(row['Start'])
class slurmEndTS(linefunc):
@staticmethod
def calc(row):
return unixtime(row['End'])
class slurmBilling(linefunc):
@staticmethod
def calc(row):
tres = row['AllocTRES']
if not tres: return None
m = re.search(r'billing=(\d+)', tres)
if m:
return int(m.group(1))
# Memory stuff
class slurmMemNode(linefunc):
"""Memory per node"""
@staticmethod
def calc(row):
reqmem = row['ReqMem']
if not reqmem: return None
ncpus = int(row['NCPUS'])
if ncpus == 0: return 0
nnodes = int(row['NNodes'])
if nnodes == 0: return None
if reqmem.endswith('c'):
return slurmmem(reqmem) * ncpus / nnodes
if reqmem.endswith('n'):
return slurmmem(reqmem)
return slurmmem(reqmem) / nnodes
class slurmMemCPU(linefunc):
"""Memory per cpu, computed if necessary"""
@staticmethod
def calc(row):
reqmem = row['ReqMem']
if not reqmem: return None
nnodes = int(row['NNodes'])
if nnodes == 0: return 0
ncpus = int(row['NCPUS'])
if ncpus == 0: return None
if reqmem.endswith('c'):
return slurmmem(reqmem)
if reqmem.endswith('n'):
return slurmmem(reqmem) * nnodes / ncpus
return slurmmem(reqmem) / ncpus
class slurmMemType(linefunc):
"""Memory type: 'n' per node, 'c' per core"""
@staticmethod
def calc(row):
reqmem = row['ReqMem']
if not reqmem: return None
if reqmem.endswith('n'): return 'n'
if reqmem.endswith('c'): return 'c'
return None # latest slurm seems to not have this, ~2021-2022
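# Worked example (assumed row values): with ReqMem='4Gc', NCPUS='8', NNodes='2',
# the 'c' suffix means memory per CPU, so slurmMemNode yields 4GiB*8/2 = 16GiB
# per node while slurmMemCPU returns 4GiB unchanged; an 'n' suffix flips that.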
class slurmMemRaw(linefunc):
"""Raw value of ReqMem column, with 'c' or 'n' suffix"""
@staticmethod
def calc(row):
return row['ReqMem']
# GPU stuff
class slurmReqGPU(linefunc):
@staticmethod
def calc(row):
if 'ReqGRES' in row:
gres = row['ReqGRES']
else:
gres = row['ReqTRES']
if not gres: return None
# Slurm 20.11 uses gres= within ReqTRES (instead of ReqGRES)
m = re.search(r'gpu[:=](\d+)', gres)
if m:
return int(m.group(1))
class slurmGPUMem(linefunc):
@staticmethod
def calc(row):
comment = row['Comment']
if not comment.strip(): return
if 'No GPU stats' in comment: return
if comment == 'abort': return
try:
comment = json.loads(comment)
except:
return None
if 'gpu_mem_max' not in comment: return
return comment.get('gpu_mem_max') * (2**20)
class slurmGPUEff(linefunc):
@staticmethod
def calc(row):
comment = row['Comment']
if not comment.strip(): return
if 'No GPU stats' in comment: return
if comment == 'abort': return
try:
comment = json.loads(comment)
except:
return None
if 'gpu_util' not in comment: return
return comment['gpu_util']/100.
class slurmGPUCountComment(linefunc):
@staticmethod
def calc(row):
comment = row['Comment']
if not comment.strip(): return
if 'No GPU stats' in comment: return
if comment == 'abort': return
try:
comment = json.loads(comment)
except:
return None
return comment.get('ngpu')
class slurmGPUCount(linefunc):
@staticmethod
def calc(row):
tres = row['AllocTRES'] or row['ReqTRES']
if not tres: return None
m = re.search(r'gpu=(\d+)', tres)
if m:
return int(m.group(1))
# Job ID related stuff
class slurmJobIDplain(linefunc):
"""The JobID without any . or _"""
@staticmethod
def calc(row):
return int(row['JobID'].split('_')[0].split('.')[0])
class slurmJobIDrawplain(linefunc):
"""The JobID without any . or _"""
@staticmethod
def calc(row):
return int(row['JobIDRaw'].split('_')[0].split('.')[0])
class slurmJobIDRawnostep(linefunc):
"""The JobID without any . or _"""
@staticmethod
def calc(row):
return int(row['JobIDRaw'].split('_')[0].split('.')[0])
class slurmArrayTaskID(linefunc):
@staticmethod
def calc(row):
if '_' not in row['JobID']: return
if '[' in row['JobID']: return
return int(row['JobID'].split('_')[1].split('.')[0])
class slurmJobStep(linefunc):
@staticmethod
def calc(row):
if '.' not in row['JobID']: return
return row['JobID'].split('.')[-1] # not necessarily an integer
class slurmJobIDslurm(linefunc):
"""The JobID field as slurm gives it, including _ and ."""
@staticmethod
def calc(row):
return row['JobID']
# Efficiency stuff
class slurmMemEff(linefunc):
#https://github.com/SchedMD/slurm/blob/master/contribs/seff/seff
@staticmethod
def calc(row):
reqmem_type = slurmMemType.calc(row)
mem_max = slurmmem(row['MaxRSS'])
reqmem = slurmmem(row['ReqMem'])
nnodes = slurmmem(row['NNodes'])
if not reqmem or mem_max is None: return
if reqmem_type == 'c':
nodemem = reqmem * int(row['NCPUS'])
elif reqmem_type == 'n':
nodemem = reqmem
elif reqmem_type is None:
nodemem = reqmem / nnodes
else:
raise ValueError('unknown memory type: %s'%reqmem_type)
return mem_max / nodemem
class slurmCPUEff(linefunc):
# This matches the seff tool currently:
# https://github.com/SchedMD/slurm/blob/master/contribs/seff/seff
@staticmethod
def calc(row):
walltime = slurmtime(row['Elapsed'])
if not walltime: return None
cpueff = slurmtime(row['TotalCPU']) / (walltime * int(row['NCPUS']))
return cpueff
class slurmConsumedEnergy(linefunc):
@staticmethod
def calc(row):
if not row['ConsumedEnergyRaw']: return None
return int(row['ConsumedEnergyRaw'])
class slurmExitCodeRaw(linefunc):
@staticmethod
def calc(row):
if not row['ExitCode']: return None
return row['ExitCode']
class slurmExitCode(linefunc):
@staticmethod
def calc(row):
if not row['ExitCode']: return None
return int(row['ExitCode'].split(':')[0])
class slurmExitSignal(linefunc):
@staticmethod
def calc(row):
if not row['ExitCode']: return None
return int(row['ExitCode'].split(':')[1])
# All defined columns and their respective converter functions. If a
# key begins in a underscore, this is not a Slurm DB field (don't
# query it from sacct), it is computed from other fields in this
# program. It gets added to our sqlite database without the
# underscore.
COLUMNS = {
# Basic job metadata
# Job IDs are of the forms (from sacct man page):
# - JobID.JobStep
# - ArrayJobID_ArrayTaskID.JobStep
# And the below is consistent with this.
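    # For example (hypothetical IDs), a JobID of '1234567_8.batch' yields
    # ArrayJobID=1234567, ArrayTaskID=8 and JobStep='batch'.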
'JobID': slurmJobIDrawplain, # Integer JobID (for arrays JobIDRaw),
# without array/step suffixes.
'_ArrayJobID': slurmJobIDplain, # Same job id for all jobs in an array.
# If not array, same as JobID
'_ArrayTaskID': slurmArrayTaskID, # Part between '_' and '.'
'_JobStep': slurmJobStep, # Part after '.'
'_JobIDSlurm': slurmJobIDslurm, # JobID directly as Slurm presents it
# (with '_' and '.')
#'JobIDRawSlurm': str, #
'JobName': str, # Free-form text name of the job
'User': str, # Username
'Group': str, # Group
'Account': str, # Account
'_Billing': slurmBilling, # Billing (from tres)
# Times and runtime info
'State': str, # Job state
'Timelimit': slurmtime, # Timelimit specified by user
'Elapsed': slurmtime, # Walltime of the job
    #'_Time': slurmDefaultTime, # Generalized time, max(Submit, End, (current if started))
#'Submit': str_unknown, # Submit time in yyyy-mm-ddThh:mm:ss straight from slurm
#'Start': str_unknown, # Same, job start time
#'End': str_unknown, # Same, job end time
    '_Time': slurmDefaultTimeTS, # unixtime: Generalized time, max(Submit, End, (current if started))
'Submit': slurmSubmitTS, # unixtime: Submit
'Start': slurmStartTS, # unixtime: Start
'End': slurmEndTS, # unixtime: End
'Partition': str, # Partition
'_ExitCodeRaw': slurmExitCodeRaw, # ExitStatus:Signal
'ExitCode': slurmExitCode, # ExitStatus from above, int
'_ExitSignal': slurmExitSignal, # Signal from above, int
'NodeList': str, # Node list of jobs
'Priority': nullint, # Slurm priority (higher = will run sooner)
'_ConsumedEnergy': slurmConsumedEnergy,
# Stuff about number of nodes
'ReqNodes': int_bytes, # Requested number of nodes
'NNodes': nullint, # Number of nodes (allocated if ran, requested if not yet)
'AllocNodes': nullint, # Number of nodes (allocated, zero if not running yet)
    # Miscellaneous requested resources
'ReqTRES': str,
'ReqGRES': str, # Raw GRES string
'NTasks': nullint,
#'AllocGRES'
'AllocTRES': str,
# CPU related
'NCPUS': nullint, # === AllocCPUS
'ReqCPUS': nullint, # Requested CPUs
'AllocCPUS': nullint, # === NCPUS
'CPUTime': slurmtime, # = Elapsed * NCPUS (= CPUTimeRaw) (not how much used)
'TotalCPU': slurmtime, # = Elapsed * NCPUS * efficiency
'UserCPU': slurmtime, #
'SystemCPU': slurmtime, #
'_CPUEff': slurmCPUEff, # CPU efficiency, should be same as seff
'MinCPU': slurmtime, # Minimum CPU used by any task in the job
'MinCPUNode': str,
'MinCPUTask': str,
# Memory related
    'ReqMem': str, # Requested mem, value from slurm. Has a 'c' or 'n' suffix
'_ReqMemType': slurmMemType, # 'c' for mem-per-cpu or 'n' for mem-per-node
'_ReqMemNode': slurmMemNode, # Mem per node, computed if type 'c'
'_ReqMemCPU': slurmMemCPU, # Mem per cpu, computed if type 'n'
'AveRSS': slurmmem,
'MaxRSS': slurmmem,
'MaxRSSNode': str,
'MaxRSSTask': str,
'MaxPages': int_metric,
'MaxVMSize': slurmmem,
'_MemEff': slurmMemEff, # Slurm memory efficiency
# Disk related
'AveDiskRead': int_bytes,
'AveDiskWrite': int_bytes,
'MaxDiskRead': int_bytes,
'MaxDiskReadNode': str,
'MaxDiskReadTask': str,
'MaxDiskWrite': int_bytes,
'MaxDiskWriteNode': str,
'MaxDiskWriteTask': str,
# GPU related
'_ReqGPUS': slurmReqGPU, # Number of GPUS requested
'Comment': nullstr_strip, # Slurm Comment field (at Aalto used for GPU stats)
'_GPUMem': slurmGPUMem, # GPU mem extracted from comment field
'_GPUEff': slurmGPUEff, # GPU utilization (0.0 to 1.0) extracted from comment field
'_NGPU': slurmGPUCount, # Number of GPUs, extracted from comment field
}
# Everything above that does not begin with '_' is queried from sacct.
# These extra columns are added (don't duplicate with the above!)
COLUMNS_EXTRA = ['ConsumedEnergyRaw', 'JobIDRaw']
def main(argv=sys.argv[1:], db=None, raw_sacct=None):
"""Parse arguments and use the other API"""
parser = argparse.ArgumentParser()
parser.add_argument('db', help="Database filename to create or update")
parser.add_argument('sacct_filter', nargs='*',
help="sacct options to filter jobs. For example, one "
"would usually give '-a' or '-S 2019-08-01' "
"here, for example")
parser.add_argument('--update', '-u', action='store_true',
help="If given, don't delete existing database and "
"instead insert or update rows")
parser.add_argument('--history',
help="Scrape dd-hh:mm:ss or [hh:]mm:ss from the past to now (Slurm time format)")
parser.add_argument('--history-resume', action='store_true',
help="Day-by-day collect history, starting from last collection.")
parser.add_argument('--history-days', type=int,
help="Day-by-day collect history, starting this many days ago.")
parser.add_argument('--history-start',
help="Day-by-day collect history, starting on this day.")
parser.add_argument('--history-end',
help="Day-by-day collect history ends on this day. Must include one "
"of the other history options to have any effect.")
parser.add_argument('--jobs-only', action='store_true',
help="Don't include job steps but only the man jobs")
parser.add_argument('--quiet', '-q', action='store_true',
help="Don't output anything unless errors")
parser.add_argument('--verbose', '-v', action='store_true',
help="Output more logging info")
args = parser.parse_args(argv)
if args.verbose:
logging.lastResort.setLevel(logging.DEBUG)
LOG.debug(args)
if args.quiet:
logging.lastResort.setLevel(logging.WARN)
# db is only given as an argument in tests (normally)
if db is None:
# Delete existing database unless --update/-u is given
if not (args.update or args.history_resume) and os.path.exists(args.db):
os.unlink(args.db)
db = sqlite3.connect(args.db)
sacct_filter = args.sacct_filter
# If --history-days, get just this many days history
if (args.history is not None
or args.history_resume
or args.history_days is not None
or args.history_start is not None):
errors = get_history(db, sacct_filter=sacct_filter,
history=args.history,
history_resume=args.history_resume,
history_days=args.history_days,
history_start=args.history_start,
history_end=args.history_end,
jobs_only=args.jobs_only,
raw_sacct=raw_sacct)
create_indexes(db)
# Normal operation
else:
errors = slurm2sql(db, sacct_filter=sacct_filter,
update=args.update,
jobs_only=args.jobs_only,
raw_sacct=raw_sacct,
verbose=args.verbose)
create_indexes(db)
if errors:
LOG.warning("Completed with %s errors", errors)
return(1)
return(0)
def get_history(db, sacct_filter=['-a'],
history=None, history_resume=None, history_days=None,
history_start=None, history_end=None,
jobs_only=False, raw_sacct=None):
"""Get history for a certain period of days.
Queries each day and updates the database, so as to avoid
overloading sacct and causing a failure.
Returns: the number of errors.
"""
errors = 0
now = datetime.datetime.now().replace(microsecond=0)
today = datetime.date.today()
if history_resume:
try:
start = get_last_timestamp(db)
except sqlite3.OperationalError:
import traceback
traceback.print_exc()
print()
print("Error fetching last start time (see above)", file=sys.stderr)
exit(5)
start = datetime.datetime.fromtimestamp(start - 5)
elif history is not None:
start = now - datetime.timedelta(seconds=slurmtime(history))
elif history_days is not None:
start = datetime.datetime.combine(today - datetime.timedelta(days=history_days), datetime.time())
elif history_start is not None:
start = datetime.datetime.strptime(history_start, '%Y-%m-%d')
if history_end is not None:
stop = datetime.datetime.strptime(history_end, '%Y-%m-%d')
else:
stop = now + datetime.timedelta(seconds=6*3600)
days_ago = (now - start).days
day_interval = 1
while start <= stop:
end = start+datetime.timedelta(days=day_interval)
end = end.replace(hour=0, minute=0, second=0, microsecond=0)
new_filter = sacct_filter + [
'-S', slurm_timestamp(start),
'-E', slurm_timestamp(end),
]
LOG.debug(new_filter)
LOG.info("%s %s", days_ago, start.date() if history_days is not None else start)
errors += slurm2sql(db, sacct_filter=new_filter, update=True, jobs_only=jobs_only,
raw_sacct=raw_sacct)
db.commit()
update_last_timestamp(db, update_time=end)
start = end
days_ago -= day_interval
return errors
def sacct(slurm_cols, sacct_filter):
cmd = ['sacct', '-o', ','.join(slurm_cols), '-P', '--units=K',
'--delimiter=;|;',
#'--allocations', # no job steps, only total jobs, but doesn't show used resources.
] + list(sacct_filter)
#LOG.debug(' '.join(cmd))
error_handling = {'errors':'replace'} if sys.version_info[0]>=3 else {}
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE, universal_newlines=True,
**error_handling)
return p.stdout
def create_indexes(db):
db.execute('CREATE INDEX IF NOT EXISTS idx_slurm_start ON slurm (Start)')
db.execute('CREATE INDEX IF NOT EXISTS idx_slurm_user_start ON slurm (User, Start)')
db.execute('CREATE INDEX IF NOT EXISTS idx_slurm_time ON slurm (Time)')
db.execute('CREATE INDEX IF NOT EXISTS idx_slurm_user_time ON slurm (User, Time)')
db.execute('ANALYZE;')
db.commit()
def slurm2sql(db, sacct_filter=['-a'], update=False, jobs_only=False,
raw_sacct=None, verbose=False):
"""Import one call of sacct to a sqlite database.
db:
open sqlite3 database file object.
sacct_filter:
filter for sacct, list of arguments. This should only be row
filters, such as ['-a'], ['-S' '2019-08-01'], and so on. The
argument should be a list. You can't currently filter what columns
are selected.
raw_sacct: If given, do not run sacct but use this as the input
(file-like object)
Returns: the number of errors
"""
columns = COLUMNS.copy()
# Slurm > 20.11 deprecates ReqGRES, everything is put only into
    # ReqTRES. So don't fetch ReqGRES to avoid a warning.
if slurm_version() >= (20, 11):
del columns['ReqGRES']
create_columns = ', '.join('"'+c.strip('_')+'"' for c in columns)
create_columns = create_columns.replace('JobIDSlurm"', 'JobIDSlurm" UNIQUE')
db.execute('CREATE TABLE IF NOT EXISTS slurm (%s)'%create_columns)
db.execute('CREATE TABLE IF NOT EXISTS meta_slurm_lastupdate (id INTEGER PRIMARY KEY, update_time REAL)')
db.execute('CREATE VIEW IF NOT EXISTS allocations AS select * from slurm where JobStep is null;')
db.execute('PRAGMA journal_mode = WAL;')
db.commit()
c = db.cursor()
slurm_cols = tuple(c for c in list(columns.keys()) + COLUMNS_EXTRA if not c.startswith('_'))
    # Read data from sacct, or, when raw_sacct is given, use it directly
    # as pre-read test data.
if raw_sacct is None:
# This is a real filter, read data
lines = sacct(slurm_cols, sacct_filter)
else:
# Support tests - raw lines can be put in
lines = raw_sacct
# We don't use the csv module because the csv can be malformed.
# In particular, job name can include newlines(!). TODO: handle job
# names with newlines.
errors = 0
line_continuation = None
for i, rawline in enumerate(lines):
if i == 0:
# header
header = rawline.strip().split(';|;')
continue
# Handle fields that have embedded newline (JobName). If we
# have too few fields, save the line and continue.
if line_continuation:
rawline = line_continuation + rawline
line_continuation = None
line = rawline.strip().split(';|;')
if len(line) < len(slurm_cols):
line_continuation = rawline
continue
# (end)
if len(line) > len(slurm_cols):
LOG.error("Line with wrong number of columns: (want=%s, have=%s) %s", len(slurm_cols), len(line), rawline)
errors += 1
continue
line = dict(zip(header, line))
# If --jobs-only, then skip all job steps (sacct updates the
# mem/cpu usage on the allocation itself already)
step_id = slurmJobStep.calc(line)
if jobs_only and step_id is not None:
continue
#LOG.debug(line)
processed_line = {k.strip('_'): (columns[k](line[k])
#if not isinstance(columns[k], type) or not issubclass(columns[k], linefunc)
if not hasattr(columns[k], 'linefunc')
else columns[k].calc(line))
for k in columns.keys()}
c.execute('INSERT %s INTO slurm (%s) VALUES (%s)'%(
'OR REPLACE' if update else '',
','.join('"'+x+'"' for x in processed_line.keys()),
','.join(['?']*len(processed_line))),
tuple(processed_line.values()))
# Committing every so often allows other queries to succeed
if i%10000 == 0:
#print('committing')
db.commit()
if verbose:
print('... processing row %d'%i)
db.commit()
return errors
def update_last_timestamp(db, update_time=None):
"""Update the last update time in the database, for resuming.
Updates the one row of the meta_slurm_lastupdate with the latest
unix timestamp, as passed as an argument (or now)
"""
if update_time is None:
update_time = time.time()
if isinstance(update_time, datetime.datetime):
update_time = datetime_timestamp(update_time)
update_time = min(update_time, time.time())
db.execute("INSERT OR REPLACE INTO meta_slurm_lastupdate (id, update_time) VALUES (0, ?)", (update_time, ))
db.commit()
def get_last_timestamp(db):
"""Return the last update timestamp from the database"""
return db.execute('SELECT update_time FROM meta_slurm_lastupdate').fetchone()[0]
def slurm_version(cmd=['sacct', '--version']):
"""Return the version number of Slurm, as a tuple"""
# example output: b'slurm 18.08.8\n' or slurm 19.05.7-Bull.1.0
slurm_version = subprocess.check_output(cmd).decode()
slurm_version = re.match(r"slurm\s([0-9]+)\.([0-9]+)\.([0-9]+)", slurm_version)
slurm_version = tuple(int(x) for x in slurm_version.groups())
return slurm_version
if __name__ == "__main__":
exit(main(sys.argv[1:]))
|
py | b412fc24ca322b3b4b3ef1d71c52ba492842e7b3 | #!/usr/bin/env python3
# Copyright 2020 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import h5py
import pygion
from pygion import acquire, attach_hdf5, task, Fspace, Ispace, Region, R
import numpy
def generate_hdf5_file(filename, dims):
with h5py.File(filename, 'w') as f:
f.create_dataset('x', dims, dtype='i4')
f.create_dataset('uu', dims, dtype='i4')
f.create_dataset('z', dims, dtype='i4')
f.create_dataset('w', dims, dtype='i4')
@task(privileges=[R])
def print_region(R):
print(R.x)
@task
def main():
R = Region([4, 4], {'x': pygion.int32, 'uu': pygion.int32, 'z': pygion.int32, 'w': pygion.int32})
generate_hdf5_file('test.h5', [4, 4])
with attach_hdf5(R, 'test.h5', {'x': 'x', 'uu': 'uu', 'z': 'z', 'w': 'w'}, pygion.file_read_only):
with acquire(R, ['x', 'uu', 'z']):
print_region(R)
if __name__ == '__main__':
main()
|
py | b412fd2b2decc24c4bc05b92899f9897067c3655 | # AI for Business - Minimize cost with Deep Q-Learning
# Building the Brain without Dropout
# Importing the libraries
from keras.layers import Input, Dense
from keras.models import Model
from keras.optimizers import Adam
# BUILDING THE BRAIN
class Brain(object):
# BUILDING A FULLY CONNECTED NEURAL NETWORK DIRECTLY INSIDE THE INIT METHOD
def __init__(self, learning_rate = 0.001, number_actions = 5):
self.learning_rate = learning_rate
        # BUILDING THE INPUT LAYER COMPOSED OF THE INPUT STATE
states = Input(shape = (3,))
# BUILDING THE FULLY CONNECTED HIDDEN LAYERS
x = Dense(units = 64, activation = 'sigmoid')(states)
y = Dense(units = 32, activation = 'sigmoid')(x)
# BUILDING THE OUTPUT LAYER, FULLY CONNECTED TO THE LAST HIDDEN LAYER
q_values = Dense(units = number_actions, activation = 'softmax')(y)
# ASSEMBLING THE FULL ARCHITECTURE INSIDE A MODEL OBJECT
self.model = Model(inputs = states, outputs = q_values)
# COMPILING THE MODEL WITH A MEAN-SQUARED ERROR LOSS AND A CHOSEN OPTIMIZER
self.model.compile(loss = 'mse', optimizer = Adam(lr = learning_rate))
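# Usage sketch (illustrative only): the brain maps a 3-feature state to
# softmax Q-values over the chosen number of actions, e.g.
#   brain = Brain(learning_rate=0.001, number_actions=5)
#   brain.model.summary()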
|
py | b412fdce5baec909784e38839857f5f2c6abada7 | # Copyright 2019 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import json
import numpy as np
import unittest
from iree import runtime as rt
from iree.runtime.function import (
FunctionInvoker,
IMPLICIT_BUFFER_ARG_MEMORY_TYPE,
IMPLICIT_BUFFER_ARG_USAGE,
)
from iree.runtime._binding import VmVariantList
class MockVmContext:
def __init__(self, invoke_callback):
self._invoke_callback = invoke_callback
self.invocations = []
def invoke(self, vm_function, arg_list, ret_list):
self._invoke_callback(arg_list, ret_list)
self.invocations.append((vm_function, arg_list, ret_list))
print(f"INVOKE: {arg_list} -> {ret_list}")
@property
def mock_arg_reprs(self):
return repr([arg_list for _, arg_list, _ in self.invocations])
class MockVmFunction:
def __init__(self, reflection):
self.reflection = reflection
class FunctionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Doesn't matter what device. We just need one.
config = rt.Config("vmvx")
cls.device = config.device
def testNoReflectionScalars(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
ret_list.push_int(4)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
result = invoker(1, 2)
self.assertEqual("[<VmVariantList(2): [1, 2]>]", vm_context.mock_arg_reprs)
self.assertEqual((3, 4), result)
def testKeywordArgs(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(
reflection={
"iree.abi":
json.dumps({
"a": [
"i32",
["named", "a", "i32"],
["named", "b", "i32"],
],
"r": ["i32",],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
result = invoker(-1, a=1, b=2)
self.assertEqual("[<VmVariantList(3): [-1, 1, 2]>]",
vm_context.mock_arg_reprs)
self.assertEqual(3, result)
def testListArg(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={
"iree.abi":
json.dumps({
"a": [["slist", "i32", "i32"],],
"r": ["i32",],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
_ = invoker([2, 3])
self.assertEqual("[<VmVariantList(1): [List[2, 3]]>]",
vm_context.mock_arg_reprs)
def testListArgNoReflection(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
_ = invoker([2, 3])
self.assertEqual("[<VmVariantList(1): [List[2, 3]]>]",
vm_context.mock_arg_reprs)
def testListArgArityMismatch(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={
"iree.abi":
json.dumps({
"a": [["slist", "i32", "i32"],],
"r": ["i32",],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
with self.assertRaisesRegex(ValueError,
"expected a sequence with 2 values. got:"):
_ = invoker([2, 3, 4])
def testTupleArg(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={
"iree.abi":
json.dumps({
"a": [["stuple", "i32", "i32"],],
"r": ["i32",],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
_ = invoker((2, 3))
self.assertEqual("[<VmVariantList(1): [List[2, 3]]>]",
vm_context.mock_arg_reprs)
def testDictArg(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(
reflection={
"iree.abi":
json.dumps({
"a": [["sdict", ["a", "i32"], ["b", "i32"]],],
"r": ["i32",],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
_ = invoker({"b": 3, "a": 2})
self.assertEqual("[<VmVariantList(1): [List[2, 3]]>]",
vm_context.mock_arg_reprs)
def testDictArgArityMismatch(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(
reflection={
"iree.abi":
json.dumps({
"a": [["sdict", ["a", "i32"], ["b", "i32"]],],
"r": ["i32",],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
with self.assertRaisesRegex(ValueError,
"expected a dict with 2 values. got:"):
_ = invoker({"a": 2, "b": 3, "c": 4})
def testDictArgKeyError(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(
reflection={
"iree.abi":
json.dumps({
"a": [["sdict", ["a", "i32"], ["b", "i32"]],],
"r": ["i32",],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
with self.assertRaisesRegex(ValueError, "could not get item 'b' from: "):
_ = invoker({"a": 2, "c": 3})
def testDictArgNoReflection(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
_ = invoker({"b": 3, "a": 2})
self.assertEqual("[<VmVariantList(1): [List[2, 3]]>]",
vm_context.mock_arg_reprs)
def testInlinedResults(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
ret_list.push_int(4)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={
"iree.abi": json.dumps({
"a": [],
"r": [["slist", "i32", "i32"]],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
result = invoker()
self.assertEqual([3, 4], result)
def testNestedResults(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
sub_list = VmVariantList(2)
sub_dict = VmVariantList(2)
sub_dict.push_int(100)
sub_dict.push_int(200)
sub_list.push_list(sub_dict)
sub_list.push_int(6)
ret_list.push_list(sub_list)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(
reflection={
"iree.abi":
json.dumps({
"a": [],
"r": [
"i32",
[
"slist",
["sdict", ["bar", "i32"], ["foo", "i32"]],
"i64",
]
],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
result = invoker()
self.assertEqual((3, [{"bar": 100, "foo": 200}, 6]), result)
def testMissingPositional(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(
reflection={
"iree.abi":
json.dumps({
"a": [
"i32",
["named", "a", "i32"],
["named", "b", "i32"],
],
"r": ["i32",],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
with self.assertRaisesRegex(ValueError, "mismatched call arity:"):
result = invoker(a=1, b=1)
def testMissingPositionalNdarray(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(
reflection={
"iree.abi":
json.dumps({
"a": [
["ndarray", "i32", 1, 1],
["named", "a", ["ndarray", "i32", 1, 1]],
["named", "b", ["ndarray", "i32", 1, 1]],
],
"r": ["i32",],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
with self.assertRaisesRegex(ValueError, "mismatched call arity:"):
result = invoker(a=1, b=1)
def testMissingKeyword(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(
reflection={
"iree.abi":
json.dumps({
"a": [
"i32",
["named", "a", "i32"],
["named", "b", "i32"],
],
"r": ["i32",],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
with self.assertRaisesRegex(ValueError, "mismatched call arity:"):
result = invoker(-1, a=1)
def testMissingKeywordNdArray(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(
reflection={
"iree.abi":
json.dumps({
"a": [
["ndarray", "i32", 1, 1],
["named", "a", ["ndarray", "i32", 1, 1]],
["named", "b", ["ndarray", "i32", 1, 1]],
],
"r": ["i32",],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
with self.assertRaisesRegex(ValueError, "mismatched call arity:"):
result = invoker(-1, a=1)
def testExtraKeyword(self):
def invoke(arg_list, ret_list):
ret_list.push_int(3)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(
reflection={
"iree.abi":
json.dumps({
"a": [
"i32",
["named", "a", "i32"],
["named", "b", "i32"],
],
"r": ["i32",],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
with self.assertRaisesRegex(ValueError, "specified kwarg 'c' is unknown"):
result = invoker(-1, a=1, b=2, c=3)
def testNdarrayArg(self):
arg_array = np.asarray([1, 0], dtype=np.int32)
invoked_arg_list = None
def invoke(arg_list, ret_list):
nonlocal invoked_arg_list
invoked_arg_list = arg_list
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={
"iree.abi": json.dumps({
"a": [["ndarray", "i32", 1, 2]],
"r": [],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
result = invoker(arg_array)
self.assertEqual("<VmVariantList(1): [HalBufferView(2:0x20000011)]>",
repr(invoked_arg_list))
def testDeviceArrayArg(self):
# Note that since the device array is set up to disallow implicit host
# transfers, this also verifies that no accidental/automatic transfers
# are done as part of marshalling the array to the function.
arg_array = rt.asdevicearray(self.device,
np.asarray([1, 0], dtype=np.int32),
implicit_host_transfer=False)
invoked_arg_list = None
def invoke(arg_list, ret_list):
nonlocal invoked_arg_list
invoked_arg_list = arg_list
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={
"iree.abi": json.dumps({
"a": [["ndarray", "i32", 1, 2]],
"r": [],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
result = invoker(arg_array)
self.assertEqual("<VmVariantList(1): [HalBufferView(2:0x20000011)]>",
repr(invoked_arg_list))
def testBufferViewArg(self):
arg_buffer_view = self.device.allocator.allocate_buffer_copy(
memory_type=IMPLICIT_BUFFER_ARG_MEMORY_TYPE,
allowed_usage=IMPLICIT_BUFFER_ARG_USAGE,
buffer=np.asarray([1, 0], dtype=np.int32),
element_type=rt.HalElementType.SINT_32)
invoked_arg_list = None
def invoke(arg_list, ret_list):
nonlocal invoked_arg_list
invoked_arg_list = arg_list
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={
"iree.abi": json.dumps({
"a": [["ndarray", "i32", 1, 2]],
"r": [],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
_ = invoker(arg_buffer_view)
self.assertEqual("<VmVariantList(1): [HalBufferView(2:0x20000011)]>",
repr(invoked_arg_list))
def testNdarrayArgNoReflection(self):
arg_array = np.asarray([1, 0], dtype=np.int32)
invoked_arg_list = None
def invoke(arg_list, ret_list):
nonlocal invoked_arg_list
invoked_arg_list = arg_list
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
result = invoker(arg_array)
self.assertEqual("<VmVariantList(1): [HalBufferView(2:0x20000011)]>",
repr(invoked_arg_list))
def testDeviceArrayArgNoReflection(self):
# Note that since the device array is set up to disallow implicit host
# transfers, this also verifies that no accidental/automatic transfers
# are done as part of marshalling the array to the function.
arg_array = rt.asdevicearray(self.device,
np.asarray([1, 0], dtype=np.int32),
implicit_host_transfer=False)
invoked_arg_list = None
def invoke(arg_list, ret_list):
nonlocal invoked_arg_list
invoked_arg_list = arg_list
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
result = invoker(arg_array)
self.assertEqual("<VmVariantList(1): [HalBufferView(2:0x20000011)]>",
repr(invoked_arg_list))
def testBufferViewArgNoReflection(self):
arg_buffer_view = self.device.allocator.allocate_buffer_copy(
memory_type=IMPLICIT_BUFFER_ARG_MEMORY_TYPE,
allowed_usage=IMPLICIT_BUFFER_ARG_USAGE,
buffer=np.asarray([1, 0], dtype=np.int32),
element_type=rt.HalElementType.SINT_32)
invoked_arg_list = None
def invoke(arg_list, ret_list):
nonlocal invoked_arg_list
invoked_arg_list = arg_list
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
_ = invoker(arg_buffer_view)
self.assertEqual("<VmVariantList(1): [HalBufferView(2:0x20000011)]>",
repr(invoked_arg_list))
def testReturnBufferView(self):
result_array = np.asarray([1, 0], dtype=np.int32)
def invoke(arg_list, ret_list):
buffer_view = self.device.allocator.allocate_buffer_copy(
memory_type=IMPLICIT_BUFFER_ARG_MEMORY_TYPE,
allowed_usage=IMPLICIT_BUFFER_ARG_USAGE,
buffer=result_array,
element_type=rt.HalElementType.SINT_32)
ret_list.push_buffer_view(buffer_view)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={
"iree.abi": json.dumps({
"a": [],
"r": [["ndarray", "i32", 1, 2]],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
result = invoker()
np.testing.assert_array_equal([1, 0], result)
def testReturnBufferViewNoReflection(self):
result_array = np.asarray([1, 0], dtype=np.int32)
def invoke(arg_list, ret_list):
buffer_view = self.device.allocator.allocate_buffer_copy(
memory_type=IMPLICIT_BUFFER_ARG_MEMORY_TYPE,
allowed_usage=IMPLICIT_BUFFER_ARG_USAGE,
buffer=result_array,
element_type=rt.HalElementType.SINT_32)
ret_list.push_buffer_view(buffer_view)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
result = invoker()
np.testing.assert_array_equal([1, 0], result)
# TODO: Fill out all return types.
def testReturnTypeNdArrayBool(self):
result_array = np.asarray([1, 0], dtype=np.int8)
def invoke(arg_list, ret_list):
buffer_view = self.device.allocator.allocate_buffer_copy(
memory_type=IMPLICIT_BUFFER_ARG_MEMORY_TYPE,
allowed_usage=IMPLICIT_BUFFER_ARG_USAGE,
buffer=result_array,
element_type=rt.HalElementType.UINT_8)
ret_list.push_buffer_view(buffer_view)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={
"iree.abi": json.dumps({
"a": [],
"r": [["ndarray", "i1", 1, 2]],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
result = invoker()
# assertEqual on bool arrays is fraught for... reasons.
np.testing.assert_array_equal([True, False], result)
def testReturnTypeList(self):
vm_list = VmVariantList(2)
vm_list.push_int(1)
vm_list.push_int(2)
def invoke(arg_list, ret_list):
ret_list.push_list(vm_list)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={
"iree.abi":
json.dumps({
"a": [],
"r": [["py_homogeneous_list", "i64"]],
})
})
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
result = invoker()
self.assertEqual("[1, 2]", repr(result))
if __name__ == "__main__":
unittest.main()
|
py | b412fea5ffa5a86f53deff60c860f038f7d62527 | import json
from flask import url_for
from app.models import ApiKey, KEY_TYPE_NORMAL
from app.dao.api_key_dao import expire_api_key
from tests import create_authorization_header
from tests.app.db import create_api_key, create_service, create_user
def test_api_key_should_create_new_api_key_for_service(notify_api, sample_service):
with notify_api.test_request_context():
with notify_api.test_client() as client:
data = {
'name': 'some secret name',
'created_by': str(sample_service.created_by.id),
'key_type': KEY_TYPE_NORMAL
}
auth_header = create_authorization_header()
response = client.post(url_for('service.create_api_key', service_id=sample_service.id),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 201
assert 'data' in json.loads(response.get_data(as_text=True))
saved_api_key = ApiKey.query.filter_by(service_id=sample_service.id).first()
assert saved_api_key.service_id == sample_service.id
assert saved_api_key.name == 'some secret name'
def test_api_key_should_return_error_when_service_does_not_exist(notify_api, sample_service):
with notify_api.test_request_context():
with notify_api.test_client() as client:
import uuid
missing_service_id = uuid.uuid4()
auth_header = create_authorization_header()
response = client.post(url_for('service.create_api_key', service_id=missing_service_id),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 404
def test_create_api_key_without_key_type_rejects(notify_api, sample_service):
with notify_api.test_request_context(), notify_api.test_client() as client:
data = {
'name': 'some secret name',
'created_by': str(sample_service.created_by.id)
}
auth_header = create_authorization_header()
response = client.post(url_for('service.create_api_key', service_id=sample_service.id),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 400
json_resp = json.loads(response.get_data(as_text=True))
assert json_resp['result'] == 'error'
assert json_resp['message'] == {'key_type': ['Missing data for required field.']}
def test_revoke_should_expire_api_key_for_service(notify_api, sample_api_key):
with notify_api.test_request_context():
with notify_api.test_client() as client:
assert ApiKey.query.count() == 1
auth_header = create_authorization_header()
response = client.post(url_for('service.revoke_api_key',
service_id=sample_api_key.service_id,
api_key_id=sample_api_key.id),
headers=[auth_header])
assert response.status_code == 202
api_keys_for_service = ApiKey.query.get(sample_api_key.id)
assert api_keys_for_service.expiry_date is not None
def test_api_key_should_create_multiple_new_api_key_for_service(notify_api, sample_service):
with notify_api.test_request_context():
with notify_api.test_client() as client:
assert ApiKey.query.count() == 0
data = {
'name': 'some secret name',
'created_by': str(sample_service.created_by.id),
'key_type': KEY_TYPE_NORMAL
}
auth_header = create_authorization_header()
response = client.post(url_for('service.create_api_key', service_id=sample_service.id),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 201
assert ApiKey.query.count() == 1
data['name'] = 'another secret name'
auth_header = create_authorization_header()
response2 = client.post(url_for('service.create_api_key', service_id=sample_service.id),
data=json.dumps(data),
headers=[('Content-Type', 'application/json'), auth_header])
assert response2.status_code == 201
assert json.loads(response.get_data(as_text=True)) != json.loads(response2.get_data(as_text=True))
assert ApiKey.query.count() == 2
def test_get_api_keys_should_return_all_keys_for_service(notify_api, sample_api_key):
with notify_api.test_request_context():
with notify_api.test_client() as client:
another_user = create_user(email='[email protected]')
another_service = create_service(user=another_user, service_name='Another service')
# key for another service
create_api_key(another_service)
# this service already has one key, add two more, one expired
create_api_key(sample_api_key.service)
one_to_expire = create_api_key(sample_api_key.service)
expire_api_key(service_id=one_to_expire.service_id, api_key_id=one_to_expire.id)
assert ApiKey.query.count() == 4
auth_header = create_authorization_header()
response = client.get(url_for('service.get_api_keys',
service_id=sample_api_key.service_id),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 200
json_resp = json.loads(response.get_data(as_text=True))
assert len(json_resp['apiKeys']) == 3
def test_get_api_keys_should_return_one_key_for_service(notify_api, sample_api_key):
with notify_api.test_request_context():
with notify_api.test_client() as client:
auth_header = create_authorization_header()
response = client.get(url_for('service.get_api_keys',
service_id=sample_api_key.service_id,
key_id=sample_api_key.id),
headers=[('Content-Type', 'application/json'), auth_header])
assert response.status_code == 200
json_resp = json.loads(response.get_data(as_text=True))
assert len(json_resp['apiKeys']) == 1
|
py | b412feab92dd129969896bc20c71f37b51fdd50f | from django.db import models
from django.shortcuts import resolve_url as r
from djexperience.core.models import TimeStampedModel
from djexperience.utils.lists import STATUS_LIST, METHOD_PAID
from .managers import PublishedManager
class Author(models.Model):
name = models.CharField('nome', max_length=50)
class Meta:
ordering = ['name']
verbose_name = 'autor'
verbose_name_plural = 'autores'
def __str__(self):
return self.name
class Book(models.Model):
name = models.CharField('nome', max_length=50)
authors = models.ManyToManyField('Author', verbose_name='autores')
published = models.BooleanField('publicado', default=True)
# add our custom model manager
objects = PublishedManager()
class Meta:
ordering = ['name']
verbose_name = 'livro'
verbose_name_plural = 'livros'
def __str__(self):
return self.name
def get_absolute_url(self):
return r('bookstore:book_detail', pk=self.pk)
class People(models.Model):
first_name = models.CharField('nome', max_length=50)
last_name = models.CharField('sobrenome', max_length=50)
email = models.EmailField()
birthday = models.DateTimeField('nascimento', null=True, blank=True)
active = models.BooleanField('ativo', default=True)
class Meta:
ordering = ['first_name']
verbose_name = 'pessoa'
verbose_name_plural = 'pessoas'
def __str__(self):
return ' '.join(filter(None, [self.first_name, self.last_name]))
full_name = property(__str__)
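# The classes below use Django multi-table inheritance: each concrete subclass
# of People gets its own table linked back to People by an implicit
# one-to-one field.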
class PF(People):
rg = models.CharField('RG', max_length=50, blank=True)
cpf = models.CharField('CPF', max_length=50, blank=True)
class Meta:
ordering = ['first_name']
verbose_name = 'pessoa física'
verbose_name_plural = 'pessoas físicas'
def __str__(self):
return self.first_name
class PJ(People):
cnpj = models.CharField('CNPJ', max_length=50, blank=True)
ie = models.CharField('IE', max_length=50, blank=True)
class Meta:
ordering = ['first_name']
verbose_name = 'pessoa jurídica'
verbose_name_plural = 'pessoas jurídicas'
def __str__(self):
return self.first_name
class Customer(People):
pass
class Meta:
ordering = ['first_name']
verbose_name = 'cliente'
verbose_name_plural = 'clientes'
def get_absolute_url(self):
return r('bookstore:customer_detail', pk=self.pk)
class Provider(People):
books = models.ManyToManyField('Book', verbose_name='livros')
price = models.DecimalField('preço', max_digits=5, decimal_places=2)
class Meta:
ordering = ['first_name']
verbose_name = 'fornecedor'
verbose_name_plural = 'fornecedores'
class Ordered(TimeStampedModel):
customer = models.ForeignKey('Customer', verbose_name='cliente')
status_ordered = models.CharField(
'status', max_length=2, choices=STATUS_LIST)
class Meta:
ordering = ['-created']
verbose_name = 'pedido'
verbose_name_plural = 'pedidos'
class Sale(TimeStampedModel):
ordered = models.OneToOneField('Ordered', verbose_name='pedido')
paid = models.BooleanField('pago')
    date_paid = models.DateField('data do pagamento')
method_paid = models.CharField(
        'forma de pagamento', max_length=2, choices=METHOD_PAID)
deadline = models.CharField('prazo de entrega', max_length=50)
class Meta:
ordering = ['-created']
verbose_name = 'venda'
verbose_name_plural = 'vendas'
|
py | b412feef3b63a160fbf0a01cddb33df1ee39c34f | from rlkit.torch import pytorch_util as ptu
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
from torch.distributions import kl_divergence
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
try:
nn.init.xavier_uniform_(m.weight.data)
m.bias.data.fill_(0)
except AttributeError:
print("Skipping initialization of ", classname)
class GatedActivation(nn.Module):
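    """Gated activation unit used in the PixelCNN family: the input is split
    in half along the channel dimension and recombined as tanh(a) * sigmoid(b),
    so one half acts as a learned gate for the other."""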
def forward(self, x):
x, y = x.chunk(2, dim=1)
return torch.tanh(x) * torch.sigmoid(y)
class GatedMaskedConv2d(nn.Module):
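    """One gated, masked convolution block from the Gated PixelCNN: a vertical
    stack and a horizontal stack of convolutions, with the vertical features
    fed into the horizontal stack and both passed through the gated activation.
    When mask_type is 'A' (the first layer), forward() calls make_causal() so a
    pixel is never conditioned on itself."""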
def __init__(self, mask_type, dim, kernel, residual=True, n_classes=1):
super().__init__()
        assert kernel % 2 == 1, "Kernel size must be odd"
self.mask_type = mask_type
self.residual = residual
self.class_cond_embedding = nn.Linear(
1, 2 * dim
)
kernel_shp = (kernel // 2 + 1, kernel) # (ceil(n/2), n)
padding_shp = (kernel // 2, kernel // 2)
self.vert_stack = nn.Conv2d(
dim, dim * 2,
kernel_shp, 1, padding_shp
)
self.vert_to_horiz = nn.Conv2d(2 * dim, 2 * dim, 1)
kernel_shp = (1, kernel // 2 + 1)
padding_shp = (0, kernel // 2)
self.horiz_stack = nn.Conv2d(
dim, dim * 2,
kernel_shp, 1, padding_shp
)
self.horiz_resid = nn.Conv2d(dim, dim, 1)
self.gate = GatedActivation()
def make_causal(self):
self.vert_stack.weight.data[:, :, -1].zero_() # Mask final row
self.horiz_stack.weight.data[:, :, :, -1].zero_() # Mask final column
def forward(self, x_v, x_h, h):
if self.mask_type == 'A':
self.make_causal()
h = self.class_cond_embedding(h)
h_vert = self.vert_stack(x_v)
h_vert = h_vert[:, :, :x_v.size(-1), :]
out_v = self.gate(h_vert + h[:, :, None, None])
h_horiz = self.horiz_stack(x_h)
h_horiz = h_horiz[:, :, :, :x_h.size(-2)]
v2h = self.vert_to_horiz(h_vert)
out = self.gate(v2h + h_horiz + h[:, :, None, None])
if self.residual:
out_h = self.horiz_resid(out) + x_h
else:
out_h = self.horiz_resid(out)
return out_v, out_h
class GatedPixelCNN(nn.Module):
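    """Gated PixelCNN prior over a grid of discrete codes: the codes are
    embedded, passed through a stack of GatedMaskedConv2d layers (mask 'A' for
    the first layer, 'B' afterwards), and mapped to per-position logits over
    the input_dim possible values."""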
def __init__(self, input_dim=256, dim=64, n_layers=15, n_classes=1):
super().__init__()
self.dim = dim
# Create embedding layer to embed input
self.embedding = nn.Embedding(input_dim, dim)
# Building the PixelCNN layer by layer
self.layers = nn.ModuleList()
# Initial block with Mask-A convolution
# Rest with Mask-B convolutions
for i in range(n_layers):
mask_type = 'A' if i == 0 else 'B'
kernel = 7 if i == 0 else 3
residual = False if i == 0 else True
self.layers.append(
GatedMaskedConv2d(mask_type, dim, kernel, residual, n_classes)
)
# Add the output layer
self.output_conv = nn.Sequential(
nn.Conv2d(dim, 512, 1),
nn.ReLU(True),
nn.Conv2d(512, input_dim, 1)
)
self.apply(weights_init)
def forward(self, x, label):
param = next(self.parameters())
shp = x.size() + (-1, )
x = self.embedding(x.contiguous().view(-1)).view(shp) # (B, H, W, C)
        x = x.permute(0, 3, 1, 2)  # (B, C, H, W)
#label = torch.zeros(1, dtype=torch.long, device=param.device)
x_v, x_h = (x, x)
for i, layer in enumerate(self.layers):
x_v, x_h = layer(x_v, x_h, label)
return self.output_conv(x_h)
def generate(self, label, shape=(12, 12), batch_size=64):
param = next(self.parameters())
x = torch.zeros(
(batch_size, *shape),
dtype=torch.int64, device=param.device
)
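        # Sample autoregressively in raster order: for each position (i, j),
        # re-run the network on the partially filled grid and draw that pixel
        # from the softmax over its logits.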
for i in range(shape[0]):
for j in range(shape[1]):
logits = self.forward(x, label) #might need to convert 0 to long
probs = F.softmax(logits[:, :, i, j], -1)
x.data[:, i, j].copy_(
probs.multinomial(1).squeeze().data
)
        return x
|
py | b412ff3ce6b212efac5600702a8e27371716f274 | from os import path
from cryptography.fernet import Fernet
def get_decrypted_file(settings_key, settings_file, encrypted_extension='enc', initial_extension='json'):
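    """Return the current settings bytes for ``settings_file``.

    If only the encrypted ``<settings_file>.<encrypted_extension>`` exists, its
    decrypted contents are returned. If a plaintext
    ``<settings_file>.<initial_extension>`` also exists it takes precedence:
    the encrypted copy is rewritten whenever the plaintext differs, and the
    plaintext bytes are returned.
    """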
encryption = Fernet(settings_key)
encrypted_file_name = f'{settings_file}.{encrypted_extension}'
encrypted_data = None
if path.isfile(encrypted_file_name):
with open(encrypted_file_name, 'rb') as f:
encrypted_data = encryption.decrypt(f.read())
raw_file_name = f'{settings_file}.{initial_extension}'
initial_data = None
if path.isfile(raw_file_name):
with open(raw_file_name, 'rb') as f:
initial_data = f.read()
if not initial_data:
return encrypted_data
elif initial_data != encrypted_data:
encrypted_data = encryption.encrypt(initial_data)
with open(encrypted_file_name, 'wb') as f:
f.write(encrypted_data)
return initial_data
def random_key():
return Fernet.generate_key().decode("utf-8")
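# A minimal usage sketch (the environment variable and file name below are
# illustrative assumptions, not part of this module):
#
#   import json
#   import os
#
#   raw = get_decrypted_file(os.environ["SETTINGS_KEY"], "config")
#   settings = json.loads(raw) if raw else {}
#
# random_key() returns a fresh Fernet key as a str, which could be generated
# once and supplied through the environment.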
|
py | b412fff1dd782b40f0c7818bd9ca0de1fdc907b5 | #-------------------------------------------------------------------------------
# NodeCoupling
#-------------------------------------------------------------------------------
from PYB11Generator import *
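# Note: the classes in this file are PYB11Generator binding descriptions rather
# than runtime Python implementations; decorators such as @PYB11virtual,
# @PYB11const, @PYB11cppname and @PYB11template describe how the corresponding
# C++ methods are exposed when the pybind11 module is generated.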
@PYB11module("SpheralSPH")
class NodeCoupling:
"A functor base class encapsulating how we couple pairs of nodes."
def pyinit(self):
"Default constructor"
@PYB11virtual
@PYB11const
@PYB11cppname("operator()")
def __call__(self,
nodeListi = "const unsigned",
i = "const unsigned",
nodeListj = "const unsigned",
j = "const unsigned"):
"Functional method to override for coupling (nodeListi, i) <-> (nodeListj, j)"
return "double"
#-------------------------------------------------------------------------------
# DamagedNodeCoupling
#-------------------------------------------------------------------------------
@PYB11template("Dimension")
class DamagedNodeCoupling(NodeCoupling):
"A functor class encapsulating how we couple solid nodes in the presence of multiple materials and damage."
PYB11typedefs = """
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::SymTensor SymTensor;
"""
def pyinit(self,
damage = "const FieldList<%(Dimension)s, SymTensor>&",
damageGradient = "const FieldList<%(Dimension)s, Vector>&",
H = "const FieldList<%(Dimension)s, SymTensor>&"):
"Constructor"
@PYB11virtual
@PYB11const
@PYB11cppname("operator()")
def __call__(self,
nodeListi = "const unsigned",
i = "const unsigned",
nodeListj = "const unsigned",
j = "const unsigned"):
"Provides a damaged coupling between nodes (nodeListi, i) <-> (nodeListj, j)"
return "double"
#-------------------------------------------------------------------------------
# DamagedNodeCouplingWithFrags
#-------------------------------------------------------------------------------
@PYB11template("Dimension")
class DamagedNodeCouplingWithFrags(DamagedNodeCoupling):
"""A functor class encapsulating how we couple solid nodes in the presence of
multiple materials and damage. This version adds logic to decouple based
on fragment ID as well."""
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
"""
def pyinit(self,
damage = "const FieldList<%(Dimension)s, SymTensor>&",
damageGradient = "const FieldList<%(Dimension)s, Vector>&",
H = "const FieldList<%(Dimension)s, SymTensor>&",
fragIDs = "const FieldList<%(Dimension)s, int>&"):
"Constructor"
@PYB11virtual
@PYB11const
@PYB11cppname("operator()")
def __call__(self,
nodeListi = "const unsigned",
i = "const unsigned",
nodeListj = "const unsigned",
j = "const unsigned"):
"Provides a damaged coupling between nodes (nodeListi, i) <-> (nodeListj, j)"
return "double"
|
py | b413006fc8ed3590e233e900bfab114a6b7dd2aa | import os
gettext = lambda s: s
DATA_DIR = os.path.dirname(os.path.dirname(__file__))
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mbogc22qi7&dlb7%1c0qbfu2y=my6#r-@^2kiym(yn(oy3)rof'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(DATA_DIR, 'media')
STATIC_ROOT = os.path.join(DATA_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'mysite', 'static'),
)
SITE_ID = 1
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'mysite', 'templates'),],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.core.context_processors.debug',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.csrf',
'django.core.context_processors.tz',
'sekizai.context_processors.sekizai',
'django.core.context_processors.static',
'cms.context_processors.cms_settings'
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader'
],
},
},
]
MIDDLEWARE_CLASSES = (
'cms.middleware.utils.ApphookReloadMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware'
)
INSTALLED_APPS = (
'djangocms_admin_style',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'django.contrib.messages',
'cms',
'menus',
'sekizai',
'treebeard',
'djangocms_text_ckeditor',
'filer',
'easy_thumbnails',
'djangocms_column',
'djangocms_link',
'cmsplugin_filer_file',
'cmsplugin_filer_folder',
'cmsplugin_filer_image',
'cmsplugin_filer_utils',
'djangocms_style',
'djangocms_snippet',
'djangocms_googlemap',
'djangocms_video',
'mysite',
#REST
'rest_framework',
'rest_framework.authtoken',
'djoser',
#APP
'aldryn_background_image',
'aldryn_bootstrap3',
'parler',
'aldryn_search',
'aldryn_categories',
)
HAYSTACK_ROUTERS = ['aldryn_search.router.LanguageRouter',]
ALDRYN_SEARCH_REGISTER_APPHOOK = True
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
}
DJOSER = {
'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}',
}
LANGUAGES = (
## Customize this
('zh', gettext('zh')),
)
CMS_LANGUAGES = {
## Customize this
'default': {
'public': True,
'hide_untranslated': False,
'redirect_on_fallback': True,
},
1: [
{
'public': True,
'code': 'zh',
'hide_untranslated': False,
'name': gettext('zh'),
'redirect_on_fallback': True,
},
],
}
CMS_TEMPLATES = (
## Customize this
('fullwidth.html', 'Fullwidth'),
('sidebar_left.html', 'Sidebar Left'),
('sidebar_right.html', 'Sidebar Right')
)
CMS_PERMISSION = True
CMS_PLACEHOLDER_CONF = {}
DATABASES = {
'default': {
'CONN_MAX_AGE': 0,
'ENGINE': 'django.db.backends.sqlite3',
'HOST': 'localhost',
'NAME': 'project.db',
'PASSWORD': '',
'PORT': '',
'USER': ''
}
}
MIGRATION_MODULES = {
}
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters'
)
|
py | b413008e45832ed8e62c461d642fe5081157089b | """
Tests for the blaze interface to the pipeline api.
"""
from __future__ import division
from collections import OrderedDict
from datetime import timedelta, time
from itertools import product, chain
import warnings
import blaze as bz
from datashape import dshape, var, Record
from nose_parameterized import parameterized
import numpy as np
from numpy.testing.utils import assert_array_almost_equal
from odo import odo
import pandas as pd
from pandas.util.testing import assert_frame_equal
from toolz import keymap, valmap, concatv
from toolz.curried import operator as op
from catalyst.assets.synthetic import make_simple_equity_info
from catalyst.errors import UnsupportedPipelineOutput
from catalyst.pipeline import Pipeline, CustomFactor
from catalyst.pipeline.data import DataSet, BoundColumn, Column
from catalyst.pipeline.engine import SimplePipelineEngine
from catalyst.pipeline.loaders.blaze import (
from_blaze,
BlazeLoader,
NoMetaDataWarning,
)
from catalyst.pipeline.loaders.blaze.core import (
ExprData,
NonPipelineField,
)
from catalyst.testing import (
CatalystTestCase,
parameter_space,
tmp_asset_finder,
)
from catalyst.testing.fixtures import WithAssetFinder
from catalyst.testing.predicates import assert_equal, assert_isidentical
from catalyst.utils.numpy_utils import float64_dtype, int64_dtype
nameof = op.attrgetter('name')
dtypeof = op.attrgetter('dtype')
asset_infos = (
(make_simple_equity_info(
tuple(map(ord, 'ABC')),
pd.Timestamp(0),
pd.Timestamp('2015'),
),),
(make_simple_equity_info(
tuple(map(ord, 'ABCD')),
pd.Timestamp(0),
pd.Timestamp('2015'),
),),
)
simple_asset_info = asset_infos[0][0]
with_extra_sid = parameterized.expand(asset_infos)
with_ignore_sid = parameterized.expand(
product(chain.from_iterable(asset_infos), [True, False])
)
def _utc_localize_index_level_0(df):
"""``tz_localize`` the first level of a multiindexed dataframe to utc.
Mutates df in place.
"""
idx = df.index
df.index = pd.MultiIndex.from_product(
(idx.levels[0].tz_localize('utc'), idx.levels[1]),
names=idx.names,
)
return df
class BlazeToPipelineTestCase(WithAssetFinder, CatalystTestCase):
START_DATE = pd.Timestamp(0)
END_DATE = pd.Timestamp('2015')
@classmethod
def init_class_fixtures(cls):
super(BlazeToPipelineTestCase, cls).init_class_fixtures()
cls.dates = dates = pd.date_range('2014-01-01', '2014-01-03')
dates = cls.dates.repeat(3)
cls.df = df = pd.DataFrame({
'sid': cls.ASSET_FINDER_EQUITY_SIDS * 3,
'value': (0., 1., 2., 1., 2., 3., 2., 3., 4.),
'int_value': (0, 1, 2, 1, 2, 3, 2, 3, 4),
'asof_date': dates,
'timestamp': dates,
})
cls.dshape = dshape("""
var * {
sid: ?int64,
value: ?float64,
int_value: ?int64,
asof_date: datetime,
timestamp: datetime
}
""")
cls.macro_df = df[df.sid == 65].drop('sid', axis=1)
dshape_ = OrderedDict(cls.dshape.measure.fields)
del dshape_['sid']
cls.macro_dshape = var * Record(dshape_)
cls.garbage_loader = BlazeLoader()
cls.missing_values = {'int_value': 0}
cls.value_dshape = dshape("""var * {
sid: ?int64,
value: float64,
asof_date: datetime,
timestamp: datetime,
}""")
def test_tabular(self):
name = 'expr'
expr = bz.data(self.df, name=name, dshape=self.dshape)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
self.assertEqual(ds.__name__, name)
self.assertTrue(issubclass(ds, DataSet))
self.assertIs(ds.value.dtype, float64_dtype)
self.assertIs(ds.int_value.dtype, int64_dtype)
self.assertTrue(np.isnan(ds.value.missing_value))
self.assertEqual(ds.int_value.missing_value, 0)
# test memoization
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
),
ds,
)
def test_column(self):
exprname = 'expr'
expr = bz.data(self.df, name=exprname, dshape=self.dshape)
value = from_blaze(
expr.value,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
self.assertEqual(value.name, 'value')
self.assertIsInstance(value, BoundColumn)
self.assertIs(value.dtype, float64_dtype)
# test memoization
self.assertIs(
from_blaze(
expr.value,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
),
value,
)
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
).value,
value,
)
# test the walk back up the tree
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
),
value.dataset,
)
self.assertEqual(value.dataset.__name__, exprname)
def test_missing_asof(self):
expr = bz.data(
self.df.loc[:, ['sid', 'value', 'timestamp']],
name='expr',
dshape="""var * {
sid: int64,
value: float64,
timestamp: datetime,
}""",
)
with self.assertRaises(TypeError) as e:
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
)
self.assertIn("'asof_date'", str(e.exception))
self.assertIn(repr(str(expr.dshape.measure)), str(e.exception))
def test_missing_timestamp(self):
expr = bz.data(
self.df.loc[:, ['sid', 'value', 'asof_date']],
name='expr',
dshape="""var * {
sid: int64,
value: float64,
asof_date: datetime,
}""",
)
loader = BlazeLoader()
from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
)
self.assertEqual(len(loader), 1)
exprdata, = loader.values()
assert_isidentical(
exprdata.expr,
bz.transform(expr, timestamp=expr.asof_date),
)
def test_from_blaze_no_resources_dataset_expr(self):
expr = bz.symbol('expr', self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
assert_equal(
str(e.exception),
'no resources provided to compute expr',
)
@parameter_space(metadata={'deltas', 'checkpoints'})
def test_from_blaze_no_resources_metadata_expr(self, metadata):
expr = bz.data(self.df, name='expr', dshape=self.dshape)
metadata_expr = bz.symbol('metadata', self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
**{metadata: metadata_expr}
)
assert_equal(
str(e.exception),
'no resources provided to compute %s' % metadata,
)
def test_from_blaze_mixed_resources_dataset_expr(self):
expr = bz.data(self.df, name='expr', dshape=self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
resources={expr: self.df},
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
assert_equal(
str(e.exception),
'explicit and implicit resources provided to compute expr',
)
@parameter_space(metadata={'deltas', 'checkpoints'})
def test_from_blaze_mixed_resources_metadata_expr(self, metadata):
expr = bz.symbol('expr', self.dshape)
metadata_expr = bz.data(self.df, name=metadata, dshape=self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
resources={metadata_expr: self.df},
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
**{metadata: metadata_expr}
)
assert_equal(
str(e.exception),
'explicit and implicit resources provided to compute %s' %
metadata,
)
@parameter_space(deltas={True, False}, checkpoints={True, False})
def test_auto_metadata(self, deltas, checkpoints):
select_level = op.getitem(('ignore', 'raise'))
m = {'ds': self.df}
if deltas:
m['ds_deltas'] = pd.DataFrame(columns=self.df.columns),
if checkpoints:
m['ds_checkpoints'] = pd.DataFrame(columns=self.df.columns),
expr = bz.data(
m,
dshape=var * Record((k, self.dshape.measure) for k in m),
)
loader = BlazeLoader()
ds = from_blaze(
expr.ds,
loader=loader,
missing_values=self.missing_values,
no_deltas_rule=select_level(deltas),
no_checkpoints_rule=select_level(checkpoints),
)
self.assertEqual(len(loader), 1)
exprdata = loader[ds]
self.assertTrue(exprdata.expr.isidentical(expr.ds))
if deltas:
self.assertTrue(exprdata.deltas.isidentical(expr.ds_deltas))
else:
self.assertIsNone(exprdata.deltas)
if checkpoints:
self.assertTrue(
exprdata.checkpoints.isidentical(expr.ds_checkpoints),
)
else:
self.assertIsNone(exprdata.checkpoints)
@parameter_space(deltas={True, False}, checkpoints={True, False})
def test_auto_metadata_fail_warn(self, deltas, checkpoints):
select_level = op.getitem(('ignore', 'warn'))
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
loader = BlazeLoader()
expr = bz.data(self.df, dshape=self.dshape)
from_blaze(
expr,
loader=loader,
no_deltas_rule=select_level(deltas),
no_checkpoints_rule=select_level(checkpoints),
missing_values=self.missing_values,
)
self.assertEqual(len(ws), deltas + checkpoints)
for w in ws:
w = w.message
self.assertIsInstance(w, NoMetaDataWarning)
self.assertIn(str(expr), str(w))
@parameter_space(deltas={True, False}, checkpoints={True, False})
def test_auto_metadata_fail_raise(self, deltas, checkpoints):
if not (deltas or checkpoints):
# not a real case
return
select_level = op.getitem(('ignore', 'raise'))
loader = BlazeLoader()
expr = bz.data(self.df, dshape=self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
loader=loader,
no_deltas_rule=select_level(deltas),
no_checkpoints_rule=select_level(checkpoints),
)
self.assertIn(str(expr), str(e.exception))
def test_non_pipeline_field(self):
expr = bz.data(
[],
dshape="""
var * {
a: complex,
asof_date: datetime,
timestamp: datetime,
}""",
)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
)
with self.assertRaises(AttributeError):
ds.a
self.assertIsInstance(
object.__getattribute__(ds, 'a'),
NonPipelineField,
)
def _test_cols_with_all_missing_vals(self):
"""
Tests that when there is no known data, we get output where the
columns have the right dtypes and the right missing values filled in.
input (self.df):
Empty DataFrame
Columns: [sid, float_value, str_value, int_value, bool_value, dt_value,
asof_date, timestamp]
Index: []
output (expected)
str_value float_value int_value
2014-01-01 Equity(65 [A]) None NaN 0
Equity(66 [B]) None NaN 0
Equity(67 [C]) None NaN 0
2014-01-02 Equity(65 [A]) None NaN 0
Equity(66 [B]) None NaN 0
Equity(67 [C]) None NaN 0
2014-01-03 Equity(65 [A]) None NaN 0
Equity(66 [B]) None NaN 0
Equity(67 [C]) None NaN 0
dt_value bool_value
2014-01-01 Equity(65 [A]) NaT False
Equity(66 [B]) NaT False
Equity(67 [C]) NaT False
2014-01-02 Equity(65 [A]) NaT False
Equity(66 [B]) NaT False
Equity(67 [C]) NaT False
2014-01-03 Equity(65 [A]) NaT False
Equity(66 [B]) NaT False
Equity(67 [C]) NaT False
"""
df = pd.DataFrame(columns=['sid', 'float_value', 'str_value',
'int_value', 'bool_value', 'dt_value',
'asof_date', 'timestamp'])
expr = bz.data(
df,
dshape="""
var * {
sid: int64,
float_value: float64,
str_value: string,
int_value: int64,
bool_value: bool,
dt_value: datetime,
asof_date: datetime,
timestamp: datetime,
}""",
)
fields = OrderedDict(expr.dshape.measure.fields)
expected = pd.DataFrame({
"str_value": np.array([None,
None,
None,
None,
None,
None,
None,
None,
None],
dtype='object'),
"float_value": np.array([np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN],
dtype='float64'),
"int_value": np.array([0,
0,
0,
0,
0,
0,
0,
0,
0],
dtype='int64'),
"bool_value": np.array([False,
False,
False,
False,
False,
False,
False,
False,
False],
dtype='bool'),
"dt_value": [pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT],
},
columns=['str_value', 'float_value', 'int_value', 'bool_value',
'dt_value'],
index=pd.MultiIndex.from_product(
(self.dates, self.asset_finder.retrieve_all(
self.ASSET_FINDER_EQUITY_SIDS
))
)
)
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
('float_value', 'str_value', 'int_value', 'bool_value',
'dt_value'),
)
def _test_cols_with_some_missing_vals(self):
"""
Tests the following:
1) Forward filling replaces missing values correctly for the data
types supported in pipeline.
2) We don't forward fill when the missing value is the actual value
we got for a date in the case of int/bool columns.
3) We get the correct type of missing value in the output.
input (self.df):
asof_date bool_value dt_value float_value int_value sid
0 2014-01-01 True 2011-01-01 0 1 65
1 2014-01-03 True 2011-01-02 1 2 66
2 2014-01-01 True 2011-01-03 2 3 67
3 2014-01-02 False NaT NaN 0 67
str_value timestamp
0 a 2014-01-01
1 b 2014-01-03
2 c 2014-01-01
3 None 2014-01-02
output (expected)
str_value float_value int_value bool_value
2014-01-01 Equity(65 [A]) a 0 1 True
Equity(66 [B]) None NaN 0 False
Equity(67 [C]) c 2 3 True
2014-01-02 Equity(65 [A]) a 0 1 True
Equity(66 [B]) None NaN 0 False
Equity(67 [C]) c 2 0 False
2014-01-03 Equity(65 [A]) a 0 1 True
Equity(66 [B]) b 1 2 True
Equity(67 [C]) c 2 0 False
dt_value
2014-01-01 Equity(65 [A]) 2011-01-01
Equity(66 [B]) NaT
Equity(67 [C]) 2011-01-03
2014-01-02 Equity(65 [A]) 2011-01-01
Equity(66 [B]) NaT
Equity(67 [C]) 2011-01-03
2014-01-03 Equity(65 [A]) 2011-01-01
Equity(66 [B]) 2011-01-02
Equity(67 [C]) 2011-01-03
"""
dates = (self.dates[0], self.dates[-1], self.dates[0], self.dates[1])
df = pd.DataFrame({
'sid': self.ASSET_FINDER_EQUITY_SIDS[:-1] +
(self.ASSET_FINDER_EQUITY_SIDS[-1],)*2,
'float_value': (0., 1., 2., np.NaN),
'str_value': ("a", "b", "c", None),
'int_value': (1, 2, 3, 0),
'bool_value': (True, True, True, False),
'dt_value': (pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.NaT),
'asof_date': dates,
'timestamp': dates,
})
expr = bz.data(
df,
dshape="""
var * {
sid: int64,
float_value: float64,
str_value: string,
int_value: int64,
bool_value: bool,
dt_value: datetime,
asof_date: datetime,
timestamp: datetime,
}""",
)
fields = OrderedDict(expr.dshape.measure.fields)
expected = pd.DataFrame({
"str_value": np.array(["a",
None,
"c",
"a",
None,
"c",
"a",
"b",
"c"],
dtype='object'),
"float_value": np.array([0,
np.NaN,
2,
0,
np.NaN,
2,
0,
1,
2],
dtype='float64'),
"int_value": np.array([1,
0,
3,
1,
0,
0,
1,
2,
0],
dtype='int64'),
"bool_value": np.array([True,
False,
True,
True,
False,
False,
True,
True,
False],
dtype='bool'),
"dt_value": [pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
},
columns=['str_value', 'float_value', 'int_value', 'bool_value',
'dt_value'],
index=pd.MultiIndex.from_product(
(self.dates, self.asset_finder.retrieve_all(
self.ASSET_FINDER_EQUITY_SIDS
))
)
)
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
('float_value', 'str_value', 'int_value', 'bool_value',
'dt_value'),
)
def test_complex_expr(self):
expr = bz.data(self.df, dshape=self.dshape, name='expr')
# put an Add in the table
expr_with_add = bz.transform(expr, value=expr.value + 1)
# test that we can have complex expressions with no metadata
from_blaze(
expr_with_add,
deltas=None,
checkpoints=None,
loader=self.garbage_loader,
missing_values=self.missing_values,
no_checkpoints_rule='ignore',
)
with self.assertRaises(TypeError) as e:
# test that we cannot create a single column from a non field
from_blaze(
expr.value + 1, # put an Add in the column
deltas=None,
checkpoints=None,
loader=self.garbage_loader,
missing_values=self.missing_values,
no_checkpoints_rule='ignore',
)
assert_equal(
str(e.exception),
"expression 'expr.value + 1' was array-like but not a simple field"
" of some larger table",
)
deltas = bz.data(
pd.DataFrame(columns=self.df.columns),
dshape=self.dshape,
name='deltas',
)
checkpoints = bz.data(
pd.DataFrame(columns=self.df.columns),
dshape=self.dshape,
name='checkpoints',
)
# test that we can have complex expressions with explicit metadata
from_blaze(
expr_with_add,
deltas=deltas,
checkpoints=checkpoints,
loader=self.garbage_loader,
missing_values=self.missing_values,
)
with self.assertRaises(TypeError) as e:
# test that we cannot create a single column from a non field
# even with explicit metadata
from_blaze(
expr.value + 1,
deltas=deltas,
checkpoints=checkpoints,
loader=self.garbage_loader,
missing_values=self.missing_values,
)
assert_equal(
str(e.exception),
"expression 'expr.value + 1' was array-like but not a simple field"
" of some larger table",
)
def _test_id(self, df, dshape, expected, finder, add):
expr = bz.data(df, name='expr', dshape=dshape)
loader = BlazeLoader()
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
p = Pipeline()
for a in add:
p.add(getattr(ds, a).latest, a)
dates = self.dates
result = SimplePipelineEngine(
loader,
dates,
finder,
).run_pipeline(p, dates[0], dates[-1])
assert_frame_equal(
result.sort_index(axis=1),
_utc_localize_index_level_0(expected.sort_index(axis=1)),
check_dtype=False,
)
def _test_id_macro(self, df, dshape, expected, finder, add, dates=None):
if dates is None:
dates = self.dates
expr = bz.data(df, name='expr', dshape=dshape)
loader = BlazeLoader()
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
p = Pipeline()
macro_inputs = []
for column_name in add:
column = getattr(ds, column_name)
macro_inputs.append(column)
with self.assertRaises(UnsupportedPipelineOutput):
# Single column output terms cannot be added to a pipeline.
p.add(column.latest, column_name)
class UsesMacroInputs(CustomFactor):
inputs = macro_inputs
window_length = 1
def compute(self, today, assets, out, *inputs):
e = expected.loc[today]
for i, input_ in enumerate(inputs):
# Each macro input should only have one column.
assert input_.shape == (self.window_length, 1)
assert_equal(input_[0, 0], e[i])
# Run the pipeline with our custom factor. Assertions about the
# expected macro data are made in the `compute` function of our custom
# factor above.
p.add(UsesMacroInputs(), 'uses_macro_inputs')
engine = SimplePipelineEngine(loader, dates, finder)
engine.run_pipeline(p, dates[0], dates[-1])
def _test_custom_query_time_tz(self):
df = self.df.copy()
df['timestamp'] = (
pd.DatetimeIndex(df['timestamp'], tz='EST') +
timedelta(hours=8, minutes=44)
).tz_convert('utc').tz_localize(None)
df.ix[3:5, 'timestamp'] = pd.Timestamp('2014-01-01 13:45')
expr = bz.data(df, name='expr', dshape=self.dshape)
loader = BlazeLoader(data_query_time=time(8, 45), data_query_tz='EST')
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
p = Pipeline()
p.add(ds.value.latest, 'value')
p.add(ds.int_value.latest, 'int_value')
dates = self.dates
result = SimplePipelineEngine(
loader,
dates,
self.asset_finder,
).run_pipeline(p, dates[0], dates[-1])
expected = df.drop('asof_date', axis=1)
expected['timestamp'] = expected['timestamp'].dt.normalize().astype(
'datetime64[ns]',
).dt.tz_localize('utc')
expected.ix[3:5, 'timestamp'] += timedelta(days=1)
expected.set_index(['timestamp', 'sid'], inplace=True)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
self.asset_finder.retrieve_all(expected.index.levels[1]),
))
assert_frame_equal(result, expected, check_dtype=False)
def _test_id(self): # noqa F811
"""
input (self.df):
asof_date sid timestamp int_value value
0 2014-01-01 65 2014-01-01 0 0
1 2014-01-01 66 2014-01-01 1 1
2 2014-01-01 67 2014-01-01 2 2
3 2014-01-02 65 2014-01-02 1 1
4 2014-01-02 66 2014-01-02 2 2
5 2014-01-02 67 2014-01-02 3 3
6 2014-01-03 65 2014-01-03 2 2
7 2014-01-03 66 2014-01-03 3 3
8 2014-01-03 67 2014-01-03 4 4
output (expected)
int_value value
2014-01-01 Equity(65 [A]) 0 0
Equity(66 [B]) 1 1
Equity(67 [C]) 2 2
2014-01-02 Equity(65 [A]) 1 1
Equity(66 [B]) 2 2
Equity(67 [C]) 3 3
2014-01-03 Equity(65 [A]) 2 2
Equity(66 [B]) 3 3
Equity(67 [C]) 4 4
"""
expected = self.df.drop('asof_date', axis=1).set_index(
['timestamp', 'sid'],
)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
self.asset_finder.retrieve_all(expected.index.levels[1]),
))
self._test_id(
self.df, self.dshape, expected, self.asset_finder,
('int_value', 'value',)
)
def _test_id_with_asof_date(self):
"""
input (self.df):
asof_date sid timestamp int_value value
0 2014-01-01 65 2014-01-01 0 0
1 2014-01-01 66 2014-01-01 1 1
2 2014-01-01 67 2014-01-01 2 2
3 2014-01-02 65 2014-01-02 1 1
4 2014-01-02 66 2014-01-02 2 2
5 2014-01-02 67 2014-01-02 3 3
6 2014-01-03 65 2014-01-03 2 2
7 2014-01-03 66 2014-01-03 3 3
8 2014-01-03 67 2014-01-03 4 4
output (expected)
asof_date
2014-01-01 Equity(65 [A]) 2014-01-01
Equity(66 [B]) 2014-01-01
Equity(67 [C]) 2014-01-01
2014-01-02 Equity(65 [A]) 2014-01-02
Equity(66 [B]) 2014-01-02
Equity(67 [C]) 2014-01-02
2014-01-03 Equity(65 [A]) 2014-01-03
Equity(66 [B]) 2014-01-03
Equity(67 [C]) 2014-01-03
"""
expected = self.df.drop(['value', 'int_value'], axis=1).set_index(
['timestamp', 'sid'],
)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
self.asset_finder.retrieve_all(expected.index.levels[1]),
))
self._test_id(
self.df, self.dshape, expected, self.asset_finder,
('asof_date',)
)
def _test_id_ffill_out_of_window(self):
"""
input (df):
asof_date timestamp sid other value
0 2013-12-22 2013-12-22 65 0 0
1 2013-12-22 2013-12-22 66 NaN 1
2 2013-12-22 2013-12-22 67 2 NaN
3 2013-12-23 2013-12-23 65 NaN 1
4 2013-12-23 2013-12-23 66 2 NaN
5 2013-12-23 2013-12-23 67 3 3
6 2013-12-24 2013-12-24 65 2 NaN
7 2013-12-24 2013-12-24 66 3 3
8 2013-12-24 2013-12-24 67 NaN 4
output (expected):
other value
2014-01-01 Equity(65 [A]) 2 1
Equity(66 [B]) 3 3
Equity(67 [C]) 3 4
2014-01-02 Equity(65 [A]) 2 1
Equity(66 [B]) 3 3
Equity(67 [C]) 3 4
2014-01-03 Equity(65 [A]) 2 1
Equity(66 [B]) 3 3
Equity(67 [C]) 3 4
"""
dates = self.dates.repeat(3) - timedelta(days=10)
df = pd.DataFrame({
'sid': self.ASSET_FINDER_EQUITY_SIDS * 3,
'value': (0, 1, np.nan, 1, np.nan, 3, np.nan, 3, 4),
'other': (0, np.nan, 2, np.nan, 2, 3, 2, 3, np.nan),
'asof_date': dates,
'timestamp': dates,
})
fields = OrderedDict(self.dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
np.array([[2, 1],
[3, 3],
[3, 4],
[2, 1],
[3, 3],
[3, 4],
[2, 1],
[3, 3],
[3, 4]]),
columns=['other', 'value'],
index=pd.MultiIndex.from_product(
(self.dates, self.asset_finder.retrieve_all(
self.ASSET_FINDER_EQUITY_SIDS
)),
),
)
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
('value', 'other'),
)
def _test_id_multiple_columns(self):
"""
input (df):
asof_date sid timestamp value other
0 2014-01-01 65 2014-01-01 0 1
1 2014-01-01 66 2014-01-01 1 2
2 2014-01-01 67 2014-01-01 2 3
3 2014-01-02 65 2014-01-02 1 2
4 2014-01-02 66 2014-01-02 2 3
5 2014-01-02 67 2014-01-02 3 4
6 2014-01-03 65 2014-01-03 2 3
7 2014-01-03 66 2014-01-03 3 4
8 2014-01-03 67 2014-01-03 4 5
output (expected):
value other
2014-01-01 Equity(65 [A]) 0 1
Equity(66 [B]) 1 2
Equity(67 [C]) 2 3
2014-01-02 Equity(65 [A]) 1 2
Equity(66 [B]) 2 3
Equity(67 [C]) 3 4
2014-01-03 Equity(65 [A]) 2 3
Equity(66 [B]) 3 4
Equity(67 [C]) 4 5
"""
df = self.df.copy()
df['other'] = df.value + 1
fields = OrderedDict(self.dshape.measure.fields)
fields['other'] = fields['value']
expected = df.drop('asof_date', axis=1).set_index(
['timestamp', 'sid'],
).sort_index(axis=1)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
self.asset_finder.retrieve_all(expected.index.levels[1]),
))
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
('value', 'int_value', 'other'),
)
def _test_id_macro_dataset(self):
"""
input (self.macro_df)
asof_date timestamp value
0 2014-01-01 2014-01-01 0
3 2014-01-02 2014-01-02 1
6 2014-01-03 2014-01-03 2
output (expected):
value
2014-01-01 0
2014-01-02 1
2014-01-03 2
"""
expected = pd.DataFrame(
data=[[0],
[1],
[2]],
columns=['value'],
index=self.dates,
)
self._test_id_macro(
self.macro_df,
self.macro_dshape,
expected,
self.asset_finder,
('value',),
)
def _test_id_ffill_out_of_window_macro_dataset(self):
"""
input (df):
asof_date timestamp other value
0 2013-12-22 2013-12-22 NaN 0
1 2013-12-23 2013-12-23 1 NaN
2 2013-12-24 2013-12-24 NaN NaN
output (expected):
other value
2014-01-01 1 0
2014-01-02 1 0
2014-01-03 1 0
"""
dates = self.dates - timedelta(days=10)
df = pd.DataFrame({
'value': (0, np.nan, np.nan),
'other': (np.nan, 1, np.nan),
'asof_date': dates,
'timestamp': dates,
})
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
data=[[0, 1],
[0, 1],
[0, 1]],
columns=['other', 'value'],
index=self.dates,
)
self._test_id_macro(
df,
var * Record(fields),
expected,
self.asset_finder,
('value', 'other'),
)
def _test_id_macro_dataset_multiple_columns(self):
"""
input (df):
asof_date timestamp other value
0 2014-01-01 2014-01-01 1 0
3 2014-01-02 2014-01-02 2 1
6 2014-01-03 2014-01-03 3 2
output (expected):
other value
2014-01-01 1 0
2014-01-02 2 1
2014-01-03 3 2
"""
df = self.macro_df.copy()
df['other'] = df.value + 1
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected = pd.DataFrame(
data=[[0, 1],
[1, 2],
[2, 3]],
columns=['value', 'other'],
index=self.dates,
dtype=np.float64,
)
self._test_id_macro(
df,
var * Record(fields),
expected,
finder,
('value', 'other'),
)
def _test_id_take_last_in_group(self):
T = pd.Timestamp
df = pd.DataFrame(
columns=['asof_date', 'timestamp', 'sid', 'other', 'value'],
data=[
[T('2014-01-01'), T('2014-01-01 00'), 65, 0, 0],
[T('2014-01-01'), T('2014-01-01 01'), 65, 1, np.nan],
[T('2014-01-01'), T('2014-01-01 00'), 66, np.nan, np.nan],
[T('2014-01-01'), T('2014-01-01 01'), 66, np.nan, 1],
[T('2014-01-01'), T('2014-01-01 00'), 67, 2, np.nan],
[T('2014-01-01'), T('2014-01-01 01'), 67, np.nan, np.nan],
[T('2014-01-02'), T('2014-01-02 00'), 65, np.nan, np.nan],
[T('2014-01-02'), T('2014-01-02 01'), 65, np.nan, 1],
[T('2014-01-02'), T('2014-01-02 00'), 66, np.nan, np.nan],
[T('2014-01-02'), T('2014-01-02 01'), 66, 2, np.nan],
[T('2014-01-02'), T('2014-01-02 00'), 67, 3, 3],
[T('2014-01-02'), T('2014-01-02 01'), 67, 3, 3],
[T('2014-01-03'), T('2014-01-03 00'), 65, 2, np.nan],
[T('2014-01-03'), T('2014-01-03 01'), 65, 2, np.nan],
[T('2014-01-03'), T('2014-01-03 00'), 66, 3, 3],
[T('2014-01-03'), T('2014-01-03 01'), 66, np.nan, np.nan],
[T('2014-01-03'), T('2014-01-03 00'), 67, np.nan, np.nan],
[T('2014-01-03'), T('2014-01-03 01'), 67, np.nan, 4],
],
)
fields = OrderedDict(self.dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
columns=['other', 'value'],
data=[
[1, 0], # 2014-01-01 Equity(65 [A])
[np.nan, 1], # Equity(66 [B])
[2, np.nan], # Equity(67 [C])
[1, 1], # 2014-01-02 Equity(65 [A])
[2, 1], # Equity(66 [B])
[3, 3], # Equity(67 [C])
[2, 1], # 2014-01-03 Equity(65 [A])
[3, 3], # Equity(66 [B])
[3, 3], # Equity(67 [C])
],
index=pd.MultiIndex.from_product(
(self.dates, self.asset_finder.retrieve_all(
self.ASSET_FINDER_EQUITY_SIDS
)),
),
)
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
('value', 'other'),
)
def _test_id_take_last_in_group_macro(self):
"""
output (expected):
other value
2014-01-01 NaN 1
2014-01-02 1 2
2014-01-03 2 2
"""
T = pd.Timestamp
df = pd.DataFrame(
columns=['asof_date', 'timestamp', 'other', 'value'],
data=[
[T('2014-01-01'), T('2014-01-01 00'), np.nan, 1],
[T('2014-01-01'), T('2014-01-01 01'), np.nan, np.nan],
[T('2014-01-02'), T('2014-01-02 00'), 1, np.nan],
[T('2014-01-02'), T('2014-01-02 01'), np.nan, 2],
[T('2014-01-03'), T('2014-01-03 00'), 2, np.nan],
[T('2014-01-03'), T('2014-01-03 01'), 3, 3],
],
)
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
data=[[np.nan, 1], # 2014-01-01
[1, 2], # 2014-01-02
[2, 2]], # 2014-01-03
columns=['other', 'value'],
index=self.dates,
)
self._test_id_macro(
df,
var * Record(fields),
expected,
self.asset_finder,
('other', 'value'),
)
def _run_pipeline(self,
expr,
deltas,
checkpoints,
expected_views,
expected_output,
finder,
calendar,
start,
end,
window_length,
compute_fn,
apply_deltas_adjustments=True):
loader = BlazeLoader()
ds = from_blaze(
expr,
deltas,
checkpoints,
apply_deltas_adjustments=apply_deltas_adjustments,
loader=loader,
no_deltas_rule='raise',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
p = Pipeline()
# prevent unbound locals issue in the inner class
window_length_ = window_length
class TestFactor(CustomFactor):
inputs = ds.value,
window_length = window_length_
def compute(self, today, assets, out, data):
assert_array_almost_equal(
data,
expected_views[today],
err_msg=str(today),
)
out[:] = compute_fn(data)
p.add(TestFactor(), 'value')
result = SimplePipelineEngine(
loader,
calendar,
finder,
).run_pipeline(p, start, end)
assert_frame_equal(
result,
_utc_localize_index_level_0(expected_output),
check_dtype=False,
)
@with_ignore_sid
def _test_deltas(self, asset_info, add_extra_sid):
df = self.df.copy()
if add_extra_sid:
extra_sid_df = pd.DataFrame({
'asof_date': self.dates,
'timestamp': self.dates,
'sid': (ord('E'),) * 3,
'value': (3., 4., 5.,),
'int_value': (3, 4, 5),
})
df = df.append(extra_sid_df, ignore_index=True)
expr = bz.data(df, name='expr', dshape=self.dshape)
deltas = bz.data(df, dshape=self.dshape)
deltas = bz.data(
odo(
bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
),
pd.DataFrame,
),
name='delta',
dshape=self.dshape,
)
expected_views = keymap(pd.Timestamp, {
'2014-01-02': np.array([[10.0, 11.0, 12.0],
[1.0, 2.0, 3.0]]),
'2014-01-03': np.array([[11.0, 12.0, 13.0],
[2.0, 3.0, 4.0]]),
'2014-01-04': np.array([[12.0, 13.0, 14.0],
[12.0, 13.0, 14.0]]),
})
nassets = len(asset_info)
if nassets == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan]],
expected_views,
)
with tmp_asset_finder(equities=asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([12] * nassets, [13] * nassets, [14] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
dates = self.dates
dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
@with_extra_sid
def _test_deltas_only_one_delta_in_universe(self, asset_info):
expr = bz.data(self.df, name='expr', dshape=self.dshape)
deltas = pd.DataFrame({
'sid': [65, 66],
'asof_date': [self.dates[1], self.dates[0]],
'timestamp': [self.dates[2], self.dates[1]],
'value': [10, 11],
})
deltas = bz.data(deltas, name='deltas', dshape=self.dshape)
expected_views = keymap(pd.Timestamp, {
'2014-01-02': np.array([[0.0, 11.0, 2.0],
[1.0, 2.0, 3.0]]),
'2014-01-03': np.array([[10.0, 2.0, 3.0],
[2.0, 3.0, 4.0]]),
'2014-01-04': np.array([[2.0, 3.0, 4.0],
[2.0, 3.0, 4.0]]),
})
nassets = len(asset_info)
if nassets == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan]],
expected_views,
)
with tmp_asset_finder(equities=asset_info) as finder:
expected_output = pd.DataFrame(
columns=[
'value',
],
data=np.array([11, 10, 4]).repeat(len(asset_info.index)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
)
dates = self.dates
dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
def _test_deltas_macro(self):
expr = bz.data(self.macro_df, name='expr', dshape=self.macro_dshape)
deltas = bz.data(
self.macro_df.iloc[:-1],
name='deltas',
dshape=self.macro_dshape,
)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
nassets = len(simple_asset_info)
expected_views = keymap(pd.Timestamp, {
'2014-01-02': np.array([[10.0],
[1.0]]),
'2014-01-03': np.array([[11.0],
[2.0]]),
})
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([10] * nassets, [11] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(simple_asset_info.index),
)),
columns=('value',),
)
dates = self.dates
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
@with_extra_sid
def _test_novel_deltas(self, asset_info):
base_dates = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-04')
])
repeated_dates = base_dates.repeat(3)
baseline = pd.DataFrame({
'sid': self.ASSET_FINDER_EQUITY_SIDS * 2,
'value': (0., 1., 2., 1., 2., 3.),
'int_value': (0, 1, 2, 1, 2, 3),
'asof_date': repeated_dates,
'timestamp': repeated_dates,
})
expr = bz.data(baseline, name='expr', dshape=self.dshape)
deltas = bz.data(
odo(
bz.transform(
expr,
value=expr.value + 10,
timestamp=expr.timestamp + timedelta(days=1),
),
pd.DataFrame,
),
name='delta',
dshape=self.dshape,
)
expected_views_all_deltas = keymap(pd.Timestamp, {
'2014-01-03': np.array([[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0]]),
'2014-01-06': np.array([[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0],
[11.0, 12.0, 13.0]]),
})
# The only novel delta is on 2014-01-05, because it modifies a
# baseline data point that occurred on 2014-01-04, which is on a
# Saturday. The other delta, occurring on 2014-01-02, is seen after
# we already see the baseline data it modifies, and so it is a
# non-novel delta. Thus, the only delta seen in the expected view for
# novel deltas is on 2014-01-06 at (2, 0), (2, 1), and (2, 2).
expected_views_novel_deltas = keymap(pd.Timestamp, {
'2014-01-03': np.array([[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0]]),
'2014-01-06': np.array([[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
[11.0, 12.0, 13.0]]),
})
def get_fourth_asset_view(expected_views, window_length):
return valmap(
lambda view: np.c_[view, [np.nan] * window_length],
expected_views,
)
if len(asset_info) == 4:
expected_views_all_deltas = get_fourth_asset_view(
expected_views_all_deltas, window_length=3
)
expected_views_novel_deltas = get_fourth_asset_view(
expected_views_novel_deltas, window_length=3
)
expected_output_buffer_all_deltas = [
10, 11, 12, np.nan, 11, 12, 13, np.nan
]
expected_output_buffer_novel_deltas = [
0, 1, 2, np.nan, 11, 12, 13, np.nan
]
else:
expected_output_buffer_all_deltas = [
10, 11, 12, 11, 12, 13
]
expected_output_buffer_novel_deltas = [
0, 1, 2, 11, 12, 13
]
cal = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
# omitting the 4th and 5th to simulate a weekend
pd.Timestamp('2014-01-06'),
])
with tmp_asset_finder(equities=asset_info) as finder:
expected_output_all_deltas = pd.DataFrame(
expected_output_buffer_all_deltas,
index=pd.MultiIndex.from_product((
sorted(expected_views_all_deltas.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
expected_output_novel_deltas = pd.DataFrame(
expected_output_buffer_novel_deltas,
index=pd.MultiIndex.from_product((
sorted(expected_views_novel_deltas.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
it = (
(
True,
expected_views_all_deltas,
expected_output_all_deltas
),
(
False,
expected_views_novel_deltas,
expected_output_novel_deltas
)
)
for apply_deltas_adjs, expected_views, expected_output in it:
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=cal,
start=cal[2],
end=cal[-1],
window_length=3,
compute_fn=op.itemgetter(-1),
apply_deltas_adjustments=apply_deltas_adjs,
)
def _test_novel_deltas_macro(self):
base_dates = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-04')
])
baseline = pd.DataFrame({
'value': (0., 1.),
'asof_date': base_dates,
'timestamp': base_dates,
})
expr = bz.data(baseline, name='expr', dshape=self.macro_dshape)
deltas = bz.data(baseline, name='deltas', dshape=self.macro_dshape)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
nassets = len(simple_asset_info)
expected_views_all_deltas = keymap(pd.Timestamp, {
'2014-01-03': np.array([[10.0],
[10.0],
[10.0]]),
'2014-01-06': np.array([[10.0],
[10.0],
[11.0]]),
})
# The only novel delta is on 2014-01-05, because it modifies a
# baseline data point that occurred on 2014-01-04, which is on a
# Saturday. The other delta, occurring on 2014-01-02, is seen after
# we already see the baseline data it modifies, and so it is a
# non-novel delta. Thus, the only delta seen in the expected view for
# novel deltas is on 2014-01-06 at (2, 0).
expected_views_novel_deltas = keymap(pd.Timestamp, {
'2014-01-03': np.array([[0.0],
[0.0],
[0.0]]),
'2014-01-06': np.array([[0.0],
[0.0],
[11.0]]),
})
cal = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
# omitting the 4th and 5th to simulate a weekend
pd.Timestamp('2014-01-06'),
])
def get_expected_output(expected_views, values, asset_info):
return pd.DataFrame(
list(concatv(*([value] * nassets for value in values))),
index=pd.MultiIndex.from_product(
(sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),)
), columns=('value',),
)
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected_output_all_deltas = get_expected_output(
expected_views_all_deltas,
[10, 11],
simple_asset_info,
)
expected_output_novel_deltas = get_expected_output(
expected_views_novel_deltas,
[0, 11],
simple_asset_info,
)
it = (
(
True,
expected_views_all_deltas,
expected_output_all_deltas
),
(
False,
expected_views_novel_deltas,
expected_output_novel_deltas
)
)
for apply_deltas_adjs, expected_views, expected_output in it:
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=cal,
start=cal[2],
end=cal[-1],
window_length=3,
compute_fn=op.itemgetter(-1),
apply_deltas_adjustments=apply_deltas_adjs,
)
def _test_checkpoints_macro(self, checkpoints, ffilled_value=-1.0):
"""Simple checkpoints test that accepts a checkpoints dataframe and
the expected value for 2014-01-03 for macro datasets.
The underlying data has value -1.0 on 2014-01-01 and 1.0 on 2014-01-04.
Parameters
----------
checkpoints : pd.DataFrame
The checkpoints data.
ffilled_value : float, optional
            The value to be read on the third; if not provided, it will be the
            value in the base data that will be naturally ffilled there.
"""
dates = pd.Timestamp('2014-01-01'), pd.Timestamp('2014-01-04')
baseline = pd.DataFrame({
'value': [-1.0, 1.0],
'asof_date': dates,
'timestamp': dates,
})
nassets = len(simple_asset_info)
expected_views = keymap(pd.Timestamp, {
'2014-01-03': np.array([[ffilled_value]]),
'2014-01-04': np.array([[1.0]]),
})
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([ffilled_value] * nassets, [1.0] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(simple_asset_info.index),
)),
columns=('value',),
)
self._run_pipeline(
bz.data(baseline, name='expr', dshape=self.macro_dshape),
None,
bz.data(
checkpoints,
name='expr_checkpoints',
dshape=self.macro_dshape,
),
expected_views,
expected_output,
finder,
calendar=pd.date_range('2014-01-01', '2014-01-04'),
start=pd.Timestamp('2014-01-03'),
end=dates[-1],
window_length=1,
compute_fn=op.itemgetter(-1),
)
def _test_checkpoints_macro(self): # noqa F811
ffilled_value = 0.0
checkpoints_ts = pd.Timestamp('2014-01-02')
checkpoints = pd.DataFrame({
'value': [ffilled_value],
'asof_date': checkpoints_ts,
'timestamp': checkpoints_ts,
})
self._test_checkpoints_macro(checkpoints, ffilled_value)
def _test_empty_checkpoints_macro(self):
empty_checkpoints = pd.DataFrame({
'value': [],
'asof_date': [],
'timestamp': [],
})
self._test_checkpoints_macro(empty_checkpoints)
def _test_checkpoints_out_of_bounds_macro(self):
# provide two checkpoints, one before the data in the base table
# and one after, these should not affect the value on the third
dates = pd.to_datetime(['2013-12-31', '2014-01-05'])
checkpoints = pd.DataFrame({
'value': [-2, 2],
'asof_date': dates,
'timestamp': dates,
})
self._test_checkpoints_macro(checkpoints)
def _test_checkpoints(self, checkpoints, ffilled_values=None):
"""Simple checkpoints test that accepts a checkpoints dataframe and
the expected value for 2014-01-03.
The underlying data has value -1.0 on 2014-01-01 and 1.0 on 2014-01-04.
Parameters
----------
checkpoints : pd.DataFrame
The checkpoints data.
        ffilled_values : iterable of float, optional
            The values to be read on the third; if not provided, they will be
            the values in the base data that will be naturally ffilled there.
"""
nassets = len(simple_asset_info)
dates = pd.to_datetime(['2014-01-01', '2014-01-04'])
dates_repeated = np.tile(dates, nassets)
values = np.arange(nassets) + 1
values = np.hstack((values[::-1], values))
baseline = pd.DataFrame({
'sid': np.tile(simple_asset_info.index, 2),
'value': values,
'asof_date': dates_repeated,
'timestamp': dates_repeated,
})
if ffilled_values is None:
ffilled_values = baseline.value.iloc[:nassets]
updated_values = baseline.value.iloc[nassets:]
expected_views = keymap(pd.Timestamp, {
'2014-01-03': [ffilled_values],
'2014-01-04': [updated_values],
})
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv(ffilled_values, updated_values)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(simple_asset_info.index),
)),
columns=('value',),
)
self._run_pipeline(
bz.data(baseline, name='expr', dshape=self.value_dshape),
None,
bz.data(
checkpoints,
name='expr_checkpoints',
dshape=self.value_dshape,
),
expected_views,
expected_output,
finder,
calendar=pd.date_range('2014-01-01', '2014-01-04'),
start=pd.Timestamp('2014-01-03'),
end=dates[-1],
window_length=1,
compute_fn=op.itemgetter(-1),
)
def _test_checkpoints(self): # noqa F811
nassets = len(simple_asset_info)
ffilled_values = (np.arange(nassets, dtype=np.float64) + 1) * 10
dates = [pd.Timestamp('2014-01-02')] * nassets
checkpoints = pd.DataFrame({
'sid': simple_asset_info.index,
'value': ffilled_values,
'asof_date': dates,
'timestamp': dates,
})
self._test_checkpoints(checkpoints, ffilled_values)
def _test_empty_checkpoints(self):
checkpoints = pd.DataFrame({
'sid': [],
'value': [],
'asof_date': [],
'timestamp': [],
})
self._test_checkpoints(checkpoints)
def _test_checkpoints_out_of_bounds(self):
nassets = len(simple_asset_info)
        # Provide two sets of checkpoints, one before the data in the base
        # table and one after; neither should affect the value on the third.
dates = pd.to_datetime(['2013-12-31', '2014-01-05'])
dates_repeated = np.tile(dates, nassets)
ffilled_values = (np.arange(nassets) + 2) * 10
ffilled_values = np.hstack((ffilled_values[::-1], ffilled_values))
checkpoints = pd.DataFrame({
'sid': np.tile(simple_asset_info.index, 2),
'value': ffilled_values,
'asof_date': dates_repeated,
'timestamp': dates_repeated,
})
self._test_checkpoints(checkpoints)
def _test_id_take_last_in_group_sorted(self):
"""
input
asof_date timestamp other value
2014-01-03 2014-01-04 00 3 3
2014-01-02 2014-01-04 00 2 2
output (expected):
other value
2014-01-02 NaN NaN
2014-01-03 NaN NaN
2014-01-06 3 3
"""
dates = pd.DatetimeIndex([
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
pd.Timestamp('2014-01-06'),
])
T = pd.Timestamp
df = pd.DataFrame(
columns=['asof_date', 'timestamp', 'other', 'value'],
data=[
# asof-dates are flipped in terms of order so that if we
# don't sort on asof-date before getting the last in group,
# we will get the wrong result.
[T('2014-01-03'), T('2014-01-04 00'), 3, 3],
[T('2014-01-02'), T('2014-01-04 00'), 2, 2],
],
)
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
data=[[np.nan, np.nan], # 2014-01-02
[np.nan, np.nan], # 2014-01-03
[3, 3]], # 2014-01-06
columns=['other', 'value'],
index=dates,
)
self._test_id_macro(
df,
var * Record(fields),
expected,
self.asset_finder,
('other', 'value'),
dates=dates,
)
class MiscTestCase(CatalystTestCase):
def test_exprdata_repr(self):
strd = set()
class BadRepr(object):
"""A class which cannot be repr'd.
"""
def __init__(self, name):
self._name = name
def __repr__(self): # pragma: no cover
raise AssertionError('ayy')
def __str__(self):
strd.add(self)
return self._name
assert_equal(
repr(ExprData(
expr=BadRepr('expr'),
deltas=BadRepr('deltas'),
checkpoints=BadRepr('checkpoints'),
odo_kwargs={'a': 'b'},
)),
"ExprData(expr='expr', deltas='deltas',"
" checkpoints='checkpoints', odo_kwargs={'a': 'b'}, "
"apply_deltas_adjustments=True)",
)
def test_blaze_loader_repr(self):
assert_equal(repr(BlazeLoader()), '<BlazeLoader: {}>')
def test_blaze_loader_lookup_failure(self):
class D(DataSet):
c = Column(dtype='float64')
with self.assertRaises(KeyError) as e:
BlazeLoader()(D.c)
assert_equal(str(e.exception), 'D.c::float64')
|
py | b413009fb2c2002449a441424a66c578cb81306f | # -*- coding: utf-8 -*-
# (C) Copyright 2019 Hewlett Packard Enterprise Development LP.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __author__ = "@netwookie"
# __credits__ = ["Rick Kauffman"]
# __license__ = "Apache2.0"
# __maintainer__ = "Rick Kauffman"
# __email__ = "[email protected]"
#
# a python module that handles the link-aggregation API call to the CFM controller
#
#---------------------------------------------------------------------------------------
from flask import Blueprint, render_template, request, redirect, session, url_for, abort
import os
from werkzeug import secure_filename
from mongoengine import Q
import pygal
import json
# Place to stash the user temporarily
from database.sidekick import Sidekick
from database.ports import Ports
from database.b_ports import b_Ports
from pyhpecfm.client import CFMClient
from pyhpecfm import fabric
from pyhpecfm import system
from utilities.get_client import access_client
from utilities.switch_array import get_switches
from utilities.port_array import get_ports
from utilities.vlan_array import get_vlans
lag_app=Blueprint('lag_app', __name__)
@lag_app.route('/process_lags', methods=('GET', 'POST'))
def process_lags():
# Get a client connection.
client=access_client()
# assignment of attributes
count_only=False
mac_attachments=True
mac_learning=True
ports=True
port_type='access'
tags=True
type='provisioned'
vlan_groups=True
# Create an attribute dictionary
params={
'count_only': count_only,
'mac_attachments': mac_attachments,
'mac_learning': mac_learning,
'ports': ports,
'port_type': port_type,
'tags': tags,
'type': type,
'vlan_groups': vlan_groups
}
# Pull the CFM controller for all lags
try:
cfm_lags=fabric.get_lags(client, params)
except:
error="ERR-LOGIN - Failed to log into CFM controller"
return error
    # Build the properties and ports dictionaries
# Create some empty lists
port_props=[]
port_detail=[]
lag_group=[]
for lag in cfm_lags:
if len(lag['port_properties']) > 1:
# Iterate through port_properties
for item in lag['port_properties']:
lag_mode=item['lacp']['mode']
lag_speed=item['speed']['current']
lag_partner_status=item['port_lacp_state'][0]['partner_state_lacp_status']
lag_partner_system=item['port_lacp_state'][0]['partner_state_system_id']
lag_partner_port_state=item['port_lacp_state'][0]['partner_state_port']
actor_lacp_status=item['port_lacp_state'][0]['actor_state_lacp_status']
# Define properties dictionary
properties={
'mode':lag_mode,
'speed':lag_speed,
'partner_status':lag_partner_status,
'partner_system':lag_partner_system,
'partner_port_state':lag_partner_port_state,
'actor_status':actor_lacp_status
}
# Extract port detail.
for ports in item['ports']:
switch_name=ports['switch_name']
link_state=ports['link_state']
admin_state=ports['admin_state']
port_security_enabled=ports['port_security_enabled']
vlans=ports['vlans']
speed=ports['speed']
port_label=ports['port_label']
bridge_loop_detection=ports['bridge_loop_detection']
# Define port dictionary
port_information={
'switch_name': switch_name,
'link_state': link_state,
'admin_state': admin_state,
'port_security_enabled': port_security_enabled,
'vlans': vlans,
'speed': speed,
'port_label': port_label,
'bridge_loop_detection': bridge_loop_detection
}
port_detail.append(port_information)
#ports = {'ports':port_detail}
#----add port detail to the dictionary items
properties['ports']=port_detail
properties['name']=lag['name']
# Now make it a dictionary
properties={'properties': properties}
lag_group.append(properties)
lag_data=[]
port_props=[]
port_detail=[]
return render_template('lags/lags.html', l=lag_group)
@lag_app.route('/autolag', methods=('GET', 'POST'))
def autolag():
# Clear ports database on new session.
Ports.objects().delete()
# Clear ports database on new session.
b_Ports.objects().delete()
switch_list_out=[]
switch_list=[]
vlan_list_out=[]
vlan_list=[]
# Get a client connection.
switches=get_switches()
for switch in switches:
switch_name=switch[3].encode('utf-8')
switch_uuid=switch[5].encode('utf-8')
switch_list=[switch_name,switch_uuid]
switch_list_out.append(switch_list)
# Get a client connection.
vlans=get_vlans()
for vlan in vlans:
vlansx=vlan['vlans'].encode('utf-8')
uuid=vlan['uuid'].encode('utf-8')
vlan_list=[vlansx,uuid]
vlan_list_out.append(vlan_list)
    count = list(range(1, 49))  # selectable port counts 1..48
return render_template('lags/autolag.html', switches=switch_list_out, vlans=vlan_list_out, count=count)
@lag_app.route('/makelags', methods=('GET', 'POST'))
def makelags():
# Get a client connection.
client=access_client()
a_list=[]
a_list_out=[]
b_list=[]
b_list_out=[]
    # Get the selected items from the chooser form
vlan_uuid=request.form['vlan'].encode('utf-8')
a_switch=request.form['a_switch'].encode('utf-8')
b_switch=request.form['b_switch'].encode('utf-8')
count=request.form['count'].encode('utf-8')
count = int(count)
# Verify selections
if a_switch == b_switch:
error="ERR009 - You picked the same switches. Please pick two different switches"
return render_template('sidekick/dberror.html', error=error)
if a_switch == 'no select' or b_switch == 'no select' or vlan_uuid == 'no select':
error="ERR00910 - You missed a selection. Make sure to select valid items"
return render_template('sidekick/dberror.html', error=error)
# Build a side ports database
port_info=[]
port_info_out=[]
ports=get_ports()
for port in ports:
if port['switch_uuid'] == a_switch:
speed=port['speed']['current']
speed=str(speed)
speed=speed.encode('utf-8')
uuid=port['uuid'].encode('utf-8')
port_label=port['port_label'].encode('utf-8')
silkscreen=port['silkscreen'].encode('utf-8')
switch_uuid=port['switch_uuid'].encode('utf-8')
# Build database entry to save creds
port = Ports(speed=speed,uuid=uuid,switch_uuid=switch_uuid,port_label=port_label,silkscreen=silkscreen)
# Save the record
try:
port.save()
except:
error="ERR001 - Failed to save port information"
return render_template('sidekick/dberror.html', error=error)
# Build b side ports database
port_info=[]
port_info_out=[]
ports=get_ports()
for port in ports:
if port['switch_uuid'] == b_switch:
speed=port['speed']['current']
speed=str(speed)
speed=speed.encode('utf-8')
uuid=port['uuid'].encode('utf-8')
port_label=port['port_label'].encode('utf-8')
silkscreen=port['silkscreen'].encode('utf-8')
switch_uuid=port['switch_uuid'].encode('utf-8')
# Build database entry to save creds
port = b_Ports(speed=speed,uuid=uuid,switch_uuid=switch_uuid,port_label=port_label,silkscreen=silkscreen)
# Save the record
try:
port.save()
except:
error="ERR001 - Failed to save port information"
return render_template('sidekick/dberror.html', error=error)
# Build static variables
description = "Symetrical Link Aggregation for DL attached servers"
test=[]
port_counter = 1
# Process a-side-ports
while port_counter <= count:
a_switch_ports = Ports.objects(silkscreen=str(port_counter))
port_counter=port_counter + 1
a_silkscreen=a_switch_ports[0]['silkscreen'].encode('utf-8')
a_port_uuid=a_switch_ports[0]['uuid'].encode('utf-8')
speed=a_switch_ports[0]['speed'].encode('utf-8')
speed=int(speed)
        # Find the matching B-side port
b_switch_ports = b_Ports.objects(silkscreen=a_silkscreen)
for obj in b_switch_ports:
if obj['switch_uuid'] == b_switch:
b_port_uuid=obj['uuid'].encode('utf-8')
b_port_silk=obj['silkscreen'].encode('utf-8')
out = [a_silkscreen,b_port_silk,speed,a_port_uuid, b_port_uuid,port_counter, count]
print 'writing lag to cfm....'
name = "Auto generated link aggregation-%s" % (port_counter)
result = fabric.add_lags(client, name, description, vlan_uuid, a_port_uuid, b_port_uuid, speed)
test.append(out)
print test
return render_template('lags/autolag_success.html', result=result)
@lag_app.route('/testports', methods=('GET', 'POST'))
def testports():
    # Debug helper: list the silkscreen labels of the selected A-side switch
    # ports; the switch uuid is read from the submitted form.
    a_switch = request.form.get('a_switch', '')
    a_switch_ports = Ports.objects(switch_uuid=a_switch)
    for silkscreen in sorted(port['silkscreen'] for port in a_switch_ports):
        print silkscreen
    return render_template('lags/autolag_success.html')
|
py | b41300e18dc5618376beec3248cfbe0e2ecd31af | import os
from setuptools import find_packages, setup
VERSION = __import__('herald').__version__
def read_file(filename):
"""Read a file into a string"""
path = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(path, filename)
try:
return open(filepath).read()
except IOError:
return ''
install_requires = [
'django>=1.8',
'six',
'jsonpickle',
]
dev_requires = [
'pytz',
]
twilio_requires = [
'twilio',
]
html2text_requires = [
'html2text',
]
setup(
name='django-herald',
version=VERSION,
author='Worthwhile',
author_email='[email protected]',
install_requires=install_requires,
extras_require={
'dev': install_requires + dev_requires,
'twilio': twilio_requires,
'html2text': html2text_requires,
},
packages=find_packages(include=('herald', 'herald.*')),
include_package_data=True, # declarations in MANIFEST.in
license='MIT',
url='https://github.com/worthwhile/django-herald/',
download_url='https://github.com/worthwhile/django-herald/tarball/'+VERSION,
description="Django library for separating the message content from transmission method",
long_description=read_file('README.md'),
long_description_content_type="text/markdown",
keywords=['django', 'notifications', 'messaging'],
classifiers=[
'Framework :: Django',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'License :: OSI Approved :: MIT License',
],
)
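# Illustrative install commands (a sketch, not part of the package metadata):
# the extras_require mapping above exposes optional dependency sets by name,
# so the optional backends can be selected at install time, e.g.
#
#   pip install django-herald               # core only
#   pip install "django-herald[twilio]"     # adds the twilio dependency
#   pip install "django-herald[html2text]"  # adds the html2text dependency
#   pip install "django-herald[dev]"        # adds the dev extras (pytz)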
|
py | b413010d03cb32b02edf88c26020306dac423fae | import datetime
now = datetime.datetime.now()
one_month_ago = now - datetime.timedelta(days=30)
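# Select objects whose due_date falls within the last 30 days and drop any
# marked inactive.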
cmod.Rendtal.objects.filter(due_date__gte=one_month_ago, due_date__lte=now).exclude(active=False) |
py | b41301593f6bd20f0bbc1c58877796f402c4d829 | word = input('Enter a word: ')
if word == word[::-1]:
print(f'The word "{word}" is a palindrome.')
else:
print(f'The word "{word}" is not a palindrome.') |
py | b41303b092fc748f527e2dce4ad48ae152a57f20 | """A controller for watering a plant.
This acts a both a source (current output state) and a sink (receives sensor
data to act on).
"""
import atexit
import logging
from threading import Lock, Timer
from RPi import GPIO
from sensor_feed.sensor import SleepingSensor
from sensor_feed.sink import Sink
LOGGER = logging.getLogger(__name__)
GPIO.setmode(GPIO.BCM)
class PlantControl(SleepingSensor, Sink):
"""A controller to water a plant."""
param_name = 'water_input'
param_id = 'water_input'
param_unit = 'seconds'
trigger_param = 'soil'
def __init__(self, *args, **kwargs):
super(SleepingSensor, self).__init__(*args, **kwargs)
super(Sink, self).__init__(*args, **kwargs)
self._water_input = 0
self.threshold = 1300
self.water_period = 20
self.min_period = self.water_period + 2
self._watering = Lock()
self.gpio_pin = 17
GPIO.setup(self.gpio_pin, GPIO.OUT)
atexit.register(GPIO.cleanup)
def get_value(self):
last_water = self._water_input
self._water_input = 0
return last_water
def process_value(self, param_name, timestamp, value):
# only interested in one parameter
if param_name != self.trigger_param:
return
if value > self.threshold:
self.apply_water()
def apply_water(self):
LOGGER.critical('Applying water.')
self._water_input += self.water_period
if self._watering.locked():
LOGGER.critical('Already watering.')
return
self._watering.acquire()
# turn on water supply.
GPIO.output(self.gpio_pin, GPIO.HIGH)
LOGGER.critical('Tap on.')
timer = Timer(self.water_period, self._stop)
timer.start()
def _stop(self):
LOGGER.critical('Tap off.')
GPIO.output(self.gpio_pin, GPIO.LOW)
self._watering.release()
def __del__(self):
GPIO.output(self.gpio_pin, GPIO.LOW)
LOGGER.critical('Ensure tap off.')
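# A minimal usage sketch, assuming RPi.GPIO is available (i.e. this runs on a
# Raspberry Pi with a valve wired to BCM pin 17) and that SleepingSensor and
# Sink can be constructed without extra arguments.  It feeds one reading for
# the 'soil' trigger parameter above the default threshold of 1300, waits for
# the watering cycle to finish, then reads back the accumulated seconds.
if __name__ == '__main__':
    import time
    logging.basicConfig(level=logging.INFO)
    control = PlantControl()
    control.process_value('soil', time.time(), 1500)     # above threshold: tap opens
    control.process_value('humidity', time.time(), 50)   # not the trigger param: ignored
    time.sleep(control.water_period + 2)                 # let the Timer close the tap
    print(control.get_value())                           # seconds of water applied since last read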
|
py | b413078441e25f227284dd1d88e8c021273b76f7 | #!/usr/bin/python
# Author: @BlankGodd
class Stack:
"""Last in First Out"""
def __init__(self):
self._stack = []
self._size = 0
def add(self, data):
# adding to a stack
self._stack.append(data)
self._size += 1
def remove(self):
# getting the item top of the stack
if self._size == 0:
print("Stack is Empty!")
return
x = self._stack[-1]
del(self._stack[-1])
self._size -= 1
return x
def top(self):
# display the item top of the stack without removing
if self._size == 0:
print("Stack is Empty!")
return
print(self._stack[-1])
def length(self):
return self._size
def flip(self):
# to turn the stack over
if self._size == 0 or self._size == 1:
return
a,b = 0,1
x = [i for i in self._stack]
for i in range(self._size):
self._stack[a] = x[-b]
a += 1
b += 1
def display(self):
print(self._stack)
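# A small self-check, using only the Stack defined above.
if __name__ == '__main__':
    s = Stack()
    for item in (1, 2, 3):
        s.add(item)
    s.top()            # prints 3 (last item in)
    s.flip()           # reverses the stack in place
    s.top()            # prints 1
    print(s.remove())  # pops and prints 1
    print(s.length())  # 2 items remain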
|