blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a60935cfa8dae08bce2ff3cda00428329bb70552 | 331ed33890f103ce95318abe0d4bd255929e8f4d | /source/addcode3.py | de77590e3a533f3efb249fe212a7c9c7f61e1647 | [] | no_license | manon2012/e | fa15ce55a72fa6ee20f10d06e9f670ade207209a | c20a345e96ccd702b56a802e2efbd924f1cd808d | refs/heads/master | 2021-01-22T19:25:41.688876 | 2018-09-20T09:59:29 | 2018-09-20T09:59:29 | 102,418,775 | 0 | 0 | null | 2017-09-22T08:21:04 | 2017-09-05T01:30:02 | Python | UTF-8 | Python | false | false | 524 | py | #! /usr/bin/env python
#coding=utf-8
import string, random
# Characters permitted in an activation code: ASCII letters and digits.
# Fix: string.ascii_letters replaces string.letters, which is
# locale-dependent on Python 2 and removed on Python 3; ascii_letters
# exists and is identical on both.
field = string.ascii_letters + string.digits

def getRandom(length=4):
    """Return `length` distinct random characters from `field` as a string.

    Uses random.sample, so characters never repeat within one segment.
    The previous hard-coded segment length of 4 is now the default of
    the new `length` parameter (backward compatible).
    """
    return "".join(random.sample(field, length))
# How many dash-separated segments make up one activation code.
def concatenate(group):
    """Build one activation code out of `group` random segments,
    joined with dashes.
    """
    segments = [getRandom() for _ in range(group)]
    return "-".join(segments)
# Produce n activation codes.
def generate(n, group=4):
    """Return a list of `n` activation codes.

    Generalization: the number of segments per code, previously
    hard-coded to 4, is now the `group` keyword (default 4, so the
    existing one-argument call is unchanged).
    """
    return [concatenate(group) for _ in range(n)]
if __name__ == '__main__':
    # Fix: call form of print works on both Python 2 and Python 3;
    # the original `print generate(2)` statement is a SyntaxError on 3.
    print(generate(2))
"[email protected]"
] | |
50fdc9adae0c20329e8f09e7b153a38d0e0d84e4 | 804d40b874e2eb1f2e9f3f3f124d507bf2b517f1 | /env/Lib/site-packages/sqlalchemy/engine/default.py | bc9d9efa8ddf2cda5eab5f4abef8c36a82532d63 | [] | no_license | Nestor-Leyva/api-flask | 86d5d3053e62767813aeacea5f30cc6a355320d0 | 55675a02fd79263518b0dfc731a2b4a2be50bd0d | refs/heads/main | 2023-08-21T03:00:18.740097 | 2021-10-04T19:25:38 | 2021-10-04T19:25:38 | 413,517,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57,329 | py | # engine/default.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementations of per-dialect sqlalchemy.engine classes.
These are semi-private implementation classes which are only of importance
to database dialect authors; dialects will usually use the classes here
as the base class for their own corresponding classes.
"""
import codecs
import random
import re
import weakref
from . import interfaces
from . import reflection
from . import result
from .. import event
from .. import exc
from .. import pool
from .. import processors
from .. import types as sqltypes
from .. import util
from ..sql import compiler
from ..sql import expression
from ..sql import schema
from ..sql.elements import quoted_name
AUTOCOMMIT_REGEXP = re.compile(
r"\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER)", re.I | re.UNICODE
)
# When we're handed literal SQL, ensure it's a SELECT query
SERVER_SIDE_CURSOR_RE = re.compile(r"\s*SELECT", re.I | re.UNICODE)
class DefaultDialect(interfaces.Dialect):
"""Default implementation of Dialect"""
statement_compiler = compiler.SQLCompiler
ddl_compiler = compiler.DDLCompiler
type_compiler = compiler.GenericTypeCompiler
preparer = compiler.IdentifierPreparer
supports_alter = True
supports_comments = False
inline_comments = False
# the first value we'd get for an autoincrement
# column.
default_sequence_base = 1
# most DBAPIs happy with this for execute().
# not cx_oracle.
execute_sequence_format = tuple
supports_views = True
supports_sequences = False
sequences_optional = False
preexecute_autoincrement_sequences = False
postfetch_lastrowid = True
implicit_returning = False
supports_right_nested_joins = True
cte_follows_insert = False
supports_native_enum = False
supports_native_boolean = False
non_native_boolean_check_constraint = True
supports_simple_order_by_label = True
tuple_in_values = False
engine_config_types = util.immutabledict(
[
("convert_unicode", util.bool_or_str("force")),
("pool_timeout", util.asint),
("echo", util.bool_or_str("debug")),
("echo_pool", util.bool_or_str("debug")),
("pool_recycle", util.asint),
("pool_size", util.asint),
("max_overflow", util.asint),
("pool_threadlocal", util.asbool),
]
)
# if the NUMERIC type
# returns decimal.Decimal.
# *not* the FLOAT type however.
supports_native_decimal = False
if util.py3k:
supports_unicode_statements = True
supports_unicode_binds = True
returns_unicode_strings = True
description_encoding = None
else:
supports_unicode_statements = False
supports_unicode_binds = False
returns_unicode_strings = False
description_encoding = "use_encoding"
name = "default"
# length at which to truncate
# any identifier.
max_identifier_length = 9999
_user_defined_max_identifier_length = None
# sub-categories of max_identifier_length.
# currently these accommodate for MySQL which allows alias names
# of 255 but DDL names only of 64.
max_index_name_length = None
max_constraint_name_length = None
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
colspecs = {}
default_paramstyle = "named"
supports_default_values = False
supports_empty_insert = True
supports_multivalues_insert = False
supports_is_distinct_from = True
supports_server_side_cursors = False
# extra record-level locking features (#4860)
supports_for_update_of = False
server_version_info = None
construct_arguments = None
"""Optional set of argument specifiers for various SQLAlchemy
constructs, typically schema items.
To implement, establish as a series of tuples, as in::
construct_arguments = [
(schema.Index, {
"using": False,
"where": None,
"ops": None
})
]
If the above construct is established on the PostgreSQL dialect,
the :class:`.Index` construct will now accept the keyword arguments
``postgresql_using``, ``postgresql_where``, nad ``postgresql_ops``.
Any other argument specified to the constructor of :class:`.Index`
which is prefixed with ``postgresql_`` will raise :class:`.ArgumentError`.
A dialect which does not include a ``construct_arguments`` member will
not participate in the argument validation system. For such a dialect,
any argument name is accepted by all participating constructs, within
the namespace of arguments prefixed with that dialect name. The rationale
here is so that third-party dialects that haven't yet implemented this
feature continue to function in the old way.
.. versionadded:: 0.9.2
.. seealso::
:class:`.DialectKWArgs` - implementing base class which consumes
:attr:`.DefaultDialect.construct_arguments`
"""
# indicates symbol names are
# UPPERCASEd if they are case insensitive
# within the database.
# if this is True, the methods normalize_name()
# and denormalize_name() must be provided.
requires_name_normalize = False
reflection_options = ()
dbapi_exception_translation_map = util.immutabledict()
"""mapping used in the extremely unusual case that a DBAPI's
published exceptions don't actually have the __name__ that they
are linked towards.
.. versionadded:: 1.0.5
"""
@util.deprecated_params(
convert_unicode=(
"1.3",
"The :paramref:`_sa.create_engine.convert_unicode` parameter "
"and corresponding dialect-level parameters are deprecated, "
"and will be removed in a future release. Modern DBAPIs support "
"Python Unicode natively and this parameter is unnecessary.",
)
)
def __init__(
self,
convert_unicode=False,
encoding="utf-8",
paramstyle=None,
dbapi=None,
implicit_returning=None,
supports_right_nested_joins=None,
case_sensitive=True,
supports_native_boolean=None,
empty_in_strategy="static",
max_identifier_length=None,
label_length=None,
**kwargs
):
if not getattr(self, "ported_sqla_06", True):
util.warn(
"The %s dialect is not yet ported to the 0.6 format"
% self.name
)
self.convert_unicode = convert_unicode
self.encoding = encoding
self.positional = False
self._ischema = None
self.dbapi = dbapi
if paramstyle is not None:
self.paramstyle = paramstyle
elif self.dbapi is not None:
self.paramstyle = self.dbapi.paramstyle
else:
self.paramstyle = self.default_paramstyle
if implicit_returning is not None:
self.implicit_returning = implicit_returning
self.positional = self.paramstyle in ("qmark", "format", "numeric")
self.identifier_preparer = self.preparer(self)
self.type_compiler = self.type_compiler(self)
if supports_right_nested_joins is not None:
self.supports_right_nested_joins = supports_right_nested_joins
if supports_native_boolean is not None:
self.supports_native_boolean = supports_native_boolean
self.case_sensitive = case_sensitive
self.empty_in_strategy = empty_in_strategy
if empty_in_strategy == "static":
self._use_static_in = True
elif empty_in_strategy in ("dynamic", "dynamic_warn"):
self._use_static_in = False
self._warn_on_empty_in = empty_in_strategy == "dynamic_warn"
else:
raise exc.ArgumentError(
"empty_in_strategy may be 'static', "
"'dynamic', or 'dynamic_warn'"
)
self._user_defined_max_identifier_length = max_identifier_length
if self._user_defined_max_identifier_length:
self.max_identifier_length = (
self._user_defined_max_identifier_length
)
self.label_length = label_length
if self.description_encoding == "use_encoding":
self._description_decoder = (
processors.to_unicode_processor_factory
)(encoding)
elif self.description_encoding is not None:
self._description_decoder = (
processors.to_unicode_processor_factory
)(self.description_encoding)
self._encoder = codecs.getencoder(self.encoding)
self._decoder = processors.to_unicode_processor_factory(self.encoding)
@util.memoized_property
def _type_memos(self):
return weakref.WeakKeyDictionary()
@property
def dialect_description(self):
return self.name + "+" + self.driver
@property
def supports_sane_rowcount_returning(self):
"""True if this dialect supports sane rowcount even if RETURNING is
in use.
For dialects that don't support RETURNING, this is synonymous with
``supports_sane_rowcount``.
"""
return self.supports_sane_rowcount
@classmethod
def get_pool_class(cls, url):
return getattr(cls, "poolclass", pool.QueuePool)
@classmethod
def load_provisioning(cls):
package = ".".join(cls.__module__.split(".")[0:-1])
try:
__import__(package + ".provision")
except ImportError:
pass
def initialize(self, connection):
try:
self.server_version_info = self._get_server_version_info(
connection
)
except NotImplementedError:
self.server_version_info = None
try:
self.default_schema_name = self._get_default_schema_name(
connection
)
except NotImplementedError:
self.default_schema_name = None
try:
self.default_isolation_level = self.get_default_isolation_level(
connection.connection
)
except NotImplementedError:
self.default_isolation_level = None
self.returns_unicode_strings = self._check_unicode_returns(connection)
if (
self.description_encoding is not None
and self._check_unicode_description(connection)
):
self._description_decoder = self.description_encoding = None
if not self._user_defined_max_identifier_length:
max_ident_length = self._check_max_identifier_length(connection)
if max_ident_length:
self.max_identifier_length = max_ident_length
if (
self.label_length
and self.label_length > self.max_identifier_length
):
raise exc.ArgumentError(
"Label length of %d is greater than this dialect's"
" maximum identifier length of %d"
% (self.label_length, self.max_identifier_length)
)
def on_connect(self):
# inherits the docstring from interfaces.Dialect.on_connect
return None
def _check_max_identifier_length(self, connection):
"""Perform a connection / server version specific check to determine
the max_identifier_length.
If the dialect's class level max_identifier_length should be used,
can return None.
.. versionadded:: 1.3.9
"""
return None
def get_default_isolation_level(self, dbapi_conn):
"""Given a DBAPI connection, return its isolation level, or
a default isolation level if one cannot be retrieved.
May be overridden by subclasses in order to provide a
"fallback" isolation level for databases that cannot reliably
retrieve the actual isolation level.
By default, calls the :meth:`_engine.Interfaces.get_isolation_level`
method, propagating any exceptions raised.
.. versionadded:: 1.3.22
"""
return self.get_isolation_level(dbapi_conn)
def _check_unicode_returns(self, connection, additional_tests=None):
if util.py2k and not self.supports_unicode_statements:
cast_to = util.binary_type
else:
cast_to = util.text_type
if self.positional:
parameters = self.execute_sequence_format()
else:
parameters = {}
def check_unicode(test):
statement = cast_to(
expression.select([test]).compile(dialect=self)
)
try:
cursor = connection.connection.cursor()
connection._cursor_execute(cursor, statement, parameters)
row = cursor.fetchone()
cursor.close()
except exc.DBAPIError as de:
# note that _cursor_execute() will have closed the cursor
# if an exception is thrown.
util.warn(
"Exception attempting to "
"detect unicode returns: %r" % de
)
return False
else:
return isinstance(row[0], util.text_type)
tests = [
# detect plain VARCHAR
expression.cast(
expression.literal_column("'test plain returns'"),
sqltypes.VARCHAR(60),
),
# detect if there's an NVARCHAR type with different behavior
# available
expression.cast(
expression.literal_column("'test unicode returns'"),
sqltypes.Unicode(60),
),
]
if additional_tests:
tests += additional_tests
results = {check_unicode(test) for test in tests}
if results.issuperset([True, False]):
return "conditional"
else:
return results == {True}
def _check_unicode_description(self, connection):
# all DBAPIs on Py2K return cursor.description as encoded,
# until pypy2.1beta2 with sqlite, so let's just check it -
# it's likely others will start doing this too in Py2k.
if util.py2k and not self.supports_unicode_statements:
cast_to = util.binary_type
else:
cast_to = util.text_type
cursor = connection.connection.cursor()
try:
cursor.execute(
cast_to(
expression.select(
[expression.literal_column("'x'").label("some_label")]
).compile(dialect=self)
)
)
return isinstance(cursor.description[0][0], util.text_type)
finally:
cursor.close()
def type_descriptor(self, typeobj):
"""Provide a database-specific :class:`.TypeEngine` object, given
the generic object which comes from the types module.
This method looks for a dictionary called
``colspecs`` as a class or instance-level variable,
and passes on to :func:`_types.adapt_type`.
"""
return sqltypes.adapt_type(typeobj, self.colspecs)
def reflecttable(
self,
connection,
table,
include_columns,
exclude_columns,
resolve_fks,
**opts
):
insp = reflection.Inspector.from_engine(connection)
return insp.reflecttable(
table, include_columns, exclude_columns, resolve_fks, **opts
)
def get_pk_constraint(self, conn, table_name, schema=None, **kw):
    """Compatibility method, adapts the result of get_primary_keys()
    for those dialects which don't implement get_pk_constraint().

    Returns a dict with only the "constrained_columns" key; the
    constraint name is not available through the legacy API.
    """
    return {
        "constrained_columns": self.get_primary_keys(
            conn, table_name, schema=schema, **kw
        )
    }
def validate_identifier(self, ident):
    """Raise :class:`.IdentifierError` if ``ident`` is longer than this
    dialect's ``max_identifier_length``.
    """
    if len(ident) > self.max_identifier_length:
        raise exc.IdentifierError(
            "Identifier '%s' exceeds maximum length of %d characters"
            % (ident, self.max_identifier_length)
        )
def connect(self, *cargs, **cparams):
    # inherits the docstring from interfaces.Dialect.connect
    # Pass the (args, kwargs) produced by create_connect_args()
    # straight through to the DBAPI-level connect().
    return self.dbapi.connect(*cargs, **cparams)

def create_connect_args(self, url):
    # inherits the docstring from interfaces.Dialect.create_connect_args
    # Translate the URL into keyword arguments only; URL query-string
    # entries are merged in and override the translated values.
    opts = url.translate_connect_args()
    opts.update(url.query)
    return [[], opts]
def set_engine_execution_options(self, engine, opts):
if "isolation_level" in opts:
isolation_level = opts["isolation_level"]
@event.listens_for(engine, "engine_connect")
def set_isolation(connection, branch):
if not branch:
self._set_connection_isolation(connection, isolation_level)
if "schema_translate_map" in opts:
getter = schema._schema_getter(opts["schema_translate_map"])
engine.schema_for_object = getter
@event.listens_for(engine, "engine_connect")
def set_schema_translate_map(connection, branch):
connection.schema_for_object = getter
def set_connection_execution_options(self, connection, opts):
if "isolation_level" in opts:
self._set_connection_isolation(connection, opts["isolation_level"])
if "schema_translate_map" in opts:
getter = schema._schema_getter(opts["schema_translate_map"])
connection.schema_for_object = getter
def _set_connection_isolation(self, connection, level):
if connection.in_transaction():
util.warn(
"Connection is already established with a Transaction; "
"setting isolation_level may implicitly rollback or commit "
"the existing transaction, or have no effect until "
"next transaction"
)
self.set_isolation_level(connection.connection, level)
connection.connection._connection_record.finalize_callback.append(
self.reset_isolation_level
)
def do_begin(self, dbapi_connection):
    # No-op by default: nothing is emitted to begin a transaction
    # (DBAPIs generally begin implicitly); dialects override as needed.
    pass

def do_rollback(self, dbapi_connection):
    # Delegate directly to the DBAPI connection's rollback().
    dbapi_connection.rollback()

def do_commit(self, dbapi_connection):
    # Delegate directly to the DBAPI connection's commit().
    dbapi_connection.commit()

def do_close(self, dbapi_connection):
    # Delegate directly to the DBAPI connection's close().
    dbapi_connection.close()
@util.memoized_property
def _dialect_specific_select_one(self):
    # "SELECT 1" rendered through this dialect's own compiler, cached;
    # used by do_ping() below as a minimal liveness statement.
    return str(expression.select([1]).compile(dialect=self))

def do_ping(self, dbapi_connection):
    """Check whether the DBAPI connection is still usable.

    Executes the cached select-one statement on a fresh cursor.
    Returns True on success, False if the resulting DBAPI error is
    recognized by is_disconnect() as a disconnect condition, and
    re-raises any other DBAPI error.
    """
    cursor = None
    try:
        cursor = dbapi_connection.cursor()
        try:
            cursor.execute(self._dialect_specific_select_one)
        finally:
            # always release the cursor, even if execute() raised
            cursor.close()
    except self.dbapi.Error as err:
        if self.is_disconnect(err, dbapi_connection, cursor):
            return False
        else:
            raise
    else:
        return True
def create_xid(self):
    """Create a random two-phase transaction ID.

    This id will be passed to do_begin_twophase(), do_rollback_twophase(),
    do_commit_twophase(). Its format is unspecified.
    """
    # NOTE(review): randint's upper bound is inclusive, so 2 ** 128
    # itself can (extremely rarely) be drawn, yielding 33 hex digits
    # instead of the 32 implied by the %032x padding.  Harmless given
    # the format is unspecified, but worth confirming it was intended.
    return "_sa_%032x" % random.randint(0, 2 ** 128)
def do_savepoint(self, connection, name):
    # Emit SAVEPOINT through a compilable clause element so each
    # dialect's compiler controls the exact syntax.
    connection.execute(expression.SavepointClause(name))

def do_rollback_to_savepoint(self, connection, name):
    # Emit ROLLBACK TO SAVEPOINT via a compilable clause element.
    connection.execute(expression.RollbackToSavepointClause(name))

def do_release_savepoint(self, connection, name):
    # Emit RELEASE SAVEPOINT via a compilable clause element.
    connection.execute(expression.ReleaseSavepointClause(name))
def do_executemany(self, cursor, statement, parameters, context=None):
    # Delegate to the DBAPI cursor's executemany().
    cursor.executemany(statement, parameters)

def do_execute(self, cursor, statement, parameters, context=None):
    # Delegate to the DBAPI cursor's execute().
    cursor.execute(statement, parameters)

def do_execute_no_params(self, cursor, statement, context=None):
    # Execute without passing any parameter collection at all.
    cursor.execute(statement)

def is_disconnect(self, e, connection, cursor):
    # Default: never classify an error as a disconnect; dialects
    # override with driver-specific detection.
    return False
def reset_isolation_level(self, dbapi_conn):
    """Restore the connection to this dialect's default isolation level."""
    # default_isolation_level is read from the first connection
    # after the initial set of 'isolation_level', if any, so is
    # the configured default of this dialect.
    self.set_isolation_level(dbapi_conn, self.default_isolation_level)
def normalize_name(self, name):
    """Convert a reflected identifier to SQLAlchemy's lowercase
    convention, for dialects with ``requires_name_normalize`` set
    (backends that store case-insensitive names as UPPERCASE).
    """
    if name is None:
        return None
    if util.py2k:
        # on Py2K, reflected names may arrive as bytes; decode first
        if isinstance(name, str):
            name = name.decode(self.encoding)

    name_lower = name.lower()
    name_upper = name.upper()

    if name_upper == name_lower:
        # name has no upper/lower conversion, e.g. non-european characters.
        # return unchanged
        return name
    elif name_upper == name and not (
        self.identifier_preparer._requires_quotes
    )(name_lower):
        # name is all uppercase and doesn't require quoting; normalize
        # to all lower case
        return name_lower
    elif name_lower == name:
        # name is all lower case, which if denormalized means we need to
        # force quoting on it
        return quoted_name(name, quote=True)
    else:
        # name is mixed case, means it will be quoted in SQL when used
        # later; no normalization is applied
        return name
def denormalize_name(self, name):
    """Inverse of normalize_name(): convert an all-lowercase,
    unquoted identifier back to the backend's UPPERCASE storage form.
    """
    if name is None:
        return None

    name_lower = name.lower()
    name_upper = name.upper()

    if name_upper == name_lower:
        # name has no upper/lower conversion, e.g. non-european characters.
        # return unchanged
        return name
    elif name_lower == name and not (
        self.identifier_preparer._requires_quotes
    )(name_lower):
        # plain lowercase name: restore the uppercase storage form
        name = name_upper
    if util.py2k:
        # on Py2K, encode back to bytes when the DBAPI can't take unicode
        if not self.supports_unicode_binds:
            name = name.encode(self.encoding)
        else:
            name = unicode(name)  # noqa
    return name
class _RendersLiteral(object):
    """Mixin providing a literal_processor that renders bound values
    as single-quoted string literals.
    """

    def literal_processor(self, dialect):
        def process(value):
            # NOTE(review): no quote escaping is applied, so a value
            # containing "'" produces broken output — presumably
            # acceptable for string-rendering-only use; confirm.
            return "'%s'" % value

        return process
# Temporal types whose bound values render as quoted string literals
# (via the _RendersLiteral mixin) when compiled to a plain string.
class _StrDateTime(_RendersLiteral, sqltypes.DateTime):
    pass


class _StrDate(_RendersLiteral, sqltypes.Date):
    pass


class _StrTime(_RendersLiteral, sqltypes.Time):
    pass
class StrCompileDialect(DefaultDialect):
    """Dialect backed by the string-only SQL compilers, used to render
    constructs to plain strings without a real DBAPI.
    """

    statement_compiler = compiler.StrSQLCompiler
    ddl_compiler = compiler.DDLCompiler
    type_compiler = compiler.StrSQLTypeCompiler
    preparer = compiler.IdentifierPreparer

    # Advertise broadly-available features so most constructs render.
    supports_sequences = True
    sequences_optional = True
    preexecute_autoincrement_sequences = False
    implicit_returning = False

    supports_native_boolean = True

    supports_simple_order_by_label = True

    # Map temporal types to variants that render literals inline.
    colspecs = {
        sqltypes.DateTime: _StrDateTime,
        sqltypes.Date: _StrDate,
        sqltypes.Time: _StrTime,
    }
class DefaultExecutionContext(interfaces.ExecutionContext):
isinsert = False
isupdate = False
isdelete = False
is_crud = False
is_text = False
isddl = False
executemany = False
compiled = None
statement = None
result_column_struct = None
returned_defaults = None
_is_implicit_returning = False
_is_explicit_returning = False
# a hook for SQLite's translation of
# result column names
_translate_colname = None
_expanded_parameters = util.immutabledict()
@classmethod
def _init_ddl(cls, dialect, connection, dbapi_connection, compiled_ddl):
"""Initialize execution context for a DDLElement construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.compiled = compiled = compiled_ddl
self.isddl = True
self.execution_options = compiled.execution_options
if connection._execution_options:
self.execution_options = dict(self.execution_options)
self.execution_options.update(connection._execution_options)
if not dialect.supports_unicode_statements:
self.unicode_statement = util.text_type(compiled)
self.statement = dialect._encoder(self.unicode_statement)[0]
else:
self.statement = self.unicode_statement = util.text_type(compiled)
self.cursor = self.create_cursor()
self.compiled_parameters = []
if dialect.positional:
self.parameters = [dialect.execute_sequence_format()]
else:
self.parameters = [{}]
return self
@classmethod
def _init_compiled(
cls, dialect, connection, dbapi_connection, compiled, parameters
):
"""Initialize execution context for a Compiled construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.compiled = compiled
# this should be caught in the engine before
# we get here
assert compiled.can_execute
self.execution_options = compiled.execution_options.union(
connection._execution_options
)
self.result_column_struct = (
compiled._result_columns,
compiled._ordered_columns,
compiled._textual_ordered_columns,
)
self.unicode_statement = util.text_type(compiled)
if not dialect.supports_unicode_statements:
self.statement = self.unicode_statement.encode(
self.dialect.encoding
)
else:
self.statement = self.unicode_statement
self.isinsert = compiled.isinsert
self.isupdate = compiled.isupdate
self.isdelete = compiled.isdelete
self.is_text = compiled.isplaintext
if not parameters:
self.compiled_parameters = [compiled.construct_params()]
else:
self.compiled_parameters = [
compiled.construct_params(m, _group_number=grp)
for grp, m in enumerate(parameters)
]
self.executemany = len(parameters) > 1
self.cursor = self.create_cursor()
if self.isinsert or self.isupdate or self.isdelete:
self.is_crud = True
self._is_explicit_returning = bool(compiled.statement._returning)
self._is_implicit_returning = bool(
compiled.returning and not compiled.statement._returning
)
if self.compiled.insert_prefetch or self.compiled.update_prefetch:
if self.executemany:
self._process_executemany_defaults()
else:
self._process_executesingle_defaults()
processors = compiled._bind_processors
if compiled.contains_expanding_parameters:
# copy processors for this case as they will be mutated
processors = dict(processors)
positiontup = self._expand_in_parameters(compiled, processors)
elif compiled.positional:
positiontup = self.compiled.positiontup
# Convert the dictionary of bind parameter values
# into a dict or list to be sent to the DBAPI's
# execute() or executemany() method.
parameters = []
if compiled.positional:
for compiled_params in self.compiled_parameters:
param = []
for key in positiontup:
if key in processors:
param.append(processors[key](compiled_params[key]))
else:
param.append(compiled_params[key])
parameters.append(dialect.execute_sequence_format(param))
else:
encode = not dialect.supports_unicode_statements
for compiled_params in self.compiled_parameters:
if encode:
param = dict(
(
dialect._encoder(key)[0],
processors[key](compiled_params[key])
if key in processors
else compiled_params[key],
)
for key in compiled_params
)
else:
param = dict(
(
key,
processors[key](compiled_params[key])
if key in processors
else compiled_params[key],
)
for key in compiled_params
)
parameters.append(param)
self.parameters = dialect.execute_sequence_format(parameters)
return self
def _expand_in_parameters(self, compiled, processors):
"""handle special 'expanding' parameters, IN tuples that are rendered
on a per-parameter basis for an otherwise fixed SQL statement string.
"""
if self.executemany:
raise exc.InvalidRequestError(
"'expanding' parameters can't be used with " "executemany()"
)
if self.compiled.positional and self.compiled._numeric_binds:
# I'm not familiar with any DBAPI that uses 'numeric'
raise NotImplementedError(
"'expanding' bind parameters not supported with "
"'numeric' paramstyle at this time."
)
self._expanded_parameters = {}
compiled_params = self.compiled_parameters[0]
if compiled.positional:
positiontup = []
else:
positiontup = None
replacement_expressions = {}
to_update_sets = {}
for name in (
self.compiled.positiontup
if compiled.positional
else self.compiled.binds
):
parameter = self.compiled.binds[name]
if parameter.expanding:
if name in replacement_expressions:
to_update = to_update_sets[name]
else:
# we are removing the parameter from compiled_params
# because it is a list value, which is not expected by
# TypeEngine objects that would otherwise be asked to
# process it. the single name is being replaced with
# individual numbered parameters for each value in the
# param.
values = compiled_params.pop(name)
if not values:
to_update = to_update_sets[name] = []
replacement_expressions[
name
] = self.compiled.visit_empty_set_expr(
parameter._expanding_in_types
if parameter._expanding_in_types
else [parameter.type]
)
elif isinstance(values[0], (tuple, list)):
to_update = to_update_sets[name] = [
("%s_%s_%s" % (name, i, j), value)
for i, tuple_element in enumerate(values, 1)
for j, value in enumerate(tuple_element, 1)
]
replacement_expressions[name] = (
"VALUES " if self.dialect.tuple_in_values else ""
) + ", ".join(
"(%s)"
% ", ".join(
self.compiled.bindtemplate
% {
"name": to_update[
i * len(tuple_element) + j
][0]
}
for j, value in enumerate(tuple_element)
)
for i, tuple_element in enumerate(values)
)
else:
to_update = to_update_sets[name] = [
("%s_%s" % (name, i), value)
for i, value in enumerate(values, 1)
]
replacement_expressions[name] = ", ".join(
self.compiled.bindtemplate % {"name": key}
for key, value in to_update
)
compiled_params.update(to_update)
processors.update(
(key, processors[name])
for key, value in to_update
if name in processors
)
if compiled.positional:
positiontup.extend(name for name, value in to_update)
self._expanded_parameters[name] = [
expand_key for expand_key, value in to_update
]
elif compiled.positional:
positiontup.append(name)
def process_expanding(m):
return replacement_expressions[m.group(1)]
self.statement = re.sub(
r"\[EXPANDING_(\S+)\]", process_expanding, self.statement
)
return positiontup
@classmethod
def _init_statement(
cls, dialect, connection, dbapi_connection, statement, parameters
):
"""Initialize execution context for a string SQL statement."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.is_text = True
# plain text statement
self.execution_options = connection._execution_options
if not parameters:
if self.dialect.positional:
self.parameters = [dialect.execute_sequence_format()]
else:
self.parameters = [{}]
elif isinstance(parameters[0], dialect.execute_sequence_format):
self.parameters = parameters
elif isinstance(parameters[0], dict):
if dialect.supports_unicode_statements:
self.parameters = parameters
else:
self.parameters = [
{dialect._encoder(k)[0]: d[k] for k in d}
for d in parameters
] or [{}]
else:
self.parameters = [
dialect.execute_sequence_format(p) for p in parameters
]
self.executemany = len(parameters) > 1
if not dialect.supports_unicode_statements and isinstance(
statement, util.text_type
):
self.unicode_statement = statement
self.statement = dialect._encoder(statement)[0]
else:
self.statement = self.unicode_statement = statement
self.cursor = self.create_cursor()
return self
@classmethod
def _init_default(cls, dialect, connection, dbapi_connection):
"""Initialize execution context for a ColumnDefault construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.execution_options = connection._execution_options
self.cursor = self.create_cursor()
return self
@util.memoized_property
def engine(self):
    # Engine owning this execution's root connection.
    return self.root_connection.engine

@util.memoized_property
def postfetch_cols(self):
    # Columns the compiled statement marks for post-execution fetch.
    return self.compiled.postfetch

@util.memoized_property
def prefetch_cols(self):
    """Columns whose defaults are evaluated before execution:
    insert_prefetch for INSERT, update_prefetch for UPDATE,
    empty otherwise.
    """
    if self.isinsert:
        return self.compiled.insert_prefetch
    elif self.isupdate:
        return self.compiled.update_prefetch
    else:
        return ()
@util.memoized_property
def returning_cols(self):
    """The compiled statement's RETURNING column collection.

    Bug fix: the original body evaluated ``self.compiled.returning``
    without returning it, so this memoized property always yielded
    ``None``; the value is now returned.
    """
    return self.compiled.returning
@util.memoized_property
def no_parameters(self):
return self.execution_options.get("no_parameters", False)
@util.memoized_property
def should_autocommit(self):
autocommit = self.execution_options.get(
"autocommit",
not self.compiled
and self.statement
and expression.PARSE_AUTOCOMMIT
or False,
)
if autocommit is expression.PARSE_AUTOCOMMIT:
return self.should_autocommit_text(self.unicode_statement)
else:
return autocommit
def _execute_scalar(self, stmt, type_):
"""Execute a string statement on the current cursor, returning a
scalar result.
Used to fire off sequences, default phrases, and "select lastrowid"
types of statements individually or in the context of a parent INSERT
or UPDATE statement.
"""
conn = self.root_connection
if (
isinstance(stmt, util.text_type)
and not self.dialect.supports_unicode_statements
):
stmt = self.dialect._encoder(stmt)[0]
if self.dialect.positional:
default_params = self.dialect.execute_sequence_format()
else:
default_params = {}
conn._cursor_execute(self.cursor, stmt, default_params, context=self)
r = self.cursor.fetchone()[0]
if type_ is not None:
# apply type post processors to the result
proc = type_._cached_result_processor(
self.dialect, self.cursor.description[0][1]
)
if proc:
return proc(r)
return r
@property
def connection(self):
return self.root_connection._branch()
def should_autocommit_text(self, statement):
return AUTOCOMMIT_REGEXP.match(statement)
def _use_server_side_cursor(self):
if not self.dialect.supports_server_side_cursors:
return False
if self.dialect.server_side_cursors:
use_server_side = self.execution_options.get(
"stream_results", True
) and (
(
self.compiled
and isinstance(
self.compiled.statement, expression.Selectable
)
or (
(
not self.compiled
or isinstance(
self.compiled.statement, expression.TextClause
)
)
and self.statement
and SERVER_SIDE_CURSOR_RE.match(self.statement)
)
)
)
else:
use_server_side = self.execution_options.get(
"stream_results", False
)
return use_server_side
def create_cursor(self):
if self._use_server_side_cursor():
self._is_server_side = True
return self.create_server_side_cursor()
else:
self._is_server_side = False
return self._dbapi_connection.cursor()
def create_server_side_cursor(self):
raise NotImplementedError()
def pre_exec(self):
pass
def post_exec(self):
pass
def get_result_processor(self, type_, colname, coltype):
"""Return a 'result processor' for a given type as present in
cursor.description.
This has a default implementation that dialects can override
for context-sensitive result type handling.
"""
return type_._cached_result_processor(self.dialect, coltype)
def get_lastrowid(self):
"""return self.cursor.lastrowid, or equivalent, after an INSERT.
This may involve calling special cursor functions,
issuing a new SELECT on the cursor (or a new one),
or returning a stored value that was
calculated within post_exec().
This function will only be called for dialects
which support "implicit" primary key generation,
keep preexecute_autoincrement_sequences set to False,
and when no explicit id value was bound to the
statement.
The function is called once, directly after
post_exec() and before the transaction is committed
or ResultProxy is generated. If the post_exec()
method assigns a value to `self._lastrowid`, the
value is used in place of calling get_lastrowid().
Note that this method is *not* equivalent to the
``lastrowid`` method on ``ResultProxy``, which is a
direct proxy to the DBAPI ``lastrowid`` accessor
in all cases.
"""
return self.cursor.lastrowid
def handle_dbapi_exception(self, e):
pass
def get_result_proxy(self):
if self._is_server_side:
return result.BufferedRowResultProxy(self)
else:
return result.ResultProxy(self)
@property
def rowcount(self):
return self.cursor.rowcount
def supports_sane_rowcount(self):
return self.dialect.supports_sane_rowcount
def supports_sane_multi_rowcount(self):
return self.dialect.supports_sane_multi_rowcount
def _setup_crud_result_proxy(self):
if self.isinsert and not self.executemany:
if (
not self._is_implicit_returning
and not self.compiled.inline
and self.dialect.postfetch_lastrowid
):
self._setup_ins_pk_from_lastrowid()
elif not self._is_implicit_returning:
self._setup_ins_pk_from_empty()
result = self.get_result_proxy()
if self.isinsert:
if self._is_implicit_returning:
row = result.fetchone()
self.returned_defaults = row
self._setup_ins_pk_from_implicit_returning(row)
result._soft_close()
result._metadata = None
elif not self._is_explicit_returning:
result._soft_close()
result._metadata = None
elif self.isupdate and self._is_implicit_returning:
row = result.fetchone()
self.returned_defaults = row
result._soft_close()
result._metadata = None
elif result._metadata is None:
# no results, get rowcount
# (which requires open cursor on some drivers
# such as kintersbasdb, mxodbc)
result.rowcount
result._soft_close()
return result
def _setup_ins_pk_from_lastrowid(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
lastrowid = self.get_lastrowid()
if lastrowid is not None:
autoinc_col = table._autoincrement_column
if autoinc_col is not None:
# apply type post processors to the lastrowid
proc = autoinc_col.type._cached_result_processor(
self.dialect, None
)
if proc is not None:
lastrowid = proc(lastrowid)
self.inserted_primary_key = [
lastrowid
if c is autoinc_col
else compiled_params.get(key_getter(c), None)
for c in table.primary_key
]
else:
# don't have a usable lastrowid, so
# do the same as _setup_ins_pk_from_empty
self.inserted_primary_key = [
compiled_params.get(key_getter(c), None)
for c in table.primary_key
]
def _setup_ins_pk_from_empty(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
self.inserted_primary_key = [
compiled_params.get(key_getter(c), None) for c in table.primary_key
]
def _setup_ins_pk_from_implicit_returning(self, row):
if row is None:
self.inserted_primary_key = None
return
key_getter = self.compiled._key_getters_for_crud_column[2]
table = self.compiled.statement.table
compiled_params = self.compiled_parameters[0]
self.inserted_primary_key = [
row[col] if value is None else value
for col, value in [
(col, compiled_params.get(key_getter(col), None))
for col in table.primary_key
]
]
def lastrow_has_defaults(self):
return (self.isinsert or self.isupdate) and bool(
self.compiled.postfetch
)
def set_input_sizes(
self, translate=None, include_types=None, exclude_types=None
):
"""Given a cursor and ClauseParameters, call the appropriate
style of ``setinputsizes()`` on the cursor, using DB-API types
from the bind parameter's ``TypeEngine`` objects.
This method only called by those dialects which require it,
currently cx_oracle.
"""
if not hasattr(self.compiled, "bind_names"):
return
inputsizes = {}
for bindparam in self.compiled.bind_names:
dialect_impl = bindparam.type._unwrapped_dialect_impl(self.dialect)
dialect_impl_cls = type(dialect_impl)
dbtype = dialect_impl.get_dbapi_type(self.dialect.dbapi)
if (
dbtype is not None
and (
not exclude_types
or dbtype not in exclude_types
and dialect_impl_cls not in exclude_types
)
and (
not include_types
or dbtype in include_types
or dialect_impl_cls in include_types
)
):
inputsizes[bindparam] = dbtype
else:
inputsizes[bindparam] = None
if self.dialect._has_events:
self.dialect.dispatch.do_setinputsizes(
inputsizes, self.cursor, self.statement, self.parameters, self
)
if self.dialect.positional:
positional_inputsizes = []
for key in self.compiled.positiontup:
bindparam = self.compiled.binds[key]
dbtype = inputsizes.get(bindparam, None)
if dbtype is not None:
if key in self._expanded_parameters:
positional_inputsizes.extend(
[dbtype] * len(self._expanded_parameters[key])
)
else:
positional_inputsizes.append(dbtype)
try:
self.cursor.setinputsizes(*positional_inputsizes)
except BaseException as e:
self.root_connection._handle_dbapi_exception(
e, None, None, None, self
)
else:
keyword_inputsizes = {}
for bindparam, key in self.compiled.bind_names.items():
dbtype = inputsizes.get(bindparam, None)
if dbtype is not None:
if translate:
# TODO: this part won't work w/ the
# expanded_parameters feature, e.g. for cx_oracle
# quoted bound names
key = translate.get(key, key)
if not self.dialect.supports_unicode_binds:
key = self.dialect._encoder(key)[0]
if key in self._expanded_parameters:
keyword_inputsizes.update(
(expand_key, dbtype)
for expand_key in self._expanded_parameters[key]
)
else:
keyword_inputsizes[key] = dbtype
try:
self.cursor.setinputsizes(**keyword_inputsizes)
except BaseException as e:
self.root_connection._handle_dbapi_exception(
e, None, None, None, self
)
def _exec_default(self, column, default, type_):
if default.is_sequence:
return self.fire_sequence(default, type_)
elif default.is_callable:
self.current_column = column
return default.arg(self)
elif default.is_clause_element:
# TODO: expensive branching here should be
# pulled into _exec_scalar()
conn = self.connection
if not default._arg_is_typed:
default_arg = expression.type_coerce(default.arg, type_)
else:
default_arg = default.arg
c = expression.select([default_arg]).compile(bind=conn)
return conn._execute_compiled(c, (), {}).scalar()
else:
return default.arg
current_parameters = None
"""A dictionary of parameters applied to the current row.
This attribute is only available in the context of a user-defined default
generation function, e.g. as described at :ref:`context_default_functions`.
It consists of a dictionary which includes entries for each column/value
pair that is to be part of the INSERT or UPDATE statement. The keys of the
dictionary will be the key value of each :class:`_schema.Column`,
which is usually
synonymous with the name.
Note that the :attr:`.DefaultExecutionContext.current_parameters` attribute
does not accommodate for the "multi-values" feature of the
:meth:`_expression.Insert.values` method. The
:meth:`.DefaultExecutionContext.get_current_parameters` method should be
preferred.
.. seealso::
:meth:`.DefaultExecutionContext.get_current_parameters`
:ref:`context_default_functions`
"""
def get_current_parameters(self, isolate_multiinsert_groups=True):
"""Return a dictionary of parameters applied to the current row.
This method can only be used in the context of a user-defined default
generation function, e.g. as described at
:ref:`context_default_functions`. When invoked, a dictionary is
returned which includes entries for each column/value pair that is part
of the INSERT or UPDATE statement. The keys of the dictionary will be
the key value of each :class:`_schema.Column`,
which is usually synonymous
with the name.
:param isolate_multiinsert_groups=True: indicates that multi-valued
INSERT constructs created using :meth:`_expression.Insert.values`
should be
handled by returning only the subset of parameters that are local
to the current column default invocation. When ``False``, the
raw parameters of the statement are returned including the
naming convention used in the case of multi-valued INSERT.
.. versionadded:: 1.2 added
:meth:`.DefaultExecutionContext.get_current_parameters`
which provides more functionality over the existing
:attr:`.DefaultExecutionContext.current_parameters`
attribute.
.. seealso::
:attr:`.DefaultExecutionContext.current_parameters`
:ref:`context_default_functions`
"""
try:
parameters = self.current_parameters
column = self.current_column
except AttributeError:
raise exc.InvalidRequestError(
"get_current_parameters() can only be invoked in the "
"context of a Python side column default function"
)
if (
isolate_multiinsert_groups
and self.isinsert
and self.compiled.statement._has_multi_parameters
):
if column._is_multiparam_column:
index = column.index + 1
d = {column.original.key: parameters[column.key]}
else:
d = {column.key: parameters[column.key]}
index = 0
keys = self.compiled.statement.parameters[0].keys()
d.update(
(key, parameters["%s_m%d" % (key, index)]) for key in keys
)
return d
else:
return parameters
def get_insert_default(self, column):
if column.default is None:
return None
else:
return self._exec_default(column, column.default, column.type)
def get_update_default(self, column):
if column.onupdate is None:
return None
else:
return self._exec_default(column, column.onupdate, column.type)
def _process_executemany_defaults(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
scalar_defaults = {}
insert_prefetch = self.compiled.insert_prefetch
update_prefetch = self.compiled.update_prefetch
# pre-determine scalar Python-side defaults
# to avoid many calls of get_insert_default()/
# get_update_default()
for c in insert_prefetch:
if c.default and c.default.is_scalar:
scalar_defaults[c] = c.default.arg
for c in update_prefetch:
if c.onupdate and c.onupdate.is_scalar:
scalar_defaults[c] = c.onupdate.arg
for param in self.compiled_parameters:
self.current_parameters = param
for c in insert_prefetch:
if c in scalar_defaults:
val = scalar_defaults[c]
else:
val = self.get_insert_default(c)
if val is not None:
param[key_getter(c)] = val
for c in update_prefetch:
if c in scalar_defaults:
val = scalar_defaults[c]
else:
val = self.get_update_default(c)
if val is not None:
param[key_getter(c)] = val
del self.current_parameters
def _process_executesingle_defaults(self):
key_getter = self.compiled._key_getters_for_crud_column[2]
self.current_parameters = (
compiled_parameters
) = self.compiled_parameters[0]
for c in self.compiled.insert_prefetch:
if c.default and not c.default.is_sequence and c.default.is_scalar:
val = c.default.arg
else:
val = self.get_insert_default(c)
if val is not None:
compiled_parameters[key_getter(c)] = val
for c in self.compiled.update_prefetch:
val = self.get_update_default(c)
if val is not None:
compiled_parameters[key_getter(c)] = val
del self.current_parameters
DefaultDialect.execution_ctx_cls = DefaultExecutionContext
| [
"[email protected]"
] | |
21bf7c487c1b9c93cf82fd2d75a3211f366adae1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02627/s593488255.py | 20f1737d2fcd33b169b6e476a79494c243027b86 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | x = input()
if x.islower():
print("a")
else:
print("A") | [
"[email protected]"
] | |
0eef2b6a52956de948410e4e9c6e033167b702c7 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/5040/264005040.py | 513841a97cbf65b75f2325a806e4038bc2fd7268 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 2,104 | py | from bots.botsconfig import *
from records005040 import recorddefs
syntax = {
'version': '00504',
'functionalgroup': 'MG',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'MIS', MIN: 0, MAX: 1},
{ID: 'N1', MIN: 1, MAX: 2, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 2},
{ID: 'PER', MIN: 0, MAX: 2},
]},
{ID: 'LX', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 2},
{ID: 'N1', MIN: 0, MAX: 1},
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 2},
{ID: 'PER', MIN: 0, MAX: 2},
{ID: 'QTY', MIN: 0, MAX: 2},
{ID: 'AMT', MIN: 0, MAX: 2},
{ID: 'DTP', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 1, MAX: 10},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 2},
{ID: 'REF', MIN: 0, MAX: 4},
]},
{ID: 'LS', MIN: 0, MAX: 1, LEVEL: [
{ID: 'REC', MIN: 1, MAX: 1, LEVEL: [
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'DFI', MIN: 0, MAX: 99999},
{ID: 'QTY', MIN: 0, MAX: 1},
{ID: 'AMT', MIN: 0, MAX: 10},
{ID: 'INT', MIN: 0, MAX: 1},
{ID: 'SOM', MIN: 0, MAX: 10},
{ID: 'DTP', MIN: 0, MAX: 14},
{ID: 'MRC', MIN: 0, MAX: 2},
{ID: 'MSG', MIN: 0, MAX: 11},
{ID: 'YNQ', MIN: 0, MAX: 2},
{ID: 'PER', MIN: 0, MAX: 99999},
]},
{ID: 'LE', MIN: 1, MAX: 1},
]},
]},
]},
{ID: 'QTY', MIN: 0, MAX: 2},
{ID: 'AMT', MIN: 0, MAX: 2},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
] | |
c492c46afabdb6f86014168232c25d145e6184d4 | db7a459e31c0a186dca64a829f93090fa58feab0 | /model_pytorch/pretrained/modeling.py | b96a81d856c28817aaaf7a1089a83c618befe74c | [] | no_license | ZouJoshua/dl_project | a3e7c9e035c37af698d4ef388fbb8c46174d5de1 | ee7ecedd55ce544b127be8009e026ac2cdc3f71b | refs/heads/master | 2022-12-04T04:21:19.937698 | 2022-01-27T07:33:37 | 2022-01-27T07:33:37 | 175,645,793 | 9 | 3 | null | 2022-11-21T21:30:23 | 2019-03-14T15:07:27 | Python | UTF-8 | Python | false | false | 60,059 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from .file_utils import cached_path, WEIGHTS_NAME, CONFIG_NAME
# Module-level logger for this file.
logger = logging.getLogger(__name__)
# Download URLs for the canonical pre-trained BERT archives (weights + config),
# keyed by model shortcut name.
PRETRAINED_MODEL_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
# File names expected inside a TensorFlow-style BERT checkpoint directory.
BERT_CONFIG_NAME = 'bert_config.json'
TF_WEIGHTS_NAME = 'model.ckpt'
def load_tf_weights_in_bert(model, tf_checkpoint_path):
    """ Load tf checkpoints in a pytorch model

    Walks every variable in the TensorFlow checkpoint at
    ``tf_checkpoint_path``, maps its slash-separated scope name onto the
    attribute path of ``model``, and copies the weight tensor in place.
    Returns the same ``model`` instance.
    """
    try:
        # TensorFlow/NumPy are only needed for this conversion path, so they
        # are imported lazily here rather than at module import time.
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
              "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            # Scope components like "layer_11" are split into ("layer", "11")
            # so the numeric suffix can index into a ModuleList below.
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            # Translate TF variable-name components to PyTorch attribute names.
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'squad':
                pointer = getattr(pointer, 'classifier')
            else:
                try:
                    pointer = getattr(pointer, l[0])
                except AttributeError:
                    # NOTE(review): this `continue` only skips the current path
                    # component of the inner loop, not the whole variable, so
                    # subsequent components are resolved against the stale
                    # `pointer` — confirm whether that is intended before
                    # changing it.
                    print("Skipping {}".format("/".join(name)))
                    continue
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            # TF stores dense kernels transposed relative to nn.Linear.weight.
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            # Attach both shapes to the error for easier debugging.
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
def gelu(x):
    """Gaussian Error Linear Unit activation, in its exact erf formulation.

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    # Phi(x), the standard-normal CDF, expressed via erf.
    normal_cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * normal_cdf
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
# Registry mapping activation names (as accepted in BertConfig.hidden_act)
# to their callable implementations.
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`.
    """
    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Constructs BertConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`,
                or the path to a JSON config file to load all settings from.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.

        Raises:
            ValueError: if the first argument is neither an int nor a string path.
        """
        # A string argument is treated as a path to a JSON config file
        # (the Python 2 branch also accepts `unicode` paths).
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        # An int argument is the vocabulary size; remaining hyper-parameters
        # come from the keyword defaults above.
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # Fixed message: the two literals previously concatenated without a
            # space, producing "(int)or the path".
            raise ValueError("First argument must be either a vocabulary size (int) "
                             "or the path to a pretrained model config file (str)")
    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # -1 is a placeholder vocab size; real attribute values are
        # overwritten from json_object below.
        config = BertConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        # Human-readable repr: the pretty-printed JSON form of the config.
        return str(self.to_json_string())
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
    def to_json_file(self, json_file_path):
        """ Save this instance to a json file."""
        with open(json_file_path, "w", encoding='utf-8') as writer:
            writer.write(self.to_json_string())
# Prefer NVIDIA apex's fused CUDA LayerNorm when available; otherwise fall
# back to a pure-PyTorch implementation with the same interface.
try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
    class BertLayerNorm(nn.Module):
        def __init__(self, hidden_size, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(BertLayerNorm, self).__init__()
            # Learnable per-feature scale (gamma) and shift (beta).
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps
        def forward(self, x):
            # Normalize over the last (hidden) dimension using the biased
            # variance estimate, as TensorFlow's layer_norm does.
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
    """Combine word, position and segment (token-type) embeddings.

    The three lookups are summed, layer-normalized, and passed through
    dropout to produce the encoder's input representation.
    """
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        # Attribute names are kept unchanged so pretrained checkpoints map
        # onto this module's state_dict.
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        seq_len = input_ids.size(1)
        # Absolute positions 0..seq_len-1, broadcast across the batch.
        pos_ids = torch.arange(seq_len, dtype=torch.long, device=input_ids.device)
        pos_ids = pos_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            # Default: every token belongs to segment 0.
            token_type_ids = torch.zeros_like(input_ids)
        combined = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(pos_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(combined))
class BertSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention over a hidden sequence."""
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # One linear projection per role (Q, K, V), hidden_size -> all_head_size.
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        """Reshape (batch, seq, all_head) into (batch, heads, seq, head_size)."""
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        # Project once per role, then expose the per-head layout.
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))
        # Raw attention logits, scaled by sqrt(head size).
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.attention_head_size)
        # attention_mask is additive (precomputed in BertModel.forward;
        # masked positions carry large negative values).
        scores = scores + attention_mask
        # Softmax over keys; dropping whole attention probabilities follows
        # the original Transformer paper.
        probs = self.dropout(nn.Softmax(dim=-1)(scores))
        context = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        return context.view(*merged_shape)
class BertSelfOutput(nn.Module):
    """Project attention output back to hidden size, then add & norm."""
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Full attention sub-layer: self-attention plus its output block."""
    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        attended = self.self(input_tensor, attention_mask)
        # The output block applies the residual against the original input.
        return self.output(attended, input_tensor)
class BertIntermediate(nn.Module):
    """Position-wise feed-forward expansion with the configured activation."""
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        act = config.hidden_act
        # The activation may be given as a name (looked up in ACT2FN) or as a
        # callable; the py2 branch also accepts `unicode` names.
        if isinstance(act, str) or (sys.version_info[0] == 2 and isinstance(act, unicode)):
            act = ACT2FN[act]
        self.intermediate_act_fn = act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Project the feed-forward expansion back down, then add & norm."""
    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        shrunk = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(shrunk + input_tensor)
class BertLayer(nn.Module):
    """One transformer encoder layer: attention + feed-forward sub-layers."""
    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attended = self.attention(hidden_states, attention_mask)
        expanded = self.intermediate(attended)
        # Feed-forward output is residual-connected to the attention output.
        return self.output(expanded, attended)
class BertEncoder(nn.Module):
    """Stack of num_hidden_layers identical BertLayer modules."""
    def __init__(self, config):
        super(BertEncoder, self).__init__()
        # Build one template layer, then deep-copy it so every layer gets
        # independent parameters.
        template = BertLayer(config)
        self.layer = nn.ModuleList(
            [copy.deepcopy(template) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        collected = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden_states)
        if not output_all_encoded_layers:
            # Only the final layer's output is returned (as a one-element list).
            collected.append(hidden_states)
        return collected
class BertPooler(nn.Module):
    """Pool a sequence by transforming the first token's hidden state."""
    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" is simply taking the hidden state of the first ([CLS])
        # token, then applying a dense layer and tanh.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the MLM decoder."""
    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        act = config.hidden_act
        # Activation may be a name (looked up in ACT2FN) or a callable;
        # the py2 branch also accepts `unicode` names.
        if isinstance(act, str) or (sys.version_info[0] == 2 and isinstance(act, unicode)):
            act = ACT2FN[act]
        self.transform_act_fn = act
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        transformed = self.transform_act_fn(self.dense(hidden_states))
        return self.LayerNorm(transformed)
class BertLMPredictionHead(nn.Module):
    """Masked-LM decoder head whose projection weights are tied to the input embeddings."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The decoder shares its weight matrix with the word embeddings; only
        # the per-token output bias is an independent parameter.
        vocab_size = bert_model_embedding_weights.size(0)
        hidden_size = bert_model_embedding_weights.size(1)
        self.decoder = nn.Linear(hidden_size, vocab_size, bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(vocab_size))

    def forward(self, hidden_states):
        """Transform the hidden states and project them onto the vocabulary."""
        transformed = self.transform(hidden_states)
        return self.decoder(transformed) + self.bias
class BertOnlyMLMHead(nn.Module):
    """Pre-training head with only the masked-LM predictions (no next-sentence classifier)."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        """Return vocabulary logits for every position of `sequence_output`."""
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Pre-training head with only the next-sentence classifier (no masked-LM decoder)."""

    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        """Return the two-way is-next / not-next logits for the pooled output."""
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Combined pre-training heads: masked-LM decoder plus next-sentence classifier."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        """Return (per-position vocabulary logits, is-next logits)."""
        lm_scores = self.predictions(sequence_output)
        nsp_scores = self.seq_relationship(pooled_output)
        return lm_scores, nsp_scores
class BertPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        # *inputs/**kwargs are accepted so subclasses can pass extra
        # constructor arguments (e.g. num_labels); they are ignored here.
        super(BertPreTrainedModel, self).__init__()
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config
    def init_bert_weights(self, module):
        """ Initialize the weights.
        """
        # Intended to be passed to nn.Module.apply(), which visits every submodule.
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            # LayerNorm starts out as the identity transform: zero shift, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """
        Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `bert-base-uncased`
                    . `bert-large-uncased`
                    . `bert-base-cased`
                    . `bert-large-cased`
                    . `bert-base-multilingual-uncased`
                    . `bert-base-multilingual-cased`
                    . `bert-base-chinese`
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `model.chkpt` a TensorFlow checkpoint
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        """
        # Split the loader-only keyword arguments from the ones that will be
        # forwarded to the model constructor below.
        state_dict = kwargs.get('state_dict', None)
        kwargs.pop('state_dict', None)
        cache_dir = kwargs.get('cache_dir', None)
        kwargs.pop('cache_dir', None)
        from_tf = kwargs.get('from_tf', False)
        kwargs.pop('from_tf', None)
        # A known shortcut name maps to its download URL; anything else is
        # treated as a direct path or url.
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            archive_file = pretrained_model_name_or_path
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except EnvironmentError:
            # NOTE: returns None on failure rather than raising.
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                    archive_file))
            return None
        if resolved_archive_file == archive_file:
            logger.info("loading archive file {}".format(archive_file))
        else:
            logger.info("loading archive file {} from cache at {}".format(
                archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file) or from_tf:
            serialization_dir = resolved_archive_file
        else:
            # Extract archive to temp dir
            tempdir = tempfile.mkdtemp()
            logger.info("extracting archive file {} to temp dir {}".format(
                resolved_archive_file, tempdir))
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        # Load config
        config_file = os.path.join(serialization_dir, CONFIG_NAME)
        if not os.path.exists(config_file):
            # Backward compatibility with old naming format
            config_file = os.path.join(serialization_dir, BERT_CONFIG_NAME)
        config = BertConfig.from_json_file(config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
            state_dict = torch.load(weights_path, map_location='cpu')
        if tempdir:
            # Clean up temp dir
            shutil.rmtree(tempdir)
        if from_tf:
            # Directly load from a TensorFlow checkpoint
            weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
            return load_tf_weights_in_bert(model, weights_path)
        # Load from a PyTorch state_dict
        # Rename legacy TF-style LayerNorm keys: gamma -> weight, beta -> bias.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata
        def load(module, prefix=''):
            # Walk the module tree depth-first, loading each submodule's
            # parameters from the state_dict keys that start with its dotted prefix.
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        start_prefix = ''
        # Some checkpoints prefix every key with 'bert.'; when the model has no
        # `.bert` submodule, keep that prefix while matching keys.
        if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
            start_prefix = 'bert.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                model.__class__.__name__, "\n\t".join(error_msgs)))
        return model
class BertModel(BertPreTrainedModel):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").
    Params:
        config: a BertConfig class instance with the configuration to build a new model
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
                of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
                encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first character of the
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.apply(self.init_bert_weights)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        """Encode `input_ids`; see the class docstring for shapes and outputs."""
        # Missing masks default to "attend everywhere" / "all segment A".
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        # The pooler always consumes the last encoder layer's hidden states.
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            # Unwrap the single-element list into a plain tensor.
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class BertForPreTraining(BertPreTrainedModel):
    """BERT with both pre-training heads on top.

    Combines the base BertModel with:
      - a masked language modeling head whose decoder is tied to the input
        word embeddings, and
      - a next sentence prediction classifier over the pooled [CLS] output.

    Inputs mirror BertModel: `input_ids` [batch_size, seq_len] plus optional
    `token_type_ids` and `attention_mask` of the same shape. When both
    `masked_lm_labels` ([batch_size, seq_len]; values in [-1, 0, ..., vocab_size-1],
    -1 positions are ignored) and `next_sentence_label` ([batch_size];
    0 = is-next, 1 = random sentence) are given, the summed MLM + NSP
    cross-entropy loss is returned. Otherwise the tuple
    (prediction_scores [batch_size, seq_len, vocab_size],
    seq_relationship_score [batch_size, 2]) is returned.
    """
    def __init__(self, config):
        super(BertForPreTraining, self).__init__(config)
        self.bert = BertModel(config)
        # The MLM decoder is tied to the word embedding matrix.
        self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None):
        sequence_output, pooled_output = self.bert(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        if masked_lm_labels is None or next_sentence_label is None:
            return prediction_scores, seq_relationship_score
        # Positions labelled -1 are excluded from the MLM loss.
        loss_fct = CrossEntropyLoss(ignore_index=-1)
        masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
        next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
        return masked_lm_loss + next_sentence_loss
class BertForMaskedLM(BertPreTrainedModel):
    """BERT with the masked language modeling head only.

    Inputs mirror BertModel: `input_ids` [batch_size, seq_len] with optional
    `token_type_ids` and `attention_mask` of the same shape. When
    `masked_lm_labels` is given ([batch_size, seq_len]; values in
    [-1, 0, ..., vocab_size-1], -1 positions are ignored), the MLM
    cross-entropy loss is returned; otherwise the vocabulary logits of shape
    [batch_size, seq_len, vocab_size] are returned.
    """
    def __init__(self, config):
        super(BertForMaskedLM, self).__init__(config)
        self.bert = BertModel(config)
        # The MLM decoder shares its weights with the word embedding matrix.
        self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
        sequence_output, _ = self.bert(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        prediction_scores = self.cls(sequence_output)
        if masked_lm_labels is None:
            return prediction_scores
        # Positions labelled -1 are excluded from the loss.
        loss_fct = CrossEntropyLoss(ignore_index=-1)
        return loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
class BertForNextSentencePrediction(BertPreTrainedModel):
    """BERT with the next sentence prediction head only.

    Inputs mirror BertModel: `input_ids` [batch_size, seq_len] with optional
    `token_type_ids` and `attention_mask` of the same shape. When
    `next_sentence_label` is given ([batch_size]; 0 = the second segment
    follows the first, 1 = it is a random sentence), the NSP cross-entropy
    loss is returned; otherwise logits of shape [batch_size, 2] are returned.
    """
    def __init__(self, config):
        super(BertForNextSentencePrediction, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)
        self.apply(self.init_bert_weights)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):
        # Only the pooled [CLS] representation feeds the classifier.
        _, pooled_output = self.bert(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        seq_relationship_score = self.cls(pooled_output)
        if next_sentence_label is None:
            return seq_relationship_score
        loss_fct = CrossEntropyLoss(ignore_index=-1)
        return loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
class BertForSequenceClassification(BertPreTrainedModel):
    """BERT with a linear classifier over the pooled [CLS] output.

    `num_labels` fixes the number of classes. Inputs mirror BertModel:
    `input_ids` [batch_size, seq_len] (sequences should begin with the
    special [CLS] token) with optional `token_type_ids` and
    `attention_mask`. When `labels` is given ([batch_size]; values in
    [0, num_labels)), the cross-entropy loss is returned; otherwise the
    logits of shape [batch_size, num_labels].
    """
    def __init__(self, config, num_labels):
        super(BertForSequenceClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        logits = self.classifier(self.dropout(pooled_output))
        if labels is None:
            return logits
        loss_fct = CrossEntropyLoss()
        return loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
class BertForMultipleChoice(BertPreTrainedModel):
    """BERT for multiple-choice tasks: one pooled score per candidate.

    `num_choices` fixes the number of candidates per example. Inputs carry
    an extra choice dimension: `input_ids`, `token_type_ids` and
    `attention_mask` are [batch_size, num_choices, seq_len]. Each choice is
    encoded independently and scored with a single linear unit; the scores
    are reshaped to [batch_size, num_choices]. When `labels` is given
    ([batch_size]; values in [0, num_choices)), the cross-entropy loss is
    returned; otherwise the reshaped logits.
    """
    def __init__(self, config, num_choices):
        super(BertForMultipleChoice, self).__init__(config)
        self.num_choices = num_choices
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_bert_weights)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        # Fold the choice dimension into the batch so BertModel sees 2-D inputs.
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = None if token_type_ids is None else token_type_ids.view(-1, token_type_ids.size(-1))
        flat_attention_mask = None if attention_mask is None else attention_mask.view(-1, attention_mask.size(-1))
        _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False)
        scores = self.classifier(self.dropout(pooled_output))
        # Unfold back to one score per choice.
        reshaped_logits = scores.view(-1, self.num_choices)
        if labels is None:
            return reshaped_logits
        loss_fct = CrossEntropyLoss()
        return loss_fct(reshaped_logits, labels)
class BertForTokenClassification(BertPreTrainedModel):
    """BERT with a per-token linear classifier over the last hidden layer.

    `num_labels` fixes the tag-set size. Inputs mirror BertModel:
    `input_ids` [batch_size, seq_len] with optional `token_type_ids` and
    `attention_mask`. When `labels` is given ([batch_size, seq_len]; values
    in [0, num_labels)), the cross-entropy loss is returned, computed only
    over positions where `attention_mask` == 1 (over all positions when no
    mask is supplied); otherwise logits of shape
    [batch_size, seq_len, num_labels] are returned.
    """
    def __init__(self, config, num_labels):
        super(BertForTokenClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        logits = self.classifier(self.dropout(sequence_output))
        if labels is None:
            return logits
        loss_fct = CrossEntropyLoss()
        if attention_mask is None:
            return loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        # Restrict the loss to real (unmasked) positions.
        active = attention_mask.view(-1) == 1
        active_logits = logits.view(-1, self.num_labels)[active]
        active_labels = labels.view(-1)[active]
        return loss_fct(active_logits, active_labels)
class BertForQuestionAnswering(BertPreTrainedModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and position outside of the sequence are not taken
into account for computing the loss.
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and position outside of the sequence are not taken
into account for computing the loss.
Outputs:
if `start_positions` and `end_positions` are not `None`:
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
if `start_positions` or `end_positions` is `None`:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
    """Build the SQuAD head: a BERT encoder followed by one linear layer
    that produces 2 logits (start / end) per token.

    Args:
        config: model configuration; ``hidden_size`` sizes the QA head.
    """
    super(BertForQuestionAnswering, self).__init__(config)
    self.bert = BertModel(config)
    # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
    # self.dropout = nn.Dropout(config.hidden_dropout_prob)
    # One linear layer maps each token's hidden state to (start_logit, end_logit).
    self.qa_outputs = nn.Linear(config.hidden_size, 2)
    self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
    """Run the question-answering head.

    Returns the averaged start/end cross-entropy loss when both
    ``start_positions`` and ``end_positions`` are given, otherwise the
    ``(start_logits, end_logits)`` pair of shape [batch_size, seq_length].
    """
    encoded, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
    span_logits = self.qa_outputs(encoded)
    start_logits, end_logits = (t.squeeze(-1) for t in span_logits.split(1, dim=-1))

    if start_positions is None or end_positions is None:
        return start_logits, end_logits

    # A multi-GPU gather may add a trailing dimension; drop it.
    if start_positions.dim() > 1:
        start_positions = start_positions.squeeze(-1)
    if end_positions.dim() > 1:
        end_positions = end_positions.squeeze(-1)

    # Positions outside the sequence are clamped onto an index that the
    # loss function is told to ignore.
    ignored_index = start_logits.size(1)
    start_positions.clamp_(0, ignored_index)
    end_positions.clamp_(0, ignored_index)

    loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
    return (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
"[email protected]"
] | |
be482746b575856b1c3066b076cb0366368f336f | 3abcde3ca444d7612e24a0faf9b89af61d9bad6d | /backend/helpers/basics.py | d2b90f0117484a68d11355657f30f6ecfa701b56 | [] | no_license | cbib/COBRA | fa43d5600beaf36d3dcab98bc7b8faa940a02aea | 54f43d3d2867b4f228dccc6630416808e258be77 | refs/heads/master | 2022-09-25T00:20:57.688447 | 2021-02-16T21:53:28 | 2021-02-16T21:53:28 | 29,740,646 | 2 | 3 | null | 2022-09-01T22:15:54 | 2015-01-23T16:10:43 | HTML | UTF-8 | Python | false | false | 660 | py | # -*- coding: utf-8 -*-
import os
import socket
from configobj import ConfigObj
from helpers.path import config_dir
def get_hostname():
    """Return this machine's network host name, lower-cased.

    :return: host name in lower case
    """
    name = socket.gethostname()
    return name.lower()
def load_config(filepath=None):
    """Load a ConfigObj, defaulting to the per-host ini file under ``config_dir``.

    Falls back to ``default.ini`` whenever the resolved path does not exist.

    :param filepath: optional explicit path of the config file
    :return: a ConfigObj instance
    """
    if not filepath:
        filepath = os.path.join(config_dir, '%s.ini' % get_hostname())
    if not os.path.exists(filepath):
        filepath = os.path.join(config_dir, 'default.ini')
    return ConfigObj(filepath)
| [
"[email protected]"
] | |
985bcdf1b5a6191154114626de32dee9d23f0777 | 18c99e7d06cb18570a7a2177066e5da2372f895c | /resources/Rougier Tutorial/scripts/contour_ex.py | 7b23fce2fe125487b170f498a7ef97a154bbb519 | [
"MIT",
"CC-BY-SA-4.0"
] | permissive | nstarman/2019-10-22-dotAstronomy-Plotting-Workshop | 00a7d9cc17a32effa0bb6cf758c02efa25d33bd8 | 31e1a10b3d0f051a2cd197ce390bcf96753f153c | refs/heads/master | 2021-07-13T12:14:28.031574 | 2019-10-22T16:00:54 | 2019-10-22T16:00:54 | 214,871,831 | 7 | 0 | MIT | 2020-10-10T07:10:38 | 2019-10-13T18:27:13 | Jupyter Notebook | UTF-8 | Python | false | false | 772 | py | # -----------------------------------------------------------------------------
# Copyright (c) 2015, Nicolas P. Rougier. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
def f(x, y):
    """Sample scalar field used for the contour demo."""
    gaussian = np.exp(-x ** 2 - y ** 2)
    return (1 - x / 2 + x ** 5 + y ** 3) * gaussian
# Evaluate f on a 256x256 grid over [-3, 3]^2.
n = 256
x = np.linspace(-3, 3, n)
y = np.linspace(-3, 3, n)
X, Y = np.meshgrid(x, y)

plt.axes([0.025, 0.025, 0.95, 0.95])

# Filled contours (8 levels) with the line contours drawn on top.
plt.contourf(X, Y, f(X, Y), 8, alpha=.75, cmap=plt.cm.hot)
# Fix: `contour` takes the plural `linewidths` keyword; the singular
# `linewidth` is not a valid Contour kwarg.
C = plt.contour(X, Y, f(X, Y), 8, colors='black', linewidths=.5)
plt.clabel(C, inline=1, fontsize=10)

# Hide the axis ticks so only the contour plot shows.
plt.xticks([]), plt.yticks([])
# savefig('../figures/contour_ex.png',dpi=48)
plt.show()
| [
"[email protected]"
] | |
07550d5e6fdcbc52b17afb6e73ec3d7d63f2f34f | bef2f86cfbf8dd7915c5ec72a6b38e26b5238641 | /application/apps.py | b3e76b61bebbdcea9af4d962559306f250536674 | [] | no_license | rayhancse08/IMS | 19f66ba20b3101ea4ced7d88cd7bd5c8c22b14f6 | 553050a1b6f621a43c1b141459fc1c89c39cfd4a | refs/heads/master | 2023-04-29T23:37:41.202213 | 2019-09-22T10:21:38 | 2019-09-22T10:21:38 | 210,084,538 | 0 | 0 | null | 2023-04-21T20:37:48 | 2019-09-22T03:23:48 | Python | UTF-8 | Python | false | false | 192 | py | from django.apps import AppConfig
class ApiConfig(AppConfig):
    """Django application configuration for the ``application`` package."""

    # Dotted path of the app this config belongs to.
    name = 'application'

    def ready(self):
        """Hook run once the app registry is fully populated.

        Imports the signals module purely for its side effect of
        registering the signal handlers defined there.
        """
        # noinspection PyUnresolvedReferences
        import application.signals
| [
"[email protected]"
] | |
c0145b7cab1a1625f2d70ee33750828557e05f2d | 4a6e49b33d07c83da9ec56621c27c37a6a28a8ce | /configs/hyper_c32/upernet_swin_small_patch4_window7_4k_hyper_c32.py | b4b092bc4bac1b426b1f12be9915ec6ece9aba54 | [
"Apache-2.0",
"MIT"
] | permissive | DKJJ/Swin-Transformer-Semantic-Segmentation | 213e7516d2abc34b9ecca9dc6037b0ab5499397f | c8707951ddabdc0189451bcbd25c145f1f6cc041 | refs/heads/main | 2023-04-21T12:13:00.624902 | 2021-05-06T12:15:13 | 2021-05-06T12:15:13 | 365,226,907 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,497 | py | _base_ = [
'../_base_/models/upernet_swin.py', '../_base_/datasets/hyper_c32.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_4k.py'
]
# Norm layer used by the decode/auxiliary heads below.
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    backbone=dict(
        # Swin-Small geometry: embed_dim 96 with depths [2, 2, 18, 2].
        embed_dim=96,
        depths=[2, 2, 18, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        ape=False,  # no absolute position embedding
        drop_path_rate=0.3,
        patch_norm=True,
        use_checkpoint=False,
        # 32 input channels (dataset `hyper_c32`) instead of the usual 3 (RGB).
        in_chans=32
    ),
    decode_head=dict(
        # Per-stage feature channels produced by the backbone above.
        in_channels=[96, 192, 384, 768],
        num_classes=2,  # binary segmentation
        norm_cfg=norm_cfg
    ),
    auxiliary_head=dict(
        in_channels=384,
        num_classes=2,
        norm_cfg=norm_cfg
    ))
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
                 paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
                                                 'relative_position_bias_table': dict(decay_mult=0.),
                                                 'norm': dict(decay_mult=0.)}))
# Linear warmup for 1500 iters, then polynomial (power=1.0, i.e. linear) decay to 0.
lr_config = dict(_delete_=True, policy='poly',
                 warmup='linear',
                 warmup_iters=1500,
                 warmup_ratio=1e-6,
                 power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,)
| [
"[email protected]"
] | |
a6a678b8ba3327da88bff0b8846c393e3e21ab86 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2606/60622/314959.py | b4aeade74d5cd1b7d707179de1be3c9a40e2440f | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | arr=eval(input())
# NOTE(review): `arr` comes from `eval(input())` on the preceding line;
# eval on untrusted input is unsafe -- prefer ast.literal_eval.
# Fix: the search value was read into a misspelled name (`targrt`) while the
# comparison below used `target`, raising a NameError at runtime.
target = int(input())
get = False
# Print the index of every occurrence of `target` in `arr`.
for i in range(len(arr)):
    if arr[i] == target:
        get = True
        print(i)
# No occurrence found: emit the -1 sentinel.
if not get:
    print(-1)
"[email protected]"
] | |
5ef3e003fbc3b211a53280eedf68c8e5c6f2743d | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-dcs/huaweicloudsdkdcs/v2/model/cluster_redis_node_monitored_object.py | e9d88d7507107ddb26464bc9702d96002ef08dc5 | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,524 | py | # coding: utf-8
import re
import six
class ClusterRedisNodeMonitoredObject:
    """Generated SDK model describing a monitored Redis node of a DCS cluster.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'dcs_instance_id': 'str',
        'name': 'str',
        'dcs_cluster_redis_node': 'str',
        'status': 'str'
    }

    attribute_map = {
        'dcs_instance_id': 'dcs_instance_id',
        'name': 'name',
        'dcs_cluster_redis_node': 'dcs_cluster_redis_node',
        'status': 'status'
    }

    def __init__(self, dcs_instance_id=None, name=None, dcs_cluster_redis_node=None, status=None):
        """ClusterRedisNodeMonitoredObject - a model defined in huaweicloud sdk"""

        self._dcs_instance_id = None
        self._name = None
        self._dcs_cluster_redis_node = None
        self._status = None
        self.discriminator = None

        # Only assign attributes that were explicitly provided; assignment goes
        # through the property setters below.
        if dcs_instance_id is not None:
            self.dcs_instance_id = dcs_instance_id
        if name is not None:
            self.name = name
        if dcs_cluster_redis_node is not None:
            self.dcs_cluster_redis_node = dcs_cluster_redis_node
        if status is not None:
            self.status = status

    @property
    def dcs_instance_id(self):
        """Gets the dcs_instance_id of this ClusterRedisNodeMonitoredObject.

        Measurement object ID, i.e. the ID of the node.

        :return: The dcs_instance_id of this ClusterRedisNodeMonitoredObject.
        :rtype: str
        """
        return self._dcs_instance_id

    @dcs_instance_id.setter
    def dcs_instance_id(self, dcs_instance_id):
        """Sets the dcs_instance_id of this ClusterRedisNodeMonitoredObject.

        Measurement object ID, i.e. the ID of the node.

        :param dcs_instance_id: The dcs_instance_id of this ClusterRedisNodeMonitoredObject.
        :type: str
        """
        self._dcs_instance_id = dcs_instance_id

    @property
    def name(self):
        """Gets the name of this ClusterRedisNodeMonitoredObject.

        Measurement object name, i.e. the IP of the node.

        :return: The name of this ClusterRedisNodeMonitoredObject.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this ClusterRedisNodeMonitoredObject.

        Measurement object name, i.e. the IP of the node.

        :param name: The name of this ClusterRedisNodeMonitoredObject.
        :type: str
        """
        self._name = name

    @property
    def dcs_cluster_redis_node(self):
        """Gets the dcs_cluster_redis_node of this ClusterRedisNodeMonitoredObject.

        ID of the measurement object for the dcs_cluster_redis_node dimension.

        :return: The dcs_cluster_redis_node of this ClusterRedisNodeMonitoredObject.
        :rtype: str
        """
        return self._dcs_cluster_redis_node

    @dcs_cluster_redis_node.setter
    def dcs_cluster_redis_node(self, dcs_cluster_redis_node):
        """Sets the dcs_cluster_redis_node of this ClusterRedisNodeMonitoredObject.

        ID of the measurement object for the dcs_cluster_redis_node dimension.

        :param dcs_cluster_redis_node: The dcs_cluster_redis_node of this ClusterRedisNodeMonitoredObject.
        :type: str
        """
        self._dcs_cluster_redis_node = dcs_cluster_redis_node

    @property
    def status(self):
        """Gets the status of this ClusterRedisNodeMonitoredObject.

        Measurement object status, i.e. the status of the node.

        :return: The status of this ClusterRedisNodeMonitoredObject.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this ClusterRedisNodeMonitoredObject.

        Measurement object status, i.e. the status of the node.

        :param status: The status of this ClusterRedisNodeMonitoredObject.
        :type: str
        """
        self._status = status

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # Recursively serialize nested models, lists and dicts of models.
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes flagged as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the JSON string representation of the model."""
        import simplejson as json
        return json.dumps(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ClusterRedisNodeMonitoredObject):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"[email protected]"
] | |
d6a97cd6ef028407a1cfffa44d848b5f9689f803 | c57439f0c98af370ace65f9d55ef5a457bedc531 | /ydk/models/infra/_meta/_Cisco_IOS_XR_infra_alarm_logger_oper.py | 77489aa52aacd470c677f59e9d95d406bef1006f | [
"Apache-2.0"
] | permissive | myahmao/ydk-py | c932fbd8245e554227cce0fd723d9a22887b0c40 | 2f367d93f2088d4abdc2f2bb10ca4864952b458a | refs/heads/master | 2021-01-14T11:32:29.064494 | 2016-03-15T22:44:05 | 2016-03-15T22:44:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,724 | py |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum, _dm_validate_value
from ydk.types import Empty, YList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYDataValidationError
from ydk.models import _yang_ns
_meta_table = {
'AlAlarmBistate_Enum' : _MetaInfoEnum('AlAlarmBistate_Enum', 'ydk.models.infra.Cisco_IOS_XR_infra_alarm_logger_oper',
{
'not-available':'NOT_AVAILABLE',
'active':'ACTIVE',
'clear':'CLEAR',
}, 'Cisco-IOS-XR-infra-alarm-logger-oper', _yang_ns._namespaces['Cisco-IOS-XR-infra-alarm-logger-oper']),
'AlAlarmSeverity_Enum' : _MetaInfoEnum('AlAlarmSeverity_Enum', 'ydk.models.infra.Cisco_IOS_XR_infra_alarm_logger_oper',
{
'unknown':'UNKNOWN',
'emergency':'EMERGENCY',
'alert':'ALERT',
'critical':'CRITICAL',
'error':'ERROR',
'warning':'WARNING',
'notice':'NOTICE',
'informational':'INFORMATIONAL',
'debugging':'DEBUGGING',
}, 'Cisco-IOS-XR-infra-alarm-logger-oper', _yang_ns._namespaces['Cisco-IOS-XR-infra-alarm-logger-oper']),
'AlarmLogger.Alarms.Alarm' : {
'meta_info' : _MetaInfoClass('AlarmLogger.Alarms.Alarm',
False,
[
_MetaInfoClassMember('event-id', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Event ID
''',
'event_id',
'Cisco-IOS-XR-infra-alarm-logger-oper', True),
_MetaInfoClassMember('additional-text', ATTRIBUTE, 'str' , None, None,
[], [],
''' Full text of the Alarm
''',
'additional_text',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
_MetaInfoClassMember('category', ATTRIBUTE, 'str' , None, None,
[], [],
''' Category of the alarm
''',
'category',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
_MetaInfoClassMember('code', ATTRIBUTE, 'str' , None, None,
[], [],
''' Alarm code which further qualifies the alarm
within a message group
''',
'code',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
_MetaInfoClassMember('correlation-id', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Correlation Identifier
''',
'correlation_id',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
_MetaInfoClassMember('group', ATTRIBUTE, 'str' , None, None,
[], [],
''' Group of messages to which this alarm belongs to
''',
'group',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
_MetaInfoClassMember('is-admin', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Indicates the event id admin-level
''',
'is_admin',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'AlAlarmSeverity_Enum' , 'ydk.models.infra.Cisco_IOS_XR_infra_alarm_logger_oper', 'AlAlarmSeverity_Enum',
[], [],
''' Severity of the alarm
''',
'severity',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
_MetaInfoClassMember('source-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' Source Identifier(Location).Indicates the node
in which the alarm was generated
''',
'source_id',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'AlAlarmBistate_Enum' , 'ydk.models.infra.Cisco_IOS_XR_infra_alarm_logger_oper', 'AlAlarmBistate_Enum',
[], [],
''' State of the alarm (bistate alarms only)
''',
'state',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
_MetaInfoClassMember('timestamp', ATTRIBUTE, 'int' , None, None,
[(0, 18446744073709551615L)], [],
''' Time when the alarm was generated. It is
expressed in number of milliseconds since 00:00
:00 UTC, January 1, 1970
''',
'timestamp',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
],
'Cisco-IOS-XR-infra-alarm-logger-oper',
'alarm',
_yang_ns._namespaces['Cisco-IOS-XR-infra-alarm-logger-oper'],
'ydk.models.infra.Cisco_IOS_XR_infra_alarm_logger_oper'
),
},
'AlarmLogger.Alarms' : {
'meta_info' : _MetaInfoClass('AlarmLogger.Alarms',
False,
[
_MetaInfoClassMember('alarm', REFERENCE_LIST, 'Alarm' , 'ydk.models.infra.Cisco_IOS_XR_infra_alarm_logger_oper', 'AlarmLogger.Alarms.Alarm',
[], [],
''' One of the logged alarms
''',
'alarm',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
],
'Cisco-IOS-XR-infra-alarm-logger-oper',
'alarms',
_yang_ns._namespaces['Cisco-IOS-XR-infra-alarm-logger-oper'],
'ydk.models.infra.Cisco_IOS_XR_infra_alarm_logger_oper'
),
},
'AlarmLogger.BufferStatus' : {
'meta_info' : _MetaInfoClass('AlarmLogger.BufferStatus',
False,
[
_MetaInfoClassMember('capacity-threshold', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Percentage of the buffer utilization which, when
exceeded, triggers the generation of a
notification for the clients of the XML agent
''',
'capacity_threshold',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
_MetaInfoClassMember('log-buffer-size', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Current Logging Buffer Size (Bytes)
''',
'log_buffer_size',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
_MetaInfoClassMember('max-log-buffer-size', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Maximum Logging Buffer Size (Bytes)
''',
'max_log_buffer_size',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
_MetaInfoClassMember('record-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Number of Records in the Buffer
''',
'record_count',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
_MetaInfoClassMember('severity-filter', REFERENCE_ENUM_CLASS, 'AlAlarmSeverity_Enum' , 'ydk.models.infra.Cisco_IOS_XR_infra_alarm_logger_oper', 'AlAlarmSeverity_Enum',
[], [],
''' Severity Filter
''',
'severity_filter',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
],
'Cisco-IOS-XR-infra-alarm-logger-oper',
'buffer-status',
_yang_ns._namespaces['Cisco-IOS-XR-infra-alarm-logger-oper'],
'ydk.models.infra.Cisco_IOS_XR_infra_alarm_logger_oper'
),
},
'AlarmLogger' : {
'meta_info' : _MetaInfoClass('AlarmLogger',
False,
[
_MetaInfoClassMember('alarms', REFERENCE_CLASS, 'Alarms' , 'ydk.models.infra.Cisco_IOS_XR_infra_alarm_logger_oper', 'AlarmLogger.Alarms',
[], [],
''' Table that contains the database of logged
alarms
''',
'alarms',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
_MetaInfoClassMember('buffer-status', REFERENCE_CLASS, 'BufferStatus' , 'ydk.models.infra.Cisco_IOS_XR_infra_alarm_logger_oper', 'AlarmLogger.BufferStatus',
[], [],
''' Describes buffer utilization and parameters
configured
''',
'buffer_status',
'Cisco-IOS-XR-infra-alarm-logger-oper', False),
],
'Cisco-IOS-XR-infra-alarm-logger-oper',
'alarm-logger',
_yang_ns._namespaces['Cisco-IOS-XR-infra-alarm-logger-oper'],
'ydk.models.infra.Cisco_IOS_XR_infra_alarm_logger_oper'
),
},
}
_meta_table['AlarmLogger.Alarms.Alarm']['meta_info'].parent =_meta_table['AlarmLogger.Alarms']['meta_info']
_meta_table['AlarmLogger.Alarms']['meta_info'].parent =_meta_table['AlarmLogger']['meta_info']
_meta_table['AlarmLogger.BufferStatus']['meta_info'].parent =_meta_table['AlarmLogger']['meta_info']
| [
"[email protected]"
] | |
c3498026921873ea903ce9a2244d311663417b05 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/others/CLIP_for_PyTorch/transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_flax_{{cookiecutter.lowercase_modelname}}.py | 7c33e637c9f47c5c6cf5cd3e91458c74e23af9f5 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 120,457 | py | # coding=utf-8
# Copyright 2022 {{cookiecutter.authors}} and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Flax {{cookiecutter.modelname}} model. """
{% if cookiecutter.is_encoder_decoder_model == "False" %}
from typing import Callable, Optional, Tuple
import numpy as np
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from flax.linen.attention import dot_product_attention_weights
from jax import lax
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_flax_outputs import (
FlaxBaseModelOutput,
FlaxBaseModelOutputWithPooling,
FlaxCausalLMOutput,
FlaxMaskedLMOutput,
FlaxMultipleChoiceModelOutput,
FlaxQuestionAnsweringModelOutput,
FlaxSequenceClassifierOutput,
FlaxTokenClassifierOutput,
)
from ...modeling_flax_utils import (
ACT2FN,
FlaxPreTrainedModel,
append_call_sample_docstring,
overwrite_call_docstring,
)
from ...utils import logging
from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}"
_CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config"
_TOKENIZER_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Tokenizer"
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r"""
This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the
generic methods the library implements for all its model (such as downloading, saving and converting weights from
PyTorch models)
This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax linen Module
and refer to the Flax documentation for all matter related to general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
config ([`~{{cookiecutter.uppercase_modelname}}Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the
model weights.
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on
GPUs) and `jax.numpy.bfloat16` (on TPUs).
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
specified all the computation will be performed with the given `dtype`.
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
parameters.**
If you wish to change the dtype of the model parameters, see
[`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`].
"""
{{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r"""
Args:
input_ids (`numpy.ndarray` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`~{{cookiecutter.uppercase_modelname}}ConfiTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
head_mask (`numpy.ndarray` of shape `({0})`, `optional): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}Embeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        # Three learned embedding tables, all initialized from N(0, initializer_range).
        self.word_embeddings = nn.Embed(
            self.config.vocab_size,
            self.config.hidden_size,
            embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.position_embeddings = nn.Embed(
            self.config.max_position_embeddings,
            self.config.hidden_size,
            embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.token_type_embeddings = nn.Embed(
            self.config.type_vocab_size,
            self.config.hidden_size,
            embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)

    def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True):
        """Embed the ids, sum, LayerNorm and (optionally) apply dropout.

        NOTE(review): ``attention_mask`` is accepted but unused in this method.
        """
        # Embed
        inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
        position_embeds = self.position_embeddings(position_ids.astype("i4"))
        token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))

        # Sum all embeddings
        hidden_states = inputs_embeds + token_type_embeddings + position_embeds

        # Layer Norm
        hidden_states = self.LayerNorm(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        return hidden_states
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfAttention with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}SelfAttention(nn.Module):
config: {{cookiecutter.camelcase_modelname}}Config
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
if self.config.hidden_size % self.config.num_attention_heads != 0:
raise ValueError(
"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads`\
: {self.config.num_attention_heads}"
)
self.query = nn.Dense(
self.config.hidden_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
)
self.key = nn.Dense(
self.config.hidden_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
)
self.value = nn.Dense(
self.config.hidden_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
)
def __call__(
self,
hidden_states,
attention_mask,
layer_head_mask,
deterministic=True,
output_attentions: bool = False
):
head_dim = self.config.hidden_size // self.config.num_attention_heads
query_states = self.query(hidden_states).reshape(
hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
)
value_states = self.value(hidden_states).reshape(
hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
)
key_states = self.key(hidden_states).reshape(
hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -1e10).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
dropout_rng = self.make_rng("dropout")
attn_weights = dot_product_attention_weights(
query_states,
key_states,
bias=attention_bias,
dropout_rng=dropout_rng,
dropout_rate=self.config.attention_probs_dropout_prob,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
return outputs
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfOutput with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}SelfOutput(nn.Module):
    """Projection + dropout + residual LayerNorm applied after self-attention."""

    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)

    def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        # Residual connection to the block input, then LayerNorm.
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertAttention with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}Attention(nn.Module):
    """Self-attention followed by its output projection/residual sub-block."""

    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.self = Flax{{cookiecutter.camelcase_modelname}}SelfAttention(self.config, dtype=self.dtype)
        self.output = Flax{{cookiecutter.camelcase_modelname}}SelfOutput(self.config, dtype=self.dtype)

    def __call__(
        self,
        hidden_states,
        attention_mask,
        layer_head_mask,
        deterministic=True,
        output_attentions: bool = False,
    ):
        # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length)
        # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable
        # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length)
        attn_outputs = self.self(
            hidden_states,
            attention_mask,
            layer_head_mask=layer_head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]
        hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)

        outputs = (hidden_states,)

        # Optionally append the attention probabilities for callers that request them.
        if output_attentions:
            outputs += (attn_outputs[1],)

        return outputs
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertIntermediate with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}Intermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size plus activation."""

    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.intermediate_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        # Activation function selected by name from the config.
        self.activation = ACT2FN[self.config.hidden_act]

    def __call__(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOutput with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}Output(nn.Module):
    """Feed-forward contraction: Dense back to `hidden_size`, dropout, then residual add + LayerNorm."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
    def __call__(self, hidden_states, attention_output, deterministic: bool = True):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        # Residual connection to the attention output, then post-LayerNorm (BERT-style).
        hidden_states = self.LayerNorm(hidden_states + attention_output)
        return hidden_states
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayer with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}Layer(nn.Module):
    """One Transformer encoder block: self-attention sublayer followed by the feed-forward sublayer."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    def setup(self):
        self.attention = Flax{{cookiecutter.camelcase_modelname}}Attention(self.config, dtype=self.dtype)
        self.intermediate = Flax{{cookiecutter.camelcase_modelname}}Intermediate(self.config, dtype=self.dtype)
        self.output = Flax{{cookiecutter.camelcase_modelname}}Output(self.config, dtype=self.dtype)
    def __call__(
        self,
        hidden_states,
        attention_mask,
        layer_head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
    ):
        attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            layer_head_mask=layer_head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
        )
        attention_output = attention_outputs[0]
        # Feed-forward sublayer; `attention_output` also serves as the residual input.
        hidden_states = self.intermediate(attention_output)
        hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attention_outputs[1],)
        return outputs
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}LayerCollection(nn.Module):
config: {{cookiecutter.camelcase_modelname}}Config
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.layers = [
Flax{{cookiecutter.camelcase_modelname}}Layer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
]
def __call__(
self,
hidden_states,
attention_mask,
head_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
# Check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
if head_mask.shape[0] != (len(self.layers)):
raise ValueError(
f"The head_mask should be specified for {len(self.layers)} layers, but it is for \
{head_mask.shape[0]}."
)
for i, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = layer(
hidden_states,
attention_mask,
layer_head_mask=head_mask[i] if head_mask is not None else None,
deterministic=deterministic,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions += (layer_outputs[1],)
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = (hidden_states,)
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEncoder with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}Encoder(nn.Module):
    """Encoder wrapper that forwards all arguments unchanged to the underlying layer collection."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    def setup(self):
        self.layer = Flax{{cookiecutter.camelcase_modelname}}LayerCollection(self.config, dtype=self.dtype)
    def __call__(
        self,
        hidden_states,
        attention_mask,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        return self.layer(
            hidden_states,
            attention_mask,
            head_mask=head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPooler with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}Pooler(nn.Module):
    """Pools each sequence by passing its first token's hidden state through a tanh-activated Dense layer."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
    def __call__(self, hidden_states):
        # Take the hidden state of the first token of every sequence in the batch.
        cls_hidden_state = hidden_states[:, 0]
        cls_hidden_state = self.dense(cls_hidden_state)
        return nn.tanh(cls_hidden_state)
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPredictionHeadTransform with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}PredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied to hidden states before the LM decoder projection."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
        self.activation = ACT2FN[self.config.hidden_act]
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
    def __call__(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.activation(hidden_states)
        return self.LayerNorm(hidden_states)
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLMPredictionHead with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}LMPredictionHead(nn.Module):
    """LM head: transform hidden states, then project to vocabulary logits, optionally tying the projection
    to the input word-embedding matrix via `shared_embedding`."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32
    bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
    def setup(self):
        self.transform = Flax{{cookiecutter.camelcase_modelname}}PredictionHeadTransform(self.config, dtype=self.dtype)
        # use_bias=False: the bias is a separate parameter so the decoder kernel can be swapped
        # for the shared embedding matrix in __call__ without losing the learned bias.
        self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False)
        self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
    def __call__(self, hidden_states, shared_embedding=None):
        hidden_states = self.transform(hidden_states)
        if shared_embedding is not None:
            # Weight tying: run the decoder with the transposed embedding matrix as its kernel.
            hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
        else:
            hidden_states = self.decoder(hidden_states)
        hidden_states += self.bias
        return hidden_states
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOnlyMLMHead with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}OnlyMLMHead(nn.Module):
    """Wrapper exposing just the masked-LM prediction head (no NSP head)."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.predictions = Flax{{cookiecutter.camelcase_modelname}}LMPredictionHead(self.config, dtype=self.dtype)
    def __call__(self, hidden_states, shared_embedding=None):
        hidden_states = self.predictions(hidden_states, shared_embedding=shared_embedding)
        return hidden_states
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOnlyNSPHead with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}OnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: a 2-way Dense classifier over the pooled output."""
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.seq_relationship = nn.Dense(2, dtype=self.dtype)
    def __call__(self, pooled_output):
        return self.seq_relationship(pooled_output)
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainingHeads with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}PreTrainingHeads(nn.Module):
    """Combined pre-training heads: masked-LM predictions plus the 2-way seq-relationship classifier."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.predictions = Flax{{cookiecutter.camelcase_modelname}}LMPredictionHead(self.config, dtype=self.dtype)
        self.seq_relationship = nn.Dense(2, dtype=self.dtype)
    def __call__(self, hidden_states, pooled_output, shared_embedding=None):
        prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score
class Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel(FlaxPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = {{cookiecutter.camelcase_modelname}}Config
    base_model_prefix = "{{cookiecutter.lowercase_modelname}}"
    # Concrete subclasses set this to the nn.Module implementing the forward pass.
    module_class: nn.Module = None
    def __init__(
        self, config: {{cookiecutter.camelcase_modelname}}Config, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs
    ):
        # Instantiate the subclass's module and let the base class drive parameter init.
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)
    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> FrozenDict:
        """Create random model parameters by tracing the module on dummy inputs of `input_shape`."""
        # init input tensors
        input_ids = jnp.zeros(input_shape, dtype="i4")
        token_type_ids = jnp.zeros_like(input_ids)
        position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
        attention_mask = jnp.ones_like(input_ids)
        head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.module.init(
            rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False
        )["params"]
    @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def __call__(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        params: dict = None,
        dropout_rng: jax.random.PRNGKey = None,
        train: bool = False,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        # Fall back to config-level defaults for the output flags when not given explicitly.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        # init input tensors if not passed
        if token_type_ids is None:
            token_type_ids = jnp.zeros_like(input_ids)
        if position_ids is None:
            position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)
        if head_mask is None:
            head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng
        # NOTE(review): head_mask is cast to "i4" below, which would truncate fractional
        # head-mask values to integers — confirm this is intended.
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(input_ids, dtype="i4"),
            jnp.array(attention_mask, dtype="i4"),
            jnp.array(token_type_ids, dtype="i4"),
            jnp.array(position_ids, dtype="i4"),
            jnp.array(head_mask, dtype="i4"),
            not train,
            output_attentions,
            output_hidden_states,
            return_dict,
            rngs=rngs,
        )
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertModule with Bert->{{cookiecutter.camelcase_modelname}}
class Flax{{cookiecutter.camelcase_modelname}}Module(nn.Module):
    """Bare encoder module: embeddings -> encoder stack -> optional pooler over the first token."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    add_pooling_layer: bool = True
    def setup(self):
        self.embeddings = Flax{{cookiecutter.camelcase_modelname}}Embeddings(self.config, dtype=self.dtype)
        self.encoder = Flax{{cookiecutter.camelcase_modelname}}Encoder(self.config, dtype=self.dtype)
        # Note: the pooler submodule is created even when add_pooling_layer is False;
        # __call__ simply skips invoking it in that case.
        self.pooler = Flax{{cookiecutter.camelcase_modelname}}Pooler(self.config, dtype=self.dtype)
    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        hidden_states = self.embeddings(
            input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic
        )
        outputs = self.encoder(
            hidden_states,
            attention_mask,
            head_mask=head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]
        pooled = self.pooler(hidden_states) if self.add_pooling_layer else None
        if not return_dict:
            # if pooled is None, don't return it
            if pooled is None:
                return (hidden_states,) + outputs[1:]
            return (hidden_states, pooled) + outputs[1:]
        return FlaxBaseModelOutputWithPooling(
            last_hidden_state=hidden_states,
            pooler_output=pooled,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
add_start_docstrings(
"The bare {{cookiecutter.camelcase_modelname}} Model transformer outputting raw hidden-states without any specific head on top.",
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class Flax{{cookiecutter.camelcase_modelname}}Model(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel):
module_class = Flax{{cookiecutter.camelcase_modelname}}Module
class Flax{{cookiecutter.camelcase_modelname}}ForMaskedLMModule(nn.Module):
config: {{cookiecutter.camelcase_modelname}}Config
dtype: jnp.dtype = jnp.float32
def setup(self):
self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, add_pooling_layer=False, dtype=self.dtype)
self.cls = Flax{{cookiecutter.camelcase_modelname}}OnlyMLMHead(config=self.config, dtype=self.dtype)
def __call__(
self,
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# Model
outputs = self.{{cookiecutter.lowercase_modelname}}(
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_embedding = self.{{cookiecutter.lowercase_modelname}}.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
else:
shared_embedding = None
# Compute the prediction scores
logits = self.cls(hidden_states, shared_embedding=shared_embedding)
if not return_dict:
return (logits,) + outputs[1:]
return FlaxCausalLMOutput(
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings("""{{cookiecutter.camelcase_modelname}} Model with a `language modeling` head on top for MLM training. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING)
class Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel):
    module_class = Flax{{cookiecutter.camelcase_modelname}}ForMaskedLMModule
# Attach a runnable usage example to the model's `__call__` docstring.
append_call_sample_docstring(
    Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC
)
class Flax{{cookiecutter.camelcase_modelname}}ForCausalLMModule(nn.Module):
    """Base module topped with an LM head, used for causal-LM style training."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        # No pooling layer: the LM head consumes the per-token hidden states.
        self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, add_pooling_layer=False, dtype=self.dtype)
        self.cls = Flax{{cookiecutter.camelcase_modelname}}OnlyMLMHead(config=self.config, dtype=self.dtype)
    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Model
        # NOTE(review): no causal mask is built here; any masking comes solely from
        # `attention_mask` as handled by the base module — confirm this matches the
        # intended causal-LM setup.
        outputs = self.{{cookiecutter.lowercase_modelname}}(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]
        if self.config.tie_word_embeddings:
            # Reuse the input word-embedding matrix as the output projection.
            shared_embedding = self.{{cookiecutter.lowercase_modelname}}.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
        else:
            shared_embedding = None
        # Compute the prediction scores
        logits = self.cls(hidden_states, shared_embedding=shared_embedding)
        if not return_dict:
            return (logits,) + outputs[1:]
        return FlaxCausalLMOutput(
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings("""{{cookiecutter.camelcase_modelname}} Model with a `language modeling` head on top for CLM training. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING)
class Flax{{cookiecutter.camelcase_modelname}}ForCausalLM(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel):
    module_class = Flax{{cookiecutter.camelcase_modelname}}ForCausalLMModule
# Attach a runnable usage example to the model's `__call__` docstring.
append_call_sample_docstring(
    Flax{{cookiecutter.camelcase_modelname}}ForCausalLM, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC
)
class Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule(nn.Module):
    """Base module plus dropout and a Dense classifier over the pooled (first-token) output."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.classifier = nn.Dense(
            self.config.num_labels,
            dtype=self.dtype,
        )
    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Model
        outputs = self.{{cookiecutter.lowercase_modelname}}(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # outputs[1] is the pooler output (index works for both tuple and ModelOutput returns).
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output, deterministic=deterministic)
        logits = self.classifier(pooled_output)
        if not return_dict:
            return (logits,) + outputs[2:]
        return FlaxSequenceClassifierOutput(
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    {{cookiecutter.camelcase_modelname}} Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel):
    module_class = Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule
# Attach a runnable usage example to the model's `__call__` docstring.
append_call_sample_docstring(
    Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification,
    _TOKENIZER_FOR_DOC,
    _CHECKPOINT_FOR_DOC,
    FlaxSequenceClassifierOutput,
    _CONFIG_FOR_DOC,
)
class Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoiceModule(nn.Module):
    """Base module plus a single-logit classifier; choices are flattened into the batch dimension."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        # One logit per (example, choice) pair; reshaped to (batch, num_choices) below.
        self.classifier = nn.Dense(1, dtype=self.dtype)
    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        num_choices = input_ids.shape[1]
        # Collapse (batch, num_choices, seq_len) -> (batch * num_choices, seq_len) for the encoder.
        input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
        attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
        token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
        position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
        # Model
        outputs = self.{{cookiecutter.lowercase_modelname}}(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output, deterministic=deterministic)
        logits = self.classifier(pooled_output)
        # Un-flatten: one logit per choice for each example.
        reshaped_logits = logits.reshape(-1, num_choices)
        if not return_dict:
            return (reshaped_logits,) + outputs[2:]
        return FlaxMultipleChoiceModelOutput(
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    {{cookiecutter.camelcase_modelname}} Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel):
    module_class = Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoiceModule
# Multiple-choice inputs carry an extra `num_choices` dimension, so override the input docstring.
overwrite_call_docstring(
    Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice, {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
# Attach a runnable usage example to the model's `__call__` docstring.
append_call_sample_docstring(
    Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC
)
class Flax{{cookiecutter.camelcase_modelname}}ForTokenClassificationModule(nn.Module):
    """Base module (no pooling layer) plus dropout and a per-token Dense classifier."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype, add_pooling_layer=False)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Model
        outputs = self.{{cookiecutter.lowercase_modelname}}(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        # Per-token logits over num_labels classes.
        logits = self.classifier(hidden_states)
        if not return_dict:
            return (logits,) + outputs[1:]
        return FlaxTokenClassifierOutput(
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    {{cookiecutter.camelcase_modelname}} Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel):
    module_class = Flax{{cookiecutter.camelcase_modelname}}ForTokenClassificationModule
# Attach a runnable usage example to the model's `__call__` docstring.
append_call_sample_docstring(
    Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC
)
class Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnsweringModule(nn.Module):
    """Base module (no pooling layer) plus a Dense head producing span start/end logits."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.{{cookiecutter.lowercase_modelname}} = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype, add_pooling_layer=False)
        self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Model
        outputs = self.{{cookiecutter.lowercase_modelname}}(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]
        logits = self.qa_outputs(hidden_states)
        # NOTE(review): the split + two-way unpack below requires config.num_labels == 2 — confirm.
        start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        if not return_dict:
            return (start_logits, end_logits) + outputs[1:]
        return FlaxQuestionAnsweringModelOutput(
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    {{cookiecutter.camelcase_modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel):
    module_class = Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnsweringModule
# Attach a runnable usage example to the model's `__call__` docstring.
append_call_sample_docstring(
    Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering,
    _TOKENIZER_FOR_DOC,
    _CHECKPOINT_FOR_DOC,
    FlaxQuestionAnsweringModelOutput,
    _CONFIG_FOR_DOC,
)
{# encoder_decoder #}
{% else %}
import math
import random
from functools import partial
from typing import Callable, Optional, Tuple
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from jax import lax
from jax.random import PRNGKey
from ...file_utils import add_start_docstrings, replace_return_docstrings
from ...modeling_flax_outputs import (
FlaxBaseModelOutput,
FlaxBaseModelOutputWithPastAndCrossAttentions,
FlaxCausalLMOutputWithCrossAttentions,
FlaxSeq2SeqLMOutput,
FlaxSeq2SeqModelOutput,
FlaxSeq2SeqQuestionAnsweringModelOutput,
FlaxSeq2SeqSequenceClassifierOutput,
)
from ...modeling_flax_utils import (
ACT2FN,
FlaxPreTrainedModel,
append_call_sample_docstring,
append_replace_return_docstrings,
overwrite_call_docstring,
)
from ...utils import logging
from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}"
_CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config"
_TOKENIZER_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Tokenizer"
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r"""
This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the
generic methods the library implements for all its model (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax
Module and refer to the Flax documentation for all matter related to general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
config ([`~{{cookiecutter.camelcase_modelname}}Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the
model weights.
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on
GPUs) and `jax.numpy.bfloat16` (on TPUs).
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
specified all the computation will be performed with the given `dtype`.
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
parameters.**
If you wish to change the dtype of the model parameters, see
[`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`].
"""
{{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to
the right for denoising pre-training following the paper.
decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
also be used by default.
If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
range `[0, config.max_position_embeddings - 1]`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
# Docstring describing the inputs of the standalone `encode()` method; injected via `@add_start_docstrings`.
{{cookiecutter.uppercase_modelname}}_ENCODE_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.
            Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
            details.
            [What are input IDs?](../glossary#input-ids)
        attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)
        position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
# Docstring describing the inputs of the standalone `decode()` method; injected via `@add_start_docstrings`.
{{cookiecutter.uppercase_modelname}}_DECODE_INPUTS_DOCSTRING = r"""
    Args:
        decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
            Indices of decoder input sequence tokens in the vocabulary.
            Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
            details.
            [What are decoder input IDs?](../glossary#decoder-input-ids)
            For translation and summarization training, `decoder_input_ids` should be provided. If no
            `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to
            the right for denoising pre-training following the paper.
        encoder_outputs (`tuple(tuple(jnp.ndarray)`):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
            `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`,
            *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
            cross-attention of the decoder.
        encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)
        decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
            also be used by default.
            If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
        decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
            range `[0, config.max_position_embeddings - 1]`.
        past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
            Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
            auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """
    Shift input ids one token to the right.
    """
    rolled = jnp.roll(input_ids, 1, axis=-1)
    # the first position of every sequence becomes the decoder start token
    rolled = rolled.at[..., 0].set(decoder_start_token_id)
    # labels may use -100 as the ignore index; map it back to the pad token
    return jnp.where(rolled == -100, pad_token_id, rolled)
class Flax{{cookiecutter.camelcase_modelname}}Attention(nn.Module):
    """Multi-head dot-product attention, usable as self- or cross-attention, with an
    optional causal mask and an autoregressive key/value cache for fast decoding."""

    config: {{cookiecutter.camelcase_modelname}}Config
    # total hidden width; split evenly across `num_heads` heads
    embed_dim: int
    num_heads: int
    dropout: float = 0.0
    # when True, positions can only attend to earlier positions (decoder self-attention)
    causal: bool = False
    bias: bool = True
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self) -> None:
        self.head_dim = self.embed_dim // self.num_heads
        assert (
            self.head_dim * self.num_heads == self.embed_dim
        ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."

        dense = partial(
            nn.Dense,
            self.embed_dim,
            use_bias=self.bias,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )

        self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
        self.out_proj = dense()

        self.dropout_layer = nn.Dropout(rate=self.dropout)

        if self.causal:
            # boolean lower-triangular mask over the maximum position-embedding length
            self.causal_mask = make_causal_mask(
                jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
            )

    def _split_heads(self, hidden_states):
        # (batch, seq, embed_dim) -> (batch, seq, num_heads, head_dim)
        return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))

    def _merge_heads(self, hidden_states):
        # (batch, seq, num_heads, head_dim) -> (batch, seq, embed_dim)
        return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))

    @nn.compact
    def _concatenate_to_cache(self, key, value, query, attention_mask):
        """
        This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slightly adapted from the official Flax repository:
        https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
        """
        # detect if we're initializing by absence of existing cache data.
        is_initialized = self.has_variable("cache", "cached_key")
        cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
        cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
        cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))

        if is_initialized:
            *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
            # update key, value caches with our new 1d spatial slices
            cur_index = cache_index.value
            indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
            key = lax.dynamic_update_slice(cached_key.value, key, indices)
            value = lax.dynamic_update_slice(cached_value.value, value, indices)
            cached_key.value = key
            cached_value.value = value
            num_updated_cache_vectors = query.shape[1]
            cache_index.value = cache_index.value + num_updated_cache_vectors
            # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
            pad_mask = jnp.broadcast_to(
                jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
                tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
            )
            attention_mask = combine_masks(pad_mask, attention_mask)
        return key, value, attention_mask

    def __call__(
        self,
        hidden_states: jnp.ndarray,
        key_value_states: Optional[jnp.ndarray] = None,
        attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        batch_size = hidden_states.shape[0]

        # get query proj
        query_states = self.q_proj(hidden_states)
        # get key, value proj
        if is_cross_attention:
            # cross_attentions
            key_states = self.k_proj(key_value_states)
            value_states = self.v_proj(key_value_states)
        else:
            # self_attention
            key_states = self.k_proj(hidden_states)
            value_states = self.v_proj(hidden_states)

        query_states = self._split_heads(query_states)
        key_states = self._split_heads(key_states)
        value_states = self._split_heads(value_states)

        # handle cache prepare causal attention mask
        if self.causal:
            query_length, key_length = query_states.shape[1], key_states.shape[1]
            if self.has_variable("cache", "cached_key"):
                # during incremental decoding, slice the causal mask at the current cache position
                mask_shift = self.variables["cache"]["cache_index"]
                max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
                causal_mask = lax.dynamic_slice(
                    self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
                )
            else:
                causal_mask = self.causal_mask[:, :, :query_length, :key_length]
            causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])

        # combine masks if needed
        if attention_mask is not None and self.causal:
            attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
            attention_mask = combine_masks(attention_mask, causal_mask)
        elif self.causal:
            attention_mask = causal_mask
        elif attention_mask is not None:
            attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))

        # During fast autoregressive decoding, we feed one position at a time,
        # and cache the keys and values step by step.
        if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
            key_states, value_states, attention_mask = self._concatenate_to_cache(
                key_states, value_states, query_states, attention_mask
            )

        # Convert the boolean attention mask to an attention bias.
        if attention_mask is not None:
            # attention mask in the form of attention bias
            attention_bias = lax.select(
                attention_mask > 0,
                jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
                jnp.full(attention_mask.shape, float("-inf")).astype(self.dtype),
            )
        else:
            attention_bias = None

        dropout_rng = None
        if not deterministic and self.dropout > 0.0:
            dropout_rng = self.make_rng("dropout")

        attn_weights = dot_product_attention_weights(
            query_states,
            key_states,
            bias=attention_bias,
            dropout_rng=dropout_rng,
            dropout_rate=self.dropout,
            broadcast_dropout=True,
            deterministic=deterministic,
            dtype=self.dtype,
            precision=None,
        )

        attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
        attn_output = self._merge_heads(attn_output)
        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights
class Flax{{cookiecutter.camelcase_modelname}}EncoderLayer(nn.Module):
config: {{cookiecutter.camelcase_modelname}}Config
dtype: jnp.dtype = jnp.float32
def setup(self) -> None:
self.embed_dim = self.config.d_model
self.self_attn = Flax{{cookiecutter.camelcase_modelname}}Attention(
config=self.config,
embed_dim=self.embed_dim,
num_heads=self.config.encoder_attention_heads,
dropout=self.config.attention_dropout,
dtype=self.dtype
)
self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype)
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
self.activation_fn = ACT2FN[self.config.activation_function]
self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
self.fc1 = nn.Dense(
self.config.encoder_ffn_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.fc2 = nn.Dense(
self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
self.final_layer_norm = nn.LayerNorm(dtype=self.dtype)
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
residual = hidden_states
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class Flax{{cookiecutter.camelcase_modelname}}EncoderLayerCollection(nn.Module):
config: {{cookiecutter.camelcase_modelname}}Config
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.layers = [
Flax{{cookiecutter.camelcase_modelname}}EncoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.encoder_layers)
]
self.layerdrop = self.config.encoder_layerdrop
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for encoder_layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if not deterministic and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions,
deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = (hidden_states, all_hidden_states, all_attentions)
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class Flax{{cookiecutter.camelcase_modelname}}DecoderLayer(nn.Module):
config: {{cookiecutter.camelcase_modelname}}Config
dtype: jnp.dtype = jnp.float32
def setup(self) -> None:
self.embed_dim = self.config.d_model
self.self_attn = Flax{{cookiecutter.camelcase_modelname}}Attention(
config=self.config,
embed_dim=self.embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
causal=True,
dtype=self.dtype,
)
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
self.activation_fn = ACT2FN[self.config.activation_function]
self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype)
self.encoder_attn = Flax{{cookiecutter.camelcase_modelname}}Attention(
config=self.config,
embed_dim=self.embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
dtype=self.dtype,
)
self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype)
self.fc1 = nn.Dense(
self.config.encoder_ffn_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.fc2 = nn.Dense(
self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
self.final_layer_norm = nn.LayerNorm(dtype=self.dtype)
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
class Flax{{cookiecutter.camelcase_modelname}}DecoderLayerCollection(nn.Module):
config: {{cookiecutter.camelcase_modelname}}Config
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.layers = [
Flax{{cookiecutter.camelcase_modelname}}DecoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.decoder_layers)
]
self.layerdrop = self.config.decoder_layerdrop
def __call__(
self,
hidden_states,
attention_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
for decoder_layer in self.layers:
if output_hidden_states:
all_hidden_states += (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if not deterministic and (dropout_probability < self.layerdrop):
layer_outputs = (None, None, None)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
init_cache=init_cache,
output_attentions=output_attentions,
deterministic=deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
class Flax{{cookiecutter.camelcase_modelname}}ClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
config: {{cookiecutter.camelcase_modelname}}Config
inner_dim: int
num_classes: int
pooler_dropout: float
dtype: jnp.dtype = jnp.float32
def setup(self):
self.dense = nn.Dense(
self.inner_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
self.dropout = nn.Dropout(rate=self.pooler_dropout)
self.out_proj = nn.Dense(
self.num_classes,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
def __call__(self, hidden_states: jnp.ndarray, deterministic: bool):
hidden_states = self.dropout(hidden_states, deterministic=deterministic)
hidden_states = self.dense(hidden_states)
hidden_states = jnp.tanh(hidden_states)
hidden_states = self.dropout(hidden_states, deterministic=deterministic)
hidden_states = self.out_proj(hidden_states)
return hidden_states
class Flax{{cookiecutter.camelcase_modelname}}Encoder(nn.Module):
config: {{cookiecutter.camelcase_modelname}}Config
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
embed_tokens: Optional[nn.Embed] = None
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
self.max_source_positions = self.config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
if self.embed_tokens is None:
self.embed_tokens = nn.Embed(
self.config.vocab_size,
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
# {{cookiecutter.camelcase_modelname}} is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
self.embed_positions = nn.Embed(
self.config.max_position_embeddings + self.offset,
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.layers = Flax{{cookiecutter.camelcase_modelname}}EncoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
input_shape = input_ids.shape
input_ids = input_ids.reshape(-1, input_shape[-1])
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(position_ids + self.offset)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
outputs = self.layers(
hidden_states,
attention_mask,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return outputs
return FlaxBaseModelOutput(
last_hidden_state=outputs.last_hidden_state,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class Flax{{cookiecutter.camelcase_modelname}}Decoder(nn.Module):
config: {{cookiecutter.camelcase_modelname}}Config
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
embed_tokens: Optional[nn.Embed] = None
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
self.max_target_positions = self.config.max_position_embeddings
self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
if self.embed_tokens is None:
self.embed_tokens = nn.Embed(
self.config.vocab_size,
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
# {{cookiecutter.camelcase_modelname}} is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
self.embed_positions = nn.Embed(
self.config.max_position_embeddings + self.offset,
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.layers = Flax{{cookiecutter.camelcase_modelname}}DecoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
input_shape = input_ids.shape
input_ids = input_ids.reshape(-1, input_shape[-1])
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
# embed positions
positions = self.embed_positions(position_ids + self.offset)
hidden_states = inputs_embeds + positions
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
outputs = self.layers(
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
deterministic=deterministic,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return outputs
return FlaxBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=outputs.last_hidden_state,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
class Flax{{cookiecutter.camelcase_modelname}}Module(nn.Module):
config: {{cookiecutter.camelcase_modelname}}Config
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.shared = nn.Embed(
self.config.vocab_size,
self.config.d_model,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.encoder = Flax{{cookiecutter.camelcase_modelname}}Encoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
self.decoder = Flax{{cookiecutter.camelcase_modelname}}Decoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
def _get_encoder_module(self):
return self.encoder
def _get_decoder_module(self):
return self.decoder
def __call__(
self,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
position_ids,
decoder_position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
position_ids=decoder_position_ids,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return FlaxSeq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
class Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel(FlaxPreTrainedModel):
config_class = {{cookiecutter.camelcase_modelname}}Config
base_model_prefix: str = "model"
module_class: nn.Module = None
def __init__(
self,
config: {{cookiecutter.camelcase_modelname}}Config,
input_shape: Tuple[int] = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
**kwargs
):
module = self.module_class(config=config, dtype=dtype, **kwargs)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> FrozenDict:
# init input tensors
input_ids = jnp.zeros(input_shape, dtype="i4")
# make sure initialization pass will work for Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule
input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id)
attention_mask = jnp.ones_like(input_ids)
decoder_input_ids = input_ids
decoder_attention_mask = jnp.ones_like(input_ids)
batch_size, sequence_length = input_ids.shape
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
return self.module.init(
rngs,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
position_ids,
decoder_position_ids,
)["params"]
    def init_cache(self, batch_size, max_length, encoder_outputs):
        r"""
        Initialize the autoregressive key/value cache used for fast decoding.

        Args:
            batch_size (`int`):
                batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
            max_length (`int`):
                maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
                cache.
            encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):
                `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`,
                *optional*: `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`,
                *optional*) is a sequence of hidden-states at the output of the last layer of the
                encoder. Used in the cross-attention of the decoder.
        """
        # init input variables to retrieve cache
        decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
        decoder_attention_mask = jnp.ones_like(decoder_input_ids)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
        )

        def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
            # run only the decoder sub-module so that just its cache variables get created
            decoder_module = module._get_decoder_module()
            return decoder_module(
                decoder_input_ids,
                decoder_attention_mask,
                decoder_position_ids,
                **kwargs,
            )

        # `init_cache=True` makes the attention modules allocate their "cache" variable collection
        init_variables = self.module.init(
            jax.random.PRNGKey(0),
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            encoder_hidden_states=encoder_outputs[0],
            init_cache=True,
            method=_decoder_forward,  # we only need to call the decoder to init the cache
        )
        return unfreeze(init_variables["cache"])
    @add_start_docstrings({{cookiecutter.uppercase_modelname}}_ENCODE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class={{cookiecutter.camelcase_modelname}}Config)
    def encode(
        self,
        input_ids: jnp.ndarray,
        attention_mask: Optional[jnp.ndarray] = None,
        position_ids: Optional[jnp.ndarray] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        train: bool = False,
        params: dict = None,
        dropout_rng: PRNGKey = None,
    ):
        r"""
        Run only the encoder and return its outputs.

        Returns:

        Example:

        ```python
        >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration

        >>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
        >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}')

        >>> text = "My friends are cool but they eat too many carbs."
        >>> inputs = tokenizer(text, max_length=1024, return_tensors='np')
        >>> encoder_outputs = model.encode(**inputs)
        ```"""
        # fall back to the config defaults for any flag that was not passed explicitly
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # defaults: attend to every token, positions 0..seq_len-1
        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)
        if position_ids is None:
            batch_size, sequence_length = input_ids.shape
            position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
            # run only the encoder sub-module of the full seq2seq module
            encode_module = module._get_encoder_module()
            return encode_module(input_ids, attention_mask, position_ids, **kwargs)

        return self.module.apply(
            {"params": params or self.params},
            input_ids=jnp.array(input_ids, dtype="i4"),
            attention_mask=jnp.array(attention_mask, dtype="i4"),
            position_ids=jnp.array(position_ids, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=not train,
            rngs=rngs,
            method=_encoder_forward,
        )
    @add_start_docstrings({{cookiecutter.uppercase_modelname}}_DECODE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class={{cookiecutter.camelcase_modelname}}Config)
    def decode(
        self,
        decoder_input_ids,
        encoder_outputs,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        decoder_attention_mask: Optional[jnp.ndarray] = None,
        decoder_position_ids: Optional[jnp.ndarray] = None,
        past_key_values: dict = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        train: bool = False,
        params: dict = None,
        dropout_rng: PRNGKey = None,
    ):
        r"""
        Returns:
        Example:
        ```python
        >>> import jax.numpy as jnp
        >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration
        >>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
        >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
        >>> text = "My friends are cool but they eat too many carbs."
        >>> inputs = tokenizer(text, max_length=1024, return_tensors='np')
        >>> encoder_outputs = model.encode(**inputs)
        >>> decoder_start_token_id = model.config.decoder_start_token_id
        >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
        >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
        >>> last_decoder_hidden_states = outputs.last_hidden_state
        ```"""
        # Fall back to config-level defaults when output flags are not supplied.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        # encoder_outputs[0] is the encoder's last hidden state (works for tuple or ModelOutput).
        encoder_hidden_states = encoder_outputs[0]
        if encoder_attention_mask is None:
            batch_size, sequence_length = encoder_hidden_states.shape[:2]
            encoder_attention_mask = jnp.ones((batch_size, sequence_length))
        batch_size, sequence_length = decoder_input_ids.shape
        if decoder_attention_mask is None:
            decoder_attention_mask = jnp.ones((batch_size, sequence_length))
        if decoder_position_ids is None:
            # During incremental decoding the absolute positions cannot be inferred
            # from decoder_input_ids alone, so the caller must provide them.
            if past_key_values is not None:
                raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
            decoder_position_ids = jnp.broadcast_to(
                jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
            )
        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng
        inputs = {"params": params or self.params}
        # if past_key_values are passed then cache is already initialized a private flag init_cache has to be
        # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that
        # it can be changed by Flax{{cookiecutter.camelcase_modelname}}Attention module
        if past_key_values:
            inputs["cache"] = past_key_values
            mutable = ["cache"]
        else:
            mutable = False
        def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
            # Route apply() through the decoder sub-module only.
            decoder_module = module._get_decoder_module()
            return decoder_module(
                decoder_input_ids,
                decoder_attention_mask,
                decoder_position_ids,
                **kwargs,
            )
        outputs = self.module.apply(
            inputs,
            decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
            decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
            decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=not train,
            rngs=rngs,
            mutable=mutable,
            method=_decoder_forward,
        )
        # add updated cache to model output (apply() returns (outputs, mutated_vars) when mutable is set)
        if past_key_values is not None and return_dict:
            outputs, past = outputs
            outputs["past_key_values"] = unfreeze(past["cache"])
            return outputs
        elif past_key_values is not None and not return_dict:
            outputs, past = outputs
            outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
        return outputs
    def __call__(
        self,
        input_ids: jnp.ndarray,
        attention_mask: Optional[jnp.ndarray] = None,
        decoder_input_ids: Optional[jnp.ndarray] = None,
        decoder_attention_mask: Optional[jnp.ndarray] = None,
        position_ids: Optional[jnp.ndarray] = None,
        decoder_position_ids: Optional[jnp.ndarray] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        train: bool = False,
        params: dict = None,
        dropout_rng: PRNGKey = None,
    ):
        # Full encoder-decoder forward pass; defaults any missing masks/positions
        # and derives decoder inputs from input_ids when they are not given.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        # prepare encoder inputs
        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)
        if position_ids is None:
            batch_size, sequence_length = input_ids.shape
            position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
        # prepare decoder inputs: teacher-forcing inputs are the labels shifted right
        if decoder_input_ids is None:
            decoder_input_ids = shift_tokens_right(
                input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id
            )
        if decoder_attention_mask is None:
            decoder_attention_mask = jnp.ones_like(decoder_input_ids)
        if decoder_position_ids is None:
            batch_size, sequence_length = decoder_input_ids.shape
            decoder_position_ids = jnp.broadcast_to(
                jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
            )
        # Handle any PRNG if needed
        rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
        return self.module.apply(
            {"params": params or self.params},
            input_ids=jnp.array(input_ids, dtype="i4"),
            attention_mask=jnp.array(attention_mask, dtype="i4"),
            position_ids=jnp.array(position_ids, dtype="i4"),
            decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
            decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
            decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=not train,
            rngs=rngs,
        )
@add_start_docstrings(
    "The bare {{cookiecutter.camelcase_modelname}} Model transformer outputting raw hidden-states without any specific head on top.",
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
# Thin public wrapper: the PreTrainedModel machinery instantiates `module_class`
# and exposes the encode/decode/__call__ API defined on the base class.
class Flax{{cookiecutter.camelcase_modelname}}Model(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel):
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    module_class = Flax{{cookiecutter.camelcase_modelname}}Module
# Attach the standard usage example to Flax...Model.__call__'s docstring.
append_call_sample_docstring(
    Flax{{cookiecutter.camelcase_modelname}}Model, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC
)
class Flax{{cookiecutter.camelcase_modelname}}ForConditionalGenerationModule(nn.Module):
    """Seq2seq module with a language-modeling head (tied or untied) on top of the base model."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32
    bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
    def setup(self):
        self.model = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype)
        # Projects decoder hidden states to vocabulary logits; bias is kept separate below.
        self.lm_head = nn.Dense(
            self.model.shared.num_embeddings,
            use_bias=False,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )
        self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))
    def _get_encoder_module(self):
        return self.model.encoder
    def _get_decoder_module(self):
        return self.model.decoder
    def __call__(
        self,
        input_ids,
        attention_mask,
        decoder_input_ids,
        decoder_attention_mask,
        position_ids,
        decoder_position_ids,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
    ):
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            position_ids=position_ids,
            decoder_position_ids=decoder_position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
        )
        hidden_states = outputs[0]
        if self.config.tie_word_embeddings:
            # Reuse the shared input embedding matrix (transposed) as the LM head kernel.
            shared_embedding = self.model.variables["params"]["shared"]["embedding"]
            lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
        else:
            lm_logits = self.lm_head(hidden_states)
        lm_logits += self.final_logits_bias.astype(self.dtype)
        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return output
        return FlaxSeq2SeqLMOutput(
            logits=lm_logits,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
@add_start_docstrings(
"The {{cookiecutter.uppercase_modelname}} Model with a language modeling head. Can be used for summarization.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING
)
class Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel):
module_class = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGenerationModule
dtype: jnp.dtype = jnp.float32
@add_start_docstrings({{cookiecutter.uppercase_modelname}}_DECODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class={{cookiecutter.camelcase_modelname}}Config)
def decode(
self,
decoder_input_ids,
encoder_outputs,
encoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
past_key_values: dict = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
deterministic: bool = True,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration
>>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
>>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=1024, return_tensors='np')
>>> encoder_outputs = model.encode(**inputs)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> logits = outputs.logits
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
encoder_hidden_states = encoder_outputs[0]
if encoder_attention_mask is None:
batch_size, sequence_length = encoder_hidden_states.shape[:2]
encoder_attention_mask = jnp.ones((batch_size, sequence_length))
batch_size, sequence_length = decoder_input_ids.shape
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones((batch_size, sequence_length))
if decoder_position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
# if past_key_values are passed then cache is already initialized a private flag init_cache has to be
# passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that
# it can be changed by Flax{{cookiecutter.camelcase_modelname}}Attention module
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
outputs = decoder_module(
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
**kwargs,
)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_embedding = module.model.variables["params"]["shared"]["embedding"]
lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
else:
lm_logits = module.lm_head(hidden_states)
lm_logits += module.final_logits_bias.astype(self.dtype)
return lm_logits, outputs
outputs = self.module.apply(
inputs,
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
rngs=rngs,
mutable=mutable,
method=_decoder_forward,
)
if past_key_values is None:
lm_logits, decoder_outputs = outputs
else:
(lm_logits, decoder_outputs), past = outputs
if return_dict:
outputs = FlaxCausalLMOutputWithCrossAttentions(
logits=lm_logits,
hidden_states=decoder_outputs.hidden_states,
attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
)
else:
outputs = (lm_logits,) + decoder_outputs[1:]
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs["past_key_values"] = unfreeze(past["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
return outputs
def prepare_inputs_for_generation(
self,
decoder_input_ids,
max_length,
attention_mask: Optional[jnp.DeviceArray] = None,
decoder_attention_mask: Optional[jnp.DeviceArray] = None,
encoder_outputs=None,
**kwargs
):
# initializing the cache
batch_size, seq_length = decoder_input_ids.shape
past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
# Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
# But since the decoder uses a causal mask, those positions are masked anyways.
# Thus we can create a single static attention_mask here, which is more efficient for compilation
extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
if decoder_attention_mask is not None:
position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
else:
position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
return {
"past_key_values": past_key_values,
"encoder_outputs": encoder_outputs,
"encoder_attention_mask": attention_mask,
"decoder_attention_mask": extended_attention_mask,
"decoder_position_ids": position_ids,
}
def update_inputs_for_generation(self, model_outputs, model_kwargs):
model_kwargs["past_key_values"] = model_outputs.past_key_values
model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
return model_kwargs
FLAX_{{cookiecutter.uppercase_modelname}}_CONDITIONAL_GENERATION_DOCSTRING = """
    Returns:
    Summarization example:
    ```python
    >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration
    >>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
    >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
    >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
    >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='np')
    >>> # Generate Summary
    >>> summary_ids = model.generate(inputs['input_ids']).sequences
    >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
    ```
    Mask filling example:
    ```python
    >>> import jax
    >>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration
    >>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
    >>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
    >>> TXT = "My friends are <mask> but they eat too many carbs."
    >>> input_ids = tokenizer([TXT], return_tensors='np')['input_ids']
    >>> logits = model(input_ids).logits
    >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
    >>> probs = jax.nn.softmax(logits[0, masked_index], axis=0)
    >>> values, predictions = jax.lax.top_k(probs, k=1)
    >>> tokenizer.decode(predictions).split()
    ```
"""
# Install the usage examples above on the generation model's __call__ docstring
# and register the structured return-type documentation.
overwrite_call_docstring(
    Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, {{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING + FLAX_{{cookiecutter.uppercase_modelname}}_CONDITIONAL_GENERATION_DOCSTRING
)
append_replace_return_docstrings(
    Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
)
class Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule(nn.Module):
    """Seq2seq module with a classification head fed by the decoder state at the final <eos> token."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32
    num_labels: Optional[int] = None
    def setup(self):
        self.model = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype)
        self.classification_head = Flax{{cookiecutter.camelcase_modelname}}ClassificationHead(
            config=self.config,
            inner_dim=self.config.d_model,
            num_classes=self.num_labels if self.num_labels is not None else self.config.num_labels,
            pooler_dropout=self.config.classifier_dropout,
        )
    def _get_encoder_module(self):
        return self.model.encoder
    def _get_decoder_module(self):
        return self.model.decoder
    def __call__(
        self,
        input_ids,
        attention_mask,
        decoder_input_ids,
        decoder_attention_mask,
        position_ids,
        decoder_position_ids,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
    ):
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            position_ids=position_ids,
            decoder_position_ids=decoder_position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
        )
        hidden_states = outputs[0]  # last hidden state
        # 1 where input token is <eos>, 0 elsewhere.
        eos_mask = jnp.where(input_ids == self.config.eos_token_id, 1, 0)
        # The first condition is necessary to overcome jax._src.errors.ConcretizationTypeError during JIT compilation
        if type(eos_mask) != jax.interpreters.partial_eval.DynamicJaxprTracer:
            if len(jnp.unique(eos_mask.sum(1))) > 1:
                raise ValueError("All examples must have the same number of <eos> tokens.")
            if any(eos_mask.sum(1) == 0):
                raise ValueError("There are missing <eos> tokens in input_ids")
            # Ensure to keep 1 only for the last <eos> token for each example
            eos_mask_noised = eos_mask + jnp.arange(eos_mask.shape[1]) * 1e-6
            eos_mask = jnp.where(eos_mask_noised == eos_mask_noised.max(1).reshape(-1, 1), 1, 0)
        # Select each example's last-<eos> hidden state as the sentence representation.
        sentence_representation = jnp.einsum("ijk, ij -> ijk", hidden_states, eos_mask).sum(1)
        logits = self.classification_head(sentence_representation, deterministic=deterministic)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return output
        return FlaxSeq2SeqSequenceClassifierOutput(
            logits=logits,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
@add_start_docstrings(
    """
    {{cookiecutter.camelcase_modelname}} model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE
    tasks.
    """,
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
# Thin public wrapper around the sequence-classification module above.
class Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel):
    module_class = Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassificationModule
    dtype = jnp.float32
# Attach the standard usage example to __call__'s docstring.
append_call_sample_docstring(
    Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification,
    _TOKENIZER_FOR_DOC,
    _CHECKPOINT_FOR_DOC,
    FlaxSeq2SeqSequenceClassifierOutput,
    _CONFIG_FOR_DOC,
)
class Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnsweringModule(nn.Module):
    """Seq2seq module with a span-prediction head producing start/end logits for extractive QA."""
    config: {{cookiecutter.camelcase_modelname}}Config
    dtype: jnp.dtype = jnp.float32
    # Plain class attribute (not a dataclass field): one logit each for span start and end.
    num_labels = 2
    def setup(self):
        self.model = Flax{{cookiecutter.camelcase_modelname}}Module(config=self.config, dtype=self.dtype)
        self.qa_outputs = nn.Dense(
            self.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
        )
    def _get_encoder_module(self):
        return self.model.encoder
    def _get_decoder_module(self):
        return self.model.decoder
    def __call__(
        self,
        input_ids,
        attention_mask,
        decoder_input_ids,
        decoder_attention_mask,
        position_ids,
        decoder_position_ids,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
    ):
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            position_ids=position_ids,
            decoder_position_ids=decoder_position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
        )
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        # Split the last (num_labels=2) dimension into separate start/end logit tensors.
        start_logits, end_logits = jnp.split(logits, logits.shape[-1], axis=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return output
        return FlaxSeq2SeqQuestionAnsweringModelOutput(
            start_logits=start_logits,
            end_logits=end_logits,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
@add_start_docstrings(
    """
    {{cookiecutter.uppercase_modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    {{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
# Thin public wrapper around the question-answering module above.
class Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering(Flax{{cookiecutter.camelcase_modelname}}PreTrainedModel):
    module_class = Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnsweringModule
    dtype = jnp.float32
# Attach the standard usage example to __call__'s docstring.
append_call_sample_docstring(
    Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering,
    _TOKENIZER_FOR_DOC,
    _CHECKPOINT_FOR_DOC,
    FlaxSeq2SeqQuestionAnsweringModelOutput,
    _CONFIG_FOR_DOC,
)
{% endif -%}
| [
"[email protected]"
] | |
019af73cde0ddb511c0a9d5cb31e3865fc933fd2 | 61f9c7094be028e040d1234f05ee6d7370c2206d | /pytext/models/output_layers/intent_slot_output_layer.py | 1a831b9e0f9fcb261594fa81513bece669755bdb | [
"BSD-3-Clause"
] | permissive | timgates42/pytext | 3ce5473fecca5174108a4eb63209a3eecfb6d8dd | 5f2c3ca6c3ba56e1001e95825abd7ee295de1dff | refs/heads/main | 2023-03-15T07:33:21.217159 | 2022-07-11T16:06:16 | 2022-07-11T16:06:16 | 231,028,915 | 0 | 0 | NOASSERTION | 2019-12-31T05:04:01 | 2019-12-31T05:04:00 | null | UTF-8 | Python | false | false | 7,272 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from caffe2.python import core
from pytext.common.constants import DatasetFieldName
from pytext.data.utils import Vocabulary
from pytext.models.module import create_module
from pytext.utils.usage import log_class_usage
from torch import jit
from .doc_classification_output_layer import ClassificationOutputLayer
from .output_layer_base import OutputLayerBase
from .utils import query_word_reprs
from .word_tagging_output_layer import CRFOutputLayer, WordTaggingOutputLayer
class IntentSlotScores(nn.Module):
    """TorchScript-able module that turns joint (document, word) logits into
    per-label score dictionaries via the wrapped scripted score modules."""
    def __init__(self, doc_scores: jit.ScriptModule, word_scores: jit.ScriptModule):
        super().__init__()
        self.doc_scores = doc_scores
        self.word_scores = word_scores
        log_class_usage(__class__)
    def forward(
        self,
        logits: Tuple[torch.Tensor, torch.Tensor],
        context: Dict[str, torch.Tensor],
    ) -> Tuple[List[Dict[str, float]], List[List[Dict[str, float]]]]:
        doc_logits, word_logits = logits
        # Document scores are independent of token selection; compute them first.
        doc_results = self.doc_scores(doc_logits)
        # Optionally narrow word logits to the representations of selected tokens.
        if "token_indices" in context:
            word_logits = query_word_reprs(word_logits, context["token_indices"])
        word_results = self.word_scores(word_logits, context)
        return doc_results, word_results
class IntentSlotOutputLayer(OutputLayerBase):
    """
    Output layer for joint intent classification and slot-filling models.
    Intent classification is a document classification problem and slot filling
    is a word tagging problem. Thus terms these can be used interchangeably in the
    documentation.
    Args:
        doc_output (ClassificationOutputLayer): Output layer for intent
            classification task. See
            :class:`~pytext.models.output_layers.ClassificationOutputLayer` for
            details.
        word_output (WordTaggingOutputLayer): Output layer for slot filling task.
            See :class:`~pytext.models.output_layers.WordTaggingOutputLayer` for
            details.
    Attributes:
        doc_output (type): Output layer for intent classification task.
        word_output (type): Output layer for slot filling task.
    """
    class Config(OutputLayerBase.Config):
        # Sub-layer configs; the word (slot) layer may be a softmax tagger or a CRF.
        doc_output: ClassificationOutputLayer.Config = (
            ClassificationOutputLayer.Config()
        )
        word_output: Union[
            WordTaggingOutputLayer.Config, CRFOutputLayer.Config
        ] = WordTaggingOutputLayer.Config()
    @classmethod
    def from_config(
        cls, config: Config, doc_labels: Vocabulary, word_labels: Vocabulary
    ):
        # Build the two sub-layers from their configs and label vocabularies.
        return cls(
            create_module(config.doc_output, labels=doc_labels),
            create_module(config.word_output, labels=word_labels),
        )
    def __init__(
        self, doc_output: ClassificationOutputLayer, word_output: WordTaggingOutputLayer
    ) -> None:
        super().__init__()
        self.doc_output = doc_output
        self.word_output = word_output
        log_class_usage(__class__)
    def get_loss(
        self,
        logits: Tuple[torch.Tensor, torch.Tensor],
        targets: Tuple[torch.Tensor, torch.Tensor],
        context: Dict[str, Any] = None,
        *args,
        **kwargs,
    ) -> torch.Tensor:
        """Compute and return the averaged intent and slot-filling loss.
        Args:
            logit (Tuple[torch.Tensor, torch.Tensor]): Logits returned by
                :class:`~pytext.models.joint_model.JointModel`. It is a tuple
                containing logits for intent classification and slot filling.
            targets (Tuple[torch.Tensor, torch.Tensor]): Tuple of target Tensors
                containing true document label/target and true word labels/targets.
            context (Dict[str, Any]): Context is a dictionary of items
                that's passed as additional metadata. Defaults to None.
        Returns:
            torch.Tensor: Averaged intent and slot loss.
        """
        d_logit, w_logit = logits
        # Optionally narrow word logits to the representations of selected tokens.
        if DatasetFieldName.TOKEN_INDICES in context:
            w_logit = query_word_reprs(w_logit, context[DatasetFieldName.TOKEN_INDICES])
        d_target, w_target = targets
        # Per-example weights supplied by the data pipeline.
        d_weight = context[DatasetFieldName.DOC_WEIGHT_FIELD]  # noqa
        w_weight = context[DatasetFieldName.WORD_WEIGHT_FIELD]  # noqa
        # reduce=False keeps per-example (doc) / per-token (word) losses so they
        # can be weighted individually below.
        d_loss = self.doc_output.get_loss(
            d_logit, d_target, context=context, reduce=False
        )
        w_loss = self.word_output.get_loss(
            w_logit, w_target, context=context, reduce=False
        )
        # w_loss could have been flattened
        w_hard_target = w_target[0] if type(w_target) is tuple else w_target
        if w_loss.size()[0] != w_hard_target.size()[0]:
            # Restore (batch, seq_len) shape, then average over tokens per example.
            w_loss = w_loss.reshape(w_hard_target.size())
            w_loss = torch.mean(w_loss, dim=1)
        # Weight each example's loss, then average across the batch.
        d_weighted_loss = torch.mean(torch.mul(d_loss, d_weight))
        w_weighted_loss = torch.mean(torch.mul(w_loss, w_weight))
        return d_weighted_loss + w_weighted_loss
    def get_pred(
        self,
        logits: Tuple[torch.Tensor, torch.Tensor],
        targets: Optional[torch.Tensor] = None,
        context: Optional[Dict[str, Any]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute and return prediction and scores from the model.
        Args:
            logit (Tuple[torch.Tensor, torch.Tensor]): Logits returned by
                :class:`~pytext.models.joint_model.JointModel`. It's tuple
                containing logits for intent classification and slot filling.
            targets (Optional[torch.Tensor]): Not applicable. Defaults to None.
            context (Optional[Dict[str, Any]]): Context is a dictionary of items
                that's passed as additional metadata. Defaults to None.
        Returns:
            Tuple[torch.Tensor, torch.Tensor]: Model prediction and scores.
        """
        d_logit, w_logit = logits
        if DatasetFieldName.TOKEN_INDICES in context:
            w_logit = query_word_reprs(w_logit, context[DatasetFieldName.TOKEN_INDICES])
        # Delegate to each sub-layer and pair up (prediction, score) tuples.
        d_pred, d_score = self.doc_output.get_pred(d_logit, None, context)
        w_pred, w_score = self.word_output.get_pred(w_logit, None, context)
        return (d_pred, w_pred), (d_score, w_score)
    def export_to_caffe2(
        self,
        workspace: core.workspace,
        init_net: core.Net,
        predict_net: core.Net,
        model_out: List[torch.Tensor],
        doc_out_name: str,
        word_out_name: str,
    ) -> List[core.BlobReference]:
        """
        Exports the intent slot output layer to Caffe2.
        See `OutputLayerBase.export_to_caffe2()` for details.
        """
        # Export each sub-layer against its own model output and concatenate blobs.
        return self.doc_output.export_to_caffe2(
            workspace, init_net, predict_net, model_out[0], doc_out_name
        ) + self.word_output.export_to_caffe2(
            workspace, init_net, predict_net, model_out[1], word_out_name
        )
    def torchscript_predictions(self):
        # Script the two sub-layers and wrap them in a single scripted module.
        doc_scores = self.doc_output.torchscript_predictions()
        word_scores = self.word_output.torchscript_predictions()
        return jit.script(IntentSlotScores(doc_scores, word_scores))
| [
"[email protected]"
] | |
429b8aebe0e1a0732958d27d2a34cc2b6c0d64f3 | b28305dab0be0e03765c62b97bcd7f49a4f8073d | /components/cronet/tools/cr_cronet.py | 50a1c371b6c5044149e25ebb99b3029e2786eb66 | [
"BSD-3-Clause"
] | permissive | svarvel/browser-android-tabs | 9e5e27e0a6e302a12fe784ca06123e5ce090ced5 | bd198b4c7a1aca2f3e91f33005d881f42a8d0c3f | refs/heads/base-72.0.3626.105 | 2020-04-24T12:16:31.442851 | 2019-08-02T19:15:36 | 2019-08-02T19:15:36 | 171,950,555 | 1 | 2 | NOASSERTION | 2019-08-02T19:15:37 | 2019-02-21T21:47:44 | null | UTF-8 | Python | false | false | 7,538 | py | #!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
cr_cronet.py - cr - like helper tool for cronet developers
"""
import argparse
import os
import sys
def run(command, extra_options=''):
    """Echo *command* (plus optional extra options) and execute it in a shell.

    Args:
        command: the shell command line to execute.
        extra_options: extra text appended to the command after a space.

    Returns:
        The raw exit status from os.system() (0 on success; on POSIX the
        child's exit code is encoded in the high byte).
    """
    command = command + ' ' + extra_options
    # Single-argument print(...) behaves identically on Python 2 and 3,
    # unlike the original `print command` statement, which is a
    # SyntaxError on Python 3.
    print(command)
    return os.system(command)
def gn(out_dir, gn_args, gn_extra=''):
    """Generate ninja build files in out_dir with the given GN args."""
    return run('gn gen {0} --args=\'{1}\' {2}'.format(out_dir, gn_args, gn_extra))


def build(out_dir, build_target, extra_options=''):
    """Build *build_target* in out_dir with ninja (goma jobs flag if enabled)."""
    ninja_cmd = 'ninja -C {0} {1}'.format(out_dir, build_target)
    return run(ninja_cmd, get_ninja_jobs_option() + extra_options)


def install(out_dir):
    """Install the Cronet test and support APKs onto the attached device."""
    def install_apk(apk_name):
        return run('build/android/adb_install_apk.py %s/apks/%s' % (out_dir, apk_name))
    # run() returns 0 (falsy) on success, so `or` short-circuits: the
    # support APK only installs after the test APK succeeded.
    return install_apk('CronetTest.apk') or \
        install_apk('ChromiumNetTestSupport.apk')


def test(out_dir, extra_options):
    """Run the Android instrumentation tests from out_dir."""
    return run('%s/bin/run_cronet_test_instrumentation_apk %s' % (out_dir, extra_options))


def unittest(out_dir, extra_options):
    """Run the Cronet Android unit tests from out_dir."""
    return run('%s/bin/run_cronet_unittests_android %s' % (out_dir, extra_options))


def test_ios(out_dir, extra_options):
    """Run the iOS cronet_test app in the simulator."""
    return run('%s/iossim -c "%s" %s/cronet_test.app' % (out_dir, extra_options, out_dir))


def unittest_ios(out_dir, extra_options):
    """Run the iOS cronet unit-test app in the simulator."""
    return run('%s/iossim -c "%s" %s/cronet_unittests_ios.app' % (out_dir, extra_options, out_dir))


def debug(extra_options):
    """Attach adb_gdb to the CronetTest activity on the device."""
    gdb_cmd = ('build/android/adb_gdb --start '
               '--activity=.CronetTestActivity '
               '--program-name=CronetTest '
               '--package-name=org.chromium.net')
    return run(gdb_cmd, extra_options)


def stack(out_dir):
    """Symbolize the device logcat with the Android stack script."""
    return run('adb logcat -d | CHROMIUM_OUTPUT_DIR=%s'
               ' third_party/android_platform/development/scripts/stack' % out_dir)
def use_goma():
    """Return the GN argument enabling goma when an install looks present.

    Goma is considered available when ~/goma exists or either of the
    GOMA_DIR / GOMADIR environment variables is set; otherwise returns ''.
    """
    goma_signals = (
        os.path.exists(os.path.expanduser("~/goma")),
        os.environ.get("GOMA_DIR"),
        os.environ.get("GOMADIR"),
    )
    return 'use_goma=true ' if any(goma_signals) else ''
def get_ninja_jobs_option():
    """Return the extra ninja -j flag to use when goma is enabled."""
    return " -j1000 " if use_goma() else ""


def get_default_gn_args(target_os, is_release):
    """Return the GN args common to every Cronet build for *target_os*."""
    parts = [
        'target_os="%s" ' % target_os,
        'enable_websockets=false ',
        'disable_file_support=true disable_ftp_support=true ',
        'disable_brotli_filter=false ',
        'is_component_build=false ',
        'use_crash_key_stubs=true ',
        'ignore_elf32_limitations=true use_partition_alloc=false ',
        'include_transport_security_state_preload_list=false ',
        use_goma(),
    ]
    if is_release:
        parts.append('is_debug=false is_official_build=true ')
    return ''.join(parts)


def get_mobile_gn_args(target_os, is_release):
    """Default GN args plus the mobile platform ICU alternative."""
    return get_default_gn_args(target_os, is_release) + \
        'use_platform_icu_alternatives=true '


def get_ios_gn_args(is_release, target_cpu):
    """Mobile GN args specialized for an iOS Cronet build on *target_cpu*."""
    ios_args = ('is_cronet_build=true '
                'use_xcode_clang=true '
                'ios_deployment_target="9.0" '
                'enable_dsyms=true '
                'target_cpu="%s" ' % target_cpu)
    return get_mobile_gn_args('ios', is_release) + ios_args


def get_mac_gn_args(is_release):
    """Default GN args specialized for the macOS Cronet build."""
    return ''.join([
        get_default_gn_args('mac', is_release),
        'disable_histogram_support=true ',
        'enable_dsyms=true ',
    ])
def main():
    """CLI entry point for the cr_cronet developer helper.

    Parses the subcommand plus flags, derives the GN args and output
    directory for the host platform (iOS when running on macOS, Android
    otherwise), and dispatches to the matching build/test/debug helper.

    Returns:
        The exit status of the dispatched command, or 1 if the command is
        not valid for this platform.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('command',
                        choices=['gn',
                                 'sync',
                                 'build',
                                 'install',
                                 'proguard',
                                 'test',
                                 'build-test',
                                 'unit',
                                 'build-unit',
                                 'stack',
                                 'debug',
                                 'build-debug'])
    parser.add_argument('-d', '--out_dir', action='store',
                        help='name of the build directory')
    parser.add_argument('-i', '--iphoneos', action='store_true',
                        help='build for physical iphone')
    parser.add_argument('-x', '--x86', action='store_true',
                        help='build for Intel x86 architecture')
    parser.add_argument('-r', '--release', action='store_true',
                        help='use release configuration')
    parser.add_argument('-a', '--asan', action='store_true',
                        help='use address sanitizer')
    # parse_known_args: unrecognized arguments are forwarded verbatim to
    # the dispatched command (e.g. extra test runner flags).
    options, extra_options_list = parser.parse_known_args()
    print options
    print extra_options_list
    # On macOS this script targets iOS; everywhere else it targets Android.
    is_ios = (sys.platform == 'darwin')
    if is_ios:
        test_target = 'cronet_test'
        unit_target = 'cronet_unittests_ios'
        gn_extra = '--ide=xcode'
        if options.iphoneos:
            gn_args = get_ios_gn_args(options.release, 'arm64')
            out_dir_suffix = '-iphoneos'
        else:
            # Default iOS build: 64-bit simulator.
            gn_args = get_ios_gn_args(options.release, 'x64')
            out_dir_suffix = '-iphonesimulator'
            if options.asan:
                gn_args += 'is_asan=true '
                out_dir_suffix += '-asan'
    else:
        test_target = 'cronet_test_instrumentation_apk'
        unit_target = 'cronet_unittests_android'
        gn_args = get_mobile_gn_args('android', options.release) + \
            'use_errorprone_java_compiler=true enable_reporting=true '
        gn_extra = ''
        out_dir_suffix = ''
        if options.x86:
            gn_args += 'target_cpu="x86" '
            out_dir_suffix = '-x86'
        else:
            gn_args += 'arm_use_neon=false '
        if options.asan:
            # ASAN on Android requires one-time setup described here:
            # https://www.chromium.org/developers/testing/addresssanitizer
            gn_args += 'is_asan=true is_clang=true is_debug=false '
            out_dir_suffix += '-asan'
    extra_options = ' '.join(extra_options_list)
    if options.release:
        out_dir = 'out/Release' + out_dir_suffix
    else:
        out_dir = 'out/Debug' + out_dir_suffix
    # An explicit -d/--out_dir overrides the derived directory.
    if options.out_dir:
        out_dir = options.out_dir
    # Platform-independent commands.
    if (options.command=='gn'):
        return gn(out_dir, gn_args, gn_extra)
    if (options.command=='sync'):
        return run('git pull --rebase && gclient sync')
    if (options.command=='build'):
        return build(out_dir, test_target, extra_options)
    if (not is_ios):
        # Android-only commands (device install/debug/stack helpers).
        if (options.command=='install'):
            return install(out_dir)
        if (options.command=='proguard'):
            return build (out_dir, 'cronet_sample_proguard_apk')
        if (options.command=='test'):
            return install(out_dir) or test(out_dir, extra_options)
        if (options.command=='build-test'):
            return build(out_dir, test_target) or install(out_dir) or \
                test(out_dir, extra_options)
        if (options.command=='stack'):
            return stack(out_dir)
        if (options.command=='debug'):
            return install(out_dir) or debug(extra_options)
        if (options.command=='build-debug'):
            return build(out_dir, test_target) or install(out_dir) or \
                debug(extra_options)
        if (options.command=='unit'):
            return unittest(out_dir, extra_options)
        if (options.command=='build-unit'):
            return build(out_dir, unit_target) or unittest(out_dir, extra_options)
    else:
        # iOS-only commands (simulator test runners).
        if (options.command=='test'):
            return test_ios(out_dir, extra_options)
        if (options.command=='build-test'):
            return build(out_dir, test_target) or test_ios(out_dir, extra_options)
        if (options.command=='unit'):
            return unittest_ios(out_dir, extra_options)
        if (options.command=='build-unit'):
            return build(out_dir, unit_target) or unittest_ios(out_dir, extra_options)
    # Command not applicable on this platform: show usage and fail.
    parser.print_help()
    return 1


if __name__ == '__main__':
    sys.exit(main())
| [
"[email protected]"
] | |
75d1e9b373c45d4eeb5da68148b4476d955e7fd8 | 0811cd3a37e82eb5175e0c1ffdc13510b256c7de | /bcs-app/backend/apps/configuration/yaml_mode/release.py | 74c76eec59643c3e951e70340e7b17d81810fa86 | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unicode",
"ICU",
"LicenseRef-scancode-unknown-license-reference",
"Artistic-2.0",
"Zlib",
"LicenseRef-scancode-openssl",
"NAIST-2003",
"ISC",
"NTP",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | QiChangYin/bk-bcs-saas-copy-modify | 363d08a64dd860329b8ab051a02f4733a29c33dd | 7ef3b003ff20857a99415d7a3c99d8eb4f5764d9 | refs/heads/master | 2023-02-10T02:30:40.616494 | 2020-01-13T09:23:18 | 2020-01-13T09:23:18 | 233,553,174 | 0 | 0 | NOASSERTION | 2023-02-02T05:12:22 | 2020-01-13T09:01:09 | Python | UTF-8 | Python | false | false | 5,381 | py | # -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
# Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import json
import datetime
from io import StringIO
from collections import OrderedDict
import jinja2
from ruamel.yaml import YAML
from dataclasses import dataclass
from rest_framework.exceptions import ParseError
from backend.apps.configuration.constants import FileResourceName
from backend.bcs_k8s.app import bcs_info_injector
from backend.bcs_k8s.helm import bcs_variable
@dataclass
class ReleaseData:
    """Everything needed to render and release template files into a namespace."""

    # ID of the project the release belongs to.
    project_id: str
    # Namespace metadata; ReleaseDataProcessor reads 'id', 'name' and
    # 'cluster_id' keys from it.
    namespace_info: dict
    # Show version of the template set; its `.name` attribute is used as
    # the release version string.
    show_version: OrderedDict
    # List of resource file groups, each holding a 'files' list whose
    # entries carry a 'content' manifest string.
    template_files: list
class ReleaseDataProcessor:
    """Renders template files for a namespace and injects BCS metadata.

    For every manifest in the raw release data this: substitutes BCS/system
    variables via Jinja2, injects BCS bookkeeping info, forces the target
    namespace on namespaced resources, and returns a ready ReleaseData.
    """

    def __init__(self, user, raw_release_data):
        self.access_token = user.token.access_token
        self.username = user.username
        self.project_id = raw_release_data.project_id
        self.namespace_info = raw_release_data.namespace_info
        self.show_version = raw_release_data.show_version
        self.template_files = raw_release_data.template_files

    def _parse_yaml(self, yaml_content):
        """Parse a (possibly multi-document) YAML string into plain dicts."""
        try:
            yaml = YAML()
            resources = list(yaml.load_all(yaml_content))
        except Exception as e:
            raise ParseError(f'Parse manifest failed: \n{e}\n\nManifest content:\n{yaml_content}')
        else:
            # ordereddict to dict: round-trip through JSON strips ruamel's
            # ordered container types.
            return json.loads(json.dumps(resources))

    def _join_manifest(self, resources):
        """Serialize resources back into one multi-document YAML string."""
        try:
            yaml = YAML()
            s = StringIO()
            yaml.dump_all(resources, s)
        except Exception as e:
            raise ParseError(f'join manifest failed: {e}')
        else:
            return s.getvalue()

    def _get_bcs_variables(self):
        """Collect system variables merged with namespace-scoped variables."""
        sys_variables = bcs_variable.collect_system_variable(
            access_token=self.access_token,
            project_id=self.project_id,
            namespace_id=self.namespace_info['id']
        )
        bcs_variables = bcs_variable.get_namespace_variables(self.project_id, self.namespace_info['id'])
        # Namespace variables override system ones on key clash.
        sys_variables.update(bcs_variables)
        return sys_variables

    def _render_with_variables(self, raw_content, bcs_variables):
        """Render raw_content as a Jinja2 template with the given variables."""
        t = jinja2.Template(raw_content)
        return t.render(bcs_variables)

    def _set_namespace(self, resources):
        """Force metadata.namespace on every namespaced resource in place."""
        # Cluster-scoped kinds must not carry a namespace, so skip them.
        ignore_ns_res = [FileResourceName.ClusterRole.value,
                         FileResourceName.ClusterRoleBinding.value,
                         FileResourceName.StorageClass.value,
                         FileResourceName.PersistentVolume.value]
        try:
            for res_manifest in resources:
                if res_manifest['kind'] in ignore_ns_res:
                    continue
                metadata = res_manifest['metadata']
                metadata['namespace'] = self.namespace_info['name']
        except Exception:
            raise ParseError('set namespace failed: no valid metadata in manifest')

    def _inject_bcs_info(self, yaml_content, inject_configs):
        """Inject BCS info into each resource of the manifest and pin namespace."""
        resources = self._parse_yaml(yaml_content)
        context = {
            'creator': self.username,
            'updator': self.username,
            'version': self.show_version.name
        }
        manager = bcs_info_injector.InjectManager(
            configs=inject_configs,
            resources=resources,
            context=context
        )
        resources = manager.do_inject()
        self._set_namespace(resources)
        return self._join_manifest(resources)

    def _get_inject_configs(self):
        """Build the BCS injector configuration for this release."""
        now = datetime.datetime.now()
        configs = bcs_info_injector.inject_configs(
            access_token=self.access_token,
            project_id=self.project_id,
            cluster_id=self.namespace_info['cluster_id'],
            namespace_id=self.namespace_info['id'],
            namespace=self.namespace_info['name'],
            creator=self.username,
            updator=self.username,
            created_at=now,
            updated_at=now,
            version=self.show_version.name,
            source_type='template'
        )
        return configs

    def _inject(self, raw_content, inject_configs, bcs_variables):
        """Render variables, then inject BCS info into one manifest string."""
        content = self._render_with_variables(raw_content, bcs_variables)
        content = self._inject_bcs_info(content, inject_configs)
        return content

    def release_data(self):
        """Return a ReleaseData whose file contents are fully rendered.

        NOTE: mutates self.template_files in place (each file's 'content'
        is replaced with the rendered/injected manifest).
        """
        inject_configs = self._get_inject_configs()
        bcs_variables = self._get_bcs_variables()
        for res_files in self.template_files:
            for f in res_files['files']:
                f['content'] = self._inject(f['content'], inject_configs, bcs_variables)
        return ReleaseData(self.project_id, self.namespace_info, self.show_version, self.template_files)
| [
"[email protected]"
] | |
58a9bca015b47a6e87d1c80668382adeb76cc14b | fa3e527114cd5799dddb0a25067da4923eae354e | /FastSim/JUNO/LearnPDF/learn_pdf.py | 58df9e1684c302ce7aff741034c46b988eabe111 | [] | no_license | wenxingfang/FastSim_ML | e64c6b56ce2afd703d1ddda0ada2de6f65fde049 | d2f1abbb2f6879313d5f4f137b64c4d8bf10fe83 | refs/heads/master | 2022-11-28T01:35:39.727895 | 2020-08-03T15:47:37 | 2020-08-03T15:47:37 | 284,734,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,595 | py | import h5py
import sys
import argparse
import numpy as np
import math
import tensorflow as tf
#import tensorflow_probability as tfp
#import matplotlib.pyplot as plt
from sklearn.utils import shuffle
def load_data(data):
    """Read hit times, photo-electron counts and PMT angles from an HDF5 file.

    The PMT polar angle (0-180 degrees in the file) is normalized in place
    to [0, 1], and the per-event hit_time / n_pe arrays are shuffled
    together before being returned.
    """
    print('load data:', data)
    h5_file = h5py.File(data, 'r')
    hit_time = h5_file['firstHitTimeByPMT'][:]
    n_pe = h5_file['nPEByPMT'][:]
    theta_PMT = h5_file['infoPMT'][:]  # polar angle, 0 to 180 degrees
    h5_file.close()
    # Normalize the angle into the unit interval.
    theta_PMT[:] = theta_PMT[:] / 180
    print("hit_time:", hit_time.shape, ", n_pe:", n_pe.shape,
          ",theta:", theta_PMT.shape, ",event sizes:", n_pe.shape[0])
    hit_time, n_pe = shuffle(hit_time, n_pe)
    return hit_time, n_pe, theta_PMT
def Normal_cost(mu, sigma, y):
    """Mean negative log-likelihood of y under Normal(mu, sigma)."""
    normal = tf.distributions.Normal(loc=mu, scale=sigma)
    return tf.reduce_mean(-normal.log_prob(y))


def Possion_cost(rate, y):
    """Mean negative log-likelihood of y under Poisson(rate).

    Computed analytically: log P(y) = y*log(rate) - log(y!) - rate.
    """
    log_prob = y * tf.math.log(rate) - tf.math.lgamma(1. + y) - rate
    return tf.reduce_mean(-log_prob)


def mae_cost(pred_y, label_y):
    """Mean absolute error between sorted predictions and sorted labels."""
    sorted_pred = tf.sort(pred_y, axis=0, direction='ASCENDING', name=None)
    sorted_label = tf.sort(label_y, axis=0, direction='ASCENDING', name=None)
    return tf.reduce_mean(tf.math.abs(sorted_pred - sorted_label))


def mse_cost(pred_y, label_y):
    """Mean squared error between sorted predictions and sorted labels."""
    sorted_pred = tf.sort(pred_y, axis=0, direction='ASCENDING', name=None)
    sorted_label = tf.sort(label_y, axis=0, direction='ASCENDING', name=None)
    return tf.reduce_mean(tf.math.pow(sorted_pred - sorted_label, 2))
def get_parser():
    """Build the command-line argument parser for MDN training runs."""
    parser = argparse.ArgumentParser(
        description='Run MDN training. '
        'Sensible defaults come from https://github.com/taboola/mdn-tensorflow-notebook-example/blob/master/mdn.ipynb',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag, type, default, help) -- order preserved for --help output.
    arg_specs = [
        ('--datafile', str, None, 'HDF5 file paths'),
        ('--nb-epochs', int, 50, 'Number of epochs to train for.'),
        ('--batch-size', int, 2, 'batch size per update'),
        ('--ckpt_path', str, None, 'ckpt file paths'),
        ('--produceEvent', int, None, 'produceEvent'),
        ('--outFileName', str, None, 'outFileName file paths'),
        ('--validation_file', str, None, 'validation_file file paths'),
        ('--disc-lr', float, 2e-5, 'Adam learning rate for discriminator'),
        ('--gen-lr', float, 2e-4, 'Adam learning rate for generator'),
        ('--adam-beta', float, 0.5, 'Adam beta_1 parameter'),
    ]
    for flag, arg_type, default, help_text in arg_specs:
        # default=None is argparse's own default, so passing it explicitly
        # is equivalent to omitting it for the options that had none.
        parser.add_argument(flag, action='store', type=arg_type,
                            default=default, help=help_text)
    return parser
if __name__ == '__main__':
    # NOTE(review): the indentation below was reconstructed from a
    # whitespace-stripped dump -- verify the loop nesting (training /
    # validation / prediction-saving per epoch) against the original.
    print('start...')
    #physical_devices = tf.config.experimental.list_physical_devices('GPU')
    #if physical_devices:
    #  tf.config.experimental.set_memory_growth(physical_devices[0], True)
    #####################################
    parser = get_parser()
    parse_args = parser.parse_args()
    epochs = parse_args.nb_epochs
    batch_size = parse_args.batch_size
    datafile = parse_args.datafile
    ckpt_path = parse_args.ckpt_path
    produceEvent = parse_args.produceEvent
    outFileName = parse_args.outFileName
    validation_file = parse_args.validation_file
    #####################################
    # Build a small MLP: 2 inputs (theta, noise) -> 3 tanh layers -> one
    # non-negative output trained against the MSE-of-sorted-values cost.
    print('constructing graph')
    tf.reset_default_graph()
    x = tf.placeholder(name='x',shape=(None,2),dtype=tf.float32)
    y = tf.placeholder(name='y',shape=(None,1),dtype=tf.float32)
    layer = x
    for _ in range(3):
        layer = tf.layers.dense(inputs=layer, units=12, activation=tf.nn.tanh)
    #Pred_y = tf.layers.dense(inputs=layer, units=1, activation=lambda x: tf.nn.elu(x) + 1)
    Pred_y = tf.layers.dense(inputs=layer, units=1, activation=lambda x: tf.nn.relu(x))
    cost = mse_cost(Pred_y, y)
    learning_rate = 0.0003
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()
    ########################################
    # Read the dataset list file: one HDF5 path per line, '#' lines skipped.
    print('preparing data')
    f_DataSet = open(datafile, 'r')
    Data = []
    Event = []
    Batch = []
    for line in f_DataSet:
        idata = line.strip('\n')
        idata = idata.strip(' ')
        if "#" in idata: continue ##skip the commented one
        Data.append(idata)
        print(idata)
        d = h5py.File(str(idata), 'r')
        ievent = d['infoMC'].shape[0]
        d.close()
        Event.append(float(ievent))
        Batch.append(int(float(ievent)/batch_size))
    total_event = sum(Event)
    f_DataSet.close()
    print('total sample:', total_event)
    print('All Batch:', Batch)
    ########################################
    print('commencing training')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(epochs):
            total_cost = 0
            for ib in range(len(Batch)):
                hit_time, n_pe, theta = load_data(Data[ib])
                # Repeat the single theta row so it matches the batch size.
                theta = theta.repeat(batch_size,axis=0)
                ibatch = Batch[ib]
                print('ib {0}, ibatch {1}'.format(ib, ibatch))
                for index in range(ibatch):
                    hit_time_batch = hit_time [index * batch_size:(index + 1) * batch_size]
                    n_pe_batch = n_pe [index * batch_size:(index + 1) * batch_size]
                    # Train one theta (PMT) column at a time, pairing the
                    # angle with a fresh uniform noise channel.
                    for itheta in range(theta.shape[1]):
                        train_x = theta [:,itheta:itheta+1]
                        noise = np.random.uniform(-1, 1, (train_x.shape[0], 1))
                        input_x = np.concatenate ((train_x, noise), axis=-1)
                        train_y = n_pe_batch[:,itheta:itheta+1]
                        if np.any(np.isnan(train_x)): print('find Nan in train_x')
                        if np.any(np.isnan(train_y)): print('find Nan in train_y')
                        _, c = sess.run([optimizer,cost], feed_dict={x:input_x, y:train_y})
                        total_cost += c
            avg_cost = total_cost/(sum(Batch)*theta.shape[1])
            if epoch % 1 == 0:
                print('Epoch {0} | cost = {1:.4f}'.format(epoch,avg_cost))
            ### validation ############################
            print('Do validation')
            hit_time, n_pe, theta = load_data(validation_file)
            theta = theta.repeat(n_pe.shape[0],axis=0)
            cost_valid = 0
            for itheta in range(theta.shape[1]):
                valid_x = theta [:,itheta:itheta+1]
                noise = np.random.uniform(-1, 1, (valid_x.shape[0], 1))
                input_x = np.concatenate ((valid_x, noise), axis=-1)
                rate_pred, c_pred = sess.run([Pred_y,cost],feed_dict={x:input_x, y:n_pe[:,itheta:itheta+1]})
                #print('valid cost = {0:.4f}'.format(c_pred))
                cost_valid = cost_valid + c_pred/theta.shape[1]
            print('ave valid cost = {0:.4f}'.format(cost_valid))
            #### produce predicted data #################
            # Sample produceEvent predictions per distinct theta value and
            # overwrite outFileName with the latest epoch's result.
            print('Saving produced data')
            theta_list = list(set(theta[0,:]))
            theta_list.sort()
            print('theta_list=', len(theta_list))
            pred_n_pe = np.full((produceEvent, len(theta_list) ), 0 ,dtype=np.float32)#init
            for i in theta_list:
                ithe = np.full((produceEvent, 1 ), i ,dtype=np.float32)
                noise = np.random.uniform(-1, 1, (ithe.shape[0], 1))
                input_x = np.concatenate ((ithe, noise), axis=-1)
                y_pred = sess.run(Pred_y,feed_dict={x:input_x})
                y_pred = y_pred.reshape((-1,1))
                pred_n_pe[:,theta_list.index(i):theta_list.index(i)+1] = y_pred
            hf = h5py.File(outFileName, 'w')
            hf.create_dataset('pred_n_pe', data=pred_n_pe)
            hf.create_dataset('theta_set', data=np.array(theta_list))
            hf.close()
            print('Saved produced data %s'%outFileName)
        ############## Save the variables to disk.
        if False:
            save_path = saver.save(sess, "%s/model.ckpt"%(ckpt_path))
            print("Model saved in path: %s" % save_path)
    print('done')
| [
"[email protected]"
] | |
31be9df7d3cd9b20bfa38224354423ad59df2139 | 45c170fb0673deece06f3055979ece25c3210380 | /toontown/building/ToonInterior.py | ff17ab85cfeaca2646dd0331d92b7043f0f90da7 | [] | no_license | MTTPAM/PublicRelease | 5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f | 825f562d5021c65d40115d64523bb850feff6a98 | refs/heads/master | 2021-07-24T09:48:32.607518 | 2018-11-13T03:17:53 | 2018-11-13T03:17:53 | 119,129,731 | 2 | 6 | null | 2018-11-07T22:10:10 | 2018-01-27T03:43:39 | Python | UTF-8 | Python | false | false | 9,180 | py | #Embedded file name: toontown.building.ToonInterior
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.fsm import StateData
from direct.showbase import DirectObject
from direct.task import Task
from pandac.PandaModules import *
from otp.distributed.TelemetryLimiter import RotationLimitToH, TLGatherAllAvs
from toontown.hood import Place
from toontown.hood import ZoneUtil
from toontown.nametag import NametagGlobals
from toontown.toon import HealthForceAcknowledge
from toontown.toon import NPCForceAcknowledge
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
from toontown.toonbase.ToonBaseGlobal import *
class ToonInterior(Place.Place):
    """Place state machine for the interior of a toon building.

    Drives the local avatar through door/teleport transitions, the
    force-acknowledge (FA) dialog chain (trialer -> download -> NPC ->
    health), and interior activity states (quest, purchase, phone, ...).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('ToonInterior')

    def __init__(self, loader, parentFSMState, doneEvent):
        Place.Place.__init__(self, loader, doneEvent)
        self.dnaFile = 'phase_7/models/modules/toon_interior'
        self.isInterior = 1
        self.townBattle = None
        # Done-event names for the force-acknowledge dialogs.
        self.tfaDoneEvent = 'tfaDoneEvent'
        self.hfaDoneEvent = 'hfaDoneEvent'
        self.npcfaDoneEvent = 'npcfaDoneEvent'
        self.fsm = ClassicFSM.ClassicFSM('ToonInterior', [
            State.State('start', self.enterStart, self.exitStart,
                        ['doorIn', 'teleportIn', 'tutorial']),
            State.State('walk', self.enterWalk, self.exitWalk,
                        ['sit', 'stickerBook', 'doorOut', 'DFA', 'trialerFA',
                         'teleportOut', 'quest', 'purchase', 'phone',
                         'stopped', 'pet']),
            State.State('sit', self.enterSit, self.exitSit, ['walk']),
            State.State('stickerBook', self.enterStickerBook, self.exitStickerBook,
                        ['walk', 'DFA', 'trialerFA', 'sit', 'doorOut',
                         'teleportOut', 'quest', 'purchase', 'phone',
                         'stopped', 'pet']),
            State.State('trialerFA', self.enterTrialerFA, self.exitTrialerFA,
                        ['trialerFAReject', 'DFA']),
            State.State('trialerFAReject', self.enterTrialerFAReject, self.exitTrialerFAReject,
                        ['walk']),
            State.State('DFA', self.enterDFA, self.exitDFA,
                        ['DFAReject', 'HFA', 'NPCFA', 'teleportOut', 'doorOut']),
            State.State('DFAReject', self.enterDFAReject, self.exitDFAReject, ['walk']),
            State.State('NPCFA', self.enterNPCFA, self.exitNPCFA,
                        ['NPCFAReject', 'HFA', 'teleportOut']),
            State.State('NPCFAReject', self.enterNPCFAReject, self.exitNPCFAReject, ['walk']),
            State.State('HFA', self.enterHFA, self.exitHFA,
                        ['HFAReject', 'teleportOut', 'tunnelOut']),
            State.State('HFAReject', self.enterHFAReject, self.exitHFAReject, ['walk']),
            State.State('doorIn', self.enterDoorIn, self.exitDoorIn, ['walk']),
            State.State('doorOut', self.enterDoorOut, self.exitDoorOut, ['walk']),
            State.State('teleportIn', self.enterTeleportIn, self.exitTeleportIn, ['walk']),
            State.State('teleportOut', self.enterTeleportOut, self.exitTeleportOut, ['teleportIn']),
            State.State('quest', self.enterQuest, self.exitQuest, ['walk', 'doorOut']),
            State.State('tutorial', self.enterTutorial, self.exitTutorial, ['walk', 'quest']),
            State.State('purchase', self.enterPurchase, self.exitPurchase, ['walk', 'doorOut']),
            State.State('pet', self.enterPet, self.exitPet, ['walk']),
            State.State('phone', self.enterPhone, self.exitPhone, ['walk', 'doorOut']),
            State.State('stopped', self.enterStopped, self.exitStopped, ['walk', 'doorOut']),
            State.State('final', self.enterFinal, self.exitFinal, ['start'])],
            'start', 'final')
        self.parentFSMState = parentFSMState

    def load(self):
        """Load place assets and attach our FSM under the parent FSM state."""
        Place.Place.load(self)
        self.parentFSMState.addChild(self.fsm)

    def unload(self):
        """Detach the FSM, drop references and reclaim model/texture pools."""
        Place.Place.unload(self)
        self.parentFSMState.removeChild(self.fsm)
        del self.parentFSMState
        del self.fsm
        ModelPool.garbageCollect()
        TexturePool.garbageCollect()

    def enter(self, requestStatus):
        """Enter the interior: start music, hook door events, enter FSM."""
        self.zoneId = requestStatus['zoneId']
        self.fsm.enterInitialState()
        messenger.send('enterToonInterior')
        self.accept('doorDoneEvent', self.handleDoorDoneEvent)
        self.accept('DistributedDoor_doorTrigger', self.handleDoorTrigger)
        volume = requestStatus.get('musicVolume', 0.7)
        base.playMusic(self.loader.activityMusic, looping=1, volume=volume)
        self._telemLimiter = TLGatherAllAvs('ToonInterior', RotationLimitToH)
        NametagGlobals.setWant2dNametags(True)
        self.fsm.request(requestStatus['how'], [requestStatus])

    def exit(self):
        """Leave the interior: unhook events, stop music, drop the limiter."""
        self.ignoreAll()
        messenger.send('exitToonInterior')
        self._telemLimiter.destroy()
        del self._telemLimiter
        NametagGlobals.setWant2dNametags(False)
        self.loader.activityMusic.stop()

    def setState(self, state):
        """Request an FSM state transition by name."""
        self.fsm.request(state)

    def enterTutorial(self, requestStatus):
        """Start the tutorial variant: go straight to walk and iris in."""
        self.fsm.request('walk')
        base.localAvatar.b_setParent(ToontownGlobals.SPRender)
        globalClock.tick()
        base.transitions.irisIn()
        messenger.send('enterTutorialInterior')

    def exitTutorial(self):
        pass

    def doRequestLeave(self, requestStatus):
        """Begin the leave sequence via the trialer force-acknowledge check."""
        self.fsm.request('trialerFA', [requestStatus])

    def enterDFACallback(self, requestStatus, doneStatus):
        """Handle the download-FA result: proceed to NPCFA or reject."""
        self.dfa.exit()
        del self.dfa
        ds = doneStatus['mode']
        if ds == 'complete':
            self.fsm.request('NPCFA', [requestStatus])
        elif ds == 'incomplete':
            self.fsm.request('DFAReject')
        else:
            self.notify.error('Unknown done status for DownloadForceAcknowledge: ' + `doneStatus`)

    def enterNPCFA(self, requestStatus):
        """Show the NPC force-acknowledge dialog."""
        self.acceptOnce(self.npcfaDoneEvent, self.enterNPCFACallback, [requestStatus])
        self.npcfa = NPCForceAcknowledge.NPCForceAcknowledge(self.npcfaDoneEvent)
        self.npcfa.enter()

    def exitNPCFA(self):
        self.ignore(self.npcfaDoneEvent)

    def enterNPCFACallback(self, requestStatus, doneStatus):
        """Handle the NPC-FA result: continue leaving the same way we came in."""
        self.npcfa.exit()
        del self.npcfa
        if doneStatus['mode'] == 'complete':
            # Map the inbound transition to its matching outbound one.
            outHow = {'teleportIn': 'teleportOut',
                      'tunnelIn': 'tunnelOut',
                      'doorIn': 'doorOut'}
            self.fsm.request(outHow[requestStatus['how']], [requestStatus])
        elif doneStatus['mode'] == 'incomplete':
            self.fsm.request('NPCFAReject')
        else:
            self.notify.error('Unknown done status for NPCForceAcknowledge: ' + `doneStatus`)

    def enterNPCFAReject(self):
        self.fsm.request('walk')

    def exitNPCFAReject(self):
        pass

    def enterHFA(self, requestStatus):
        """Show the health force-acknowledge dialog."""
        self.acceptOnce(self.hfaDoneEvent, self.enterHFACallback, [requestStatus])
        self.hfa = HealthForceAcknowledge.HealthForceAcknowledge(self.hfaDoneEvent)
        self.hfa.enter(1)

    def exitHFA(self):
        self.ignore(self.hfaDoneEvent)

    def enterHFACallback(self, requestStatus, doneStatus):
        """Handle the health-FA result: continue leaving or reject."""
        self.hfa.exit()
        del self.hfa
        if doneStatus['mode'] == 'complete':
            outHow = {'teleportIn': 'teleportOut',
                      'tunnelIn': 'tunnelOut',
                      'doorIn': 'doorOut'}
            self.fsm.request(outHow[requestStatus['how']], [requestStatus])
        elif doneStatus['mode'] == 'incomplete':
            self.fsm.request('HFAReject')
        else:
            self.notify.error('Unknown done status for HealthForceAcknowledge: ' + `doneStatus`)

    def enterHFAReject(self):
        self.fsm.request('walk')

    def exitHFAReject(self):
        pass

    def enterTeleportIn(self, requestStatus):
        """Position the avatar for teleport arrival (petshops use a
        different spawn point), then run the shared teleport-in logic."""
        if ZoneUtil.isPetshop(self.zoneId):
            base.localAvatar.setPosHpr(0, 0, ToontownGlobals.FloorOffset, 45.0, 0.0, 0.0)
        else:
            base.localAvatar.setPosHpr(2.5, 11.5, ToontownGlobals.FloorOffset, 45.0, 0.0, 0.0)
        Place.Place.enterTeleportIn(self, requestStatus)

    def enterTeleportOut(self, requestStatus):
        Place.Place.enterTeleportOut(self, requestStatus, self.__teleportOutDone)

    def __teleportOutDone(self, requestStatus):
        """Decide where the teleport-out lands: back here, home estate, or
        hand the request back to the hood via the done event."""
        hoodId = requestStatus['hoodId']
        zoneId = requestStatus['zoneId']
        shardId = requestStatus['shardId']
        if hoodId == self.loader.hood.id and zoneId == self.zoneId and shardId == None:
            self.fsm.request('teleportIn', [requestStatus])
        elif hoodId == ToontownGlobals.MyEstate:
            self.getEstateZoneAndGoHome(requestStatus)
        else:
            self.doneStatus = requestStatus
            messenger.send(self.doneEvent)

    def goHomeFailed(self, task):
        """Fallback when the estate teleport fails: re-enter this interior."""
        self.notifyUserGoHomeFailed()
        self.ignore('setLocalEstateZone')
        self.doneStatus['avId'] = -1
        self.doneStatus['zoneId'] = self.getZoneId()
        self.fsm.request('teleportIn', [self.doneStatus])
        return Task.done

    def exitTeleportOut(self):
        Place.Place.exitTeleportOut(self)
| [
"[email protected]"
] | |
4cd3b162fb3f2dcf97349017329fb4abb9623ee9 | 327d83545dc9f309de74f4e670cc7a92db954f17 | /python-alg/algorithms/models/node.py | 7c26f409dc734939ce35dd032ac69f064a338444 | [] | no_license | felipe-basina/algoritmos | eb936c45aaa6ae45c514d43e31cabbad5d8ee874 | a4ee5b30801f54f42f71f23c963781d7bed899eb | refs/heads/master | 2023-06-10T17:33:56.420717 | 2021-06-24T01:17:56 | 2021-06-24T01:17:56 | 360,247,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | class Node:
def __init__(self, data):
self.data = data
self.next = None # References to another node
def __str__(self):
return "[data={}:next={}] ".format(self.data, self.next)
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, data):
node = Node(data)
if not self.head:
self.head = node
else:
self.tail.next = node
self.tail = node
| [
"[email protected]"
] | |
1462fd76543b2b6609f1a9102b0c572db25685d5 | a1080c28573e1a59ec418ad3b0b0bf18e035dc41 | /LeetCode/Largest Number_366617438.py | 72e6404214711f50d16b2cd8c159aa203f976682 | [] | no_license | AumkarG/Algorithms-and-Data-Structures | 8c6fc21218897d2361fed1512dc6bb13eabd8842 | 03603ad579564ef213c58edd57cb8753cf8f86ba | refs/heads/master | 2023-03-04T09:48:25.167519 | 2021-02-14T17:16:18 | 2021-02-14T17:16:18 | 330,424,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py |
def custom_compare(x, y):
return int(x+y)-int(y+x)
class Solution(object):
def largestNumber(self, nums):
nums=[str(i) for i in nums]
nums=sorted(nums, cmp=custom_compare,reverse=True)
if(nums[0]=='0'):
return '0'
s=""
for i in nums:
s+=i
return s
| [
"aumkaar,[email protected]"
] | aumkaar,[email protected] |
3fc636e9d82d6fb11367eecaec1349a89d61de26 | c6969585b7edec377a389da46825a7389b5e8e12 | /examples/twisted/websocket/echo_wsgi/server.py | 55d5c779b9fc775e710c53630db196a5f4ba4dc2 | [
"MIT"
] | permissive | hzruandd/AutobahnPython | 38b35a9d413d53dde9271ec436ccd3d8d07da74e | 942d27eb6a87084cb8964fd2c9abaae67807be13 | refs/heads/master | 2021-01-16T21:43:49.398537 | 2015-09-13T19:16:21 | 2015-09-13T19:16:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,335 | py | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import uuid
import sys
from twisted.python import log
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.wsgi import WSGIResource
from flask import Flask, render_template
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource, \
WSGIRootResource, \
HTTPChannelHixie76Aware
##
# Our WebSocket Server protocol
##
class EchoServerProtocol(WebSocketServerProtocol):
    """WebSocket protocol that echoes every received message back verbatim."""

    def onMessage(self, payload, isBinary):
        # Mirror the frame straight back to the sender, preserving its
        # text/binary mode.
        self.sendMessage(payload, isBinary=isBinary)
##
# Our WSGI application .. in this case Flask based
##
app = Flask(__name__)
# Random per-process secret key; Flask sessions will not survive a restart.
app.secret_key = str(uuid.uuid4())


@app.route('/')
def page_home():
    """Serve the site's index page (templates/index.html)."""
    return render_template('index.html')
if __name__ == "__main__":
   # Verbose Twisted logging is enabled when started as "python app.py debug".
   if len(sys.argv) > 1 and sys.argv[1] == 'debug':
      log.startLogging(sys.stdout)
      debug = True
   else:
      debug = False
   app.debug = debug
   # NOTE: the original code called log.startLogging(sys.stdout) a second time
   # here when debug was set; startLogging registers an additional observer on
   # every call, so each message was logged twice.  The redundant call was
   # removed.
   ##
   # create a Twisted Web resource for our WebSocket server
   ##
   wsFactory = WebSocketServerFactory(u"ws://127.0.0.1:8080",
                                      debug=debug,
                                      debugCodePaths=debug)
   wsFactory.protocol = EchoServerProtocol
   wsFactory.setProtocolOptions(allowHixie76=True)  # needed if Hixie76 is to be supported
   wsResource = WebSocketResource(wsFactory)
   ##
   # create a Twisted Web WSGI resource for our Flask server
   ##
   wsgiResource = WSGIResource(reactor, reactor.getThreadPool(), app)
   ##
   # create a root resource serving everything via WSGI/Flask, but
   # the path "/ws" served by our WebSocket stuff
   ##
   rootResource = WSGIRootResource(wsgiResource, {'ws': wsResource})
   ##
   # create a Twisted Web Site and run everything
   ##
   site = Site(rootResource)
   site.protocol = HTTPChannelHixie76Aware  # needed if Hixie76 is to be supported
   reactor.listenTCP(8080, site)
   reactor.run()
| [
"[email protected]"
] | |
ad9332db347bf438e8ee4f9c260de6368599a431 | 223c05418090665e9aedb754783cbb55bc3555c1 | /277-Find-the-Celebrity.py | aa6318bbf4b60a4419c8c132d0522012d1b958c3 | [] | no_license | dkrotx/leetcode | 227639030aa62c80adf9412aa50cba4e4ae13034 | 501c347004c140a82a95461e1dbcef6775b3d9da | refs/heads/master | 2021-06-05T23:37:10.369386 | 2019-12-04T17:59:14 | 2019-12-04T17:59:14 | 111,242,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | # The knows API is already defined for you.
# @param a, person a
# @param b, person b
# @return a boolean, whether a knows b
# def knows(a, b):
class Solution(object):
    def findCelebrity(self, n):
        """Return the index of the celebrity among ``n`` people, or -1.

        A celebrity is known by every other person but knows nobody.
        Pairwise elimination narrows the field to at most one candidate,
        which is then verified against the full definition.

        :type n: int
        :rtype: int
        """
        guests = list(range(n))
        # Elimination: in any pair at least one person cannot be the
        # celebrity, so every comparison discards one or both guests.
        while len(guests) >= 2:
            a = guests.pop()
            b = guests.pop()
            if knows(a, b):
                # a knows someone -> a is out; b survives only if b does
                # not know a.
                if not knows(b, a):
                    guests.append(b)
            else:
                # a does not know b -> b is not known by everyone -> b is
                # out; a survives only if b knows a.
                if knows(b, a):
                    guests.append(a)
        if guests:
            x = guests[0]
            # Verify the surviving candidate against the full definition:
            # x must know nobody AND everybody must know x.  (The original
            # code checked only the first half, so a candidate whom some
            # person did not know could be returned incorrectly.)
            for i in range(n):
                if x == i:
                    continue
                if knows(x, i) or not knows(i, x):
                    return -1
            return x
        return -1
| [
"[email protected]"
] | |
6ea3185e6c1f56dd935d1047fdc2829d8e96362c | 62b736eff115a6d9cfd323c1b396c94f8a9302fe | /tkinter/simple_grid_window.py | 3c50166b69166d56d82b070fb5b1d328c7360f03 | [] | no_license | ccnelson/Python | dccbb9a2c00f8124216f2f4d4202b94907134083 | ebd0c401b23aee7467332d692588f02cda0ff935 | refs/heads/master | 2023-04-02T09:09:55.716686 | 2021-03-28T18:16:15 | 2021-03-28T18:16:15 | 184,681,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | import tkinter as tk
root = tk.Tk()
tk.Label(root, text="Username").grid(row=0, sticky=tk.W)
tk.Label(root, text="Password").grid(row=1, sticky=tk.W)
tk.Entry(root).grid(row=0, column=1, sticky=tk.E)
tk.Entry(root).grid(row=1, column=1, sticky=tk.E)
tk.Button(root, text="Login").grid(row=2, column=1, sticky=tk.E)
root.mainloop()
| [
"[email protected]"
] | |
03ac8e612ab1fe00f08e2288e8e207e0a263bd26 | 49536aafb22a77a6caf249c7fadef46d63d24dfe | /tensorflow/tensorflow/contrib/slim/python/slim/data/dataset_data_provider_test.py | 9b9f6d8b299b60cd67a647e0dc0d0302815a82fb | [
"Apache-2.0"
] | permissive | wangzhi01/deeplearning-1 | 4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d | 46ab82253d956953b8aa98e97ceb6cd290e82288 | refs/heads/master | 2020-05-28T03:14:55.687567 | 2018-09-12T16:52:09 | 2018-09-12T16:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,177 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.dataset_data_provider."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.contrib.slim.python.slim import queues
from tensorflow.contrib.slim.python.slim.data import dataset
from tensorflow.contrib.slim.python.slim.data import dataset_data_provider
from tensorflow.contrib.slim.python.slim.data import test_utils
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
def _resize_image(image, height, width):
  """Bilinearly resize a single 3-D image tensor to (height, width).

  A batch dimension is added and removed because resize_bilinear
  operates on 4-D [batch, height, width, channels] tensors.
  """
  image = array_ops.expand_dims(image, 0)
  image = image_ops.resize_bilinear(image, [height, width])
  return array_ops.squeeze(image, [0])
def _create_tfrecord_dataset(tmpdir):
  """Write one TFRecord file of test examples under *tmpdir* and wrap it.

  Returns a slim Dataset whose decoder exposes 'image' (JPEG-encoded by
  default) and 'label' (a single int64) items.  num_samples is hard-coded
  to 100 to match what test_utils.create_tfrecord_files generates.
  """
  if not gfile.Exists(tmpdir):
    gfile.MakeDirs(tmpdir)
  data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)
  keys_to_features = {
      'image/encoded':
          parsing_ops.FixedLenFeature(
              shape=(), dtype=dtypes.string, default_value=''),
      'image/format':
          parsing_ops.FixedLenFeature(
              shape=(), dtype=dtypes.string, default_value='jpeg'),
      'image/class/label':
          parsing_ops.FixedLenFeature(
              shape=[1],
              dtype=dtypes.int64,
              default_value=array_ops.zeros(
                  [1], dtype=dtypes.int64))
  }
  items_to_handlers = {
      'image': tfexample_decoder.Image(),
      'label': tfexample_decoder.Tensor('image/class/label'),
  }
  decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
                                               items_to_handlers)
  return dataset.Dataset(
      data_sources=data_sources,
      reader=io_ops.TFRecordReader,
      decoder=decoder,
      num_samples=100,
      items_to_descriptions=None)
class DatasetDataProviderTest(test.TestCase):
  """Tests for slim's DatasetDataProvider over a TFRecord-backed dataset."""

  def testTFRecordDataset(self):
    """Fetching record_key, image and label together yields sane values."""
    dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
                                                       'tfrecord_dataset'))
    height = 300
    width = 280
    with self.test_session():
      test_dataset = _create_tfrecord_dataset(dataset_dir)
      provider = dataset_data_provider.DatasetDataProvider(test_dataset)
      key, image, label = provider.get(['record_key', 'image', 'label'])
      image = _resize_image(image, height, width)
      with session.Session('') as sess:
        with queues.QueueRunners(sess):
          key, image, label = sess.run([key, image, label])
    # record_key has the form "<source file>:<record index>".
    split_key = key.decode('utf-8').split(':')
    self.assertEqual(2, len(split_key))
    self.assertEqual(test_dataset.data_sources[0], split_key[0])
    self.assertTrue(split_key[1].isdigit())
    self.assertListEqual([height, width, 3], list(image.shape))
    self.assertListEqual([1], list(label.shape))

  def testTFRecordSeparateGetDataset(self):
    """Items requested via separate get() calls are still consistent."""
    dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
                                                       'tfrecord_separate_get'))
    height = 300
    width = 280
    with self.test_session():
      provider = dataset_data_provider.DatasetDataProvider(
          _create_tfrecord_dataset(dataset_dir))
    [image] = provider.get(['image'])
    [label] = provider.get(['label'])
    image = _resize_image(image, height, width)

    with session.Session('') as sess:
      with queues.QueueRunners(sess):
        image, label = sess.run([image, label])
    self.assertListEqual([height, width, 3], list(image.shape))
    self.assertListEqual([1], list(label.shape))

  def testConflictingRecordKeyItem(self):
    """record_key must not collide with an existing item name."""
    dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
                                                       'tfrecord_dataset'))
    with self.test_session():
      with self.assertRaises(ValueError):
        dataset_data_provider.DatasetDataProvider(
            _create_tfrecord_dataset(dataset_dir), record_key='image')
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
2d7a4891643b36d60a90eca1ad6434ec699c9d0f | d402d51fed36f5f2b4f8bd18d8d60f38963f61c7 | /model/CNN/resnet.py | 0d0b8fdf5e8a235176e4c0a89d11e266f8448060 | [] | no_license | bruceyang2012/imageclassification | 9a8751bb0924d0a952e3e4d65ae47f5ad7514cf7 | 1a09a46089f0fb57dff67dd5c492b8ae0641689d | refs/heads/master | 2023-06-17T10:41:46.402216 | 2021-07-12T07:48:27 | 2021-07-12T07:48:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,510 | py | import torch
import torch.nn as nn
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a bias-free 3x3 convolution.

    Padding is set equal to the dilation so that, at stride 1, the
    spatial size of the input is preserved.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Build a bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convolutions plus a skip path.

    Used by ResNet-18/34.  `downsample`, when given, adapts the identity
    branch to the main branch's shape (stride/channel changes).
    """
    # Output channels = planes * expansion (no channel expansion here).
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # BasicBlock does not implement the ResNeXt/wide variants.
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Run conv-bn-relu-conv-bn, add the (possibly downsampled) input, relu."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce, 3x3, 1x1 expand, plus skip path.

    Used by ResNet-50/101/152 and the ResNeXt/wide variants (via `groups`
    and `base_width`).
    """
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner width scales with base_width and groups (ResNeXt / wide-ResNet).
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Run the 1x1-3x3-1x1 stack, add the (possibly downsampled) input, relu."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """Generic ResNet backbone (stem + 4 stages + global pool + FC head).

    `block` is BasicBlock or Bottleneck; `layers` gives the number of
    blocks per stage.  Expects 3-channel image input (see self.conv1).
    """
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool (overall stride 4).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He initialization for convolutions; unit/zero affine for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one stage of `blocks` residual blocks.

        The first block may downsample (stride/channel change, or dilation
        when `dilate` is set); the remaining blocks keep the shape.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade the stage's stride for dilation (keeps resolution).
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut to match shape on the identity path.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def _forward_impl(self, x):
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
    def forward(self, x):
        """Return class logits of shape (batch, num_classes)."""
        return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Construct a ResNet of the given architecture.

    NOTE: weight loading is commented out below, so `pretrained` and
    `progress` are currently ignored and a randomly-initialized model is
    always returned.
    """
    model = ResNet(block, layers, **kwargs)
    # if pretrained:
    #     # state_dict = load_state_dict_from_url(model_urls[arch],
    #     #                                       progress=progress)
    #     # resnet 50 pth
    #     state_dict = torch.load("weights/resnet50.pth")
    #     model.load_state_dict(state_dict)
    return model
def resnet18(pretrained=False, progress=True, **kwargs):
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently a no-op: loading is commented out in ``_resnet``)
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                   **kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
    r"""ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently a no-op: loading is commented out in ``_resnet``)
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
    r"""ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently a no-op: loading is commented out in ``_resnet``)
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
    r"""ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently a no-op: loading is commented out in ``_resnet``)
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
                   **kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
    r"""ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently a no-op: loading is commented out in ``_resnet``)
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
                   **kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently a no-op: loading is commented out in ``_resnet``)
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # 32 groups of width 4 in each bottleneck (the "32x4d" in the name).
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently a no-op: loading is commented out in ``_resnet``)
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # 32 groups of width 8 in each bottleneck (the "32x8d" in the name).
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently a no-op: loading is commented out in ``_resnet``)
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (currently a no-op: loading is commented out in ``_resnet``)
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
if __name__ == "__main__":
image = torch.randn(1, 3, 224, 224)
model = resnet18(pretrained=True, num_classes=67)
# image = image.cuda()
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model = model.to(device)
# output = model(image)
# print(output.size())
print(model)
| [
"[email protected]"
] | |
ae5634dc045f83121134040f072c8b310c823b31 | 9f03e20a34599816358891adc6bcce29cd48aed6 | /test/test_config.py | 7b1fa6d485b48999903d072b0fe69863aaa2e6d9 | [
"MIT"
] | permissive | projg2/flaggie | 9917adf783dca7c18471ad8f822a35e8afec351f | 4485aed08c4e33347d88736fdd4a9914b8349908 | refs/heads/master | 2023-08-08T12:20:58.729741 | 2023-07-27T14:15:01 | 2023-07-27T14:15:01 | 72,996,715 | 14 | 2 | MIT | 2023-03-20T06:05:47 | 2016-11-06T14:40:07 | Python | UTF-8 | Python | false | false | 4,723 | py | # (c) 2022-2023 Michał Górny
# Released under the terms of the MIT license
import dataclasses
import os
import stat
import pytest
from flaggie.config import (TokenType, ConfigLine, find_config_files,
parse_config_file, dump_config_line,
ConfigFile, read_config_files, save_config_files,
)
@pytest.mark.parametrize(
    "layout,expected",
    [([], ["package.use/99local.conf"]),
     (["package.use"], None),
     (["package.use/a.conf", "package.use/b.conf"], None),
     (["package.use/a/foo.conf", "package.use/b/foo.conf"], None),
     # even though "a+" sorts before "a/", directories take precedence
     (["package.use/a/foo.conf", "package.use/a+"], None),
     # hidden and backup files should be ignored
     (["package.use/.foo", "package.use/foo.conf", "package.use/foo.conf~"],
      ["package.use/foo.conf"]),
     # corner case: package.use yielding no valid files
     (["package.use/.foo"], ["package.use/99local.conf"]),
     ])
def test_find_config(tmp_path, layout, expected):
    """find_config_files() discovers USE-flag files under etc/portage.

    `expected is None` means the discovered paths should exactly match the
    created layout.
    """
    confdir = tmp_path / "etc/portage"
    # Materialize the requested layout as empty files.
    for f in layout:
        path = confdir / f
        path.parent.mkdir(parents=True, exist_ok=True)
        with open(path, "wb"):
            pass
    if expected is None:
        expected = layout
    assert find_config_files(tmp_path, TokenType.USE_FLAG
                             ) == [confdir / x for x in expected]
# Raw lines of a representative package.use-style file: comments, blanks,
# plain flags, grouped (PREFIX:) flags, and inline trailing comments.
TEST_CONFIG_FILE = [
    "#initial comment\n",
    " # comment with whitespace\n",
    "\n",
    "*/* foo bar baz # global flags\n",
    "*/* FROBNICATE_TARGETS: frob1 frob2\n",
    " dev-foo/bar weird#flag other # actual comment # more comment\n",
    "dev-foo/baz mixed LONG: too EMPTY:\n"
]
# The ConfigLine objects parse_config_file() is expected to produce for the
# raw lines above, in order.
PARSED_TEST_CONFIG_FILE = [
    ConfigLine(comment="initial comment"),
    ConfigLine(comment=" comment with whitespace"),
    ConfigLine(),
    ConfigLine("*/*", ["foo", "bar", "baz"], [], " global flags"),
    ConfigLine("*/*", [], [("FROBNICATE_TARGETS", ["frob1", "frob2"])]),
    ConfigLine("dev-foo/bar", ["weird#flag", "other"], [],
               " actual comment # more comment"),
    ConfigLine("dev-foo/baz", ["mixed"], [("LONG", ["too"]), ("EMPTY", [])]),
]
# Attach the originating raw line to every parsed line, as the parser would.
for raw_line, line in zip(TEST_CONFIG_FILE, PARSED_TEST_CONFIG_FILE):
    line._raw_line = raw_line
def test_parse_config_file():
    """Parsing the fixture file yields exactly the expected ConfigLines."""
    assert list(parse_config_file(TEST_CONFIG_FILE)) == PARSED_TEST_CONFIG_FILE
def test_dump_config_line():
    """Parse-then-dump round-trips every line (modulo leading whitespace)."""
    assert [dump_config_line(x) for x in parse_config_file(TEST_CONFIG_FILE)
            ] == [x.lstrip(" ") for x in TEST_CONFIG_FILE]
def test_read_config_files(tmp_path):
    """read_config_files() parses a populated file and an empty one."""
    with open(tmp_path / "config", "w") as f:
        f.write("".join(TEST_CONFIG_FILE))
    with open(tmp_path / "config2", "w") as f:
        pass
    assert list(read_config_files([tmp_path / "config", tmp_path / "config2"])
                ) == [
        ConfigFile(tmp_path / "config", PARSED_TEST_CONFIG_FILE),
        ConfigFile(tmp_path / "config2", []),
    ]
def test_save_config_files_no_modification(tmp_path):
    """Saving unmodified config files must not create/touch any file."""
    config_files = [
        ConfigFile(tmp_path / "config", PARSED_TEST_CONFIG_FILE),
        ConfigFile(tmp_path / "config2", []),
    ]
    save_config_files(config_files)
    assert all(not config_file.path.exists() for config_file in config_files)
def invalidate_config_lines(lines: list[ConfigLine],
                            *line_nos: int,
                            ) -> list[ConfigLine]:
    """Return a copy of *lines* where each given index holds an
    invalidated clone of the original line."""
    result = list(lines)
    for line_no in line_nos:
        clone = dataclasses.replace(result[line_no])
        clone.invalidate()
        result[line_no] = clone
    return result
@pytest.mark.parametrize("write", [False, True])
def test_save_config_files(tmp_path, write):
config_files = [
ConfigFile(tmp_path / "config",
invalidate_config_lines(PARSED_TEST_CONFIG_FILE, 1, 5),
modified=True),
ConfigFile(tmp_path / "config2",
[ConfigLine("dev-foo/bar", ["new"], [])],
modified=True),
ConfigFile(tmp_path / "config3", []),
]
for conf in config_files:
with open(conf.path, "w") as f:
os.fchmod(f.fileno(), 0o400)
f.write("<original content>")
save_config_files(config_files,
confirm_cb=lambda orig_file, temp_file: write)
expected = ["<original content>" for _ in config_files]
if write:
expected[:2] = [
"".join(x.lstrip(" ") for x in TEST_CONFIG_FILE),
"dev-foo/bar new\n",
]
assert [conf.path.read_text() for conf in config_files] == expected
assert [stat.S_IMODE(os.stat(conf.path).st_mode) for conf in config_files
] == [0o400 for _ in config_files]
| [
"[email protected]"
] | |
00fe334671f8b26e7570ae945446b1944bf41d48 | 9893602fdad77858441c08de15980174e1bd3216 | /examples/gym/tests/test_mp_speed.py | ca36a13a6947aaba69b88c87289009cf7bab731f | [
"MIT"
] | permissive | batermj/TensorArtist | b61a1fa71325b7dc538318160a0924e1b3d5c3d5 | 7654eb026f6d87f64e28ca152d006ef7625b0f45 | refs/heads/master | 2020-03-29T02:28:59.101255 | 2017-11-01T12:37:49 | 2017-11-01T12:37:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | # -*- coding:utf8 -*-
# File : test_mp_speed.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 10/08/2017
#
# This file is part of TensorArtist.
from tartist import image
from tartist.app import rl
import time
import multiprocessing.pool as mppool
def make_player(dump_dir=None):
    """Build an Enduro-v0 environment wrapped for the benchmark.

    Wrapping: states resized to 84x84 (nearest-neighbor), stacked as
    4-frame history, episodes capped at 4000 steps.  `dump_dir`, when
    given, is forwarded to GymRLEnviron for episode dumps.
    """
    def resize_state(s):
        return image.resize(s, (84, 84), interpolation='NEAREST')
    p = rl.GymRLEnviron('Enduro-v0', dump_dir=dump_dir)
    p = rl.MapStateProxyRLEnviron(p, resize_state)
    p = rl.HistoryFrameProxyRLEnviron(p, 4)
    p = rl.LimitLengthProxyRLEnviron(p, 4000)
    return p
def actor(s):
    """Fixed benchmark policy: ignore the observed state, always act 1."""
    return 1
def worker(i):
    """Play one episode with a fresh environment and return its length.

    Args:
        i: worker index supplied by pool.map; unused.  (The original code
           reused ``i`` as the loop variable, shadowing the parameter.)
    """
    player = make_player()
    total_length = 0
    for _ in range(1):  # one episode per worker
        player.play_one_episode(func=actor)
        total_length += player.stats['length'][-1]
    return total_length
def test_mp():
    """Benchmark: 4 workers in separate processes, report total length/time."""
    pool = mppool.Pool(4)
    start_time = time.time()
    lengths = pool.map(worker, range(4))
    finish_time = time.time()
    print('Multiprocessing: total_length={}, time={:.2f}s.'.format(sum(lengths), finish_time - start_time))
def test_mt():
    """Benchmark: 4 workers in threads (GIL-bound), report total length/time."""
    pool = mppool.ThreadPool(4)
    start_time = time.time()
    lengths = pool.map(worker, range(4))
    finish_time = time.time()
    print('Multithreading: total_length={}, time={:.2f}s.'.format(sum(lengths), finish_time - start_time))
if __name__ == '__main__':
    # Run both benchmarks back to back for a direct comparison.
    test_mp()
    test_mt()
| [
"[email protected]"
] | |
38562e9aaea1b41c2e4b85cc909df95320520890 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /e8TFAMbTTaEr7JSgd_24.py | 1ca289103b566112414504dfb983bcf3e86bd3fb | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py |
def left_digit(num):
    """Return the leftmost decimal digit of the string *num* as an int.

    Args:
        num: String to scan.

    Returns:
        The first character that is a decimal digit, converted to int,
        or None when the string contains no digit (same as the original
        implicit fall-through).
    """
    # Iterate the string directly instead of copying it into a list, and
    # test with str.isdecimal() instead of using try/except int() as
    # per-character control flow.
    for char in num:
        if char.isdecimal():
            return int(char)
    return None
| [
"[email protected]"
] | |
1440e3fdc04667cea54daecb56015389ec30e3d1 | 1e0a8a929f8ea69e476d8a8c5f3455aaf5317de6 | /tests/app/crud/test_dog_crud.py | afd0b8d1e8870ae8f2425c8541e38e56329efd68 | [
"MIT"
] | permissive | jearistiz/guane-intern-fastapi | aa41400fa22076111e96be695fde0a1ff6f118d0 | 269adc3ee6a78a262b4e19e7df291fd920fae2e1 | refs/heads/master | 2023-06-25T08:58:03.729614 | 2023-06-11T15:28:59 | 2023-06-11T15:28:59 | 370,229,796 | 63 | 9 | MIT | 2021-06-11T01:28:52 | 2021-05-24T04:45:23 | Python | UTF-8 | Python | false | false | 625 | py | from sqlalchemy.orm import Session
from app import crud
from mock_data.db_test_data import adopted_dogs_dicts
from tests.utils.handle_db_test import HandleDBTest
from tests.utils.parse_dict import update_dict_fmt_item
class TestDogCrud(HandleDBTest):
    """CRUD-layer tests for dog queries against the test database."""
    def test_get_adopter(self, db: Session):
        """Every adopted dog returned by crud matches a mock fixture row.

        NOTE(review): despite the name "adopter", this exercises
        crud.dog.get_adopted — presumably a naming leftover; confirm.
        """
        adopted_dogs_out = crud.dog.get_adopted(db)
        for adopted_dog_out in adopted_dogs_out:
            adopted_dog_dict = adopted_dog_out._asdict()
            # 'id' is DB-generated and create_date needs string formatting
            # before comparing against the static mock dicts.
            adopted_dog_dict.pop('id')
            update_dict_fmt_item(adopted_dog_dict, 'create_date', str)
            assert adopted_dog_dict in adopted_dogs_dicts
| [
"[email protected]"
] | |
b1101dc270e2d33a82dcf6c6848da1264ae388c2 | 456433ac78b70cb8ae076ae166a85e349f181d7f | /systems/KURSSKLAD/KURSTERM/DOCFREESELECT/templates/task.py | f70b8d39f033caef223f1634035ffd6063851dbf | [] | no_license | shybkoi/WMS-Demo | 854c1679b121c68323445b60f3992959f922be8d | 2525559c4f56654acfbc21b41b3f5e40387b89e0 | refs/heads/master | 2021-01-23T01:51:20.074825 | 2017-03-23T11:51:18 | 2017-03-23T11:51:18 | 85,937,726 | 0 | 0 | null | null | null | null | WINDOWS-1251 | Python | false | false | 18,895 | py | #!/usr/bin/env python
# -*- coding: cp1251 -*-
##################################################
## DEPENDENCIES
import sys
import os
import os.path
from os.path import getmtime, exists
import time
import types
import __builtin__
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import DummyTransaction
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from systems.KURSSKLAD.cheetahutils import viewQuantity
from systems.KURSSKLAD.KURSTERM.templates.main import main
##################################################
## MODULE CONSTANTS
try:
True, False
except NameError:
True, False = (1==1), (1==0)
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.0rc8'
__CHEETAH_versionTuple__ = (2, 0, 0, 'candidate', 8)
__CHEETAH_genTime__ = 1482336168.4860001
__CHEETAH_genTimestamp__ = 'Wed Dec 21 18:02:48 2016'
__CHEETAH_src__ = 'systems\\KURSSKLAD\\KURSTERM\\DOCFREESELECT\\templates\\task.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Dec 21 09:10:13 2016'
__CHEETAH_docstring__ = 'Autogenerated by CHEETAH: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class task(main):
##################################################
## CHEETAH GENERATED METHODS
    def __init__(self, *args, **KWs):
        # Cheetah-generated constructor: delegate to the base template class,
        # then run the one-time instance initialisation, forwarding only the
        # keyword arguments Cheetah's Template machinery understands.
        main.__init__(self, *args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)
def mainData(self, **KWS):
## CHEETAH: generated from #def mainData at line 6, col 1.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write('''
''')
if False:
_('От')
_v = VFFSL(SL,"_",False)('От') # "$_('\xce\xf2')" on line 8, col 4
if _v is not None: write(_filter(_v, rawExpr="$_('\xce\xf2')")) # from line 8, col 4.
write(''': <b><u>''')
_v = VFFSL(SL,"FROMNAME",True) # '$FROMNAME' on line 8, col 20
if _v is not None: write(_filter(_v, rawExpr='$FROMNAME')) # from line 8, col 20.
write('''</u></b><br>
''')
if VFFSL(SL,"varExists",False)('$THROUGHNAME') and VFFSL(SL,"THROUGHNAME",True): # generated from line 9, col 4
write(''' ''')
if False:
_('Через')
_v = VFFSL(SL,"_",False)('Через') # "$_('\xd7\xe5\xf0\xe5\xe7')" on line 10, col 5
if _v is not None: write(_filter(_v, rawExpr="$_('\xd7\xe5\xf0\xe5\xe7')")) # from line 10, col 5.
write(''': <b>''')
_v = VFFSL(SL,"THROUGHNAME",True) # '$THROUGHNAME' on line 10, col 21
if _v is not None: write(_filter(_v, rawExpr='$THROUGHNAME')) # from line 10, col 21.
write('''</b><br>
''')
if VFFSL(SL,"TONAME",True): # generated from line 12, col 4
write(''' ''')
if False:
_('Кому')
_v = VFFSL(SL,"_",False)('Кому') # "$_('\xca\xee\xec\xf3')" on line 13, col 5
if _v is not None: write(_filter(_v, rawExpr="$_('\xca\xee\xec\xf3')")) # from line 13, col 5.
write(''': ''')
_v = VFFSL(SL,"TONAME",True) # '$TONAME' on line 13, col 17
if _v is not None: write(_filter(_v, rawExpr='$TONAME')) # from line 13, col 17.
write('''<br>
''')
write(''' <b>''')
_orig_filter_65385326 = _filter
filterName = 'DateFilter'
if self._CHEETAH__filters.has_key("DateFilter"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"DOCDATE",True) # '$DOCDATE' on line 15, col 25
if _v is not None: write(_filter(_v, rawExpr='$DOCDATE')) # from line 15, col 25.
_filter = _orig_filter_65385326
write('''</b> \xb9<b>''')
_v = VFFSL(SL,"DOCNUM",True) # '$DOCNUM' on line 15, col 54
if _v is not None: write(_filter(_v, rawExpr='$DOCNUM')) # from line 15, col 54.
write('''</b>
''')
if VFFSL(SL,"varExists",False)('$ALGORITHM') and VFFSL(SL,"ALGORITHM",True): # generated from line 16, col 2
write('''\t\t<u>(''')
_v = VFFSL(SL,"ALGORITHM",True) # '$ALGORITHM' on line 17, col 7
if _v is not None: write(_filter(_v, rawExpr='$ALGORITHM')) # from line 17, col 7.
write(''')</u>
''')
write('''\t<br>
<form action="dfsTaskScan">
<input type="hidden" name="id" value="''')
_v = VFFSL(SL,"TID",True) # '$TID' on line 21, col 47
if _v is not None: write(_filter(_v, rawExpr='$TID')) # from line 21, col 47.
write('''">
''')
if False:
_('ШК')
_v = VFFSL(SL,"_",False)('ШК') # "$_('\xd8\xca')" on line 22, col 9
if _v is not None: write(_filter(_v, rawExpr="$_('\xd8\xca')")) # from line 22, col 9.
write(''': <input type=\'text\' name=\'barcode\' id=\'barcode:scan\' title="''')
if False:
_('Товар')
_v = VFFSL(SL,"_",False)('Товар') # "$_('\xd2\xee\xe2\xe0\xf0')" on line 22, col 78
if _v is not None: write(_filter(_v, rawExpr="$_('\xd2\xee\xe2\xe0\xf0')")) # from line 22, col 78.
write('''">
</form>
''')
if VFFSL(SL,"showList",True)=='0': # generated from line 24, col 5
write(''' <a href="dfsTask?id=''')
_v = VFFSL(SL,"TID",True) # '$TID' on line 25, col 29
if _v is not None: write(_filter(_v, rawExpr='$TID')) # from line 25, col 29.
write('''&showList=1">''')
if False:
_('Товары')
_v = VFFSL(SL,"_",False)('Товары') # "$_('\xd2\xee\xe2\xe0\xf0\xfb')" on line 25, col 46
if _v is not None: write(_filter(_v, rawExpr="$_('\xd2\xee\xe2\xe0\xf0\xfb')")) # from line 25, col 46.
write('''</a>
''')
else: # generated from line 26, col 5
write(''' <a href="dfsTask?id=''')
_v = VFFSL(SL,"TID",True) # '$TID' on line 27, col 29
if _v is not None: write(_filter(_v, rawExpr='$TID')) # from line 27, col 29.
write('''&showList=0">''')
if False:
_('Скрыть')
_v = VFFSL(SL,"_",False)('Скрыть') # "$_('\xd1\xea\xf0\xfb\xf2\xfc')" on line 27, col 46
if _v is not None: write(_filter(_v, rawExpr="$_('\xd1\xea\xf0\xfb\xf2\xfc')")) # from line 27, col 46.
write('''</a>
''')
if VFFSL(SL,"showList",True)!='11': # generated from line 28, col 7
write(''' <a href="dfsTask?id=''')
_v = VFFSL(SL,"TID",True) # '$TID' on line 29, col 29
if _v is not None: write(_filter(_v, rawExpr='$TID')) # from line 29, col 29.
write('''&showList=11">''')
if False:
_('Различия')
_v = VFFSL(SL,"_",False)('Различия') # "$_('\xd0\xe0\xe7\xeb\xe8\xf7\xe8\xff')" on line 29, col 47
if _v is not None: write(_filter(_v, rawExpr="$_('\xd0\xe0\xe7\xeb\xe8\xf7\xe8\xff')")) # from line 29, col 47.
write('''</a>
''')
else: # generated from line 30, col 7
write(''' <a href="dfsTask?id=''')
_v = VFFSL(SL,"TID",True) # '$TID' on line 31, col 29
if _v is not None: write(_filter(_v, rawExpr='$TID')) # from line 31, col 29.
write('''&showList=10">''')
if False:
_('Все')
_v = VFFSL(SL,"_",False)('Все') # "$_('\xc2\xf1\xe5')" on line 31, col 47
if _v is not None: write(_filter(_v, rawExpr="$_('\xc2\xf1\xe5')")) # from line 31, col 47.
write('''</a>
''')
write('''
''')
if VFFSL(SL,"varExists",False)('$datalist'): # generated from line 35, col 4
write(''' <table>
<thead>
<tr>
<th>''')
if False:
_('Код')
_v = VFFSL(SL,"_",False)('Код') # "$_('\xca\xee\xe4')" on line 39, col 21
if _v is not None: write(_filter(_v, rawExpr="$_('\xca\xee\xe4')")) # from line 39, col 21.
write('''</th>
<th>''')
if False:
_('План')
_v = VFFSL(SL,"_",False)('План') # "$_('\xcf\xeb\xe0\xed')" on line 40, col 21
if _v is not None: write(_filter(_v, rawExpr="$_('\xcf\xeb\xe0\xed')")) # from line 40, col 21.
write('''</th>
<th>''')
if False:
_('Факт')
_v = VFFSL(SL,"_",False)('Факт') # "$_('\xd4\xe0\xea\xf2')" on line 41, col 21
if _v is not None: write(_filter(_v, rawExpr="$_('\xd4\xe0\xea\xf2')")) # from line 41, col 21.
write('''</th>
</tr>
</thead>
<tbody>
''')
for item in VFFSL(SL,"datalist",True): # generated from line 45, col 9
if VFFSL(SL,"item.QFACT",True): # generated from line 46, col 13
qFact = float(VFFSL(SL,"item.QFACT",True))
else: # generated from line 48, col 13
qFact = 0.0000
if VFFSL(SL,"item.QDOC",True): # generated from line 51, col 13
qDoc = float(VFFSL(SL,"item.QDOC",True))
else: # generated from line 53, col 13
qDoc = 0.0000
if VFFSL(SL,"showList",True) != '11' or VFFSL(SL,"qFact",True) != VFFSL(SL,"qDoc",True): # generated from line 56, col 11
write(''' <tr>
<td><a href="dfsTaskWares?tid=''')
_v = VFFSL(SL,"TID",True) # '$TID' on line 58, col 47
if _v is not None: write(_filter(_v, rawExpr='$TID')) # from line 58, col 47.
write('''&wid=''')
_v = VFFSL(SL,"item.WID",True) # '$item.WID' on line 58, col 56
if _v is not None: write(_filter(_v, rawExpr='$item.WID')) # from line 58, col 56.
write('''" title="''')
_v = VFN(VFFSL(SL,"item.WNAME",True),"replace",False)('"',"'") # '$item.WNAME.replace(\'"\',"\'")' on line 58, col 74
if _v is not None: write(_filter(_v, rawExpr='$item.WNAME.replace(\'"\',"\'")')) # from line 58, col 74.
write('''">''')
_v = VFFSL(SL,"item.WCODE",True) # '$item.WCODE' on line 58, col 104
if _v is not None: write(_filter(_v, rawExpr='$item.WCODE')) # from line 58, col 104.
write('''</a></td>
<td>''')
_v = VFFSL(SL,"viewQuantity",False)(VFFSL(SL,"item.QDOC",True),VFFSL(SL,"item.VUFACTOR",True),VFFSL(SL,"item.VUCODE",True),VFFSL(SL,"item.MUFACTOR",True),VFFSL(SL,"item.MUCODE",True)) # '$viewQuantity($item.QDOC,$item.VUFACTOR,$item.VUCODE,$item.MUFACTOR,$item.MUCODE)' on line 59, col 21
if _v is not None: write(_filter(_v, rawExpr='$viewQuantity($item.QDOC,$item.VUFACTOR,$item.VUCODE,$item.MUFACTOR,$item.MUCODE)')) # from line 59, col 21.
write('''</td>
''')
if VFFSL(SL,"qFact",True) < 0.00001: # generated from line 60, col 15
write(''' <td style="background-color:yellow;">''')
_v = VFFSL(SL,"viewQuantity",False)(VFFSL(SL,"item.QFACT",True),VFFSL(SL,"item.VUFACTOR",True),VFFSL(SL,"item.VUCODE",True),VFFSL(SL,"item.MUFACTOR",True),VFFSL(SL,"item.MUCODE",True)) # '$viewQuantity($item.QFACT,$item.VUFACTOR,$item.VUCODE,$item.MUFACTOR,$item.MUCODE)' on line 61, col 54
if _v is not None: write(_filter(_v, rawExpr='$viewQuantity($item.QFACT,$item.VUFACTOR,$item.VUCODE,$item.MUFACTOR,$item.MUCODE)')) # from line 61, col 54.
write('''</td>
''')
elif VFFSL(SL,"qFact",True) > VFFSL(SL,"qDoc",True): # generated from line 62, col 15
write(''' <td style="background-color:tomato;">''')
_v = VFFSL(SL,"viewQuantity",False)(VFFSL(SL,"item.QFACT",True),VFFSL(SL,"item.VUFACTOR",True),VFFSL(SL,"item.VUCODE",True),VFFSL(SL,"item.MUFACTOR",True),VFFSL(SL,"item.MUCODE",True)) # '$viewQuantity($item.QFACT,$item.VUFACTOR,$item.VUCODE,$item.MUFACTOR,$item.MUCODE)' on line 63, col 54
if _v is not None: write(_filter(_v, rawExpr='$viewQuantity($item.QFACT,$item.VUFACTOR,$item.VUCODE,$item.MUFACTOR,$item.MUCODE)')) # from line 63, col 54.
write('''</td>
''')
elif VFFSL(SL,"qDoc",True) > VFFSL(SL,"qFact",True) : # generated from line 64, col 15
write(''' <td style="background-color:tomato;">''')
_v = VFFSL(SL,"viewQuantity",False)(VFFSL(SL,"item.QFACT",True),VFFSL(SL,"item.VUFACTOR",True),VFFSL(SL,"item.VUCODE",True),VFFSL(SL,"item.MUFACTOR",True),VFFSL(SL,"item.MUCODE",True)) # '$viewQuantity($item.QFACT,$item.VUFACTOR,$item.VUCODE,$item.MUFACTOR,$item.MUCODE)' on line 65, col 54
if _v is not None: write(_filter(_v, rawExpr='$viewQuantity($item.QFACT,$item.VUFACTOR,$item.VUCODE,$item.MUFACTOR,$item.MUCODE)')) # from line 65, col 54.
write('''</td>
''')
else: # generated from line 66, col 15
write(''' <td style="background-color:limegreen;">''')
_v = VFFSL(SL,"viewQuantity",False)(VFFSL(SL,"item.QFACT",True),VFFSL(SL,"item.VUFACTOR",True),VFFSL(SL,"item.VUCODE",True),VFFSL(SL,"item.MUFACTOR",True),VFFSL(SL,"item.MUCODE",True)) # '$viewQuantity($item.QFACT,$item.VUFACTOR,$item.VUCODE,$item.MUFACTOR,$item.MUCODE)' on line 67, col 57
if _v is not None: write(_filter(_v, rawExpr='$viewQuantity($item.QFACT,$item.VUFACTOR,$item.VUCODE,$item.MUFACTOR,$item.MUCODE)')) # from line 67, col 57.
write('''</td>
''')
write(''' </tr>
''')
write(''' </tbody>
</table>
''')
else : # generated from line 74, col 4
write(''' <br>
''')
write('''
<a href="javascript: if (confirm(\'''')
if False:
_('Вы уверены?')
_v = VFFSL(SL,"_",False)('Вы уверены?') # "$_('\xc2\xfb \xf3\xe2\xe5\xf0\xe5\xed\xfb?')" on line 78, col 39
if _v is not None: write(_filter(_v, rawExpr="$_('\xc2\xfb \xf3\xe2\xe5\xf0\xe5\xed\xfb?')")) # from line 78, col 39.
write("""')) location.href='dfsTaskEnd?id=""")
_v = VFFSL(SL,"TID",True) # '$TID' on line 78, col 90
if _v is not None: write(_filter(_v, rawExpr='$TID')) # from line 78, col 90.
write('''\'">''')
if False:
_('Завершить')
_v = VFFSL(SL,"_",False)('Завершить') # "$_('\xc7\xe0\xe2\xe5\xf0\xf8\xe8\xf2\xfc')" on line 78, col 97
if _v is not None: write(_filter(_v, rawExpr="$_('\xc7\xe0\xe2\xe5\xf0\xf8\xe8\xf2\xfc')")) # from line 78, col 97.
write('''</a>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def writeBody(self, **KWS):
## CHEETAH: main method generated for this template
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write('''
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_task= 'writeBody'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing (respond(), searchList handling, etc.)
# to the generated class exactly once.
if not hasattr(task, '_initCheetahAttributes'):
    templateAPIClass = getattr(task, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(task)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
# Allows rendering the compiled template directly for debugging.
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=task()).run()
| [
"[email protected]"
] | |
e9d4a1e15e50708818c91043e99270b983a6336f | e66770daf4d1679c735cfab1ac24dd1f5107bd83 | /Chapter02/Ch02_Code/GUI_add_padding.py | ed137f365e58ba1817dfca017a7f893771e77317 | [] | no_license | CodedQuen/Python-GUI-Programming-Cookbook | c038eb6cec4945ff4f2b09e1551f9db712dd2502 | f02b0f9916fb8272edc7ed4704eecce53ae0231c | refs/heads/master | 2022-05-27T19:35:35.004455 | 2020-05-05T01:00:51 | 2020-05-05T01:00:51 | 261,329,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,147 | py | '''
May 2017
@author: Burkhard A. Meier
'''
#======================
# imports
#======================
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
# Create the root Tk window.
win = tk.Tk()
# Set the window title bar text.
win.title("Python GUI")
# Demo label at grid cell (0, 0). NOTE(review): the "Enter a name:" label
# created further down is gridded into the same cell, so this one ends up
# hidden behind it — kept to match the book's progressive example.
a_label = ttk.Label(win, text="A Label")
a_label.grid(column=0, row=0)
# Modified Button Click Function
def click_me():
    """Button callback: greet the typed name together with the chosen number."""
    greeting = 'Hello ' + name.get() + ' ' + number_chosen.get()
    action.configure(text=greeting)
# Label prompting for the user's name (covers the earlier "A Label").
ttk.Label(win, text="Enter a name:").grid(column=0, row=0)
# Text entry bound to a StringVar so click_me can read it.
name = tk.StringVar()
name_entered = ttk.Entry(win, width=12, textvariable=name)
name_entered.grid(column=0, row=1)
# Button wired to the click_me callback.
action = ttk.Button(win, text="Click Me!", command=click_me)
action.grid(column=2, row=1) # <= change column to 2
# Read-only combobox for picking a number (readonly blocks free typing).
ttk.Label(win, text="Choose a number:").grid(column=1, row=0)
number = tk.StringVar()
number_chosen = ttk.Combobox(win, width=12, textvariable=number, state='readonly')
number_chosen['values'] = (1, 2, 4, 42, 100)
number_chosen.grid(column=1, row=1)
number_chosen.current(0)
# Three checkbuttons demonstrating the disabled / unchecked / checked states.
chVarDis = tk.IntVar()
check1 = tk.Checkbutton(win, text="Disabled", variable=chVarDis, state='disabled')
check1.select()
check1.grid(column=0, row=4, sticky=tk.W)
chVarUn = tk.IntVar()
check2 = tk.Checkbutton(win, text="UnChecked", variable=chVarUn)
check2.deselect()
check2.grid(column=1, row=4, sticky=tk.W)
chVarEn = tk.IntVar()
check3 = tk.Checkbutton(win, text="Enabled", variable=chVarEn)
check3.deselect()
check3.grid(column=2, row=4, sticky=tk.W)
# GUI Callback function
def checkCallback(*ignoredArgs):
    """Keep the two live checkbuttons mutually exclusive.

    Checking 'UnChecked' disables 'Enabled' and vice versa; clearing one
    re-enables the other. The trace arguments are not needed.
    """
    check3.configure(state='disabled' if chVarUn.get() else 'normal')
    check2.configure(state='disabled' if chVarEn.get() else 'normal')
# Re-run checkCallback whenever either checkbutton variable is written.
chVarUn.trace('w', lambda unused0, unused1, unused2 : checkCallback())
chVarEn.trace('w', lambda unused0, unused1, unused2 : checkCallback())
# Scrolled, word-wrapping text area spanning all three columns.
scrol_w = 30
scrol_h = 3
scr = scrolledtext.ScrolledText(win, width=scrol_w, height=scrol_h, wrap=tk.WORD)
scr.grid(column=0, row=5, sticky='WE', columnspan=3) # now row=5
# Radiobutton colors as a list so the callback can index by selection value
# instead of using module-level globals per color.
colors = ["Blue", "Gold", "Red"]
# We have also changed the callback function to be zero-based, using the list
# instead of module-level global variables
# Radiobutton Callback
def radCall():
    """Radiobutton callback: recolor the window with the selected color.

    Indexes ``colors`` directly instead of one if/elif branch per entry, so
    extending the color list no longer requires touching this callback.
    Out-of-range values (e.g. the 99 sentinel assigned before any selection)
    are ignored, matching the original chain's do-nothing behavior for them.
    """
    radSel = radVar.get()
    if 0 <= radSel < len(colors):
        win.configure(background=colors[radSel])
# Three radiobuttons sharing one IntVar; value = index into colors.
radVar = tk.IntVar()
# Start from a non-existing index so no button appears pre-selected.
radVar.set(99)
# Create all three Radiobutton widgets in one loop.
for col in range(3):
    curRad = tk.Radiobutton(win, text=colors[col], variable=radVar,
                            value=col, command=radCall)
    curRad.grid(column=col, row=6, sticky=tk.W) # now row=6
# LabelFrame container demonstrating external padding (padx/pady).
buttons_frame = ttk.LabelFrame(win, text=' Labels in a Frame ')
buttons_frame.grid(column=0, row=7, padx=20, pady=40) # padx, pady
# Stack three labels vertically inside the frame.
ttk.Label(buttons_frame, text="Label1").grid(column=0, row=0)
ttk.Label(buttons_frame, text="Label2").grid(column=0, row=1)
ttk.Label(buttons_frame, text="Label3").grid(column=0, row=2)
name_entered.focus() # Place cursor into name Entry
#======================
# Start GUI
#======================
win.mainloop()
| [
"[email protected]"
] | |
7792841836eb91ce3be5aa927a1a37b5f335c11b | ac54aa0127a47fb59211fba9e6cb8431d9d864cd | /muscn/settings/base.py | be10c07ea832ffd447b82e096349324c124e21c9 | [] | no_license | xiringlama/manutd.org.np | 8919e3c1ad0494f88b819089686a756d67d38598 | f394f16edb96c05e2e864dcec1ec52532cd35ac2 | refs/heads/master | 2021-07-12T00:33:17.197706 | 2017-10-16T14:45:10 | 2017-10-16T14:45:10 | 107,222,122 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,820 | py | import os
# Project root: two levels up from this settings module.
BASE_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), '..')
# Extra static-asset locations searched in addition to app 'static/' dirs.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]
INSTALLED_APPS = (
    # Admin skin — must precede django.contrib.admin.
    'jet',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.sitemaps',
    # Social auth (allauth) with Facebook/Google/Twitter providers.
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.facebook',
    'allauth.socialaccount.providers.google',
    'allauth.socialaccount.providers.twitter',
    # Third-party utilities (editor, pagination, sorting, audit log, images,
    # REST API, singletons, push, transactional mail, admin ordering).
    'froala_editor',
    'dj_pagination',
    'webstack_django_sorting',
    'auditlog',
    'versatileimagefield',
    'rest_framework',
    'rest_framework.authtoken',
    'solo',
    'fcm',
    'anymail',
    'adminsortable2',
    # Local project apps.
    'apps.core',
    'apps.users',
    'apps.payment',
    'apps.page',
    'apps.dashboard',
    'apps.stats',
    'apps.events',
    'apps.post',
    'apps.partner',
    'apps.team',
    'apps.timeline',
    'apps.webhook',
    'apps.gallery',
    'apps.key',
    'apps.push_notification',
    'apps.contact',
)
# Request/response middleware chain. Order matters: SecurityMiddleware first,
# sessions before auth/messages (both read session state).
# FIX: the original listed SecurityMiddleware twice (first and last entry);
# duplicated middleware runs twice per request, so the redundant second
# occurrence was removed.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'dj_pagination.middleware.PaginationMiddleware',
    'auditlog.middleware.AuditlogMiddleware',
]
# Django template engine: project-level 'templates/' dir plus per-app dirs.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
ROOT_URLCONF = 'muscn.urls'
WSGI_APPLICATION = 'muscn.wsgi.application'
# Internationalization / timezone.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kathmandu'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# DRF defaults: JSON-only rendering, 25-item pages, project API-key
# permission plus token/session auth.
REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 25,
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'apps.key.permissions.DistributedKeyAuthentication',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    )
}
# Pull in user-specific overrides (kept mid-file so values above can be
# overridden; values below still win over user_settings).
from .user_settings import * # noqa
from django.contrib.messages import constants as messages
# Map Django's ERROR message level to Bootstrap's 'danger' CSS class.
MESSAGE_TAGS = {
    messages.ERROR: 'danger'
}
# eSewa payment gateway merchant code.
ESEWA_SCD = 'manutd'
# Max device tokens per FCM push batch.
FCM_MAX_RECIPIENTS = 10000
# Known aliases of the club name (used for matching/search — see apps).
ALIASES = [
    'Manchester United',
    'Man Utd',
    'Man United',
    'MUFC',
]
# TEMPLATE_DEBUG = False
# Trust the proxy's X-Forwarded-Proto header to detect HTTPS.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Harden cookies: HttpOnly + Secure, with short non-default names.
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_NAME = 'sci'
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
CSRF_COOKIE_NAME = 'ct'
CSRF_COOKIE_SECURE = True
# Redis-backed default cache (db 7 on localhost).
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://127.0.0.1:6379/7",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        }
    }
}
# Store sessions in the cache above; django-solo singletons cached for 24h.
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
SOLO_CACHE = 'default'
SOLO_CACHE_PREFIX = 'solo'
SOLO_CACHE_TIMEOUT = 60 * 60 * 24
# Email server errors to ADMINS; opbeat handler kept commented out.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        },
        # 'opbeat': {
        #     'level': 'WARNING',
        #     'class': 'opbeat.contrib.django.handlers.OpbeatHandler',
        # },
    },
    'loggers': {
        'django': {
            # 'handlers': ['mail_admins', 'opbeat'],
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# E-mail settings (SMTP via Zoho with STARTTLS; password expected to come
# from user_settings/environment, not this file).
DEFAULT_FROM_EMAIL = 'MUSC Nepal<[email protected]>'
SERVER_EMAIL = DEFAULT_FROM_EMAIL
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.zoho.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = '[email protected]'
EMAIL_USE_TLS = True
SITE_ID = 1
# Collected static files and uploaded media live one level above the project.
STATIC_ROOT = os.path.join(BASE_DIR, '..', 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'media')
MEDIA_URL = '/media/'
| [
"[email protected]"
] | |
948de2c172a457ca55617d1ab99b85b378c019ce | 1840eff4a604161d56fba2747520686a5a008301 | /src/apps/dh/urls.py | 6c7f4b97920cad4dcd334b410b7cf1cbfb3c53eb | [] | no_license | jinchuika/app-suni | f9291fd2e5ecc9915178141039da19444769cb85 | 0e37786d7173abe820fd10b094ffcc2db9593a9c | refs/heads/master | 2023-08-24T21:52:04.490613 | 2023-08-14T21:32:22 | 2023-08-14T21:32:22 | 68,239,483 | 7 | 6 | null | 2023-08-14T21:32:24 | 2016-09-14T20:10:09 | Python | UTF-8 | Python | false | false | 619 | py | from django.conf.urls import url
from apps.dh.views import *
from django.views.decorators.cache import cache_page
# Routes for DH event views. The two calendar views are wrapped in
# cache_page(5): responses cached for 5 seconds to absorb bursts while
# staying near-live.
urlpatterns = [
    # Create a new event.
    url(r'^evento/add/$', EventoDHCreateView.as_view(), name='evento_dh_add'),
    # Detail / edit for a single event by primary key.
    url(r'^evento/(?P<pk>\d+)/$', EventoDHDetailView.as_view(), name='evento_dh_detail'),
    url(r'^evento/(?P<pk>\d+)/edit$', EventoDHUpdateView.as_view(), name='evento_dh_update'),
    # Calendar views (briefly cached, see note above).
    url(r'^evento/calendario/home$', cache_page(5)(EventoDHCalendarHomeView.as_view()), name='evento_dh_calendario_home'),
    url(r'^calendario/$', cache_page(5)(CalendarioDHView.as_view()), name='evento_dh_calendario'),
]
| [
"[email protected]"
] | |
d601e47ed1794cce231d067d5f51a3a2fa9dd4ab | 2d05050d0ada29f7680b4df20c10bb85b0530e45 | /python/tvm/tir/schedule/_type_checker.py | 5c51b1b09fedd38a6aa9032abcb9c340522bcdb3 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] | permissive | apache/tvm | 87cb617f9a131fa44e1693303aaddf70e7a4c403 | d75083cd97ede706338ab413dbc964009456d01b | refs/heads/main | 2023-09-04T11:24:26.263032 | 2023-09-04T07:26:00 | 2023-09-04T07:26:00 | 70,746,484 | 4,575 | 1,903 | Apache-2.0 | 2023-09-14T19:06:33 | 2016-10-12T22:20:28 | Python | UTF-8 | Python | false | false | 12,491 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Type checking functionality"""
import collections
import collections.abc
import functools
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union
import typing
def _is_none_type(type_: Any) -> bool:
return type_ is None or type_ is type(None)
def _get_subtypes(type_: Any) -> Any:
# TODO(@tvm-team): This is hot fix to support subtle difference between python versions
# Would be nice to find a better way if possible
if hasattr(typing, "_SpecialGenericAlias"):
if hasattr(typing, "get_args"):
subtypes = typing.get_args(type_) # type: ignore
else:
subtypes = type_.__args__
else:
subtypes = type_.__args__
return subtypes
# Both branches define a `_Subtype` helper with the same static API
# (list_/dict_/tuple_/optional/union/callable, each returning the extracted
# subtype list or None); which implementation exists depends on the running
# interpreter's typing internals.
if hasattr(typing, "_GenericAlias"):
    # For python versions 3.7 onward, check the __origin__ attribute.

    class _Subtype:
        @staticmethod
        def _origin(type_: Any) -> Any:
            # Returns the unsubscripted origin (list, dict, Union, ...) of a
            # generic alias, or None for non-generic types.
            if hasattr(typing, "_SpecialGenericAlias"):
                if isinstance(type_, typing._SpecialGenericAlias):  # type: ignore # pylint: disable=protected-access
                    return type_.__origin__
            if isinstance(type_, typing._GenericAlias):  # type: ignore # pylint: disable=protected-access
                return type_.__origin__
            return None

        @staticmethod
        def list_(type_: Any) -> Any:
            if _Subtype._origin(type_) is list:
                if hasattr(typing, "get_args"):
                    (subtype,) = typing.get_args(type_)  # type: ignore
                else:
                    (subtype,) = type_.__args__
                return [subtype]
            return None

        @staticmethod
        def dict_(type_: Any) -> Any:
            if _Subtype._origin(type_) is dict:
                if hasattr(typing, "get_args"):
                    (ktype, vtype) = typing.get_args(type_)  # type: ignore
                else:
                    (ktype, vtype) = type_.__args__
                return [ktype, vtype]
            return None

        @staticmethod
        def tuple_(type_: Any) -> Optional[List[type]]:
            if _Subtype._origin(type_) is tuple:
                subtypes = _get_subtypes(type_)
                return subtypes
            return None

        @staticmethod
        def optional(  # pylint: disable=missing-function-docstring
            type_: Any,
        ) -> Optional[List[type]]:
            # Optional[X] is represented as Union[X, None] with exactly two args.
            if _Subtype._origin(type_) is Union:
                subtypes = _get_subtypes(type_)
                if len(subtypes) == 2 and _is_none_type(subtypes[1]):
                    return [subtypes[0]]
            return None

        @staticmethod
        def union(type_: Any) -> Optional[List[type]]:  # pylint: disable=missing-function-docstring
            # Any Union that is not an Optional (handled above).
            if _Subtype._origin(type_) is Union:
                subtypes = _get_subtypes(type_)
                if len(subtypes) != 2 or not _is_none_type(subtypes[1]):
                    return list(subtypes)
            return None

        @staticmethod
        def callable(type_: Any) -> Optional[List[type]]:
            if _Subtype._origin(type_) is collections.abc.Callable:
                subtypes = _get_subtypes(type_)
                return subtypes
            return None

elif hasattr(typing, "_Union"):
    # For python 3.6 and below, check the __name__ attribute, or CallableMeta.

    class _Subtype:  # type: ignore
        @staticmethod
        def list_(type_: Any) -> Optional[List[type]]:
            if isinstance(type_, typing.GenericMeta):  # type: ignore # pylint: disable=no-member
                if type_.__name__ == "List":
                    (subtype,) = type_.__args__  # type: ignore # pylint: disable=no-member
                    return [subtype]
            return None

        @staticmethod
        def dict_(type_: Any) -> Optional[List[type]]:
            if isinstance(type_, typing.GenericMeta):  # type: ignore # pylint: disable=no-member
                if type_.__name__ == "Dict":
                    (ktype, vtype) = type_.__args__  # type: ignore # pylint: disable=no-member
                    return [ktype, vtype]
            return None

        @staticmethod
        def tuple_(type_: Any) -> Optional[List[type]]:
            if isinstance(type_, typing.GenericMeta):  # type: ignore # pylint: disable=no-member
                if type_.__name__ == "Tuple":
                    subtypes = type_.__args__  # type: ignore # pylint: disable=no-member
                    return subtypes
            return None

        @staticmethod
        def optional(type_: Any) -> Optional[List[type]]:
            if isinstance(type_, typing._Union):  # type: ignore # pylint: disable=no-member,protected-access
                subtypes = type_.__args__
                if len(subtypes) == 2 and _is_none_type(subtypes[1]):
                    return [subtypes[0]]
            return None

        @staticmethod
        def union(type_: Any) -> Optional[List[type]]:
            if isinstance(type_, typing._Union):  # type: ignore # pylint: disable=no-member,protected-access
                subtypes = type_.__args__
                if len(subtypes) != 2 or not _is_none_type(subtypes[1]):
                    return list(subtypes)
            return None

        @staticmethod
        def callable(type_: Any) -> Optional[List[type]]:
            if isinstance(type_, typing.CallableMeta):  # type: ignore # pylint: disable=no-member,protected-access
                subtypes = type_.__args__
                return subtypes
            return None
def _dispatcher(type_: Any) -> Tuple[str, List[type]]:
    """Classify an annotation into a handler key plus its extracted subtypes.

    The key selects the matching entry in the _TYPE2STR / _TYPE_CHECK tables.
    """
    if _is_none_type(type_):
        return "none", []
    # Probe each generic form in a fixed order; the first extractor that
    # recognizes the annotation wins (optional must precede union, since an
    # Optional is a two-member Union).
    probes = (
        ("list", _Subtype.list_),
        ("dict", _Subtype.dict_),
        ("tuple", _Subtype.tuple_),
        ("optional", _Subtype.optional),
        ("union", _Subtype.union),
        ("callable", _Subtype.callable),
    )
    for key, extract in probes:
        subtypes = extract(type_)
        if subtypes is not None:
            return key, subtypes
    return "atomic", [type_]
def callable_str(*subtypes):
    """Render a ``Callable`` annotation (arg types + return type) as a string."""
    if not subtypes:
        return "Callable"
    arg_types, return_type = subtypes[:-1], subtypes[-1]
    rendered_args = ", ".join(map(_type2str, arg_types))
    return f"Callable[[{rendered_args}], {_type2str(return_type)}]"
# Maps a _dispatcher key to a renderer producing the canonical string
# spelling of that annotation form; consumed by _type2str.
_TYPE2STR: Dict[Any, Callable] = {
    "none": lambda: "None",
    "atomic": lambda t: str(t.__name__),
    "callable": callable_str,
    "list": lambda t: f"List[{_type2str(t)}]",
    "dict": lambda k, v: f"Dict[{_type2str(k)}, {_type2str(v)}]",
    "tuple": lambda *t: f"Tuple[{', '.join([_type2str(x) for x in t])}]",
    "optional": lambda t: f"Optional[{_type2str(t)}]",
    "union": lambda *t: f"Union[{', '.join([_type2str(x) for x in t])}]",
}
def _type2str(type_: Any) -> str:
    """Return the canonical string rendering of a type annotation."""
    key, subtypes = _dispatcher(type_)
    renderer = _TYPE2STR[key]
    return renderer(*subtypes)
def _val2type(value: Any):
if isinstance(value, list):
types = set(_val2type(x) for x in value)
if len(types) == 1:
return List[types.pop()] # type: ignore
return List[Union[tuple(types)]] # type: ignore
if isinstance(value, tuple):
types = tuple(_val2type(x) for x in value) # type: ignore
return Tuple[types]
return type(value)
def _type_check_err(x: Any, name: str, expected: Any) -> str:
    """Compose the mismatch message for argument *name*: expected vs actual."""
    expected_str = _type2str(expected)
    actual_str = _type2str(_val2type(x))
    return f'"{name}" has wrong type. Expected "{expected_str}", but gets: "{actual_str}"'
def _type_check_vtable() -> Dict[str, Callable]:
    """Build the dispatch table used by ``_type_check``.

    Keys are the category strings produced by ``_dispatcher`` ("none",
    "atomic", "list", ...); values are checker callables that return ``None``
    on success or an error-message string on failure.  Container checkers
    recurse through the module-level ``_type_check``.
    """
    def _type_check_none(v: Any, name: str) -> Optional[str]:
        return None if v is None else _type_check_err(v, name, None)
    def _type_check_atomic(v: Any, name: str, type_: Any) -> Optional[str]:
        return None if isinstance(v, type_) else _type_check_err(v, name, type_)
    def _type_check_callable(v: Any, name: str, *_subtypes: Any) -> Optional[str]:
        # Current implementation only validates that the argument is
        # callable, and doesn't validate the arguments accepted by the
        # callable, if any.
        return None if callable(v) else _type_check_err(v, name, Callable)
    def _type_check_list(v: List[Any], name: str, type_: Any) -> Optional[str]:
        # NOTE: tuples are deliberately accepted where a list is expected.
        if not isinstance(v, (list, tuple)):
            return _type_check_err(v, name, list)
        for i, x in enumerate(v):
            error_msg = _type_check(x, f"{name}[{i}]", type_)
            if error_msg is not None:
                return error_msg
        return None
    def _type_check_dict(dict_obj: Dict[Any, Any], name: str, *types: Any) -> Optional[str]:
        # types is the (key_type, value_type) pair from the annotation.
        ktype_, vtype_ = types
        if not isinstance(dict_obj, dict):
            return _type_check_err(dict_obj, name, dict)
        for k, v in dict_obj.items():
            error_msg = _type_check(k, f"{name}[{k}]", ktype_)
            if error_msg is not None:
                return error_msg
            error_msg = _type_check(v, f"{name}[{k}]", vtype_)
            if error_msg is not None:
                return error_msg
        return None
    def _type_check_tuple(v: Any, name: str, *types: Any) -> Optional[str]:
        # Fixed-length tuples only: arity must match the annotation exactly.
        if not isinstance(v, tuple):
            return _type_check_err(v, name, Tuple[types])
        if len(types) != len(v):
            return _type_check_err(v, name, Tuple[types])
        for i, (x, type_) in enumerate(zip(v, types)):
            error_msg = _type_check(x, f"{name}[{i}]", type_)
            if error_msg is not None:
                return error_msg
        return None
    def _type_check_optional(v: Any, name: str, type_: Any) -> Optional[str]:
        return None if v is None else _type_check(v, name, type_)
    def _type_check_union(v: Any, name: str, *types: Any) -> Optional[str]:
        # Accept the value as soon as any union member matches.
        for type_ in types:
            error_msg = _type_check(v, name, type_)
            if error_msg is None:
                return None
        return _type_check_err(v, name, Union[types])
    return {
        "none": _type_check_none,
        "atomic": _type_check_atomic,
        "callable": _type_check_callable,
        "list": _type_check_list,
        "dict": _type_check_dict,
        "tuple": _type_check_tuple,
        "optional": _type_check_optional,
        "union": _type_check_union,
    }
# Module-level dispatch table, built once at import time.
_TYPE_CHECK: Dict[Any, Callable] = _type_check_vtable()
def _type_check(v: Any, name: str, type_: Any) -> Optional[str]:
    """Check *v* against annotation *type_*; return an error message or None."""
    key, subtypes = _dispatcher(type_)
    return _TYPE_CHECK[key](v, name, *subtypes)
FType = TypeVar("FType", bound=Callable[..., Any])
def type_checked(func: FType) -> FType:
    """Decorator that validates every annotated argument of *func* at call time.

    Arguments without an annotation are not checked.

    Raises:
        TypeError: when an annotated argument does not match its annotation.
    """
    sig = inspect.signature(func)
    # Signatures are immutable, so the annotated-parameter list can be
    # computed once at decoration time.
    annotated = [p for p in sig.parameters.values() if p.annotation != inspect.Signature.empty]
    @functools.wraps(func)
    def wrap(*args, **kwargs):
        bound = sig.bind(*args, **kwargs)
        bound.apply_defaults()
        for param in annotated:
            error_msg = _type_check(bound.arguments[param.name], param.name, param.annotation)
            if error_msg is not None:
                raise TypeError(f'In "{func.__qualname__}", {error_msg}')
        return func(*args, **kwargs)
    return wrap  # type: ignore
| [
"[email protected]"
] | |
2afc06ad0c4f9e8735adae24182886d198a029d3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03284/s870585423.py | e77617648221cc20c5db9167bc932d1b07055052 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | import sys
# Fast-stdin aliases and limits (competitive-programming boilerplate).
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60
MOD = 1000000007
def main():
    """Read N and K; print 0 when N is divisible by K, otherwise 1."""
    N, K = map(int, readline().split())
    print(0 if N % K == 0 else 1)
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
d390872599b775908541f61bca80c5d479995e3e | f900a9f48fe24c6a581bcb28ad1885cfe5743f80 | /Chapter_3/Pg_89_Try_It_Yourself.py | 18af1c66106cd96e52bca0048c6335ef2c061c01 | [] | no_license | Anjali-225/PythonCrashCourse | 76e63415e789f38cee019cd3ea155261ae2e8398 | f9b9649fe0b758c04861dad4d88058d48837a365 | refs/heads/master | 2022-12-03T21:35:07.428613 | 2020-08-18T11:42:58 | 2020-08-18T11:42:58 | 288,430,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | #3-4
# Exercise 3-4: invite everyone on the initial guest list.
guest = ['Joe','Eric','Sarah','Helly']
message = f"{guest[0]}, you are invited to dinner"
print(message)
message = f"{guest[1]}, you are invited to dinner"
print(message)
message = f"{guest[2]}, you are invited to dinner"
print(message)
message = f"{guest[3]}, you are invited to dinner\n"
print(message)
#3-5
# One guest cancels: drop them and re-invite the remaining three.
print(f"{guest[1]} can not make it to the dinner\n")
del guest[1]
message = f"{guest[0]}, you are invited to dinner"
print(message)
message = f"{guest[1]}, you are invited to dinner"
print(message)
message = f"{guest[2]}, you are invited to dinner\n"
print(message)
#3-6
# A bigger table: add three guests (front, middle, end) and re-invite all six.
guest.insert(0, 'Bob')
guest.insert(2, 'James')
guest.append('Shrek')
message = f"{guest[0]}, you are invited to dinner since we have a bigger table"
print(message)
message = f"{guest[1]}, you are invited to dinner since we have a bigger table"
print(message)
message = f"{guest[2]}, you are invited to dinner since we have a bigger table"
print(message)
message = f"{guest[3]}, you are invited to dinner since we have a bigger table"
print(message)
message = f"{guest[4]}, you are invited to dinner since we have a bigger table"
print(message)
message = f"{guest[5]}, you are invited to dinner since we have a bigger table\n"
print(message)
#3-7
# Shrink back to two seats: pop the last four guests and apologize to each.
print("We can only add two people for dinner")
popped1 = guest.pop()
print(f"Sorry {popped1}, we can not invite you for dinner anymore")
popped2 = guest.pop()
print(f"Sorry {popped2}, we can not invite you for dinner anymore")
popped3 = guest.pop()
print(f"Sorry {popped3}, we can not invite you for dinner anymore")
popped4 = guest.pop()
print(f"Sorry {popped4}, we can not invite you for dinner anymore\n")
print(f"{guest[0]}, you are still invited for dinner")
print(f"{guest[1]}, you are still invited for dinner\n")
#del guest[1]
#del guest[0]
print(guest)
| [
"[email protected]"
] | |
03ee2745ee616d850f07343d7d489f52af1d54c3 | 7d1e23982439f530f3c615f3ac94f59861bc2325 | /controller.py | 70d1ac19194843aea3eac826c23ea21ef5b3ffdb | [] | no_license | daskorod/RPG_project | 3104afafd19038f1c9da0d9aca6f489f9629093b | 3b42a7f3131830d3b728f5d65332750fa032ec03 | refs/heads/master | 2020-05-24T04:24:33.198371 | 2017-11-30T19:29:48 | 2017-11-30T19:29:48 | 84,820,405 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,385 | py | import pygame
import sys
import sounds
class Holy_Spirit ():
    def __init__ (self):
        """Initialise all input-state flags polled by the game loop.

        Each ``k_*`` attribute mirrors whether the corresponding pygame key is
        currently held; the ``*_control``/``*_lock`` attributes debounce
        repeated KEYDOWN events.
        """
        # High-level game-flow flags.
        self.auto = True
        self.k_space = False
        self.stage1_flag = False
        self.stage2_flag = True
        self.flag = 0
        # Arrow-key movement state.
        self.left = False
        self.right = False
        self.up = False
        self.down = False
        # Number and letter key states.
        self.k_1 = False
        self.k_2 = False
        self.k_3 = False
        self.k_4 = False
        self.k_n = False
        self.k_e = False
        self.k_i = False
        self.k_c = False
        self.k_q = False
        self.button_up = True
        self.clic = False
        self.k_j = False
        self.k_esc = False
        self.e_cntrl = False
        self.up_is = False
        self.down_is = False
        # Debounce latches for the 1/2/3 keys (reset on KEYUP).
        self.k_1_control = False
        self.k_2_control = False
        self.k_3_control = False
        self.k_a = False
        self.move_cntrl_a = False
        # R = False
        # L = False
        # U = False
        # D = False
        # self.direction = [R,L,U,D]
        self.current_location = ''
        self.move_cntrl = False
        self.e_lock = False
def control (self):
if self.k_esc == True:
sys.exit ()
for e in pygame.event.get ():
if e.type == pygame.QUIT:
sys.exit ()
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_SPACE:
self.k_space = True
if e.type == pygame.KEYUP:
if e.key == pygame.K_SPACE:
self.k_space = False
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_LEFT:
self.left = True
if self.move_cntrl_a == False:
self.move_cntrl = True
self.move_cntrl_a = True
if e.key == pygame.K_RIGHT:
self.right = True
if self.move_cntrl_a == False:
self.move_cntrl = True
self.move_cntrl_a = True
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_UP:
self.up = True
self.up_is = True
if self.move_cntrl_a == False:
self.move_cntrl = True
self.move_cntrl_a = True
if e.key == pygame.K_DOWN:
self.down = True
self.down_is = True
if self.move_cntrl_a == False:
self.move_cntrl = True
self.move_cntrl_a = True
if e.type == pygame.KEYUP:
if e.key == pygame.K_LEFT:
self.left = False
self.move_cntrl_a = False
if e.key == pygame.K_RIGHT:
self.right = False
self.move_cntrl_a = False
if e.type == pygame.KEYUP:
if e.key == pygame.K_UP:
self.up = False
self.up_is = False
self.move_cntrl_a = False
if e.key == pygame.K_DOWN:
self.down = False
self.down_is = False
self.move_cntrl_a = False
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_1 and self.k_1_control == False:
self.k_1 = True
self.k_1_control = True
self.clic = True
if e.key == pygame.K_2 and self.k_2_control == False:
self.k_2 = True
self.k_2_control = True
self.clic = True
if e.key == pygame.K_3 and self.k_3_control == False:
self.k_3 = True
self.k_3_control = True
self.clic = True
if e.key == pygame.K_4:
self.k_4 = True
if e.key == pygame.K_n:
self.k_n = True
if e.key == pygame.K_e and self.e_lock == False:
#self.e_cntrl = False
self.k_e = True
sounds.clic2.play()
self.e_lock = True
if e.key == pygame.K_i:
self.k_i = True
if e.key == pygame.K_a:
self.k_a = True
if e.key == pygame.K_c:
self.k_c = True
if e.key == pygame.K_j:
self.k_j = True
if e.key == pygame.K_q:
self.k_q = True
if e.key == pygame.K_ESCAPE:
self.k_esc = True
if e.type == pygame.KEYUP:
if e.key == pygame.K_1:
self.k_1 = False
self.button_up = True
self.k_1_control = False
if e.key == pygame.K_2:
self.k_2 = False
self.button_up = True
self.k_2_control = False
if e.key == pygame.K_3:
self.k_3 = False
self.button_up = True
self.k_3_control = False
if e.key == pygame.K_4:
self.k_4 = False
self.button_up = True
if e.key == pygame.K_n:
self.k_n = False
if e.key == pygame.K_e:
self.e_cntrl = True
self.k_e = False
self.e_lock = False
if e.key == pygame.K_i:
self.k_i = False
if e.key == pygame.K_a:
self.k_a = False
if e.key == pygame.K_c:
self.k_c = False
if e.key == pygame.K_j:
self.k_j = False
if e.key == pygame.K_q:
self.k_q = False
if e.key == pygame.K_ESCAPE:
self.k_esc = False | [
"[email protected]"
] | |
cdbded1453eefce5a9932e58c56c62071e4acfa4 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.5_rd=0.65_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=72/params.py | 3e12d7ff33ac0d25b94cbf315139e7c926365d5c | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.518429',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.65',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 72,
'utils': 'uni-medium-3'}
| [
"[email protected]"
] | |
f8929bcfb574e4ad6472200e8721328615419355 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-6279.py | 0ee32f9d4e13b8b1b73f473249d96e94703c6449 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,289 | py | # Binary-search trees
class TreeNode(object):
    """Node of a binary search tree holding distinct int keys."""
    value:int = 0
    left:"TreeNode" = None
    right:"TreeNode" = None
    def insert(self:"TreeNode", x:int) -> bool:
        """Insert x below this node; return False when x is already present."""
        if x == self.value:
            return False
        if x < self.value:
            if self.left is not None:
                return self.left.insert(x)
            self.left = makeNode(x)
            return True
        if self.right is not None:
            return self.right.insert(x)
        self.right = makeNode(x)
        return True
    def contains(self:"TreeNode", x:int) -> bool:
        """Return True when x is stored in this subtree."""
        if x == self.value:
            return True
        child = self.left if x < self.value else self.right
        return child is not None and child.contains(x)
class TreeNode2(object):
    """Generated benchmark variant of TreeNode.

    The ``*2`` attributes and the extra ``x2`` arguments exist only to inflate
    the class: within this file nothing assigns value2/left2/right2, and the
    numbered methods ignore their extra arguments and run the same BST logic
    on ``value``/``left``/``right``.
    """
    value:int = 0
    value2:int = 0
    left:"TreeNode2" = None
    left2:"TreeNode2" = None
    right:"TreeNode2" = None
    right2:"TreeNode2" = None
    def insert(self:"TreeNode2", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode2(x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode2(x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    # insert2 ignores x2 and behaves exactly like insert.
    def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode2(x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode2(x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode2", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    # contains2 ignores x2 and behaves exactly like contains.
    def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
class TreeNode3(object):
    """Generated benchmark variant of TreeNode (see TreeNode).

    The ``*2``/``*3`` attributes are never assigned in this file, and every
    numbered insert/contains variant ignores its extra arguments and runs the
    same BST logic on ``value``/``left``/``right``.
    """
    value:int = 0
    value2:int = 0
    value3:int = 0
    left:"TreeNode3" = None
    left2:"TreeNode3" = None
    left3:"TreeNode3" = None
    right:"TreeNode3" = None
    right2:"TreeNode3" = None
    right3:"TreeNode3" = None
    def insert(self:"TreeNode3", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    # insert2/insert3 ignore the extra arguments and behave like insert.
    def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode3", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    # contains2/contains3 ignore the extra arguments and behave like contains.
    def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
class TreeNode4(object):
    """Generated benchmark variant of TreeNode (see TreeNode).

    The ``*2``..``*4`` attributes are never assigned in this file, and every
    numbered insert/contains variant ignores its extra arguments and runs the
    same BST logic on ``value``/``left``/``right``.
    """
    value:int = 0
    value2:int = 0
    value3:int = 0
    value4:int = 0
    left:"TreeNode4" = None
    left2:"TreeNode4" = None
    left3:"TreeNode4" = None
    left4:"TreeNode4" = None
    right:"TreeNode4" = None
    right2:"TreeNode4" = None
    right3:"TreeNode4" = None
    right4:"TreeNode4" = None
    def insert(self:"TreeNode4", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode4(x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode4(x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    # insert2..insert4 ignore the extra arguments and behave like insert.
    def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode4(x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode4(x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode4(x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode4(x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode4(x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode4(x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode4", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    # contains2..contains4 ignore the extra arguments and behave like contains.
    def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
class TreeNode5(object):
    """Generated benchmark variant of TreeNode (see TreeNode).

    The ``*2``..``*5`` attributes are never assigned in this file, and every
    numbered insert/contains variant ignores its extra arguments and runs the
    same BST logic on ``value``/``left``/``right``.
    """
    value:int = 0
    value2:int = 0
    value3:int = 0
    value4:int = 0
    value5:int = 0
    left:"TreeNode5" = None
    left2:"TreeNode5" = None
    left3:"TreeNode5" = None
    left4:"TreeNode5" = None
    left5:"TreeNode5" = None
    right:"TreeNode5" = None
    right2:"TreeNode5" = None
    right3:"TreeNode5" = None
    right4:"TreeNode5" = None
    right5:"TreeNode5" = None
    def insert(self:"TreeNode5", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    # insert2..insert5 ignore the extra arguments and behave like insert.
    def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode5", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    # contains2..contains5 ignore the extra arguments and behave like contains.
    def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
class Tree(object):
    """Binary search tree wrapper that tracks its node count."""
    root:TreeNode = None
    size:int = 0
    def insert(self:"Tree", x:int) -> object:
        """Add x to the tree; duplicates leave size unchanged."""
        if self.root is not None:
            if self.root.insert(x):
                self.size = self.size + 1
        else:
            self.root = makeNode(x)
            self.size = 1
    def contains(self:"Tree", x:int) -> bool:
        """Return True when x is present in the tree."""
        return False if self.root is None else self.root.contains(x)
class Tree2(object):
    """Generated benchmark variant of Tree.

    The ``root2``/``size2`` attributes are unused in this file; insert2 and
    contains2 ignore ``x2`` and behave exactly like insert/contains.
    """
    root:TreeNode2 = None
    root2:TreeNode2 = None
    size:int = 0
    size2:int = 0
    def insert(self:"Tree2", x:int) -> object:
        if self.root is None:
            self.root = makeNode2(x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree2", x:int, x2:int) -> object:
        if self.root is None:
            self.root = makeNode2(x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree2", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree2", x:int, x2:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
class Tree3(object):
    """Generated benchmark variant of Tree.

    The ``root2``/``root3``/``size2``/``size3`` attributes are unused in this
    file; the numbered insert/contains variants ignore their extra arguments
    and behave exactly like insert/contains.
    """
    root:TreeNode3 = None
    root2:TreeNode3 = None
    root3:TreeNode3 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    def insert(self:"Tree3", x:int) -> object:
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree3", x:int, x2:int) -> object:
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree3", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree3", x:int, x2:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
class Tree4(object):
    """Generated benchmark variant of Tree.

    Bug fix: ``insert3`` previously contained the placeholder ``$Exp.root``
    (invalid Python syntax); it now assigns ``self.root`` like every other
    insert variant.  The ``root2``..``root4``/``size2``..``size4`` attributes
    are unused in this file; the numbered insert/contains variants ignore
    their extra arguments and behave exactly like insert/contains.
    """
    root:TreeNode4 = None
    root2:TreeNode4 = None
    root3:TreeNode4 = None
    root4:TreeNode4 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    size4:int = 0
    def insert(self:"Tree4", x:int) -> object:
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree4", x:int, x2:int) -> object:
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
        if self.root is None:
            # Was "$Exp.root = ..." — a syntax error; must target self.root.
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree4", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree4", x:int, x2:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
class Tree5(object):
    """Generated benchmark variant of Tree.

    The ``root2``..``root5``/``size2``..``size5`` attributes are unused in
    this file; the numbered insert/contains variants ignore their extra
    arguments and behave exactly like insert/contains.
    """
    root:TreeNode5 = None
    root2:TreeNode5 = None
    root3:TreeNode5 = None
    root4:TreeNode5 = None
    root5:TreeNode5 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    size4:int = 0
    size5:int = 0
    def insert(self:"Tree5", x:int) -> object:
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree5", x:int, x2:int) -> object:
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree5", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree5", x:int, x2:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
    """Allocate a leaf TreeNode storing x."""
    node:TreeNode = None
    node = TreeNode()
    node.value = x
    return node
def makeNode2(x: int, x2: int) -> TreeNode2:
    """Allocate a leaf TreeNode2 storing x.

    x2 is accepted for signature parity with the other factories but is
    ignored (value2 keeps its class default).  The unused local ``b2``
    declaration was removed.
    """
    b: TreeNode2 = None
    b = TreeNode2()
    b.value = x
    return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
    """Allocate a leaf TreeNode3 storing x.

    x2/x3 are accepted for signature parity but ignored.  The unused local
    ``b2``/``b3`` declarations were removed.
    """
    b: TreeNode3 = None
    b = TreeNode3()
    b.value = x
    return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
    """Allocate a leaf TreeNode4 storing x.

    x2..x4 are accepted for signature parity but ignored.  The unused local
    ``b2``..``b4`` declarations were removed.
    """
    b: TreeNode4 = None
    b = TreeNode4()
    b.value = x
    return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
    """Allocate a leaf TreeNode5 storing x.

    x2..x5 are accepted for signature parity but ignored.  The unused local
    ``b2``..``b5`` declarations were removed.
    """
    b: TreeNode5 = None
    b = TreeNode5()
    b.value = x
    return b
# Input parameters
# n: number of pseudo-random insertions; c: modulus controlling extra inserts.
# The *2..*5 copies are unused padding generated alongside the numbered classes.
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# k: linear-congruential pseudo-random key stream seeded at 37813.
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
# Build the tree from n pseudo-random keys plus most small ints, then
# report its size and which probe values it contains.
t = Tree()
while i < n:
    t.insert(k)
    k = (k * 37813) % 37831
    if i % c != 0:
        t.insert(i)
    i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
    if t.contains(i):
        print(i)
| [
"[email protected]"
] | |
374137ee28176ba0c2168a43b68a6388dd3a8e5a | 1dc4fbb38f333c665abd0f7e4f9919ad1b349c37 | /calories/taskapp/celery.py | 2931a91b8d7ded7aebc3e8e3d816fafd07496a4b | [
"BSD-3-Clause",
"MIT"
] | permissive | zulfiyagit/calories | 2dd98a4b160fa88ba592ec150e8e6f640a3fb2e9 | 0719d61891a93eff7c06a9356c26f42b99019444 | refs/heads/master | 2020-12-29T18:52:20.372303 | 2016-03-28T20:54:20 | 2016-03-28T20:54:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,358 | py |
from __future__ import absolute_import
import os
from celery import Celery
from django.apps import AppConfig
from django.conf import settings
# Ensure Django settings are configured before the Celery app reads them
# (covers running the worker standalone, outside manage.py).
if not settings.configured:
    # set the default Django settings module for the 'celery' program.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")  # pragma: no cover
# Module-level Celery application; task decorators below attach to it.
app = Celery('calories')
class CeleryConfig(AppConfig):
    """Django AppConfig that wires Celery up once the app registry is ready."""
    name = 'calories.taskapp'
    verbose_name = 'Celery Config'
    def ready(self):
        # Using a string here means the worker will not have to
        # pickle the object when using Windows.
        app.config_from_object('django.conf:settings')
        app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, force=True)
        # Optionally hook Sentry/Raven into Celery when a DSN is configured.
        if hasattr(settings, 'RAVEN_CONFIG'):
            # Celery signal registration
            from raven import Client as RavenClient
            from raven.contrib.celery import register_signal as raven_register_signal
            from raven.contrib.celery import register_logger_signal as raven_register_logger_signal
            raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['DSN'])
            raven_register_logger_signal(raven_client)
            raven_register_signal(raven_client)
@app.task(bind=True)
def debug_task(self):
    """Smoke-test Celery task: print the incoming request context."""
    request_repr = 'Request: {0!r}'.format(self.request)
    print(request_repr)  # pragma: no cover
| [
"[email protected]"
] | |
fb329c76b6c441fb363aee300ad275687f9b6472 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/MEDIUM/YW_ZXBMM_SZSJ_081.py | 5286dc8bfa248725178097270b3c4f214c05c938 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,010 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_ZXBMM_SZSJ_081(xtp_test_case):
    # Test case YW_ZXBMM_SZSJ_081: Shenzhen best-5-or-cancel buy order,
    # expected to fill completely (the title reads "fully filled, fee = min").
    def test_YW_ZXBMM_SZSJ_081(self):
        title = '五档即成转撤销买--已成(费用=min)'
        # Expected values for this test case.
        # Possible expected states: initial, unfilled, partially filled, fully
        # filled, partial-cancel reported, partially cancelled, cancel pending,
        # cancelled, rejected, cancel-rejected, internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '全成',
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order-placement parameters ------------------------------------------
        # Arguments: ticker, market, security type, security status, trading
        # status, side (B = buy, S = sell), expected state, Api.
        stkparm = QueryStkPriceQty('002051', '2', '1', '2', '0', 'B', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the test case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
                'price': stkparm['涨停价'],
                'quantity': 700,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True)  # 0
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
662681f8d9d3bea85b90b39c2599f1cef5f7dfc0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03438/s156659931.py | 39826d4f4203b8b35ce295fa8eb7b93a219c170d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | from sys import stdin
# Read n and the two length-n integer arrays a, b from stdin.
n = int(stdin.readline().rstrip())
a = list(map(int,stdin.readline().rstrip().split()))
b = list(map(int,stdin.readline().rstrip().split()))
A = sum(a);B = sum(b)
# count_1: presumably the minimum number of "+2 to a_i" operations needed
# where a_i < b_i; count_2: the total surplus where a_i >= b_i.
# NOTE(review): algorithm inferred from structure — verify against the
# original problem statement (AtCoder p03438).
count_1 = 0
count_2 = 0
# total: operation budget implied by the difference of the array sums.
total = B-A
for i,j in zip(a,b):
    if i < j:
        count_1 += (j-i+1)//2
    else:
        count_2 += i-j
# Feasible iff the larger of the two requirements fits in the budget.
count = max(count_1,count_2)
if count <= total:
    print("Yes")
else:
print("No") | [
"[email protected]"
] | |
db59646f14d746ecbb38317653de635b14662ea4 | c823e437ffd46aa3b1465819686ee50fd1932214 | /src/transformers/models/oneformer/modeling_oneformer.py | 9e72003a9f9d5d9981679751e7baf4b3dcd5d2a3 | [
"Apache-2.0"
] | permissive | nateraw/transformers | f03258d62c4773732514e443d98f1684d3467bfd | 7fd902d3351b81775112cd6b526bc32cf9ba856d | refs/heads/main | 2023-03-19T00:31:55.123718 | 2023-01-20T22:16:42 | 2023-01-20T22:16:42 | 564,090,117 | 5 | 0 | Apache-2.0 | 2022-11-10T01:00:04 | 2022-11-10T01:00:03 | null | UTF-8 | Python | false | false | 142,017 | py | # coding=utf-8
# Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch OneFormer model."""
import copy
import math
import warnings
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
from torch import Tensor, nn
from torch.cuda.amp import autocast
from transformers import AutoBackbone
from transformers.utils import logging
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_scipy_available,
replace_return_docstrings,
requires_backends,
)
from .configuration_oneformer import OneFormerConfig
logger = logging.get_logger(__name__)

# Names referenced by the documentation decorators used throughout this file.
_CONFIG_FOR_DOC = "OneFormerConfig"
_CHECKPOINT_FOR_DOC = "shi-labs/oneformer_ade20k_swin_tiny"
_IMAGE_PROCESSOR_FOR_DOC = "OneFormerImageProcessor"

ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "shi-labs/oneformer_ade20k_swin_tiny",
    # See all OneFormer models at https://huggingface.co/models?filter=oneformer
]

# scipy is an optional dependency; it is only required for Hungarian matching
# during training (see OneFormerHungarianMatcher / OneFormerLoss).
if is_scipy_available():
    from scipy.optimize import linear_sum_assignment
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def multiscale_deform_attn_core_pytorch(
    value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor
) -> Tensor:
    """Pure-PyTorch reference implementation of multi-scale deformable attention.

    For every query, samples `num_points` locations from each of the `num_levels`
    feature maps (bilinear interpolation via `grid_sample`) and combines the
    sampled values with the per-location attention weights.

    Args:
        value: Flattened multi-level features of shape
            `(batch_size, sum(H_l * W_l), num_heads, hidden_dim)`.
        value_spatial_shapes: Iterable of `(height, width)` pairs, one per level.
        sampling_locations: Normalized `[0, 1]` sampling coordinates of shape
            `(batch_size, num_queries, num_heads, num_levels, num_points, 2)`.
        attention_weights: Attention weights of shape
            `(batch_size, num_queries, num_heads, num_levels, num_points)`.

    Returns:
        Tensor of shape `(batch_size, num_queries, num_heads * hidden_dim)`.
    """
    batch_size, _, num_heads, hidden_dim = value.shape
    _, num_queries, _, num_levels, num_points, _ = sampling_locations.shape
    # Undo the level-wise flattening: one chunk of shape (batch, H_l*W_l, heads, dim) per level.
    per_level_values = value.split([height * width for height, width in value_spatial_shapes], dim=1)
    # grid_sample expects coordinates in [-1, 1] rather than [0, 1].
    sampling_grids = 2 * sampling_locations - 1
    sampled_per_level = []
    for level_index, (height, width) in enumerate(value_spatial_shapes):
        # (batch_size*num_heads, hidden_dim, height, width)
        level_value = (
            per_level_values[level_index]
            .flatten(2)
            .transpose(1, 2)
            .reshape(batch_size * num_heads, hidden_dim, height, width)
        )
        # (batch_size*num_heads, num_queries, num_points, 2)
        level_grid = sampling_grids[:, :, :, level_index].transpose(1, 2).flatten(0, 1)
        # (batch_size*num_heads, hidden_dim, num_queries, num_points)
        sampled_per_level.append(
            nn.functional.grid_sample(
                level_value, level_grid, mode="bilinear", padding_mode="zeros", align_corners=False
            )
        )
    # (batch_size*num_heads, 1, num_queries, num_levels*num_points)
    weights = attention_weights.transpose(1, 2).reshape(
        batch_size * num_heads, 1, num_queries, num_levels * num_points
    )
    # Weighted sum over all (level, point) samples, then fold heads back into the channel dim.
    weighted = torch.stack(sampled_per_level, dim=-2).flatten(-2) * weights
    output = weighted.sum(-1).view(batch_size, num_heads * hidden_dim, num_queries)
    return output.transpose(1, 2).contiguous()
# Copied from transformers.models.maskformer.modeling_maskformer.dice_loss
def dice_loss(inputs: Tensor, labels: Tensor, num_masks: int) -> Tensor:
    r"""
    Compute the DICE loss, similar to generalized IOU for masks as follows:

    $$ \mathcal{L}_{\text{dice}(x, y) = 1 - \frac{2 * x \cap y }{x \cup y + 1}} $$

    In practice, since `labels` is a binary mask, (only 0s and 1s), dice can be computed as follow

    $$ \mathcal{L}_{\text{dice}(x, y) = 1 - \frac{2 * x * y }{x + y + 1}} $$

    Args:
        inputs (`torch.Tensor`):
            A tensor representing a mask (raw logits; a sigmoid is applied internally).
        labels (`torch.Tensor`):
            A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs
            (0 for the negative class and 1 for the positive class).
        num_masks (`int`):
            The number of masks present in the current batch, used for normalization.

    Returns:
        `torch.Tensor`: The computed loss.
    """
    flat_probs = inputs.sigmoid().flatten(1)
    intersection = 2 * (flat_probs * labels).sum(-1)
    union = flat_probs.sum(-1) + labels.sum(-1)
    # +1 in numerator/denominator smooths the ratio and avoids division by zero
    per_mask_loss = 1 - (intersection + 1) / (union + 1)
    return per_mask_loss.sum() / num_masks
# Copied from transformers.models.mask2former.modeling_mask2former.sigmoid_cross_entropy_loss
def sigmoid_cross_entropy_loss(inputs: torch.Tensor, labels: torch.Tensor, num_masks: int) -> torch.Tensor:
    r"""
    Binary cross-entropy on mask logits, averaged per mask and normalized by `num_masks`.

    Args:
        inputs (`torch.Tensor`):
            A float tensor of arbitrary shape. The predictions for each example (raw logits).
        labels (`torch.Tensor`):
            A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs
            (0 for the negative class and 1 for the positive class).

    Returns:
        loss (`torch.Tensor`): The computed loss.
    """
    # Elementwise sigmoid BCE; mean over the spatial dimension, sum over masks.
    per_element_loss = nn.functional.binary_cross_entropy_with_logits(inputs, labels, reduction="none")
    return per_element_loss.mean(1).sum() / num_masks
# Copied from transformers.models.maskformer.modeling_maskformer.pair_wise_dice_loss
def pair_wise_dice_loss(inputs: Tensor, labels: Tensor) -> Tensor:
    """
    A pair wise version of the dice loss, see `dice_loss` for usage.

    Args:
        inputs (`torch.Tensor`):
            A tensor representing a mask (raw logits).
        labels (`torch.Tensor`):
            A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs
            (0 for the negative class and 1 for the positive class).

    Returns:
        `torch.Tensor`: A `(num_inputs, num_labels)` matrix with the loss for every (input, label) pair.
    """
    probs = inputs.sigmoid().flatten(1)
    # (n, c) @ (c, m) -> (n, m): pairwise intersections between every input and every label
    intersections = 2 * torch.matmul(probs, labels.transpose(0, 1))
    # broadcasting: row sums of probs against row sums of labels -> (n, m)
    totals = probs.sum(-1).unsqueeze(1) + labels.sum(-1).unsqueeze(0)
    return 1 - (intersections + 1) / (totals + 1)
# Copied from transformers.models.mask2former.modeling_mask2former.pair_wise_sigmoid_cross_entropy_loss
def pair_wise_sigmoid_cross_entropy_loss(inputs: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    r"""
    A pair wise version of the cross entropy loss, see `sigmoid_cross_entropy_loss` for usage.

    Args:
        inputs (`torch.Tensor`):
            A tensor representing a mask (raw logits).
        labels (`torch.Tensor`):
            A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs
            (0 for the negative class and 1 for the positive class).

    Returns:
        loss (`torch.Tensor`): A `(num_inputs, num_labels)` matrix with the loss for every (input, label) pair.
    """
    height_and_width = inputs.shape[1]
    # Per-element BCE against all-ones and all-zeros targets.
    positive_term = nn.functional.binary_cross_entropy_with_logits(
        inputs, torch.ones_like(inputs), reduction="none"
    )
    negative_term = nn.functional.binary_cross_entropy_with_logits(
        inputs, torch.zeros_like(inputs), reduction="none"
    )
    # Pairwise combination: positive loss where the label is 1, negative loss where it is 0.
    pairwise = torch.matmul(positive_term, labels.transpose(0, 1)) + torch.matmul(
        negative_term, (1 - labels).transpose(0, 1)
    )
    return pairwise / height_and_width
# Copied from transformers.models.mask2former.modeling_mask2former.sample_point
def sample_point(
    input_features: torch.Tensor, point_coordinates: torch.Tensor, add_dim=False, **kwargs
) -> torch.Tensor:
    """
    A wrapper around `torch.nn.functional.grid_sample` to support 3D point_coordinates tensors.

    Args:
        input_features (`torch.Tensor` of shape (batch_size, channels, height, width)):
            A tensor that contains features map on a height * width grid
        point_coordinates (`torch.Tensor` of shape (batch_size, num_points, 2) or (batch_size, grid_height, grid_width,:
            2)):
            A tensor that contains [0, 1] * [0, 1] normalized point coordinates
        add_dim (`bool`):
            boolean value to keep track of added dimension

    Returns:
        point_features (`torch.Tensor` of shape (batch_size, channels, num_points) or (batch_size, channels,
        height_grid, width_grid):
            A tensor that contains features for points in `point_coordinates`.
    """
    # 3D coordinates get a dummy grid-width axis so grid_sample accepts them;
    # remember to drop it again on the way out.
    squeeze_after = add_dim
    if point_coordinates.dim() == 3:
        squeeze_after = True
        point_coordinates = point_coordinates.unsqueeze(2)
    # Rescale [0, 1] coordinates into grid_sample's [-1, 1] range and sample bilinearly.
    sampled = torch.nn.functional.grid_sample(input_features, 2.0 * point_coordinates - 1.0, **kwargs)
    if squeeze_after:
        sampled = sampled.squeeze(3)
    return sampled
# Refactored from https://github.com/SHI-Labs/OneFormer/blob/33ebb56ed34f970a30ae103e786c0cb64c653d9a/oneformer/modeling/matcher.py#L93
class OneFormerHungarianMatcher(nn.Module):
    def __init__(
        self, cost_class: float = 1.0, cost_mask: float = 1.0, cost_dice: float = 1.0, num_points: int = 12544
    ):
        """This class computes an assignment between the labels and the predictions of the network.

        For efficiency reasons, the labels don't include the no_object. Because of this, in general, there are more
        predictions than labels. In this case, we do a 1-to-1 matching of the best predictions, while the others are
        un-matched (and thus treated as non-objects).

        Params:
            cost_class (float, *optional*, defaults to 1.0):
                This is the relative weight of the classification error in the matching cost.
            cost_mask (float, *optional*, defaults to 1.0):
                This is the relative weight of the sigmoid ce loss of the binary mask in the matching cost.
            cost_dice (float, *optional*, defaults to 1.0):
                This is the relative weight of the dice loss of the binary mask in the matching cost
            num_points (int, *optional*, defaults to 12544):
                Number of points to be sampled for dice and mask loss matching cost.
        """
        super().__init__()
        # A matcher with all-zero weights would make every assignment equivalent.
        if cost_class == 0 and cost_mask == 0 and cost_dice == 0:
            raise ValueError("All costs cant be 0")
        self.cost_class = cost_class
        self.cost_mask = cost_mask
        self.cost_dice = cost_dice
        self.num_points = num_points

    @torch.no_grad()
    def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> List[Tuple[Tensor]]:
        """Performs the matching

        Params:
            masks_queries_logits (`torch.Tensor`):
                A tensor of dim `batch_size, num_queries, height, width` with the predicted masks.
            class_queries_logits (`torch.Tensor`):
                A tensor of dim `batch_size, num_queries, num_labels` with the classification logits.
            mask_labels (`torch.Tensor`):
                A tensor of dim `num_target_boxes, height, width` containing the target masks.
            class_labels (`torch.Tensor`):
                A tensor of dim `num_target_boxes` (where num_target_boxes is the number of ground-truth objects in
                the target) containing the class labels.
        Returns:
            `List[Tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where:
                - index_i is the indices of the selected predictions (in order)
                - index_j is the indices of the corresponding selected labels (in order)
            For each batch element, it holds:
                len(index_i) = len(index_j) = min(num_queries, num_targets).
        """
        indices: List[Tuple[np.array]] = []
        num_queries = class_queries_logits.shape[1]
        preds_masks = masks_queries_logits
        preds_probs = class_queries_logits
        # iterate through batch size
        for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels):
            pred_probs = pred_probs.softmax(-1)
            # Compute the classification cost. Contrary to the loss, we don't use the NLL,
            # but approximate it in 1 - proba[target class].
            # The 1 is a constant that doesn't change the matching, it can be omitted.
            cost_class = -pred_probs[:, labels]
            pred_mask = pred_mask[:, None]
            target_mask = target_mask[:, None].to(pred_mask.device)
            # all masks share the same set of points for efficient matching!
            point_coords = torch.rand(1, self.num_points, 2, device=pred_mask.device)
            # get ground truth labels sampled at the shared random points
            target_mask = sample_point(
                target_mask,
                point_coords.repeat(target_mask.shape[0], 1, 1),
                align_corners=False,
            ).squeeze(1)
            pred_mask = sample_point(
                pred_mask,
                point_coords.repeat(pred_mask.shape[0], 1, 1),
                align_corners=False,
            ).squeeze(1)
            # Force fp32 for numerically stable BCE/dice cost computation under AMP.
            with autocast(enabled=False):
                pred_mask = pred_mask.float()
                target_mask = target_mask.float()
                # compute the sigmoid ce loss
                cost_mask = pair_wise_sigmoid_cross_entropy_loss(pred_mask, target_mask)
                # Compute the dice loss
                cost_dice = pair_wise_dice_loss(pred_mask, target_mask)
            # final cost matrix
            cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice
            cost_matrix = cost_matrix.reshape(num_queries, -1).cpu()
            # solve the assignment problem with the Hungarian algorithm from scipy
            assigned_indices: Tuple[np.array] = linear_sum_assignment(cost_matrix.cpu())
            indices.append(assigned_indices)

        # It could be stacked in one tensor
        matched_indices = [
            (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices
        ]
        return matched_indices
class OneFormerLoss(nn.Module):
    def __init__(
        self,
        num_classes: int,
        matcher: OneFormerHungarianMatcher,
        weight_dict: Dict[str, float],
        eos_coef: float,
        num_points: int,
        oversample_ratio: float,
        importance_sample_ratio: float,
        contrastive_temperature: float = None,
    ):
        """
        This class computes the losses using the class predictions, mask predictions and the contrastive queries.

        Oneformer calculates the classification CE loss on the class predictions. Mask predictions are used for
        calculating the binary CE loss and dice loss. The contrastive queries are used for calculating the contrastive
        loss.

        Args:
            num_classes (`int`):
                The number of classes.
            matcher (`OneFormerHungarianMatcher`):
                A torch module that computes the assignments between the predictions and labels.
            weight_dict (`Dict[str, float]`):
                A dictionary of weights to be applied to the different losses.
            eos_coef (`float`):
                Weight to apply to the null class.
            num_points (`int`):
                Number of points to be sampled for dice and mask loss calculations.
            oversample_ratio (`float`):
                Required for pointwise loss calculation.
            importance_sample_ratio (`float`):
                Required for pointwise loss calculation.
            contrastive_temperature (`float`):
                Temperature for scaling the contrastive logits.
        """
        requires_backends(self, ["scipy"])
        super().__init__()
        self.num_classes = num_classes
        self.matcher = matcher
        self.weight_dict = weight_dict
        self.eos_coef = eos_coef
        # Per-class CE weights: down-weight the "no object" class (last index) by eos_coef.
        empty_weight = torch.ones(self.num_classes + 1)
        empty_weight[-1] = self.eos_coef
        self.register_buffer("empty_weight", empty_weight)

        # pointwise mask loss parameters
        self.num_points = num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.contrastive_temperature = contrastive_temperature
        if self.contrastive_temperature is not None:
            # Learnable log-scale for the contrastive logits, initialized at log(1/T).
            self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / contrastive_temperature))

    def _max_by_axis(self, the_list: List[List[int]]) -> List[int]:
        # Elementwise maximum across a list of shape lists (used to find the padded batch shape).
        maxes = the_list[0]
        for sublist in the_list[1:]:
            for index, item in enumerate(sublist):
                maxes[index] = max(maxes[index], item)
        return maxes

    def _pad_images_to_max_in_batch(self, tensors: List[Tensor]) -> Tuple[Tensor, Tensor]:
        # get the maximum size in the batch
        max_size = self._max_by_axis([list(tensor.shape) for tensor in tensors])
        batch_size = len(tensors)
        # compute the final padded batch shape
        batch_shape = [batch_size] + max_size
        b, _, h, w = batch_shape
        # get metadata
        dtype = tensors[0].dtype
        device = tensors[0].device
        padded_tensors = torch.zeros(batch_shape, dtype=dtype, device=device)
        padding_masks = torch.ones((b, h, w), dtype=torch.bool, device=device)
        # pad the tensors to the size of the biggest one; padding mask is False on real pixels
        for tensor, padded_tensor, padding_mask in zip(tensors, padded_tensors, padding_masks):
            padded_tensor[: tensor.shape[0], : tensor.shape[1], : tensor.shape[2]].copy_(tensor)
            padding_mask[: tensor.shape[1], : tensor.shape[2]] = False
        return padded_tensors, padding_masks

    def loss_contrastive(self, contrastive_queries_logits: Tensor, text_queries: Tensor):
        """Compute the query-text contrastive loss.

        Args:
            contrastive_queries_logits (`torch.Tensor`):
                A tensor of shape `batch_size, num_queries, hidden_dim`
            text_queries (`torch.Tensor`):
                A tensor of shape `batch_size, num_queries, hidden_dim`
        Returns:
            `Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
            - **loss_contrastive** -- The query-text contrastive loss computed using task-guided queries
            and text queries derived from input text list.
        """
        image_queries = contrastive_queries_logits.float()

        # [batch_size, hidden_dim]
        image_queries = nn.functional.normalize(image_queries.flatten(1), dim=-1)
        text_queries = nn.functional.normalize(text_queries.flatten(1), dim=-1)

        # Clamp the learned temperature to keep training stable.
        logit_scale = torch.clamp(self.logit_scale.exp(), max=100)

        logits_per_text = torch.matmul(text_queries, image_queries.t()) * logit_scale
        logits_per_img = logits_per_text.t()

        # Symmetric CLIP-style loss: matching image/text pairs lie on the diagonal.
        loss_img = nn.functional.cross_entropy(
            logits_per_img, torch.arange(len(logits_per_img), device=logits_per_text.device)
        )
        loss_text = nn.functional.cross_entropy(
            logits_per_text, torch.arange(len(logits_per_text), device=logits_per_text.device)
        )

        loss_contrastive = loss_img + loss_text

        losses = {"loss_contrastive": loss_contrastive}
        return losses

    def loss_labels(
        self, class_queries_logits: Tensor, class_labels: List[Tensor], indices: Tuple[np.array]
    ) -> Dict[str, Tensor]:
        """Compute the losses related to the labels using cross entropy.

        Args:
            class_queries_logits (`torch.Tensor`):
                A tensor of shape `batch_size, num_queries, num_labels`
            class_labels (`List[torch.Tensor]`):
                List of class labels of shape `(labels)`.
            indices (`Tuple[np.array])`:
                The indices computed by the Hungarian matcher.
        Returns:
            `Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
            - **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
        """
        pred_logits = class_queries_logits
        batch_size, num_queries, _ = pred_logits.shape
        criterion = nn.CrossEntropyLoss(weight=self.empty_weight)
        idx = self._get_predictions_permutation_indices(indices)

        # shape = (batch_size, num_queries)
        target_classes_o = torch.cat([target[j] for target, (_, j) in zip(class_labels, indices)])
        # shape = (batch_size, num_queries); unmatched queries default to the "no object" class
        target_classes = torch.full(
            (batch_size, num_queries), fill_value=self.num_classes, dtype=torch.int64, device=pred_logits.device
        )
        target_classes[idx] = target_classes_o
        # permute pred_logits (batch_size, num_queries, num_labels) -> (batch_size, num_labels, num_queries)
        pred_logits_transposed = pred_logits.transpose(1, 2)
        loss_ce = criterion(pred_logits_transposed, target_classes)
        losses = {"loss_cross_entropy": loss_ce}
        return losses

    def loss_masks(
        self, masks_queries_logits: Tensor, mask_labels: List[Tensor], indices: Tuple[np.array], num_masks: int
    ) -> Dict[str, Tensor]:
        """Compute the losses related to the masks using focal and dice loss.

        Args:
            masks_queries_logits (`torch.Tensor`):
                A tensor of shape `batch_size, num_queries, height, width`
            mask_labels (`torch.Tensor`):
                List of mask labels of shape `(labels, height, width)`.
            indices (`Tuple[np.array])`:
                The indices computed by the Hungarian matcher.
            num_masks (`int)`:
                The number of masks, used for normalization.
        Returns:
            `Dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys:
            - **loss_mask** -- The loss computed using sigmoid ce loss on the predicted and ground truth masks.
            - **loss_dice** -- The loss computed using dice loss on the predicted and ground truth masks.
        """
        src_idx = self._get_predictions_permutation_indices(indices)
        tgt_idx = self._get_targets_permutation_indices(indices)
        # shape (batch_size * num_queries, height, width)
        pred_masks = masks_queries_logits[src_idx]
        # shape (batch_size, num_queries, height, width)
        # pad all and stack the targets to the num_labels dimension
        # upsample predictions to the target size, we have to add one dim to use interpolate
        target_masks, _ = self._pad_images_to_max_in_batch(mask_labels)
        target_masks = target_masks[tgt_idx]

        pred_masks = pred_masks[:, None]
        target_masks = target_masks[:, None]

        # Point sampling happens without gradients; only the sampled logits are differentiated.
        with torch.no_grad():
            # sample point_coords
            point_coords = self.sample_points_using_uncertainty(
                pred_masks,
                self.calculate_uncertainty,
                self.num_points,
                self.oversample_ratio,
                self.importance_sample_ratio,
            )
            # get ground-truth labels
            point_labels = sample_point(target_masks, point_coords, align_corners=False).squeeze(1)

        point_logits = sample_point(pred_masks, point_coords, align_corners=False).squeeze(1)

        losses = {
            "loss_mask": sigmoid_cross_entropy_loss(point_logits, point_labels, num_masks),
            "loss_dice": dice_loss(point_logits, point_labels, num_masks),
        }

        del pred_masks
        del target_masks
        return losses

    # Copied from transformers.models.mask2former.modeling_mask2former.Mask2FormerLoss.calculate_uncertainty
    def calculate_uncertainty(self, logits: torch.Tensor) -> torch.Tensor:
        """
        In Mask2Former paper, uncertainty is estimated as L1 distance between 0.0 and the logit prediction in 'logits'
        for the foreground class in `classes`.

        Args:
            logits (`torch.Tensor`):
            A tensor of shape (R, 1, ...) for class-specific or class-agnostic, where R is the total number of predicted masks in all images and C is:
            the number of foreground classes. The values are logits.

        Returns:
            scores (`torch.Tensor`): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most
            uncertain locations having the highest uncertainty score.
        """
        # Logits near zero (probability ~0.5) are the most uncertain.
        uncertainty_scores = -(torch.abs(logits))
        return uncertainty_scores

    # Copied from transformers.models.mask2former.modeling_mask2former.Mask2FormerLoss.sample_points_using_uncertainty
    def sample_points_using_uncertainty(
        self,
        logits: torch.Tensor,
        uncertainty_function,
        num_points: int,
        oversample_ratio: int,
        importance_sample_ratio: float,
    ) -> torch.Tensor:
        """
        This function is meant for sampling points in [0, 1] * [0, 1] coordinate space based on their uncertainty. The
        uncertainty is calculated for each point using the passed `uncertainty function` that takes points logit
        prediction as input.

        Args:
            logits (`float`):
                Logit predictions for P points.
            uncertainty_function:
                A function that takes logit predictions for P points and returns their uncertainties.
            num_points (`int`):
                The number of points P to sample.
            oversample_ratio (`int`):
                Oversampling parameter.
            importance_sample_ratio (`float`):
                Ratio of points that are sampled via importance sampling.

        Returns:
            point_coordinates (`torch.Tensor`):
                Coordinates for P sampled points.
        """

        num_boxes = logits.shape[0]
        num_points_sampled = int(num_points * oversample_ratio)

        # Get random point coordinates
        point_coordinates = torch.rand(num_boxes, num_points_sampled, 2, device=logits.device)
        # Get sampled prediction value for the point coordinates
        point_logits = sample_point(logits, point_coordinates, align_corners=False)
        # Calculate the uncertainties based on the sampled prediction values of the points
        point_uncertainties = uncertainty_function(point_logits)

        num_uncertain_points = int(importance_sample_ratio * num_points)
        num_random_points = num_points - num_uncertain_points

        # Keep the top-k most uncertain of the oversampled points.
        idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
        # Offset per-box indices so they address the flattened (num_boxes * num_points_sampled) view.
        shift = num_points_sampled * torch.arange(num_boxes, dtype=torch.long, device=logits.device)
        idx += shift[:, None]
        point_coordinates = point_coordinates.view(-1, 2)[idx.view(-1), :].view(num_boxes, num_uncertain_points, 2)

        if num_random_points > 0:
            point_coordinates = torch.cat(
                [point_coordinates, torch.rand(num_boxes, num_random_points, 2, device=logits.device)],
                dim=1,
            )
        return point_coordinates

    def _get_predictions_permutation_indices(self, indices):
        # permute predictions following indices
        batch_indices = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
        predictions_indices = torch.cat([src for (src, _) in indices])
        return batch_indices, predictions_indices

    def _get_targets_permutation_indices(self, indices):
        # permute labels following indices
        batch_indices = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
        target_indices = torch.cat([tgt for (_, tgt) in indices])
        return batch_indices, target_indices

    def forward(
        self,
        masks_queries_logits: Tensor,
        class_queries_logits: Tensor,
        contrastive_queries_logits: Tensor,
        mask_labels: List[Tensor],
        class_labels: List[Tensor],
        text_queries: Tensor,
        auxiliary_predictions: Optional[Dict[str, Tensor]] = None,
        calculate_contrastive_loss: bool = True,
    ) -> Dict[str, Tensor]:
        """
        This performs the loss computation.

        Args:
            masks_queries_logits (`torch.Tensor`):
                A tensor of shape `batch_size, num_queries, height, width`
            class_queries_logits (`torch.Tensor`):
                A tensor of shape `batch_size, num_queries, num_labels`
            contrastive_queries_logits (`torch.Tensor`):
                A tensor of shape `batch_size, num_queries, hidden_dim`
            mask_labels (`torch.Tensor`):
                List of mask labels of shape `(labels, height, width)`.
            class_labels (`List[torch.Tensor]`):
                List of class labels of shape `(labels)`.
            text_queries (`torch.Tensor`):
                A tensor of shape `batch_size, num_queries, hidden_dim`
            auxiliary_predictions (`Dict[str, torch.Tensor]`, *optional*):
                if `use_auxiliary_loss` was set to `true` in [`OneFormerConfig`], then it contains the logits from the
                inner layers of the Detr's Decoder.
            calculate_contrastive_loss (`bool`, *optional*, defaults to `True`):
                Whether or not to calculate the contrastive loss.

        Returns:
            `Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following keys:
            - **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
            - **loss_mask** -- The loss computed using sigmoid ce loss on the predicted and ground truth masks.
            - **loss_dice** -- The loss computed using dice loss on the predicted and ground truth masks.
            - **loss_contrastive** -- The query-text contrastive loss computed using object and text queries.
            if `use_auxiliary_loss` was set to `true` in [`OneFormerConfig`], the dictionary contains additional
            losses for each auxiliary predictions.
        """
        # retrieve the matching between the outputs of the last layer and the labels
        indices = self.matcher(masks_queries_logits, class_queries_logits, mask_labels, class_labels)
        # compute the average number of target masks for normalization purposes
        num_masks = self.get_num_masks(class_labels, device=class_labels[0].device)
        # get all the losses
        losses: Dict[str, Tensor] = {
            **self.loss_masks(masks_queries_logits, mask_labels, indices, num_masks),
            **self.loss_labels(class_queries_logits, class_labels, indices),
        }
        if calculate_contrastive_loss:
            losses = {**losses, **self.loss_contrastive(contrastive_queries_logits, text_queries)}

        # in case of auxiliary losses, we repeat this process with the output of each intermediate layer.
        if auxiliary_predictions is not None:
            for idx, aux_outputs in enumerate(auxiliary_predictions):
                masks_queries_logits = aux_outputs["masks_queries_logits"]
                class_queries_logits = aux_outputs["class_queries_logits"]
                # Contrastive loss is only computed on the final decoder layer, hence the Nones.
                loss_dict = self.forward(
                    masks_queries_logits,
                    class_queries_logits,
                    None,
                    mask_labels,
                    class_labels,
                    None,
                    calculate_contrastive_loss=False,
                )
                loss_dict = {f"{key}_{idx}": value for key, value in loss_dict.items()}
                losses.update(loss_dict)

        return losses

    def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> torch.Tensor:
        """
        Computes the average number of target masks across the batch, for normalization purposes.
        """
        num_masks = sum([len(classes) for classes in class_labels])
        num_masks_pt = torch.as_tensor([num_masks], dtype=torch.float, device=device)
        return num_masks_pt
@dataclass
class OneFormerTransformerDecoderOutput(BaseModelOutput):
    """
    Base class for outputs of the Transformer decoder. This class adds attributes for class predictions, mask
    predictions and contrastive logits to `BaseModelOutput`.

    Args:
        object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):
            Queries representation for the region proposals.
        contrastive_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):
            Queries representation for the contrastive loss.
        prediction_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`):
            Mask predictions from last layer of the transformer decoder.
        prediction_class (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`):
            Class predictions from last layer of the transformer decoder.
        auxiliary_predictions (Tuple of Dict of `str, torch.FloatTensor`, *optional*):
            Tuple of class and mask predictions from each layer of the transformer decoder.
    """

    object_queries: torch.FloatTensor = None
    contrastive_logits: Optional[torch.FloatTensor] = None
    prediction_masks: torch.FloatTensor = None
    prediction_class: torch.FloatTensor = None
    auxiliary_predictions: Optional[Tuple[Dict[str, torch.FloatTensor]]] = None
@dataclass
# Copied from transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelDecoderOutput with Mask2->One
class OneFormerPixelDecoderOutput(ModelOutput):
    """
    OneFormer's pixel decoder module output, practically a Multi-Scale Deformable Attention based decoder. It returns
    the mask features and the multiscale features.

    Args:
        multi_scale_features (`tuple(torch.FloatTensor)`):
            Tuple of multi-scale features of scales [1/8, 1/16, 1/32] and shape `(batch_size, num_channels, height,
            width)` from the Multi-Scale Deformable Attention based Pixel Decoder.
        mask_features (`torch.FloatTensor`):
            Tensor of shape `(batch_size, num_channels, height, width)`, 1/4 scale features from the last Pixel Decoder
            Layer.
        attentions (`tuple(torch.FloatTensor)`, *optional*):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights from pixel decoder. Returned when `output_attentions=True` is passed
            or when `config.output_attentions=True`
    """

    multi_scale_features: Tuple[torch.FloatTensor] = None
    mask_features: torch.FloatTensor = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class OneFormerPixelLevelModuleOutput(ModelOutput):
    """
    OneFormer's pixel level module output. It returns both the last and (optionally) the hidden states from the
    `encoder` and `decoder`. By default, the `encoder` is a Swin/Dinat Backbone and the `decoder` is a Multi-Scale
    Deformable Attention based decoder.

    Args:
        encoder_features (List of `(torch.FloatTensor)`):
            List of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
            called feature maps) of the model at the output of each stage.
        decoder_features (List of `(torch.FloatTensor)`):
            List of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
            called feature maps) of the model at the output of each stage.
        decoder_last_feature (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            1/4 scale features from the last Pixel Decoder Layer.
    """

    encoder_features: List[torch.FloatTensor] = None
    decoder_features: List[torch.FloatTensor] = None
    decoder_last_feature: torch.FloatTensor = None
@dataclass
class OneFormerModelOutput(ModelOutput):
    """
    Class for outputs of [`OneFormerModel`]. This class returns all the needed hidden states to compute the logits.
    Args:
        encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
            model at the output of each stage.
        pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
            decoder model at the output of each stage.
        transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
            transformer decoder at the output of each stage.
        transformer_decoder_object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`)
            Output object queries from the last layer in the transformer decoder.
        transformer_decoder_contrastive_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`)
            Contrastive queries from the transformer decoder.
        transformer_decoder_mask_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`)
            Mask Predictions from the last layer in the transformer decoder.
        transformer_decoder_class_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`):
            Class Predictions from the last layer in the transformer decoder.
        transformer_decoder_auxiliary_predictions (Tuple of Dict of `str, torch.FloatTensor`, *optional*):
            Tuple of class and mask predictions from each layer of the transformer decoder.
        text_queries (`torch.FloatTensor`, *optional* of shape `(batch_size, num_queries, hidden_dim)`)
            Text queries derived from the input text list used for calculating contrastive loss during training.
        task_token (`torch.FloatTensor` of shape `(batch_size, hidden_dim)`)
            1D task token to condition the queries.
        attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Self and Cross Attentions weights from transformer decoder.
    """

    # Backbone feature maps, one per stage; only populated when hidden states are requested.
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Pixel decoder feature maps, one per stage; only populated when hidden states are requested.
    pixel_decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Transformer decoder hidden states (docstring says a tuple; annotation is a single tensor — see docstring).
    transformer_decoder_hidden_states: Optional[torch.FloatTensor] = None
    # Object queries from the last transformer decoder layer.
    transformer_decoder_object_queries: torch.FloatTensor = None
    # Contrastive queries from the transformer decoder.
    transformer_decoder_contrastive_queries: Optional[torch.FloatTensor] = None
    # Mask predictions from the last transformer decoder layer.
    transformer_decoder_mask_predictions: torch.FloatTensor = None
    # Class predictions from the last transformer decoder layer.
    transformer_decoder_class_predictions: torch.FloatTensor = None
    # Per-decoder-layer class/mask predictions.
    transformer_decoder_auxiliary_predictions: Optional[Tuple[Dict[str, torch.FloatTensor]]] = None
    # Queries derived from the input text list; used for the contrastive loss during training.
    text_queries: Optional[torch.FloatTensor] = None
    # 1D task token conditioning the queries.
    task_token: torch.FloatTensor = None
    # Self/cross attention weights from the transformer decoder.
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class OneFormerForUniversalSegmentationOutput(ModelOutput):
    """
    Class for outputs of [`OneFormerForUniversalSegmentationOutput`].
    This output can be directly passed to [`~OneFormerImageProcessor.post_process_semantic_segmentation`] or
    [`~OneFormerImageProcessor.post_process_instance_segmentation`] or
    [`~OneFormerImageProcessor.post_process_panoptic_segmentation`] depending on the task. Please, see
    [`~OneFormerImageProcessor`] for details regarding usage.
    Args:
        loss (`torch.Tensor`, *optional*):
            The computed loss, returned when labels are present.
        class_queries_logits (`torch.FloatTensor`):
            A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each
            query. Note the `+ 1` is needed because we incorporate the null class.
        masks_queries_logits (`torch.FloatTensor`):
            A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each
            query.
        auxiliary_predictions (List of Dict of `str, torch.FloatTensor`, *optional*):
            List of class and mask predictions from each layer of the transformer decoder.
        encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
            model at the output of each stage.
        pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
            decoder model at the output of each stage.
        transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
            transformer decoder at the output of each stage.
        transformer_decoder_object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`)
            Output object queries from the last layer in the transformer decoder.
        transformer_decoder_contrastive_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`)
            Contrastive queries from the transformer decoder.
        transformer_decoder_mask_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`)
            Mask Predictions from the last layer in the transformer decoder.
        transformer_decoder_class_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`):
            Class Predictions from the last layer in the transformer decoder.
        transformer_decoder_auxiliary_predictions (List of Dict of `str, torch.FloatTensor`, *optional*):
            List of class and mask predictions from each layer of the transformer decoder.
        text_queries (`torch.FloatTensor`, *optional* of shape `(batch_size, num_queries, hidden_dim)`)
            Text queries derived from the input text list used for calculating contrastive loss during training.
        task_token (`torch.FloatTensor` of shape `(batch_size, hidden_dim)`)
            1D task token to condition the queries.
        attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Self and Cross Attentions weights from transformer decoder.
    """

    # Computed loss; only present when labels were provided.
    loss: Optional[torch.FloatTensor] = None
    # (batch_size, num_queries, num_labels + 1) class logits; the extra class is the null class.
    class_queries_logits: torch.FloatTensor = None
    # (batch_size, num_queries, height, width) mask logits.
    masks_queries_logits: torch.FloatTensor = None
    # Per-decoder-layer class/mask predictions.
    auxiliary_predictions: List[Dict[str, torch.FloatTensor]] = None
    # Backbone feature maps, one per stage; only populated when hidden states are requested.
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Pixel decoder feature maps, one per stage; only populated when hidden states are requested.
    pixel_decoder_hidden_states: Optional[List[torch.FloatTensor]] = None
    # Transformer decoder hidden states; only populated when hidden states are requested.
    transformer_decoder_hidden_states: Optional[torch.FloatTensor] = None
    # Object queries from the last transformer decoder layer.
    transformer_decoder_object_queries: torch.FloatTensor = None
    # Contrastive queries from the transformer decoder.
    transformer_decoder_contrastive_queries: Optional[torch.FloatTensor] = None
    # Mask predictions from the last transformer decoder layer.
    transformer_decoder_mask_predictions: torch.FloatTensor = None
    # Class predictions from the last transformer decoder layer.
    transformer_decoder_class_predictions: torch.FloatTensor = None
    # Per-decoder-layer auxiliary class/mask predictions.
    transformer_decoder_auxiliary_predictions: Optional[List[Dict[str, torch.FloatTensor]]] = None
    # Queries derived from the input text list; used for the contrastive loss during training.
    text_queries: Optional[torch.FloatTensor] = None
    # 1D task token conditioning the queries.
    task_token: torch.FloatTensor = None
    # Self/cross attention weights from the transformer decoder.
    attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
# Modified from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrFrozenBatchNorm2d with DeformableDetr->OneFormerPixelDecoder
class OneFormerPixelDecoderFrozenBatchNorm2d(nn.Module):
    """
    A BatchNorm2d variant whose batch statistics and affine parameters are frozen.

    Copied from torchvision.misc.ops, with an added eps before the rsqrt so that models other than
    torchvision.models.resnet[18,34,50,101] do not produce NaNs.
    """

    def __init__(self, n):
        super().__init__()
        # All statistics live in (non-trainable) buffers: they travel with the
        # state dict but never receive gradient updates.
        for buffer_name, initial in (
            ("weight", torch.ones(n)),
            ("bias", torch.zeros(n)),
            ("running_mean", torch.zeros(n)),
            ("running_var", torch.ones(n)),
        ):
            self.register_buffer(buffer_name, initial)

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        # Vanilla BatchNorm2d checkpoints carry a bookkeeping buffer that this
        # frozen variant does not have; drop it so strict loading succeeds.
        tracked_key = prefix + "num_batches_tracked"
        if tracked_key in state_dict:
            del state_dict[tracked_key]
        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, x):
        """Apply the frozen batch-norm transform `(x - mean) / sqrt(var + eps) * weight + bias` to NCHW input."""
        eps = 1e-5
        # Reshape every buffer to (1, C, 1, 1) so it broadcasts over NCHW input.
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        mean = self.running_mean.reshape(1, -1, 1, 1)
        var = self.running_var.reshape(1, -1, 1, 1)
        scale = w * (var + eps).rsqrt()
        shift = b - mean * scale
        return x * scale + shift
# Modified from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrMultiscaleDeformableAttention with DeformableDetr->OneFormerPixelDecoderEncoder
class OneFormerPixelDecoderEncoderMultiscaleDeformableAttention(nn.Module):
    """
    Multiscale deformable attention as proposed in Deformable DETR.

    Each query attends to a small fixed number (`n_points`) of predicted sampling locations on each of the
    `n_levels` feature maps, instead of attending densely over every spatial position.
    """

    def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int):
        super().__init__()
        if embed_dim % num_heads != 0:
            raise ValueError(
                f"embed_dim (d_model) must be divisible by num_heads, but got {embed_dim} and {num_heads}"
            )
        dim_per_head = embed_dim // num_heads
        # check if dim_per_head is power of 2
        if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0):
            warnings.warn(
                "You'd better set embed_dim (d_model) in DeformableDetrMultiscaleDeformableAttention to make the"
                " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA"
                " implementation."
            )
        # Step size for the authors' CUDA kernel; not used by the pure-PyTorch path in `forward` below.
        self.im2col_step = 128
        self.d_model = embed_dim
        self.n_levels = n_levels
        self.n_heads = num_heads
        self.n_points = n_points
        # Predicts, per query, an (x, y) offset for every (head, level, point) triple.
        self.sampling_offsets = nn.Linear(embed_dim, num_heads * n_levels * n_points * 2)
        # Predicts, per query, one attention weight for every (head, level, point) triple.
        self.attention_weights = nn.Linear(embed_dim, num_heads * n_levels * n_points)
        self.value_proj = nn.Linear(embed_dim, embed_dim)
        self.output_proj = nn.Linear(embed_dim, embed_dim)

    def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
        # Position embeddings are additive; identity when none are given.
        return tensor if position_embeddings is None else tensor + position_embeddings

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        position_embeddings: Optional[torch.Tensor] = None,
        reference_points=None,
        spatial_shapes=None,
        level_start_index=None,
        output_attentions: bool = False,
    ):
        """
        Args:
            hidden_states (`torch.Tensor` of shape `(batch_size, num_queries, embed_dim)`):
                Query features.
            attention_mask (`torch.Tensor`, *optional*):
                Mask over the flattened value sequence; masked positions have their values zeroed.
            encoder_hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, embed_dim)`):
                Flattened multi-scale feature maps from which the values are projected.
            encoder_attention_mask: Unused in this implementation.
            position_embeddings (`torch.Tensor`, *optional*):
                Added to `hidden_states` before the offset/weight/value projections.
            reference_points (`torch.Tensor`):
                Normalized reference points; last dimension is 2 for (x, y) points or 4 for (x, y, w, h) boxes.
            spatial_shapes (`torch.LongTensor` of shape `(n_levels, 2)`):
                (height, width) of each feature level.
            level_start_index (`torch.LongTensor`):
                Start offset of each level in the flattened sequence (unused by the PyTorch path).
            output_attentions (`bool`, *optional*):
                Unused here; attention weights are always returned alongside the output.

        Returns:
            Tuple of `(output, attention_weights)`.
        """
        # add position embeddings to the hidden states before projecting to queries and keys
        if position_embeddings is not None:
            hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
        batch_size, num_queries, _ = hidden_states.shape
        batch_size, sequence_length, _ = encoder_hidden_states.shape
        # Sanity check: the flattened value length must equal the sum of all per-level areas.
        if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length:
            raise ValueError(
                "Make sure to align the spatial shapes with the sequence length of the encoder hidden states"
            )
        value = self.value_proj(encoder_hidden_states)
        if attention_mask is not None:
            # we invert the attention_mask
            value = value.masked_fill(attention_mask[..., None], float(0))
        # Split value channels across heads: (batch, seq, n_heads, head_dim).
        value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
        sampling_offsets = self.sampling_offsets(hidden_states).view(
            batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2
        )
        attention_weights = self.attention_weights(hidden_states).view(
            batch_size, num_queries, self.n_heads, self.n_levels * self.n_points
        )
        # Softmax jointly over all (level, point) pairs so each query's weights sum to 1 per head.
        attention_weights = nn.functional.softmax(attention_weights, -1).view(
            batch_size, num_queries, self.n_heads, self.n_levels, self.n_points
        )
        # batch_size, num_queries, n_heads, n_levels, n_points, 2
        if reference_points.shape[-1] == 2:
            # Normalize offsets by each level's (width, height) so they are in the same [0, 1] space
            # as the reference points.
            offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
            sampling_locations = (
                reference_points[:, :, None, :, None, :]
                + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
            )
        elif reference_points.shape[-1] == 4:
            # Box references: scale offsets by half the box size (w, h).
            sampling_locations = (
                reference_points[:, :, None, :, None, :2]
                + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
            )
        else:
            raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}")
        # CPU
        output = multiscale_deform_attn_core_pytorch(value, spatial_shapes, sampling_locations, attention_weights)
        output = self.output_proj(output)
        return output, attention_weights
class OneFormerPixelDecoderEncoderLayer(nn.Module):
    """
    One encoder layer of the pixel decoder: multi-scale deformable self-attention followed by a feed-forward
    network, each with a residual connection and a post-layer-norm.
    """

    def __init__(self, config: OneFormerConfig):
        super().__init__()
        self.embed_dim = config.conv_dim
        self.self_attn = OneFormerPixelDecoderEncoderMultiscaleDeformableAttention(
            embed_dim=self.embed_dim,
            num_heads=config.num_attention_heads,
            n_levels=3,
            n_points=4,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = nn.functional.relu
        self.activation_dropout = config.dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_feedforward_dim)
        self.fc2 = nn.Linear(config.encoder_feedforward_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)
        self.is_training = config.is_training

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        position_embeddings: torch.Tensor = None,
        reference_points=None,
        spatial_shapes=None,
        level_start_index=None,
        output_attentions: bool = False,
    ):
        """
        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Input to the layer.
            attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
                Attention mask.
            position_embeddings (`torch.FloatTensor`, *optional*):
                Position embeddings, added to `hidden_states` inside the attention module.
            reference_points (`torch.FloatTensor`, *optional*):
                Reference points for the deformable attention.
            spatial_shapes (`torch.LongTensor`, *optional*):
                Spatial shapes of the backbone feature maps.
            level_start_index (`torch.LongTensor`, *optional*):
                Start index of each feature level in the flattened sequence.
            output_attentions (`bool`, *optional*):
                Whether to also return the attention weights.
        """
        # --- deformable self-attention sub-block (residual + post-norm) ---
        shortcut = hidden_states
        attn_output, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            position_embeddings=position_embeddings,
            reference_points=reference_points,
            spatial_shapes=spatial_shapes,
            level_start_index=level_start_index,
            output_attentions=output_attentions,
        )
        attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.is_training)
        hidden_states = self.self_attn_layer_norm(shortcut + attn_output)

        # --- feed-forward sub-block (residual + post-norm) ---
        shortcut = hidden_states
        ffn_output = self.activation_fn(self.fc1(hidden_states))
        ffn_output = nn.functional.dropout(ffn_output, p=self.activation_dropout, training=self.is_training)
        ffn_output = self.fc2(ffn_output)
        ffn_output = nn.functional.dropout(ffn_output, p=self.dropout, training=self.is_training)
        hidden_states = self.final_layer_norm(shortcut + ffn_output)

        # During training, clamp inf/nan activations to keep the values finite.
        if self.is_training:
            if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
                clamp_value = torch.finfo(hidden_states.dtype).max - 1000
                hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
# Modified from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrEncoder with DeformableDetrEncoder->OneFormerPixelDecoderEncoderOnly
class OneFormerPixelDecoderEncoderOnly(nn.Module):
    """
    Transformer encoder consisting of *config.encoder_layers* deformable attention layers, each a
    [`OneFormerPixelDecoderEncoderLayer`].

    The encoder refines the flattened multi-scale feature maps through the stack of deformable attention layers.

    Args:
        config: OneFormerConfig
    """

    def __init__(self, config: OneFormerConfig):
        super().__init__()
        self.config = config
        self.dropout = config.dropout
        self.layers = nn.ModuleList([OneFormerPixelDecoderEncoderLayer(config) for _ in range(config.encoder_layers)])

    @staticmethod
    def get_reference_points(spatial_shapes, valid_ratios, device):
        """
        Build normalized (x, y) reference points for every position of every feature map. Used in decoder.

        Args:
            spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
                Spatial shape (height, width) of each feature map.
            valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
                Valid (non-padded) ratio of each feature map.
            device (`torch.device`):
                Device on which to create the tensors.

        Returns:
            `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`
        """
        points_per_level = []
        for level, (height, width) in enumerate(spatial_shapes):
            # Pixel-center coordinates of this level, normalized by its valid extent.
            grid_y, grid_x = torch.meshgrid(
                torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device),
                torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device),
            )
            grid_y = grid_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height)
            grid_x = grid_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width)
            points_per_level.append(torch.stack((grid_x, grid_y), -1))
        reference_points = torch.cat(points_per_level, 1)
        # Broadcast against the per-batch valid ratios of every level.
        return reference_points[:, :, None] * valid_ratios[:, None]

    def forward(
        self,
        inputs_embeds=None,
        attention_mask=None,
        position_embeddings=None,
        spatial_shapes=None,
        level_start_index=None,
        valid_ratios=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Run the flattened multi-scale features through the full stack of encoder layers.

        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Flattened feature map (output of the backbone + projection layer) passed to the encoder.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid attending over padding pixel features (1 = real pixel, 0 = padding).
            position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Position embeddings added to queries and keys in each self-attention layer.
            spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
                Spatial shape of each feature map.
            level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`):
                Starting index of each feature map in the flattened sequence.
            valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
                Ratio of valid area in each feature level.
            output_attentions (`bool`, *optional*):
                Whether to collect the attention weights of every layer.
            output_hidden_states (`bool`, *optional*):
                Whether to collect the hidden states before/after every layer.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = self.config.output_attentions if output_attentions is None else output_attentions
        output_hidden_states = (
            self.config.output_hidden_states if output_hidden_states is None else output_hidden_states
        )
        return_dict = self.config.use_return_dict if return_dict is None else return_dict

        hidden_states = inputs_embeds
        reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device)

        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            layer_outputs = layer(
                hidden_states,
                attention_mask,
                position_embeddings=position_embeddings,
                reference_points=reference_points,
                spatial_shapes=spatial_shapes,
                level_start_index=level_start_index,
                output_attentions=output_attentions,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions += (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
# Modified from transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelDecoder with Mask2->One
class OneFormerPixelDecoder(nn.Module):
    """
    Pixel decoder: projects the last `num_feature_levels` backbone feature maps, refines them with a multi-scale
    deformable attention encoder, then adds FPN-style lateral/output convolutions over the remaining feature maps
    and produces the final mask features from the last output map.
    """

    def __init__(self, config: OneFormerConfig, feature_channels):
        """
        Args:
            config ([`OneFormerConfig`]):
                Model configuration.
            feature_channels:
                Number of channels of each backbone feature map, one per stage.
        """
        super().__init__()
        self.config = config
        # positional encoding
        self.position_embedding = OneFormerSinePositionEmbedding(num_pos_feats=config.conv_dim // 2, normalize=True)
        self.num_feature_levels = 3
        # Only the last `num_feature_levels` feature maps go through the transformer encoder.
        transformer_in_channels = feature_channels[-self.num_feature_levels :]
        self.transformer_feature_strides = config.strides[-self.num_feature_levels :]
        self.feature_channels = feature_channels
        # Learned per-level embedding added to the positional encodings in `forward`.
        self.level_embed = nn.Parameter(torch.Tensor(self.num_feature_levels, config.conv_dim))
        # Create input projection layers
        if self.num_feature_levels > 1:
            input_projections_list = []
            # Built in reverse channel order so projection i matches the i-th entry of the
            # reversed feature list iterated in `forward`.
            for in_channels in transformer_in_channels[::-1]:
                input_projections_list.append(
                    nn.Sequential(
                        nn.Conv2d(in_channels, config.conv_dim, kernel_size=1),
                        nn.GroupNorm(32, config.conv_dim),
                    )
                )
            self.input_projections = nn.ModuleList(input_projections_list)
        else:
            self.input_projections = nn.ModuleList(
                [
                    nn.Sequential(
                        nn.Conv2d(transformer_in_channels[-1], config.conv_dim, kernel_size=1),
                        nn.GroupNorm(32, config.conv_dim),
                    )
                ]
            )
        self.encoder = OneFormerPixelDecoderEncoderOnly(config)
        # 1x1 conv producing the final mask features from the last decoder output map.
        self.mask_projection = nn.Conv2d(
            config.conv_dim,
            config.mask_dim,
            kernel_size=1,
            stride=1,
            padding=0,
        )
        self.common_stride = config.common_stride
        # extra fpn levels: number of additional upsampling steps needed to reach `common_stride`.
        stride = min(self.transformer_feature_strides)
        self.num_fpn_levels = int(np.log2(stride) - np.log2(self.common_stride))
        lateral_convs = []
        output_convs = []
        # One lateral (1x1) + output (3x3) conv pair per extra FPN level.
        for idx, in_channels in enumerate(self.feature_channels[: self.num_fpn_levels]):
            lateral_conv = nn.Sequential(
                nn.Conv2d(
                    in_channels,
                    config.conv_dim,
                    kernel_size=1,
                    bias=False,
                ),
                nn.GroupNorm(32, config.conv_dim),
            )
            output_conv = nn.Sequential(
                nn.Conv2d(
                    config.conv_dim,
                    config.conv_dim,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False,
                ),
                nn.GroupNorm(32, config.conv_dim),
                nn.ReLU(),
            )
            self.add_module("adapter_{}".format(idx + 1), lateral_conv)
            self.add_module("layer_{}".format(idx + 1), output_conv)
            lateral_convs.append(lateral_conv)
            output_convs.append(output_conv)
        # Place convs into top-down order (from low to high resolution)
        # to make the top-down computation in forward clearer.
        self.lateral_convs = lateral_convs[::-1]
        self.output_convs = output_convs[::-1]

    def get_valid_ratio(self, mask):
        """
        Get the valid (non-padded) ratio of all feature maps.

        Args:
            mask: Boolean padding mask of shape `(batch_size, height, width)` where `True` marks padded positions.

        Returns:
            `torch.FloatTensor` of shape `(batch_size, 2)` holding the (width, height) valid ratios.
        """
        _, height, width = mask.shape
        valid_height = torch.sum(~mask[:, :, 0], 1)
        valid_width = torch.sum(~mask[:, 0, :], 1)
        valid_ratio_heigth = valid_height.float() / height
        valid_ratio_width = valid_width.float() / width
        valid_ratio = torch.stack([valid_ratio_width, valid_ratio_heigth], -1)
        return valid_ratio

    def forward(
        self,
        features,
        encoder_outputs=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        Args:
            features:
                List of backbone feature maps, one per stage.
            encoder_outputs (*optional*):
                Precomputed encoder outputs; when given, the deformable encoder is skipped.
            output_attentions (`bool`, *optional*):
                Whether to return the encoder attention weights.
            output_hidden_states (`bool`, *optional*):
                Whether to return the encoder hidden states.
            return_dict (`bool`, *optional*):
                Forwarded to the encoder.

        Returns:
            [`OneFormerPixelDecoderOutput`] with mask features, multi-scale features and (optional) attentions.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        # First, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default)
        # on the last `num_feature_levels` feature maps (taken in reverse order).
        sources = []
        position_embeddings_list = []
        for level, source in enumerate(features[::-1][: self.num_feature_levels]):
            feats = source.float()
            sources.append(self.input_projections[level](feats))
            position_embeddings_list.append(self.position_embedding(feats))
        # No padding here, so the masks are all-False (everything valid).
        masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in sources]
        # Prepare encoder inputs (by flattening)
        source_flatten = []
        mask_flatten = []
        lvl_pos_embed_flatten = []
        spatial_shapes = []
        for level, (source, mask, pos_embed) in enumerate(zip(sources, masks, position_embeddings_list)):
            batch_size, num_channels, height, width = source.shape
            spatial_shape = (height, width)
            spatial_shapes.append(spatial_shape)
            # (batch, channels, H, W) -> (batch, H*W, channels)
            source = source.flatten(2).transpose(1, 2)
            mask = mask.flatten(1)
            pos_embed = pos_embed.flatten(2).transpose(1, 2)
            # Add the learned per-level embedding so the encoder can tell levels apart.
            lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1)
            lvl_pos_embed_flatten.append(lvl_pos_embed)
            source_flatten.append(source)
            mask_flatten.append(mask)
        source_flatten = torch.cat(source_flatten, 1)
        mask_flatten = torch.cat(mask_flatten, 1)
        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
        spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device)
        # Cumulative level areas -> start offset of each level in the flattened sequence.
        level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
        valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
        valid_ratios = valid_ratios.float()
        # Send source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder
        # Also provide spatial_shapes, level_start_index and valid_ratios
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                inputs_embeds=source_flatten,
                attention_mask=mask_flatten,
                position_embeddings=lvl_pos_embed_flatten,
                spatial_shapes=spatial_shapes,
                level_start_index=level_start_index,
                valid_ratios=valid_ratios,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        y = encoder_outputs.last_hidden_state
        bs = y.shape[0]
        # Split the flattened encoder output back into one chunk per feature level.
        split_size_or_sections = [None] * self.num_feature_levels
        for i in range(self.num_feature_levels):
            if i < self.num_feature_levels - 1:
                split_size_or_sections[i] = level_start_index[i + 1] - level_start_index[i]
            else:
                split_size_or_sections[i] = y.shape[1] - level_start_index[i]
        y = torch.split(y, split_size_or_sections, dim=1)
        out = []
        multi_scale_features = []
        num_cur_levels = 0
        # Un-flatten each chunk back to (batch, channels, height, width).
        for i, z in enumerate(y):
            out.append(z.transpose(1, 2).view(bs, -1, spatial_shapes[i][0], spatial_shapes[i][1]))
        # append `out` with extra FPN levels
        # Reverse feature maps into top-down order (from low to high resolution)
        for idx, feats in enumerate(features[: self.num_fpn_levels][::-1]):
            feats = feats.float()
            lateral_conv = self.lateral_convs[idx]
            output_conv = self.output_convs[idx]
            cur_fpn = lateral_conv(feats)
            # Following the FPN scheme: upsample the coarser map (bilinear here) and add the lateral features.
            y = cur_fpn + nn.functional.interpolate(
                out[-1], size=cur_fpn.shape[-2:], mode="bilinear", align_corners=False
            )
            y = output_conv(y)
            out.append(y)
        # Keep the first `num_feature_levels` maps as the multi-scale features.
        for o in out:
            if num_cur_levels < self.num_feature_levels:
                multi_scale_features.append(o)
                num_cur_levels += 1
        return OneFormerPixelDecoderOutput(
            mask_features=self.mask_projection(out[-1]),
            multi_scale_features=multi_scale_features,
            attentions=encoder_outputs.attentions,
        )
# Modified from transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelLevelModule with Mask2->One
class OneFormerPixelLevelModule(nn.Module):
    def __init__(self, config: OneFormerConfig):
        """
        Pixel Level Module proposed in [Masked-attention Mask Transformer for Universal Image
        Segmentation](https://arxiv.org/abs/2112.01527). It runs the input image through a backbone and a pixel
        decoder, generating multi-scale feature maps and pixel embeddings.

        Args:
            config ([`OneFormerConfig`]):
                The configuration used to instantiate this model.
        """
        super().__init__()
        # Backbone built from the nested backbone config; the decoder is sized from its channel layout.
        self.encoder = AutoBackbone.from_config(config.backbone_config)
        self.decoder = OneFormerPixelDecoder(config, feature_channels=self.encoder.channels)

    def forward(self, pixel_values: Tensor, output_hidden_states: bool = False) -> OneFormerPixelLevelModuleOutput:
        """Run the backbone, then the pixel decoder, and bundle both sets of features into one output."""
        backbone_features: List[Tensor] = self.encoder(pixel_values).feature_maps
        decoder_out: OneFormerPixelDecoderOutput = self.decoder(
            backbone_features, output_hidden_states=output_hidden_states
        )
        return OneFormerPixelLevelModuleOutput(
            encoder_features=tuple(backbone_features),
            decoder_features=decoder_out.multi_scale_features,
            decoder_last_feature=decoder_out.mask_features,
        )
# Modified from transformers.models.detr.modeling_detr.DetrAttention with Detr->OneFormer
class OneFormerAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and
keys (as explained in the DETR paper).
"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_embeddings: Optional[torch.Tensor] = None,
key_value_states: Optional[torch.Tensor] = None,
key_value_position_embeddings: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
hidden_states = hidden_states.permute(1, 0, 2) if hidden_states is not None else None
position_embeddings = position_embeddings.permute(1, 0, 2) if position_embeddings is not None else None
key_value_states = key_value_states.permute(1, 0, 2) if key_value_states is not None else None
key_value_position_embeddings = (
key_value_position_embeddings.permute(1, 0, 2) if key_value_position_embeddings is not None else None
)
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
batch_size, target_len, embed_dim = hidden_states.size()
# add position embeddings to the hidden states before projecting to queries and keys
if position_embeddings is not None:
hidden_states_original = hidden_states
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
# add key-value position embeddings to the key value states
if key_value_position_embeddings is not None:
key_value_states_original = key_value_states
key_value_states = self.with_pos_embed(key_value_states, key_value_position_embeddings)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, batch_size)
value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
source_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(
f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(
f"Attention mask should be of size {(target_len, batch_size * self.num_heads, source_len)}, but is"
f" {attention_mask.size()}"
)
attn_weights += attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
attn_output = self.out_proj(attn_output).permute(1, 0, 2)
return attn_output, attn_weights_reshaped
class OneFormerTransformerDecoderSelfAttentionLayer(nn.Module):
    """Self-attention block (attention + residual + LayerNorm) for the OneFormer transformer decoder.

    `normalize_before` selects pre-norm ordering; the default is post-norm.
    """

    def __init__(self, embed_dim, num_heads, dropout=0.0, activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = OneFormerAttention(embed_dim=embed_dim, num_heads=num_heads, dropout=dropout, is_decoder=True)

        self.norm = nn.LayerNorm(embed_dim)
        self.dropout = nn.Dropout(dropout)

        self.activation = ACT2FN[activation]
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Additive position embeddings; a None pos is a no-op.
        return tensor + pos if pos is not None else tensor

    def forward_post(
        self,
        output,
        output_mask: Optional[Tensor] = None,
        output_key_padding_mask: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        # Attend, add the residual, then normalize (post-norm).
        attn_result, attention_weights = self.self_attn(
            hidden_states=output, position_embeddings=query_pos, attention_mask=output_mask, output_attentions=True
        )
        residual_sum = output + self.dropout(attn_result)
        return self.norm(residual_sum), attention_weights

    def forward_pre(
        self,
        output,
        output_mask: Optional[Tensor] = None,
        output_key_padding_mask: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        # Normalize, attend, then add the residual (pre-norm).
        normed = self.norm(output)
        attn_result, attention_weights = self.self_attn(
            hidden_states=normed, position_embeddings=query_pos, attention_mask=output_mask, output_attentions=True
        )
        return output + self.dropout(attn_result), attention_weights

    def forward(
        self,
        output,
        output_mask: Optional[Tensor] = None,
        output_key_padding_mask: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        branch = self.forward_pre if self.normalize_before else self.forward_post
        return branch(output, output_mask, output_key_padding_mask, query_pos)
class OneFormerTransformerDecoderCrossAttentionLayer(nn.Module):
    """Cross-attention block: decoder queries attend to a memory sequence, with residual + LayerNorm.

    `normalize_before` selects pre-norm ordering; the default is post-norm.
    """

    def __init__(self, embed_dim, num_heads, dropout=0.0, activation="relu", normalize_before=False):
        super().__init__()
        self.multihead_attn = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout)

        self.norm = nn.LayerNorm(embed_dim)
        self.dropout = nn.Dropout(dropout)

        self.activation = ACT2FN[activation]
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Additive position embeddings; a None pos is a no-op.
        return tensor + pos if pos is not None else tensor

    def forward_post(
        self,
        output,
        memory,
        memory_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        # Attend, add the residual, then normalize (post-norm).
        attn_result, attention_weights = self.multihead_attn(
            query=self.with_pos_embed(output, query_pos),
            key=self.with_pos_embed(memory, pos),
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
        )
        residual_sum = output + self.dropout(attn_result)
        return self.norm(residual_sum), attention_weights

    def forward_pre(
        self,
        output,
        memory,
        memory_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        # Normalize the queries first, attend, then add the residual (pre-norm).
        normed = self.norm(output)
        attn_result, attention_weights = self.multihead_attn(
            query=self.with_pos_embed(normed, query_pos),
            key=self.with_pos_embed(memory, pos),
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
        )
        return output + self.dropout(attn_result), attention_weights

    def forward(
        self,
        output,
        memory,
        memory_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        branch = self.forward_pre if self.normalize_before else self.forward_post
        return branch(output, memory, memory_mask, memory_key_padding_mask, pos, query_pos)
class OneFormerTransformerDecoderFFNLayer(nn.Module):
    """Feed-forward block (linear -> activation -> dropout -> linear) with residual + LayerNorm.

    `normalize_before` selects pre-norm ordering; the default is post-norm.
    """

    def __init__(self, d_model, dim_feedforward=2048, dropout=0.0, activation="relu", normalize_before=False):
        super().__init__()
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm = nn.LayerNorm(d_model)

        self.activation = ACT2FN[activation]
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Additive position embeddings; a None pos is a no-op.
        return tensor + pos if pos is not None else tensor

    def _mlp(self, hidden):
        # Two-layer MLP with dropout between the activation and the second projection.
        return self.linear2(self.dropout(self.activation(self.linear1(hidden))))

    def forward_post(self, output):
        output = output + self.dropout(self._mlp(output))
        return self.norm(output)

    def forward_pre(self, output):
        return output + self.dropout(self._mlp(self.norm(output)))

    def forward(self, output):
        return self.forward_pre(output) if self.normalize_before else self.forward_post(output)
class OneFormerMLPPredictionHead(nn.Module):
    def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int = 3):
        """
        A classic Multi Layer Perceptron (MLP).

        Args:
            input_dim (`int`):
                The input dimensions.
            hidden_dim (`int`):
                The hidden dimensions.
            output_dim (`int`):
                The output dimensions.
            num_layers (int, *optional*, defaults to 3):
                The number of layers.
        """
        super().__init__()
        # Layer i maps dims[i] -> dims[i + 1]; every layer but the last is followed by a ReLU.
        dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
        blocks = []
        for layer_idx in range(num_layers):
            is_last = layer_idx == num_layers - 1
            blocks.append(
                PredictionBlock(
                    dims[layer_idx], dims[layer_idx + 1], activation=nn.Identity() if is_last else nn.ReLU()
                )
            )
        self.layers = nn.Sequential(*blocks)

    def forward(self, input: Tensor) -> Tensor:
        return self.layers(input)
# refactored from original implementation
class OneFormerTransformerDecoderLayer(nn.Module):
    """One block of the OneFormer transformer decoder: masked cross-attention over one pixel-decoder
    feature level, then self-attention among the queries, then a feed-forward layer."""

    def __init__(self, config: OneFormerConfig):
        super().__init__()
        self.embed_dim = config.hidden_dim
        # The decoder cycles through this many feature levels (one per layer, round-robin).
        self.num_feature_levels = 3

        self.cross_attn = OneFormerTransformerDecoderCrossAttentionLayer(
            embed_dim=self.embed_dim,
            num_heads=config.num_attention_heads,
            dropout=0.0,
            normalize_before=config.pre_norm,
        )

        self.self_attn = OneFormerTransformerDecoderSelfAttentionLayer(
            embed_dim=self.embed_dim,
            num_heads=config.num_attention_heads,
            dropout=0.0,
            normalize_before=config.pre_norm,
        )

        self.ffn = OneFormerTransformerDecoderFFNLayer(
            d_model=self.embed_dim,
            dim_feedforward=config.dim_feedforward,
            dropout=0.0,
            normalize_before=config.pre_norm,
        )

    def forward(
        self,
        index: int,
        output: torch.Tensor,
        multi_stage_features: List[torch.Tensor],
        multi_stage_positional_embeddings: List[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        query_embeddings: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ):
        """
        Args:
            index (`int`): index of the layer in the Transformer decoder.
            output (`torch.FloatTensor`): the object queries of shape `(N, batch, hidden_dim)`
            multi_stage_features (`List[torch.Tensor]`): the multi-scale features from the pixel decoder.
            multi_stage_positional_embeddings (`List[torch.Tensor]`):
                positional embeddings for the multi_stage_features
            attention_mask (`torch.FloatTensor`): attention mask for the masked cross attention layer
            query_embeddings (`torch.FloatTensor`, *optional*):
                position embeddings that are added to the queries and keys in the self-attention layer.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        # Pick which pixel-decoder feature level this layer attends to (round-robin over levels).
        level_index = index % self.num_feature_levels
        # Rows of the mask that would hide *every* key are cleared entirely, so the cross-attention
        # softmax never sees an all-masked row. NOTE(review): this mutates `attention_mask` in place.
        attention_mask[torch.where(attention_mask.sum(-1) == attention_mask.shape[-1])] = False

        # Masked Cross Attention
        output, cross_attn_weights = self.cross_attn(
            output,
            multi_stage_features[level_index],
            memory_mask=attention_mask,
            memory_key_padding_mask=None,  # here we do not apply masking on padded region
            pos=multi_stage_positional_embeddings[level_index],
            query_pos=query_embeddings,
        )

        # Self Attention
        output, self_attn_weights = self.self_attn(
            output,
            output_mask=None,
            output_key_padding_mask=None,
            query_pos=query_embeddings,
        )

        # Fully Connected
        output = self.ffn(output)

        outputs = (output,)

        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        return outputs
class OneFormerTransformerDecoderQueryTransformerDecoder(nn.Module):
    """Stack of query-transformer decoder layers.

    When `return_intermediate` is True, the normalized output of every layer is collected and the
    stack of all of them is returned; otherwise only the final output is returned, with an extra
    leading dimension so both branches yield a tensor with a layer axis.
    """

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        # Each layer is an independent deep copy of `decoder_layer`.
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(
        self,
        output,
        memory,
        output_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        output_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        intermediate = []

        for layer in self.layers:
            output = layer(
                output,
                memory,
                output_mask=output_mask,
                memory_mask=memory_mask,
                output_key_padding_mask=output_key_padding_mask,
                memory_key_padding_mask=memory_key_padding_mask,
                pos=pos,
                query_pos=query_pos,
            )
            if self.return_intermediate:
                intermediate.append(self.norm(output))

        if self.norm is not None:
            output = self.norm(output)
            if self.return_intermediate:
                # The last entry was already normalized in the loop; replace it so the
                # final output is not normalized twice.
                intermediate.pop()
                intermediate.append(output)

        if self.return_intermediate:
            return torch.stack(intermediate)

        return output.unsqueeze(0)
class OneFormerTransformerDecoderQueryTransformerDecoderLayer(nn.Module):
    """Standard transformer decoder layer (self-attention, cross-attention over memory, FFN),
    with either post-norm (`forward_post`, default) or pre-norm (`forward_pre`) ordering."""

    def __init__(
        self,
        d_model,
        nhead,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
    ):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        # norm1/dropout1: self-attention; norm2/dropout2: cross-attention; norm3/dropout3: FFN.
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = ACT2FN[activation]
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Additive position embeddings; a None pos is a no-op.
        return tensor if pos is None else tensor + pos

    def forward_post(
        self,
        output,
        memory,
        output_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        output_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        # Self-attention among the queries (position embeddings added to q and k, not v).
        q = k = self.with_pos_embed(output, query_pos)
        output2 = self.self_attn(q, k, value=output, attn_mask=output_mask, key_padding_mask=output_key_padding_mask)
        output2 = output2[0]
        output = output + self.dropout1(output2)
        output = self.norm1(output)
        # Cross-attention: queries attend to the memory sequence.
        output2 = self.multihead_attn(
            query=self.with_pos_embed(output, query_pos),
            key=self.with_pos_embed(memory, pos),
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
        )
        output2 = output2[0]
        output = output + self.dropout2(output2)
        output = self.norm2(output)
        # Feed-forward sub-block.
        output2 = self.linear2(self.dropout(self.activation(self.linear1(output))))
        output = output + self.dropout3(output2)
        output = self.norm3(output)
        return output

    def forward_pre(
        self,
        output,
        memory,
        output_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        output_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        # Same three sub-blocks as forward_post, but each LayerNorm is applied *before* its sub-block.
        output2 = self.norm1(output)
        q = k = self.with_pos_embed(output2, query_pos)
        output2 = self.self_attn(q, k, value=output2, attn_mask=output_mask, key_padding_mask=output_key_padding_mask)
        output2 = output2[0]
        output = output + self.dropout1(output2)
        output2 = self.norm2(output)
        output2 = self.multihead_attn(
            query=self.with_pos_embed(output2, query_pos),
            key=self.with_pos_embed(memory, pos),
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
        )
        output2 = output2[0]
        output = output + self.dropout2(output2)
        output2 = self.norm3(output)
        output2 = self.linear2(self.dropout(self.activation(self.linear1(output2))))
        output = output + self.dropout3(output2)
        return output

    def forward(
        self,
        output,
        memory,
        output_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        output_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        if self.normalize_before:
            return self.forward_pre(
                output,
                memory,
                output_mask,
                memory_mask,
                output_key_padding_mask,
                memory_key_padding_mask,
                pos,
                query_pos,
            )
        return self.forward_post(
            output,
            memory,
            output_mask,
            memory_mask,
            output_key_padding_mask,
            memory_key_padding_mask,
            pos,
            query_pos,
        )
class OneFormerTransformerDecoderQueryTransformer(nn.Module):
    """Query transformer: runs a stack of decoder layers that turn flattened image features
    into object-query embeddings."""

    def __init__(
        self,
        d_model=512,
        nhead=8,
        num_decoder_layers=6,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
        return_intermediate_dec=False,
    ):
        super().__init__()

        layer = OneFormerTransformerDecoderQueryTransformerDecoderLayer(
            d_model, nhead, dim_feedforward, dropout, activation, normalize_before
        )
        self.decoder = OneFormerTransformerDecoderQueryTransformerDecoder(
            layer,
            num_decoder_layers,
            nn.LayerNorm(d_model),
            return_intermediate=return_intermediate_dec,
        )

        self.d_model = d_model
        self.nhead = nhead

    def forward(self, src, mask, query_embed, pos_embed, task_token=None):
        batch_size = src.shape[0]
        # Flatten NxCxHxW feature maps into (HW, N, C) sequences for the decoder.
        src = src.flatten(2).permute(2, 0, 1)
        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
        query_embed = query_embed.unsqueeze(1).repeat(1, batch_size, 1)
        if mask is not None:
            mask = mask.flatten(1)

        # Initialize the queries from the task token when one is given, otherwise from zeros.
        if task_token is None:
            queries = torch.zeros_like(query_embed)
        else:
            queries = task_token.repeat(query_embed.shape[0], 1, 1)

        decoded = self.decoder(queries, src, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed)
        return decoded.transpose(1, 2)
class OneFormerTransformerDecoder(nn.Module):
    """
    Transformer decoder

    Runs a query transformer to initialize object queries from the task token and image features,
    then refines them through `decoder_layers - 1` masked-attention layers, producing class and
    mask predictions after every layer.
    """

    def __init__(self, in_channels: int, config: OneFormerConfig):
        super().__init__()
        self.config = config

        self.dropout = config.dropout
        self.num_heads = config.num_attention_heads
        self.is_training = config.is_training
        self.use_task_norm = config.use_task_norm
        self.use_auxiliary_loss = config.use_auxiliary_loss

        self.query_transformer = OneFormerTransformerDecoderQueryTransformer(
            d_model=config.hidden_dim,
            dropout=config.dropout,
            nhead=config.num_attention_heads,
            dim_feedforward=config.dim_feedforward,
            num_decoder_layers=config.query_dec_layers,
            normalize_before=config.pre_norm,
            return_intermediate_dec=False,
        )

        self.decoder_norm = nn.LayerNorm(config.hidden_dim)

        self.num_feature_levels = 3

        self.layers = nn.ModuleList(
            [OneFormerTransformerDecoderLayer(config) for _ in range(config.decoder_layers - 1)]
        )

        # 1x1 conv that projects mask features to the decoder's hidden size for the query transformer.
        self.query_input_projection = nn.Conv2d(in_channels, config.hidden_dim, kernel_size=1)

        # +1 for the "no object" class.
        self.class_embed = nn.Linear(config.hidden_dim, config.num_labels + 1)
        self.mask_embed = OneFormerMLPPredictionHead(
            config.hidden_dim,
            config.hidden_dim,
            config.mask_dim,
            3,
        )

    def forward(
        self,
        task_token=None,
        multi_stage_features=None,
        multi_stage_positional_embeddings=None,
        mask_features=None,
        query_features=None,
        query_embeddings=None,
        query_embedder=None,
        size_list=None,
        output_attentions=None,
    ):
        if self.use_task_norm:
            task_token = self.decoder_norm(task_token)

        # Build the initial object queries; the last query-embedder row is reserved
        # (the task token is appended in its place below).
        object_queries = self.query_transformer(
            query_features,
            None,
            query_embedder.weight[:-1],
            self.query_input_projection(mask_features),
            task_token if self.use_task_norm else None,
        )

        object_queries = object_queries[0].permute(1, 0, 2)

        # The task token is concatenated as an extra query.
        queries = torch.cat([object_queries, task_token], dim=0)
        output = queries.clone()

        intermediate_class_predictions = []
        intermediate_mask_predictions = []

        # prediction heads on learnable query features
        outputs_class, outputs_mask, attention_mask = self.forward_prediction_heads(
            output, mask_features, attention_mask_target_size=size_list[0]
        )
        intermediate_class_predictions.append(outputs_class)
        intermediate_mask_predictions.append(outputs_mask)

        attentions = ()

        for index, layer in enumerate(self.layers):
            layer_outputs = layer(
                index=index,
                output=output,
                multi_stage_features=multi_stage_features,
                multi_stage_positional_embeddings=multi_stage_positional_embeddings,
                attention_mask=attention_mask,
                query_embeddings=query_embeddings,
                output_attentions=output_attentions,
            )

            output = layer_outputs[0]
            attentions += (layer_outputs[1:],)

            # New predictions after every layer; the attention mask for the next layer is derived
            # from this layer's mask prediction, resized to the next feature level.
            outputs_class, outputs_mask, attention_mask = self.forward_prediction_heads(
                output, mask_features, attention_mask_target_size=size_list[(index + 1) % self.num_feature_levels]
            )

            intermediate_class_predictions.append(outputs_class)
            intermediate_mask_predictions.append(outputs_mask)

        if not len(intermediate_mask_predictions) == len(self.layers) + 1:
            raise ValueError(
                "Intermediate predictions in the transformer decoder must have the same number of elements as number"
                " of layers"
            )

        object_queries = layer_outputs[0].permute(1, 0, 2)

        contrastive_logits = queries.permute(1, 0, 2)

        return OneFormerTransformerDecoderOutput(
            object_queries=object_queries,
            contrastive_logits=contrastive_logits,
            prediction_masks=intermediate_mask_predictions[-1],
            prediction_class=intermediate_class_predictions[-1],
            auxiliary_predictions=self._get_aux_predictions(
                intermediate_class_predictions, intermediate_mask_predictions
            )
            if self.use_auxiliary_loss
            else None,
            attentions=attentions,
        )

    def forward_prediction_heads(self, output, mask_features, attention_mask_target_size):
        """Predict per-query class logits and masks, and derive the next layer's attention mask."""
        decoder_output = self.decoder_norm(output)
        decoder_output = decoder_output.transpose(0, 1)
        outputs_class = self.class_embed(decoder_output)
        mask_embed = self.mask_embed(decoder_output)
        # Dot product of query embeddings with per-pixel features -> per-query masks.
        outputs_mask = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features)

        attention_mask = nn.functional.interpolate(
            outputs_mask, size=attention_mask_target_size, mode="bilinear", align_corners=False
        )

        # must use bool type
        # If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged.
        attention_mask = (
            attention_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5
        ).bool()
        # No gradients flow through the attention mask.
        attention_mask = attention_mask.detach()

        return outputs_class, outputs_mask, attention_mask

    @torch.jit.unused
    def _get_aux_predictions(self, outputs_class, outputs_seg_masks):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        aux_list = [
            {"class_queries_logits": a, "masks_queries_logits": b}
            for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])
        ]
        return tuple(aux_list)
class OneFormerTransformerModule(nn.Module):
    """
    The OneFormer's transformer module.
    """

    def __init__(self, in_features: int, config: OneFormerConfig):
        super().__init__()
        hidden_dim = config.hidden_dim
        self.num_feature_levels = 3
        self.position_embedder = OneFormerSinePositionEmbedding(num_pos_feats=hidden_dim // 2, normalize=True)
        self.queries_embedder = nn.Embedding(config.num_queries, hidden_dim)
        # NOTE(review): this is a plain Python list, so any Conv2d appended below is NOT registered
        # as a submodule — its parameters are excluded from state_dict(), .to(device), and the
        # optimizer. This looks like a latent bug when `in_features != hidden_dim` or
        # `enforce_input_proj` is set; confirm before changing, since switching to nn.ModuleList
        # would alter checkpoint keys.
        self.input_projections = []

        for _ in range(self.num_feature_levels):
            if in_features != hidden_dim or config.enforce_input_proj:
                self.input_projections.append(nn.Conv2d(in_features, hidden_dim, kernel_size=1))
            else:
                # Identity projection when the feature width already matches.
                self.input_projections.append(nn.Sequential())

        self.decoder = OneFormerTransformerDecoder(in_channels=in_features, config=config)
        # Learned per-level embedding added to each feature level.
        self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim)

    def forward(
        self,
        multi_scale_features: List[Tensor],
        mask_features: Tensor,
        task_token: Tensor,
        output_attentions: bool = False,
    ) -> OneFormerTransformerDecoderOutput:
        if not len(multi_scale_features) == self.num_feature_levels:
            raise ValueError(
                f"Number of elements in multi_scale_features ({len(multi_scale_features)}) and num_feature_levels"
                f" ({self.num_feature_levels}) do not match!"
            )
        multi_stage_features = []
        multi_stage_positional_embeddings = []
        size_list = []

        for i in range(self.num_feature_levels):
            # Remember each level's spatial size for resizing attention masks later.
            size_list.append(multi_scale_features[i].shape[-2:])
            multi_stage_positional_embeddings.append(self.position_embedder(multi_scale_features[i], None).flatten(2))
            multi_stage_features.append(
                self.input_projections[i](multi_scale_features[i]).flatten(2)
                + self.level_embed.weight[i][None, :, None]
            )

            # flatten NxCxHxW to HWxNxC
            multi_stage_positional_embeddings[-1] = multi_stage_positional_embeddings[-1].permute(2, 0, 1)
            multi_stage_features[-1] = multi_stage_features[-1].permute(2, 0, 1)

        _, batch_size, _ = multi_stage_features[0].shape

        # QxNxC
        query_embeddings = self.queries_embedder.weight.unsqueeze(1).repeat(1, batch_size, 1)
        task_token = task_token.unsqueeze(0)

        query_features = self.position_embedder(mask_features, None)

        return self.decoder(
            task_token=task_token,
            multi_stage_features=multi_stage_features,
            multi_stage_positional_embeddings=multi_stage_positional_embeddings,
            mask_features=mask_features,
            query_features=query_features,
            query_embeddings=query_embeddings,
            query_embedder=self.queries_embedder,
            size_list=size_list,
            output_attentions=output_attentions,
        )
# Copied from transformers.models.maskformer.modeling_maskformer.MaskFormerSinePositionEmbedding with Mask->One
class OneFormerSinePositionEmbedding(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
    need paper, generalized to work on images.
    """

    def __init__(
        self, num_pos_feats: int = 64, temperature: int = 10000, normalize: bool = False, scale: Optional[float] = None
    ):
        super().__init__()
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        # Positions are scaled to [0, scale] when normalizing; defaults to a full 2*pi period.
        self.scale = 2 * math.pi if scale is None else scale

    def forward(self, x: Tensor, mask: Optional[Tensor] = None) -> Tensor:
        # `mask` marks padded pixels (True = padding); by default nothing is padded.
        if mask is None:
            mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
        not_mask = ~mask
        # Cumulative sums assign each valid pixel its 1-based (row, column) position.
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale

        # Geometric frequency progression; consecutive channel pairs share a frequency (sin/cos pair).
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats)

        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sin on even channels and cos on odd channels, then flatten the pair axis.
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        # Final layout: (batch, 2 * num_pos_feats, height, width).
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos
# Copied from transformers.models.maskformer.modeling_maskformer.PredictionBlock
class PredictionBlock(nn.Module):
    """A single Linear layer followed by an activation, with submodules indexed like an nn.Sequential."""

    def __init__(self, in_dim: int, out_dim: int, activation: nn.Module) -> None:
        super().__init__()
        self.layers = [nn.Linear(in_dim, out_dim), activation]
        # Maintain submodule indexing as if part of a Sequential block
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: Tensor) -> Tensor:
        # Apply the linear layer, then the activation.
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class OneFormerTextMapperAttention(nn.Module):
    """Multi-head attention with separate q/k/v projection layers, used by the text mapper."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim**-0.5

        self.q_proj = nn.Linear(dim, dim, bias=qkv_bias)
        self.k_proj = nn.Linear(dim, dim, bias=qkv_bias)
        self.v_proj = nn.Linear(dim, dim, bias=qkv_bias)

        self.attn_drop = nn.Dropout(attn_drop)

        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, q, k, v):
        batch_size, q_len, channels = q.shape
        if k.shape != v.shape:
            raise ValueError(f"keys ({list(k.shape)}) and values ({list(v.shape)}) have different shapes!")
        k_len = k.shape[1]
        per_head = channels // self.num_heads

        # Project and split into heads: (batch, seq, heads, head_dim).
        queries = self.q_proj(q).reshape(batch_size, q_len, self.num_heads, per_head)
        keys = self.k_proj(k).reshape(batch_size, k_len, self.num_heads, per_head)
        values = self.v_proj(v).reshape(batch_size, k_len, self.num_heads, per_head)

        # Scaled dot-product scores per head, softmax over the key axis.
        scores = torch.einsum("bnkc,bmkc->bknm", queries, keys) * self.scale
        scores = scores.softmax(dim=-1)

        # Weighted sum of values, merged back to (batch, q_len, channels).
        attended = torch.einsum("bknm,bmkc->bnkc", scores, values).reshape(batch_size, q_len, channels)
        attended = self.proj(attended)
        return self.proj_drop(attended)
class OneFormerTextTransformerDecoderLayer(nn.Module):
    """Pre-norm decoder layer used by the text context decoder: self-attention,
    cross-attention over visual memory, then an MLP, each with a residual connection."""

    def __init__(
        self,
        d_model,
        nhead,
        dropout=0.1,
    ):
        super().__init__()
        self.self_attn = OneFormerTextMapperAttention(d_model, nhead, proj_drop=dropout)
        self.cross_attn = OneFormerTextMapperAttention(d_model, nhead, proj_drop=dropout)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

        self.mlp = nn.Sequential(
            nn.Linear(d_model, d_model * 4), nn.GELU(), nn.Dropout(dropout), nn.Linear(d_model * 4, d_model)
        )

    def forward(self, hidden_state, mem):
        # Pre-norm self-attention with residual connection.
        normed = self.norm1(hidden_state)
        hidden_state = hidden_state + self.self_attn(normed, normed, normed)
        # Pre-norm cross-attention against the (un-normalized) memory.
        hidden_state = hidden_state + self.cross_attn(self.norm2(hidden_state), mem, mem)
        # Pre-norm MLP with dropout on the residual branch.
        return hidden_state + self.dropout(self.mlp(self.norm3(hidden_state)))
class OneFormerTextContextDecoder(nn.Module):
    """Projects text and visual embeddings to a common width, cross-attends the text over the
    visual context through a stack of decoder layers, and projects back to the visual width."""

    def __init__(
        self, transformer_width=256, transformer_heads=4, transformer_layers=6, visual_dim=1024, dropout=0.1, **kwargs
    ):
        super().__init__()

        self.memory_proj = nn.Sequential(
            nn.LayerNorm(visual_dim),
            nn.Linear(visual_dim, transformer_width),
            nn.LayerNorm(transformer_width),
        )

        self.text_proj = nn.Sequential(
            nn.LayerNorm(visual_dim),
            nn.Linear(visual_dim, transformer_width),
        )

        self.decoder = nn.ModuleList(
            [
                OneFormerTextTransformerDecoderLayer(transformer_width, transformer_heads, dropout)
                for _ in range(transformer_layers)
            ]
        )

        self.out_proj = nn.Sequential(nn.LayerNorm(transformer_width), nn.Linear(transformer_width, visual_dim))

    def forward(self, text, visual):
        # Project both modalities into the transformer width, then decode layer by layer.
        memory = self.memory_proj(visual)
        hidden_state = self.text_proj(text)

        for decoder_layer in self.decoder:
            hidden_state = decoder_layer(hidden_state, memory)

        return self.out_proj(hidden_state)
class OneFormerTextMLP(nn.Module):
    """Two-layer MLP with quick-GELU activation, used inside the text transformer."""

    def __init__(
        self,
        hidden_size: Optional[int] = None,
        intermediate_size: Optional[int] = None,
        output_size: Optional[int] = None,
    ):
        super().__init__()
        self.activation_fn = ACT2FN["quick_gelu"]
        self.fc1 = nn.Linear(hidden_size, intermediate_size)
        self.fc2 = nn.Linear(intermediate_size, output_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # fc1 -> quick-GELU -> fc2
        return self.fc2(self.activation_fn(self.fc1(hidden_states)))
class OneFormerTextTransformerLayer(nn.Module):
    """Pre-norm transformer encoder layer for the text encoder.

    NOTE(review): `attn_mask` is stored on the module but never passed to `self_attn` in
    `forward`, so the causal mask built by the encoder appears unused here — confirm intent.
    """

    def __init__(self, width: int, heads: int, attn_mask: torch.Tensor):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(width, heads)
        self.layer_norm1 = nn.LayerNorm(width)
        self.mlp = OneFormerTextMLP(width, width * 4, width)
        self.layer_norm2 = nn.LayerNorm(width)
        self.attn_mask = attn_mask

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_padding_mask: Optional[torch.Tensor] = None,
    ) -> torch.FloatTensor:
        # Self-attention sub-block (pre-norm + residual).
        normed = self.layer_norm1(hidden_states)
        attn_out = self.self_attn(
            normed,
            normed,
            normed,
            need_weights=False,
            key_padding_mask=key_padding_mask,
        )[0]
        hidden_states = hidden_states + attn_out

        # MLP sub-block (pre-norm + residual).
        return hidden_states + self.mlp(self.layer_norm2(hidden_states))
class OneFormerTextTransformer(nn.Module):
    """Stack of `OneFormerTextTransformerLayer`s, optionally run under
    activation checkpointing to trade compute for memory."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_checkpoint=False):
        super().__init__()
        self.width = width
        self.num_layers = layers
        self.layers = nn.Sequential(*[OneFormerTextTransformerLayer(width, heads, attn_mask) for _ in range(layers)])
        self.use_checkpoint = use_checkpoint

    def forward(self, hidden_states: torch.Tensor):
        run_checkpointed = self.use_checkpoint
        for block in self.layers:
            if run_checkpointed:
                # Recompute this layer's activations during backward.
                hidden_states = torch.utils.checkpoint.checkpoint(block, hidden_states)
            else:
                hidden_states = block(hidden_states)
        return hidden_states
class OneFormerTextEncoder(nn.Module):
    """Causal text encoder: token + positional embeddings, a causal transformer
    stack, a final LayerNorm, then pooling at the position of the largest
    token id (presumably the end-of-text token, as in CLIP — verify against
    the tokenizer)."""

    def __init__(
        self,
        context_length: int,
        width: int,
        layers: int,
        vocab_size,
        use_checkpoint=False,
    ):
        super().__init__()
        # One attention head per 64 channels.
        heads = width // 64
        self.context_length = context_length
        self.width = width
        self.transformer = OneFormerTextTransformer(
            width=width,
            layers=layers,
            heads=heads,
            attn_mask=self.build_attention_mask(),
            use_checkpoint=use_checkpoint,
        )
        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, width))
        self.ln_final = nn.LayerNorm(width)
        self.token_embedding = nn.Embedding(vocab_size, width)

    def build_attention_mask(self):
        """Additive causal mask: -inf strictly above the diagonal, 0 on and
        below it (PyTorch attention masks are additive)."""
        mask = torch.full((self.context_length, self.context_length), float("-inf"))
        mask.triu_(1)  # zero out the diagonal and everything below it
        return mask

    def forward(self, text):
        embeddings = self.token_embedding(text) + self.positional_embedding
        # (batch, seq, dim) -> (seq, batch, dim): layout expected by
        # nn.MultiheadAttention, then back again after the transformer.
        embeddings = self.transformer(embeddings.permute(1, 0, 2)).permute(1, 0, 2)
        embeddings = self.ln_final(embeddings)
        # Pool the feature at the position of the highest-valued token id.
        return embeddings[torch.arange(embeddings.shape[0]), text.argmax(dim=-1)]
class OneFormerTextMapper(nn.Module):
    """Encodes tokenized texts into query embeddings: text encoder, MLP
    projector, and optional learned prompt-context embeddings appended to the
    projected queries."""

    def __init__(self, config: OneFormerConfig):
        super().__init__()
        self.text_encoder = OneFormerTextEncoder(
            context_length=config.text_encoder_context_length,
            width=config.text_encoder_width,
            layers=config.text_encoder_num_layers,
            vocab_size=config.text_encoder_vocab_size,
        )
        self.text_projector = OneFormerMLPPredictionHead(
            config.text_encoder_width,
            config.hidden_dim,
            config.hidden_dim,
            config.text_encoder_proj_layers,
        )
        # Optional learned "prompt context" tokens, concatenated to the queries.
        if config.text_encoder_n_ctx > 0:
            self.prompt_ctx = nn.Embedding(
                config.text_encoder_n_ctx,
                config.text_encoder_width,
            )
        else:
            self.prompt_ctx = None

    def forward(
        self,
        inputs: Tensor,
    ) -> Tensor:
        text_queries = self.encode_text(inputs)
        return text_queries

    def encode_text(self, text):
        """Encode `text` of shape (batch, seq) or (batch, num_text, seq) into
        query embeddings; 3D input is flattened, encoded, and reshaped back.

        Raises:
            ValueError: if `text` is None or not 2- or 3-dimensional.
        """
        # Bug fix: the original tested `text.ndim is None`, which can never be
        # true (ndim is always an int) and would raise AttributeError before
        # the check when text is None. Validate the object itself first.
        if text is None:
            raise ValueError("text must not be NoneType")
        if text.ndim not in [2, 3]:
            raise ValueError("Number of dimensions in text must be 2 or 3")
        squeeze_dim = False
        num_text = 1
        if text.ndim == 3:
            # Fold the per-sample text dimension into the batch dimension.
            # (Removed a redundant `num_text = text.shape[1]` that was
            # immediately overwritten by the tuple unpack below.)
            batch_size, num_text, hidden_dim = text.shape
            text = text.reshape(batch_size * num_text, hidden_dim)
            squeeze_dim = True
        # [batch_size, num_channels]
        encoded_text = self.text_encoder(text)
        text_queries = self.text_projector(encoded_text)
        if squeeze_dim:
            _, hidden_dim = text_queries.shape
            text_queries = text_queries.reshape(batch_size, num_text, hidden_dim)
            if self.prompt_ctx is not None:
                text_queries_ctx = self.prompt_ctx.weight.unsqueeze(0).repeat(text_queries.shape[0], 1, 1)
                text_queries = torch.cat([text_queries, text_queries_ctx], dim=1)
        return text_queries
class OneFormerTaskModel(nn.Module):
    """Maps the tokenized task input to a task-token embedding via a 2-layer
    MLP prediction head."""

    def __init__(self, config: OneFormerConfig):
        super().__init__()
        self.task_mlp = OneFormerMLPPredictionHead(
            config.task_seq_len,
            config.hidden_dim,
            config.hidden_dim,
            2,
        )

    def forward(self, inputs: Tensor) -> Tensor:
        # Cast up front in case the task inputs arrive as an integer tensor.
        return self.task_mlp(inputs.float())
ONEFORMER_START_DOCSTRING = r"""
This model is a PyTorch [nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a
regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.
Parameters:
config ([`OneFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
ONEFORMER_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`OneFormerProcessor`]. See
[`OneFormerProcessor.__call__`] for details.
task_inputs (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Task inputs. Task inputs can be obtained using [`OneFormerImageProcessor`]. See
[`OneFormerProcessor.__call__`] for details.
pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of Detr's decoder attention layers.
return_dict (`bool`, *optional*):
Whether or not to return a [`~OneFormerModelOutput`] instead of a plain tuple.
"""
class OneFormerPreTrainedModel(PreTrainedModel):
    """Base class hooking OneFormer submodules into the Hugging Face
    pretrained-model machinery (config handling and weight initialization)."""

    config_class = OneFormerConfig
    base_model_prefix = "model"
    main_input_name = "pixel_values"

    def _init_weights(self, module: nn.Module):
        """Initialize the weights of one submodule, dispatched on its type."""
        xavier_std = self.config.init_xavier_std
        std = self.config.init_std
        if isinstance(module, OneFormerTransformerModule):
            if module.input_projections is not None:
                for input_projection in module.input_projections:
                    # Sequential projections are initialized via their own children.
                    if not isinstance(input_projection, nn.Sequential):
                        nn.init.xavier_uniform_(input_projection.weight, gain=xavier_std)
                        nn.init.constant_(input_projection.bias, 0)
        elif isinstance(module, OneFormerTransformerDecoder):
            nn.init.xavier_uniform_(module.query_input_projection.weight, gain=xavier_std)
            nn.init.constant_(module.query_input_projection.bias, 0)
        elif isinstance(module, OneFormerPixelDecoderEncoderMultiscaleDeformableAttention):
            # Deformable-attention initialization: zero the offset/attention
            # weights and bias the sampling offsets toward one direction per head.
            nn.init.constant_(module.sampling_offsets.weight.data, 0.0)
            thetas = torch.arange(module.n_heads, dtype=torch.float32) * (2.0 * math.pi / module.n_heads)
            grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
            grid_init = (
                (grid_init / grid_init.abs().max(-1, keepdim=True)[0])
                .view(module.n_heads, 1, 1, 2)
                .repeat(1, module.n_levels, module.n_points, 1)
            )
            # Successive sampling points get progressively larger offsets.
            for i in range(module.n_points):
                grid_init[:, :, i, :] *= i + 1
            with torch.no_grad():
                module.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
            nn.init.constant_(module.attention_weights.weight.data, 0.0)
            nn.init.constant_(module.attention_weights.bias.data, 0.0)
            nn.init.xavier_uniform_(module.value_proj.weight.data)
            nn.init.constant_(module.value_proj.bias.data, 0.0)
            nn.init.xavier_uniform_(module.output_proj.weight.data)
            nn.init.constant_(module.output_proj.bias.data, 0.0)
        elif isinstance(module, OneFormerPixelDecoderEncoderOnly):
            for p in module.parameters():
                if p.dim() > 1:
                    nn.init.xavier_uniform_(p)
        elif isinstance(module, OneFormerPixelDecoder):
            for p in module.parameters():
                if p.dim() > 1:
                    nn.init.xavier_uniform_(p)
            nn.init.normal_(module.level_embed, std=0)
        elif isinstance(module, OneFormerTransformerDecoderSelfAttentionLayer):
            for p in module.parameters():
                if p.dim() > 1:
                    nn.init.xavier_uniform_(p, gain=xavier_std)
        elif isinstance(module, OneFormerTransformerDecoderCrossAttentionLayer):
            for p in module.parameters():
                if p.dim() > 1:
                    nn.init.xavier_uniform_(p, gain=xavier_std)
        elif isinstance(module, OneFormerTransformerDecoderFFNLayer):
            for p in module.parameters():
                if p.dim() > 1:
                    nn.init.xavier_uniform_(p, gain=xavier_std)
        elif isinstance(module, OneFormerTransformerDecoderQueryTransformer):
            for p in module.parameters():
                if p.dim() > 1:
                    nn.init.xavier_uniform_(p, gain=xavier_std)
        elif isinstance(module, OneFormerPixelLevelModule):
            for submodule in module.modules():
                if isinstance(submodule, (nn.Conv2d, nn.Linear)):
                    submodule.weight.data.normal_(mean=0.0, std=std)
                    if submodule.bias is not None:
                        submodule.bias.data.zero_()
        elif isinstance(module, OneFormerTextContextDecoder):
            for submodule in module.modules():
                if isinstance(submodule, nn.Linear):
                    nn.init.trunc_normal_(submodule.weight, std=0.02)
                    if isinstance(submodule, nn.Linear) and submodule.bias is not None:
                        nn.init.constant_(submodule.bias, 0)
                elif isinstance(submodule, nn.LayerNorm):
                    nn.init.constant_(submodule.bias, 0)
                    nn.init.constant_(submodule.weight, 1.0)
        elif isinstance(module, OneFormerTextTransformer):
            # Depth-scaled normal initialization for the text transformer stack.
            proj_std = (module.width**-0.5) * ((2 * module.num_layers) ** -0.5)
            attn_std = module.width**-0.5
            fc_std = (2 * module.width) ** -0.5
            for layer in module.layers:
                nn.init.normal_(layer.self_attn.in_proj_weight, std=attn_std)
                nn.init.normal_(layer.self_attn.out_proj.weight, std=proj_std)
                nn.init.normal_(layer.mlp.fc1.weight, std=fc_std)
                nn.init.normal_(layer.mlp.fc2.weight, std=proj_std)
        elif isinstance(module, OneFormerTextEncoder):
            nn.init.normal_(module.token_embedding.weight, std=0.02)
            nn.init.normal_(module.positional_embedding, std=0.01)
            if hasattr(module, "reference_points"):
                nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0)
                nn.init.constant_(module.reference_points.bias.data, 0.0)
        elif isinstance(module, OneFormerTaskModel):
            for submodule in module.modules():
                # NOTE(review): this tests `module` (always a OneFormerTaskModel
                # here), not `submodule`, so the condition can never be true and
                # the xavier initialization below never runs — it looks like it
                # should read `isinstance(submodule, OneFormerMLPPredictionHead)`.
                # Confirm intent before changing.
                if isinstance(module, OneFormerMLPPredictionHead):
                    for submodule in module.modules():
                        if isinstance(submodule, nn.Linear):
                            nn.init.xavier_uniform_(submodule.weight, gain=xavier_std)
                            nn.init.constant_(submodule.bias, 0)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.MultiheadAttention):
            module.in_proj_weight.data.normal_(mean=0.0, std=std)
            module.in_proj_bias.data.zero_()
        elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding embedding at exactly zero.
                module.weight.data[module.padding_idx].zero_()
@add_start_docstrings(
    "The bare OneFormer Model outputting raw hidden-states without any specific head on top.",
    ONEFORMER_START_DOCSTRING,
)
class OneFormerModel(OneFormerPreTrainedModel):
    # NOTE(review): the base class declares `main_input_name` as a single
    # string; a list is unusual — confirm downstream consumers handle it.
    main_input_name = ["pixel_values", "task_inputs"]

    def __init__(self, config: OneFormerConfig):
        super().__init__(config)
        self.pixel_level_module = OneFormerPixelLevelModule(config)
        self.transformer_module = OneFormerTransformerModule(in_features=config.conv_dim, config=config)
        self.task_encoder = OneFormerTaskModel(config)
        self.is_training = config.is_training
        # Text queries (and hence the text mapper) are only needed to compute
        # the contrastive loss during training.
        if self.is_training:
            self.text_mapper = OneFormerTextMapper(config)
        else:
            self.text_mapper = None
        self.post_init()

    @add_start_docstrings_to_model_forward(ONEFORMER_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=OneFormerModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Tensor,
        task_inputs: Tensor,
        text_inputs: Optional[Tensor] = None,
        pixel_mask: Optional[Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> OneFormerModelOutput:
        r"""
        Returns:
            `OneFormerModelOutput`

        Example:
        ```python
        >>> import torch
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import OneFormerProcessor, OneFormerModel

        >>> # download testing image
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> # load processor for preprocessing the inputs
        >>> processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
        >>> model = OneFormerModel.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
        >>> inputs = processor(image, ["semantic"], return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> mask_predictions = outputs.transformer_decoder_mask_predictions
        >>> class_predictions = outputs.transformer_decoder_class_predictions

        >>> f"👉 Mask Predictions Shape: {list(mask_predictions.shape)}, Class Predictions Shape: {list(class_predictions.shape)}"
        '👉 Mask Predictions Shape: [1, 150, 128, 171], Class Predictions Shape: [1, 150, 151]'
        ```"""
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        batch_size, _, height, width = pixel_values.shape
        if pixel_mask is None:
            # No padding information supplied: treat every pixel as real.
            pixel_mask = torch.ones((batch_size, height, width), device=pixel_values.device)
        # Backbone + pixel decoder.
        pixel_level_module_output = self.pixel_level_module(pixel_values, output_hidden_states)
        multi_scale_features = pixel_level_module_output.decoder_features
        mask_features = pixel_level_module_output.decoder_last_feature
        # Embed the tokenized task description into the task token.
        task_token = self.task_encoder(task_inputs)
        if self.is_training:
            text_queries = self.text_mapper(text_inputs)
        else:
            text_queries = None
        transformer_module_output = self.transformer_module(
            multi_scale_features=multi_scale_features,
            mask_features=mask_features,
            task_token=task_token,
            output_attentions=output_attentions,
        )
        queries = transformer_module_output.object_queries
        encoder_hidden_states = None
        pixel_decoder_hidden_states = None
        transformer_decoder_hidden_states = None
        if output_hidden_states:
            encoder_hidden_states = pixel_level_module_output.encoder_features
            # Collect the last decoder feature plus every multi-scale feature.
            pixel_decoder_hidden_states = (pixel_level_module_output.decoder_last_feature,)
            for f in pixel_level_module_output.decoder_features:
                pixel_decoder_hidden_states += (f,)
            transformer_decoder_hidden_states = transformer_module_output.auxiliary_predictions
        output = OneFormerModelOutput(
            encoder_hidden_states=encoder_hidden_states,
            pixel_decoder_hidden_states=pixel_decoder_hidden_states,
            transformer_decoder_hidden_states=transformer_decoder_hidden_states,
            transformer_decoder_object_queries=queries,
            transformer_decoder_contrastive_queries=transformer_module_output.contrastive_logits,
            transformer_decoder_mask_predictions=transformer_module_output.prediction_masks,
            transformer_decoder_class_predictions=transformer_module_output.prediction_class,
            transformer_decoder_auxiliary_predictions=transformer_module_output.auxiliary_predictions,
            text_queries=text_queries,
            task_token=task_token,
            attentions=transformer_module_output.attentions,
        )
        if not return_dict:
            output = tuple(v for v in output.values())
        return output
@add_start_docstrings(
    "OneFormer Model for instance, semantic and panoptic image segmentation.",
    ONEFORMER_START_DOCSTRING,
)
class OneFormerForUniversalSegmentation(OneFormerPreTrainedModel):
    main_input_name = ["pixel_values", "task_inputs"]

    def __init__(self, config: OneFormerConfig):
        super().__init__(config)
        self.model = OneFormerModel(config)
        # Hungarian matcher pairs predicted queries with ground-truth segments.
        self.matcher = OneFormerHungarianMatcher(
            cost_class=config.class_weight,
            cost_dice=config.dice_weight,
            cost_mask=config.mask_weight,
            num_points=config.train_num_points,
        )
        # Per-loss scaling factors applied in `get_loss_dict`.
        self.weight_dict: Dict[str, float] = {
            "loss_cross_entropy": config.class_weight,
            "loss_mask": config.mask_weight,
            "loss_dice": config.dice_weight,
            "loss_contrastive": config.contrastive_weight,
        }
        self.criterion = OneFormerLoss(
            num_classes=config.num_labels,
            matcher=self.matcher,
            weight_dict=self.weight_dict,
            eos_coef=config.no_object_weight,
            num_points=config.train_num_points,
            oversample_ratio=config.oversample_ratio,
            importance_sample_ratio=config.importance_sample_ratio,
            contrastive_temperature=config.contrastive_temperature,
        )
        self.post_init()

    def get_loss_dict(
        self,
        masks_queries_logits: Tensor,
        class_queries_logits: Tensor,
        contrastive_queries_logits: Tensor,
        mask_labels: Tensor,
        class_labels: Tensor,
        text_queries: Tensor,
        auxiliary_predictions: Dict[str, Tensor],
        calculate_contrastive_loss: bool,
    ) -> Dict[str, Tensor]:
        """Compute all loss terms, then scale each by its configured weight."""
        loss_dict: Dict[str, Tensor] = self.criterion(
            masks_queries_logits=masks_queries_logits,
            class_queries_logits=class_queries_logits,
            contrastive_queries_logits=contrastive_queries_logits,
            mask_labels=mask_labels,
            class_labels=class_labels,
            text_queries=text_queries,
            auxiliary_predictions=auxiliary_predictions,
            calculate_contrastive_loss=calculate_contrastive_loss,
        )
        # weight each loss by `self.weight_dict[<LOSS_NAME>]` including auxiliary losses;
        # substring match so auxiliary keys (e.g. "loss_mask_3") pick up the
        # "loss_mask" weight. `loss *= weight` mutates the tensor in place,
        # which also updates the entry stored in `loss_dict`.
        for key, weight in self.weight_dict.items():
            for loss_key, loss in loss_dict.items():
                if key in loss_key:
                    loss *= weight
        return loss_dict

    def get_loss(self, loss_dict: Dict[str, Tensor]) -> Tensor:
        """Total training loss: sum of all weighted loss terms."""
        return sum(loss_dict.values())

    @add_start_docstrings_to_model_forward(ONEFORMER_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=OneFormerForUniversalSegmentationOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Tensor,
        task_inputs: Tensor,
        text_inputs: Optional[Tensor] = None,
        mask_labels: Optional[List[Tensor]] = None,
        class_labels: Optional[List[Tensor]] = None,
        pixel_mask: Optional[Tensor] = None,
        output_auxiliary_logits: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> OneFormerForUniversalSegmentationOutput:
        r"""
        text_inputs (`List[torch.Tensor]`, *optional*):
            Tensor of shape `(num_queries, sequence_length)` to be fed to a model
        mask_labels (`List[torch.Tensor]`, *optional*):
            List of mask labels of shape `(num_labels, height, width)` to be fed to a model
        class_labels (`List[torch.LongTensor]`, *optional*):
            list of target class labels of shape `(num_labels, height, width)` to be fed to a model. They identify the
            labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`.

        Returns:
            `OneFormerUniversalSegmentationOutput`

        Example:

        Universal segmentation example:
        ```python
        >>> from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
        >>> from PIL import Image
        >>> import requests
        >>> import torch

        >>> # load OneFormer fine-tuned on ADE20k for universal segmentation
        >>> processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
        >>> model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")

        >>> url = (
        ...     "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
        ... )
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> # Semantic Segmentation
        >>> inputs = processor(image, ["semantic"], return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
        >>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
        >>> class_queries_logits = outputs.class_queries_logits
        >>> masks_queries_logits = outputs.masks_queries_logits

        >>> # you can pass them to processor for semantic postprocessing
        >>> predicted_semantic_map = processor.post_process_semantic_segmentation(
        ...     outputs, target_sizes=[image.size[::-1]]
        ... )[0]
        >>> f"👉 Semantic Predictions Shape: {list(predicted_semantic_map.shape)}"
        '👉 Semantic Predictions Shape: [512, 683]'

        >>> # Instance Segmentation
        >>> inputs = processor(image, ["instance"], return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
        >>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
        >>> class_queries_logits = outputs.class_queries_logits
        >>> masks_queries_logits = outputs.masks_queries_logits

        >>> # you can pass them to processor for instance postprocessing
        >>> predicted_instance_map = processor.post_process_instance_segmentation(
        ...     outputs, target_sizes=[image.size[::-1]]
        ... )[0]["segmentation"]
        >>> f"👉 Instance Predictions Shape: {list(predicted_instance_map.shape)}"
        '👉 Instance Predictions Shape: [512, 683]'

        >>> # Panoptic Segmentation
        >>> inputs = processor(image, ["panoptic"], return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
        >>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
        >>> class_queries_logits = outputs.class_queries_logits
        >>> masks_queries_logits = outputs.masks_queries_logits

        >>> # you can pass them to processor for panoptic postprocessing
        >>> predicted_panoptic_map = processor.post_process_panoptic_segmentation(
        ...     outputs, target_sizes=[image.size[::-1]]
        ... )[0]["segmentation"]
        >>> f"👉 Panoptic Predictions Shape: {list(predicted_panoptic_map.shape)}"
        '👉 Panoptic Predictions Shape: [512, 683]'
        ```
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.model(
            pixel_values=pixel_values,
            task_inputs=task_inputs,
            text_inputs=text_inputs,
            pixel_mask=pixel_mask,
            # Auxiliary losses need the intermediate decoder states.
            output_hidden_states=output_hidden_states or self.config.use_auxiliary_loss,
            output_attentions=output_attentions,
            return_dict=True,
        )
        loss, loss_dict, auxiliary_predictions = None, None, None
        class_queries_logits = outputs.transformer_decoder_class_predictions
        masks_queries_logits = outputs.transformer_decoder_mask_predictions
        contrastive_queries_logits = outputs.transformer_decoder_contrastive_queries
        auxiliary_predictions = outputs.transformer_decoder_auxiliary_predictions
        text_queries = outputs.text_queries
        # Losses are only computed when ground-truth labels are supplied.
        if mask_labels is not None and class_labels is not None:
            loss_dict: Dict[str, Tensor] = self.get_loss_dict(
                masks_queries_logits=masks_queries_logits,
                class_queries_logits=class_queries_logits,
                contrastive_queries_logits=contrastive_queries_logits,
                mask_labels=mask_labels,
                class_labels=class_labels,
                text_queries=text_queries,
                auxiliary_predictions=auxiliary_predictions,
                calculate_contrastive_loss=self.config.contrastive_temperature is not None,
            )
            loss = self.get_loss(loss_dict)
        output_auxiliary_logits = (
            self.config.output_auxiliary_logits if output_auxiliary_logits is None else output_auxiliary_logits
        )
        if not output_auxiliary_logits:
            auxiliary_predictions = None
        output = OneFormerForUniversalSegmentationOutput(
            class_queries_logits=class_queries_logits,
            masks_queries_logits=masks_queries_logits,
            auxiliary_predictions=auxiliary_predictions,
            loss=loss,
            **outputs,
        )
        if not return_dict:
            output = tuple(v for v in output.values())
            if loss is not None:
                # Bug fix: the original `output = ((loss)) + output` attempted
                # Tensor + tuple (double parentheses do not make a tuple) and
                # raised a TypeError; the loss must be prepended as a 1-tuple.
                output = (loss,) + output
        return output
| [
"[email protected]"
] | |
951bf4d60636cb348271486a74c5e08e4e35b053 | 5d3556043828ec87b4f28b92895904c20b083d2e | /electoral_constituencies/models.py | 0881ebae4ecf60958bfb14f53fce930e1ee322a7 | [] | no_license | Election-Portal/Election-Portal-Webapp | 826c926807bb54568c5db4861a01eaba217fc00b | 90c6a3da19f4bd029f0bd98ea2ca98ab095abbab | refs/heads/master | 2022-12-06T19:38:53.693793 | 2021-05-16T06:16:57 | 2021-05-16T06:16:57 | 139,657,468 | 3 | 3 | null | 2022-11-22T02:48:27 | 2018-07-04T02:16:56 | JavaScript | UTF-8 | Python | false | false | 1,683 | py | from django.db import models
from political_divisions.models import Province, District
from political_parties.models import PoliticalParty
# Create your models here.
class Sabha(models.Model):
    """Common fields shared by electoral-constituency (assembly seat) models.

    NOTE(review): this base class does not declare ``Meta.abstract = True``,
    so Django creates a separate ``Sabha`` table and the subclasses below use
    multi-table inheritance — confirm that is intended.
    """

    name = models.CharField("Name", max_length=50)
    district = models.ForeignKey(District,on_delete=models.CASCADE, related_name="SabhaDistrictSet")
    province = models.ForeignKey(Province, related_name="sabha_province_set",on_delete=models.CASCADE)
    area = models.IntegerField()  # constituency area — units not stated here; verify (presumably sq. km)
    population = models.IntegerField()  # total population of the constituency
    voters = models.IntegerField()  # number of voters
    is_marginal = models.BooleanField(default=False)  # flag for marginal (closely contested) seats
class PradeshSabha(Sabha):
    """Provincial-assembly constituency; adds the winning member and party."""

    winner = models.CharField("Member of Provincial Assembly", max_length=50)
    won_political_party = models.ForeignKey(PoliticalParty, related_name = "pradeshsabha_won_political_party_set",on_delete=models.CASCADE)

    class Meta:
        verbose_name = "Pradesh Sabha"
        verbose_name_plural = "Pradesh Sabhas"

    def __str__(self):
        return self.name
class PratinidhiSabha(Sabha):
    """House-of-Representatives constituency; links to two PradeshSabha
    constituencies (ka/kha — presumably the "a"/"b" provincial seats nested
    inside this federal seat; confirm with domain owners)."""

    winner = models.CharField("Member of House of Representative", max_length=50)
    won_political_party = models.ForeignKey(PoliticalParty, related_name = "pratinidhisabha_won_political_party_set",on_delete=models.CASCADE)
    pradeshsabha_ka = models.ForeignKey(PradeshSabha, related_name="pratinidhisabha_pradeshsabha_ka",on_delete=models.CASCADE)
    pradeshsabha_kha = models.ForeignKey(PradeshSabha, related_name="pratinidhisabha_pradeshsabha_kha",on_delete=models.CASCADE)

    class Meta:
        verbose_name = "Pratinidhi Sabha"
        verbose_name_plural = "Pratinidhi Sabhas"

    def __str__(self):
        return self.name
| [
"[email protected]"
] | |
7c874e9bd36d9da166eb7dbe16f7b4208a2ca064 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_55/446.py | 9f01e2cc099049a3b984b3b6a4598d3fb9b46375 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py |
# Google Code Jam "Theme Park" solver: open the input/output files and read
# the number of test cases from the first line.
FILE = open("C-small-attempt0.in","r")
OUTPUT = open("C-small-attempt0.out","w")
cases = FILE.readline()
def rideCoaster():
    """Board one coaster run: take groups FIFO from the global queue while
    capacity `k` allows, move the boarded groups to the back of the queue,
    and return the number of riders (== money earned for this run)."""
    global k
    global groups
    boarded = []
    riders = 0
    while groups and riders + groups[0] <= k:
        group_size = groups.pop(0)
        riders += group_size
        boarded.append(group_size)
    # Boarded groups rejoin the end of the queue in their original order.
    groups.extend(boarded)
    return riders
# Per test case: R = number of rides, k = coaster capacity, n = group count,
# followed by the queue of group sizes. Earnings = total riders over R rides.
for i in range(0,int(cases)):
    temp = FILE.readline().split(" ")
    temp2 = FILE.readline().split(" ")
    r = int(temp[0])
    k = int(temp[1])
    n = int(temp[2])
    groups = []
    for j in temp2:
        groups.append(int(j))
    moneyMade = 0
    for j in range(0,r):
        moneyMade += rideCoaster()
    OUTPUT.write('Case #' + str(i + 1) + ': ' + str(moneyMade) + '\n')
FILE.close()
OUTPUT.close()
| [
"[email protected]"
] | |
ae4b4ce6d77449745f49c95d0a0c4c087506107c | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/10/usersdata/132/9950/submittedfiles/testes.py | af2fd0de3a5a21e6725e176347e0e0201d8e9097 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | # -*- coding: utf-8 -*-
from __future__ import division
# Reads a number and scans i = 1, 2, ... testing whether a == i*i + i + 2.
a=input('digite um numero:')
i=1
# NOTE(review): the loop guard uses strict '<', so i*i + 1*i + 2 can never
# equal `a` inside the body — the 's' branch is unreachable and 'n' is
# printed once per iteration. The guard was probably meant to be '<='.
# The increment `i=i+1` follows this block in the original file.
while(i*i+1*i+2)<a:
    if (i*i+1*i+2)==a:
        print('s')
    else: print('n')
i=i+1 | [
"[email protected]"
] | |
a2d9dd9b79e3f5e7b06713e7c0d82ccb5958531c | 7f4fb112bc9ab2b90f5f2248f43285ce9ac2e0a0 | /src/igem/neutronics/water/container/borosilicate-glass-backfill/5cm/50wt/plot_all.in.one_cask.thickness_dose.rate_t4045_bottom.py | a45cb45d0aafb14cb4f353d47ae77969fc35a8da | [] | no_license | TheDoctorRAB/plot | dd3b5134c91c8fa7032fcc077c5427b26a80e49d | ed6746d511222c03e79f93548fe3ecd4286bf7b1 | refs/heads/master | 2021-07-11T10:21:19.347531 | 2020-07-16T17:13:15 | 2020-07-16T17:13:15 | 20,462,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,321 | py | ########################################################################
# R.A.Borrelli
# @TheDoctorRAB
# rev.11.March.2015
########################################################################
#
# Plot routine
# All in one file, with no separate control input, lib files
# Plot data is contained in a separate data file, read on command line
# Set up for a secondary y axis if needed
#
########################################################################
#
#
#
#######
#
# imports
#
# plot
#
import numpy
import matplotlib
import matplotlib.pyplot as plot
from matplotlib.ticker import MultipleLocator
#
#######
#
# command line
#
from sys import argv
script,plot_datafile=argv #column 0 is the x values then odd columns contain dose/flux
#
#######
#
# screen resolution
#
# NOTE(review): `Tkinter` is the Python 2 module name; under Python 3 this
# would need `import tkinter` — the script appears to target Python 2.
import Tkinter
root=Tkinter.Tk()
#
########################################################################
#
#
#
#######
#
# screen resolution
#
###
#
# pixels
#
width=root.winfo_screenwidth()
height=root.winfo_screenheight()
#
###
#
# mm
#
width_mm=root.winfo_screenmmwidth()
height_mm=root.winfo_screenmmheight()
#
###
#
# in
#
width_in=width_mm/25.4
height_in=height_mm/25.4
#
###
#
# dpi
#
width_dpi=width/width_in
height_dpi=height/height_in
#
dpi_values=(96,120,144,168,192)
current_dpi=width_dpi
minimum=1000
#
# pick the standard DPI value closest to the measured horizontal DPI
for dval in dpi_values:
    difference=abs(dval-width_dpi)
    if difference<minimum:
        minimum=difference
        current_dpi=dval
#
#######
#
# output to screen
#
print('width: %i px, height: %i px'%(width,height))
print('width: %i mm, height: %i mm'%(width_mm,height_mm))
print('width: %0.f in, height: %0.f in'%(width_in,height_in))
print('width: %0.f dpi, height: %0.f dpi'%(width_dpi,height_dpi))
print('size is %0.f %0.f'%(width,height))
print('current DPI is %0.f' % (current_dpi))
#
#######
#
# open the plot data file(s)
# add plot_dataN for each plot_datafileN
#
plot_data=numpy.loadtxt(plot_datafile,dtype=float)
#
#######
#
# graph parameters
#
###
#
# font sizes
#
matplotlib.rcParams.update({'font.size': 48}) #axis numbers
#
title_fontsize=54 #plot title
axis_fontsize=48 #axis labels
annotate_fontsize=48 #annotation
#
###
#
# set up for two y axis
#
fig,left_axis=plot.subplots()
# right_axis=left_axis.twinx()
#
###
#
# plot text
#
title='Dose rate - Bottom surface'
xtitle='Wall thickness [cm]'
ytitle='Dose rate [$\mu$Sv/h]'
#
###
#
# legend
# add linecolorN for each plot_dataN
# add curve_textN for each plot_dataN
#
line_color0='blue' #color
line_color1='orange' #color
line_color2='red' #color
line_color3='green' #color
line_color4='cyan' #color
#
curve_text0='10 wt% $B_4C$' #legend text
curve_text1='30 wt% $B_4C$' #legend text
curve_text2='50 wt% $B_4C$' #legend text
curve_text3='70 wt% $B_4C$' #legend text
curve_text4='90 wt% $B_4C$' #legend text
#
legend_location='lower left' #location of legend on grid
legend_font=42
#
###
#
# annotate
# position of the annotation dependent on axis domain and range
#
annotate_title='T-4045'
annotate_x=23
annotate_y=1100
#
annotate_title2='Water-Glass backfill'
annotate_x2=23
annotate_y2=700
#
annotate_title3='50 wt% $^{10}B$'
annotate_x3=23
annotate_y3=400
#
annotate_title4='5cm thick concrete'
annotate_x4=23
annotate_y4=0.02
#
###
#
# axis domain and range
#
xmin=1
xmax=31
#
ymin=0.01
ymax=3000
#
###
#
# axis ticks
#
xmajortick=5
ymajortick=5000
#
xminortick=1
yminortick=1000
#
###
#
# grid linewidth
#
major_grid_linewidth=2.5
minor_grid_linewidth=2.1
#
major_grid_tick_length=7
minor_grid_tick_length=5
#
###
#
# curve linewidth
#
curve_linewidth=4.0
#
#######
#
# set plot diagnostics
#
###
#
# titles
#
plot.title(title,fontsize=title_fontsize)
left_axis.set_xlabel(xtitle,fontsize=axis_fontsize)
left_axis.set_ylabel(ytitle,fontsize=axis_fontsize)
# right_axis.set_ylabel()
#
###
#
# grid
#
left_axis.grid(which='major',axis='both',linewidth=major_grid_linewidth)
left_axis.grid(which='minor',axis='both',linewidth=minor_grid_linewidth)
#
left_axis.tick_params(axis='both',which='major',direction='inout',length=major_grid_tick_length)
left_axis.tick_params(axis='both',which='minor',direction='inout',length=minor_grid_tick_length)
#
###
#
# axis domain and range
#
plot.xlim(xmin,xmax)
left_axis.axis(ymin=ymin,ymax=ymax)
###
#
# axis ticks
#
left_axis.xaxis.set_major_locator(MultipleLocator(xmajortick))
left_axis.xaxis.set_minor_locator(MultipleLocator(xminortick))
left_axis.yaxis.set_major_locator(MultipleLocator(ymajortick))
left_axis.yaxis.set_minor_locator(MultipleLocator(yminortick))
#
###
#
# log scale option
# xmin,ymin !=0 for log scale
#
#left_axis.set_xscale('log')
left_axis.set_yscale('log')
#
###
#
# annotation
# comment out if not needed
#
left_axis.annotate(annotate_title,xy=(annotate_x,annotate_y),xytext=(annotate_x,annotate_y),fontsize=annotate_fontsize)
left_axis.annotate(annotate_title2,xy=(annotate_x2,annotate_y2),xytext=(annotate_x2,annotate_y2),fontsize=annotate_fontsize)
left_axis.annotate(annotate_title3,xy=(annotate_x3,annotate_y3),xytext=(annotate_x3,annotate_y3),fontsize=annotate_fontsize)
left_axis.annotate(annotate_title4,xy=(annotate_x4,annotate_y4),xytext=(annotate_x4,annotate_y4),fontsize=annotate_fontsize)
#
#######
#
# plot data
#
left_axis.plot(plot_data[:,0],plot_data[:,1],marker='o',color=line_color0,label=curve_text0,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,3],marker='o',color=line_color1,label=curve_text1,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,5],marker='o',color=line_color2,label=curve_text2,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,7],marker='o',color=line_color3,label=curve_text3,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,9],marker='o',color=line_color4,label=curve_text4,linewidth=curve_linewidth,markersize=20)
left_axis.legend(loc=legend_location,fontsize=legend_font) #legend needs to be after all the plot data
plot.get_current_fig_manager().resize(width,height)
plot.gcf().set_size_inches((0.01*width),(0.01*height))
#
#######
#
# save
#
plot.savefig(title,dpi=current_dpi)
#
#######
#
# plot to screen
#
# # plot.show()
#
########################################################################
#
# EOF
#
########################################################################
| [
"[email protected]"
] | |
8c420f57c5fe425f7d0fa0dae48e942a44174687 | ceeeb927544c474163347254b11485cc945ea951 | /core/migrations/0002_alter_user_managers.py | ce6eb500dce47749534bad897ccb645b67da9adf | [] | no_license | alisamadzadeh46/filestore | ecc8d84ca16e8a8a51af0b74446a0c3b88cda646 | 4f31e51b2d028cd5f79b6af06d05568a8af7e9e1 | refs/heads/main | 2023-06-22T18:38:08.179128 | 2021-07-26T16:03:19 | 2021-07-26T16:03:19 | 377,806,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | # Generated by Django 3.2.4 on 2021-06-18 16:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
],
),
]
| [
"[email protected]"
] | |
eb105e8f898024dd5992253c0332f1e6987e2698 | 875bb84440094ce058a2ec25a661a7da6bb2e129 | /algo_py/boj/bj1647.py | e51a516645ce66ea00558949de82a9b29f3f55b6 | [] | no_license | shg9411/algo | 150e4291a7ba15990f17ca043ae8ab59db2bf97b | 8e19c83b1dbc0ffde60d3a3b226c4e6cbbe89a7d | refs/heads/master | 2023-06-22T00:24:08.970372 | 2021-07-20T06:07:29 | 2021-07-20T06:07:29 | 221,694,017 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | import sys
input = sys.stdin.readline
def find(x):
if parent[x] == x:
return x
parent[x] = find(parent[x])
return parent[x]
def union(x, y):
parent[find(y)] = parent[find(x)]
def kruskal():
cnt = res = 0
for c,a,b in q:
if find(a) == find(b):
continue
cnt += 1
res += c
union(a, b)
if cnt == N-2:
break
return res
N, M = map(int, input().split())
parent = [i for i in range(N+1)]
q = []
for _ in range(M):
A, B, C = map(int, input().split())
q.append((C,A,B))
q.sort()
print(kruskal())
| [
"[email protected]"
] | |
62ac4885dae7b11cc8dde424e4969271cc97bbc6 | 51b838412b7d9d38e398fefff92a0f17b3e040d7 | /enso/enso/utils/strings.py | 64d7d425ff3f7eef188c554f110433ea14749552 | [
"BSD-2-Clause"
] | permissive | thdoan/enso-portable | ed87bb30f3fe5d95e8dc6f3c4fa2a1a3a46f37fc | 2dd6db78f40811d78fe9a162ec95eac14bda2250 | refs/heads/master | 2020-04-05T19:01:50.058547 | 2015-01-11T16:46:56 | 2015-01-11T16:46:56 | 28,119,291 | 8 | 5 | null | null | null | null | UTF-8 | Python | false | false | 5,804 | py | # Copyright (c) 2008, Humanized, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso.utils.strings
#
# ----------------------------------------------------------------------------
"""
Various string utility methods.
"""
# ----------------------------------------------------------------------------
# Constants
# ----------------------------------------------------------------------------
# Double "smart quotes".
OPEN_QUOTE = u"\u201C"
CLOSE_QUOTE = u"\u201D"
# Single "smart quotes".
OPEN_SINGLE_QUOTE = u"\u2018"
CLOSE_SINGLE_QUOTE = u"\u2019"
# ----------------------------------------------------------------------------
# String utility functions
# ----------------------------------------------------------------------------
def smartQuote( text ):
"""
Replaces regular quotes in text with "smart quotes", i.e., left and right
facing quotes, and returns the result as a unicode object.
NOTE: This uses a very simple algorithm; if you are trying to quote
an arbitrary chunk of text, it would be best to use this function
on your formatting string, e.g., use this on:
' %s ' - output from blah command
before you apply the formatting operation that dumps unknown text.
"""
text = _smartDoubleQuote( text )
text = _smartSingleQuote( text )
return text
def _smartSingleQuote( inText ):
"""
Replaces single quotes with "smart quotes", i.e., forward
and back facing quotes, except for single quotes that are
parts of certain contractions.
"""
# Explicitly copy the text and cast it to unicode.
outText = unicode( inText[:] )
# There are two usages of single quote marks; for
# quotations, and for contractions.
# First, we escape the contraction cases. Then,
# without those pesky apostrophes, we will be free
# and clear to replace the remaining single quotes
# with smart quotes.
cases = [ "'s", "'t", "'nt", "I'm", "'ve", "'re", ]
for case in cases:
tempText = "<<|%s|>>" % case.replace( "'", "" )
outText = outText.replace( case, tempText )
# Now that there are no apostrophes, we can run through
# the text, replacing each pair of single quotes with
# opening and closing 'smart single quotes'.
while outText.count( "'" ) > 0:
outText = outText.replace( "'", OPEN_SINGLE_QUOTE, 1)
outText = outText.replace( "'", CLOSE_SINGLE_QUOTE, 1)
# Now we have to replace the contraction escape sequences
# with the original contractions.
for case in cases:
tempText = "<<|%s|>>" % case.replace( "'", "" )
outText = outText.replace( tempText, case )
return outText
def _smartDoubleQuote( inText ):
"""
Replaces double quotes with "smart quotes", i.e., forward
and back facing quotes.
"""
# Explicitly copy the text and cast it to unicode.
outText = unicode( inText[:] )
while outText.count( "\"" ) > 0:
outText = outText.replace( "\"", OPEN_QUOTE, 1)
outText = outText.replace( "\"", CLOSE_QUOTE, 1)
return outText
def stringRatio( a, b ):
"""
Calculates the string ratio of a to b.
If the strings are equal, returns 1.0. If they have no similarity
whatsoever, returns 0.0. Otherwise, returns a number in-between.
"""
if a == b:
return 1.0
elif a in b:
return float( len(a) ) / len(b)
elif b in a:
return float( len(b) ) / len(a)
else:
# The following code is actually identical to this code:
#
# import difflib
# seqMatch = difflib.SequenceMatcher( False, a, b )
# ratio = seqMatch.real_quick_ratio()
# return ratio
#
# But has been copied from difflib and pasted inline here for
# efficiency purposes.
la, lb = len(a), len(b)
length = la + lb
if length:
return 2.0 * (min(la, lb)) / length
return 1.0
def stringRatioBestMatch( item, sequence ):
"""
Uses a string ratio algorithm to find to the best match
to item among the elements of sequence.
"""
ratios = [ stringRatio( item, element ) \
for element in sequence ]
return sequence[ ratios.index( min(ratios) ) ]
| [
"[email protected]"
] | |
4889e9d816eea1681649c7ec54bc5f45d77208e7 | 01e4fad56b16110843b1906d90cec29b64c6c72d | /thonnycontrib/pyboard/api_stubs/hashlib.py | dbf78816260dc2903521f26add9e8de42d6682fc | [
"MIT"
] | permissive | jurajhalama/thonny-pyboard | 1a753b24293068d35fdf28e0ff73ce877e1ce7a1 | d967ffe653ef3678ae4ab8ed6a58ca696b47b6a6 | refs/heads/master | 2020-06-19T21:12:15.168310 | 2019-06-10T08:41:54 | 2019-06-10T08:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py |
class sha256:
''
def digest():
pass
def update():
pass
| [
"[email protected]"
] | |
a61cdb49b970d13ed67911410634ebf20bf05b5f | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /H5AQQhFhvLWMp9giA_3.py | db920cb06f0b0e787060313a1e8d53e1e434e67d | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py |
import re
pattern = r'^\s*|\s*$'
| [
"[email protected]"
] | |
3562f25f6517f4afb0b4ec5ceb853d99f9d34116 | a331ac86bf0dc281b1b819f70110deb873833698 | /python/higher/test/setup.py | e4e45f1aa39b2122f55e2f893e2600f14fb9fc1a | [] | no_license | sunhuachuang/study-demo | f0c2bbaca78a6735442039a33a051a8b715f8490 | 822dfec043d53678c62f5dce407477f9fdd42873 | refs/heads/master | 2020-07-22T06:16:00.361964 | 2018-01-08T09:50:50 | 2018-01-08T09:50:50 | 66,520,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | from setuptools import setup, find_packages
setup(
name="test",
version="0.0.1",
keywords=("test", ),
description="test package",
url="https://github.com/sunhuachuang",
author="sun",
author_email="[email protected]",
packages=find_packages()
)
| [
"[email protected]"
] | |
2aced5c4391eace15eacc74cf045c411483606cb | 17993dcca87d490bc9841437309f309a5592ab38 | /Codes/logistic_regression/mnist_gd_tfdata.py | 73d698f2f762ef720a43606f260da1deb67e8ee2 | [] | no_license | dreamlikexin/machine_learning | bc86ea15ef8552ad1be78a5bc65fb74a2cdb274e | 850e87025270847210b6ad188d2da181983a72c7 | refs/heads/master | 2022-01-16T09:51:20.538340 | 2019-06-19T16:27:26 | 2019-06-19T16:27:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from machine_learning.logistic_regression.lib.logistic_regression_gd import LogisticRegression
import machine_learning.logistic_regression.lib.classification_metrics as metrics
mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
X_train, y_train = mnist.train.images, mnist.train.labels
X_test, y_test = mnist.test.images, mnist.test.labels
y_train = (y_train == 6).astype(np.int).reshape(-1,1)
y_test = (y_test == 6).astype(np.int).reshape(-1,1)
model = LogisticRegression()
model.fit(X_train, y_train, eta=0.01, N=3000)
proba = model.predict_proba(X_test)
entropy = metrics.cross_entropy(y_test, proba)
print("cross entropy = {}".format(entropy))
| [
"[email protected]"
] | |
b90c426c5c0356cb66726af358af4424b301208b | 1c83920efda583d0dcedda2ac9d91235094685e2 | /web/appauth/urls.py | 09dc1c47d4ea8a5f66990da9a38e74d9f4098dfe | [] | no_license | eshandas/django_project_template | d866d2d8c5e206b0430e6130bc470042af50b7fa | 09786f6201d8e83199a2c0b7a83b6b6b0c8fd285 | refs/heads/master | 2022-07-22T14:39:50.521081 | 2019-08-06T11:00:19 | 2019-08-06T11:00:19 | 65,455,207 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | from django.urls import path
from .views import (
Login,
Logout,
ForgotPassword,
ResetPassword,
ChangePassword,
UserInfo,
Dashboard,
)
app_name = 'appauth'
urlpatterns = (
path('login/', Login.as_view(), name='login'),
path('logout/', Logout.as_view(), name='logout'),
path('password/forgot/', ForgotPassword.as_view(), name='forgot_password'),
path('password/reset/', ResetPassword.as_view(), name='reset_password'),
path('password/change/', ChangePassword.as_view(), name='change_password'),
path('info/', UserInfo.as_view(), name='userinfo'),
path('dashboard/', Dashboard.as_view(), name='dashboard'),
) | [
"[email protected]"
] | |
cf863da28d00b3b02a014b33d5711b05ca1f763f | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/Wellfleet-WFMPC-MIB.py | cd9109a973e88291045a215d99c3d7cdaa215b2e | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 25,685 | py | #
# PySNMP MIB module Wellfleet-WFMPC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Wellfleet-WFMPC-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:42:17 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
IpAddress, iso, Integer32, Counter64, Gauge32, ModuleIdentity, NotificationType, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, TimeTicks, Bits, Counter32, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "iso", "Integer32", "Counter64", "Gauge32", "ModuleIdentity", "NotificationType", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "TimeTicks", "Bits", "Counter32", "Unsigned32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
wfmpcObjects, = mibBuilder.importSymbols("Wellfleet-COMMON-MIB", "wfmpcObjects")
# --- MPC base entry table (wfmpcEntryTable) --------------------------------
# One conceptual row per MPC instance, indexed by wfmpcSlot.  This module is
# auto-generated by pysmi from the Wellfleet-WFMPC-MIB ASN.1 source; the OID
# tuples and constraint values below are data and must not be edited by hand.
wfmpcEntryTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 1), )
if mibBuilder.loadTexts: wfmpcEntryTable.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcEntryTable.setDescription('MPC base entry table. ')
# Row object: index is the slot number on which the MPC is instantiated.
wfmpcEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 1, 1), ).setIndexNames((0, "Wellfleet-WFMPC-MIB", "wfmpcSlot"))
if mibBuilder.loadTexts: wfmpcEntry.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcEntry.setDescription('An MPC base entry description')
# Column 1: row create/delete control (1=created, 2=deleted; default created).
wfmpcDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfmpcDelete.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcDelete.setDescription('Indication to create or delete an MPC base Entry from the MIB ')
# Column 2: administrative enable/disable of the MPC subsystem (default enabled).
wfmpcDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfmpcDisable.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcDisable.setDescription('Enables or Disables MPC Subsystem.')
# Column 3: slot number (read-only; also serves as the table index).
wfmpcSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcSlot.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcSlot.setDescription('The Slot number of the slot on which this MPC is instantiated.')
# Column 4: circuit number associated with the ATM driver.
wfmpcCct = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 1, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfmpcCct.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcCct.setDescription('This corresponds to the circuit number associated with the ATM Driver ')
# Column 5: message number to send to the destination (see wfmpcMsgSendEnable).
wfmpcMsgNum = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfmpcMsgNum.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcMsgNum.setDescription('Send the gived message to DestId.')
# Column 6: trigger — when enabled, send the MPOA message to the MPS (default disabled).
wfmpcMsgSendEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfmpcMsgSendEnable.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcMsgSendEnable.setDescription('Send the MPOA message to MPS if enabled.')
# Column 7: remote MPS ATM address (octet string; note the generated default
# includes a literal "0x" prefix, as emitted by the MIB compiler).
wfmpcMPSAtmAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 1, 1, 7), OctetString().clone('0x390000000000000000000000000000A2CB2C2804')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfmpcMPSAtmAddr.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcMPSAtmAddr.setDescription("The remote MPS's ATM Address to which we have to send MPOA Resolution Request.")
# Column 8: trigger — when enabled, (re)establish the VC to the MPS (default disabled).
wfmpcSetupVCtoMPS = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfmpcSetupVCtoMPS.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcSetupVCtoMPS.setDescription('(Re)Establish VC to MPS if SetupVC enabled.')
# Column 9: this MPC's data ATM address, returned in Cache Imposition Replies.
wfmpcMPCAtmAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 1, 1, 9), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfmpcMPCAtmAddr.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcMPCAtmAddr.setDescription("This MPC (emulator)'s Data ATM Address which is sent back in Cache Imposition Reply.")
# Column 10: flag selecting CIP Nack vs CIP Ack behavior (default disabled).
wfmpcCIPNackFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfmpcCIPNackFlag.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcCIPNackFlag.setDescription('Flag which indicates CIP Nack or CIP Ack.')
# --- MPC statistics table (wfmpcStatisticsTable) ----------------------------
# Read-only Counter32 statistics per MPOA server, indexed by
# (wfmpcStatSlot, wfmpcStatIndex).  Columns 2-11 count received MPOA Resolve
# Requests / transmitted Resolve Replies broken out by MPOA CIE code; columns
# 12-19 count transmitted Cache Imposition Requests / received Imposition
# Replies by CIE code.  All descriptions below come from the original MIB
# text (AF-MPOA-0087.000); do not edit generated OIDs by hand.
wfmpcStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2), )
if mibBuilder.loadTexts: wfmpcStatisticsTable.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatisticsTable.setDescription('This table represents the statistical information for the MPCs, which this agent manages.')
# Row object: one row of statistics per MPOA server.
# NOTE(review): the index object wfmpcStatSlot is referenced here but not
# defined in this chunk — presumably declared elsewhere in the module.
wfmpcStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1), ).setIndexNames((0, "Wellfleet-WFMPC-MIB", "wfmpcStatSlot"), (0, "Wellfleet-WFMPC-MIB", "wfmpcStatIndex"))
if mibBuilder.loadTexts: wfmpcStatisticsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatisticsEntry.setDescription('Each row in this table contains statistics for one MPOA server.')
# Column 1: secondary table index (MPC index).
wfmpcStatIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatIndex.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatIndex.setDescription(' This is the mpc index ')
# Column 2: MPOA Resolve Requests received (translated to NHRP requests).
wfmpcStatRxMpoaResolveRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatRxMpoaResolveRequests.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatRxMpoaResolveRequests.setDescription('The number of MPOA Resolve Requests received by this MPC which are translated to NHRP resolve requests. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of wfmpcDiscontinuityTime.')
# Columns 3-10: Resolve Replies transmitted, by MPOA CIE code
# (0x00 success, 0x81-0x85 specific failures, 0x88 unspecified, plus other).
wfmpcStatTxMpoaResolveReplyAcks = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyAcks.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyAcks.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyAcks.setDescription('The number of MPOA Resolve Replies transmitted by this MPC which contain the MPOA CIE Code of 0x00 - Success. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatTxMpoaResolveReplyInsufECResources = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyInsufECResources.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyInsufECResources.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyInsufECResources.setDescription('The number of MPOA Resolve Replies transmitted by this MPC which contain the MPOA CIE Code of 0x81 - Insufficient resources to accept egress cache entry. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatTxMpoaResolveReplyInsufSCResources = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyInsufSCResources.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyInsufSCResources.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyInsufSCResources.setDescription('The number of MPOA Resolve Replies transmitted by this MPC which contain the MPOA CIE Code of 0x82 - Insufficent resources to accept shortcut. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatTxMpoaResolveReplyInsufEitherResources = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyInsufEitherResources.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyInsufEitherResources.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyInsufEitherResources.setDescription('The number of MPOA Resolve Replies transmitted by this MPC which contain the MPOA CIE CODE of 0x83 - Insufficient resources to accept either shortcut or egress cache entry. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatTxMpoaResolveReplyUnsupportedInetProt = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyUnsupportedInetProt.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyUnsupportedInetProt.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyUnsupportedInetProt.setDescription('The number of MPOA Resolve Replies transmitted by this MPC which contain the MPOA CIE CODE of 0x84 - Unsupported Internetwork Layer protocol. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatTxMpoaResolveReplyUnsupportedMacEncaps = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyUnsupportedMacEncaps.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyUnsupportedMacEncaps.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyUnsupportedMacEncaps.setDescription('The number of MPOA Resolve Replies transmitted by this MPC which contain the MPOA CIE CODE of 0x85 - Unsupported MAC layer encapsulation. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatTxMpoaResolveReplyUnspecifiedOther = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyUnspecifiedOther.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyUnspecifiedOther.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyUnspecifiedOther.setDescription('The number of MPOA Resolve Replies transmitted by this MPC which contain the MPOA CIE CODE of 0x88 - Unspecified/Other. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatTxMpoaResolveReplyOtherOther = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyOtherOther.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyOtherOther.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatTxMpoaResolveReplyOtherOther.setDescription('The number of MPOA Resolve Replies transmitted by this MPC which are not counted above. NOTE - this would include NHRP errors. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
# Column 11: expirations of the MPC Give-up Time (MPC-p6) while awaiting a
# reply to a re-originated (translated NHRP) resolution request.
wfmpcStatGiveupTimeExpireds = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatGiveupTimeExpireds.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatGiveupTimeExpireds.setDescription('The number of times the MPC Give up Time (MPC-p6) has expired while waiting for a reply from a re-originated MPOA resolution request, i.e. a reply for a translated NHRP resolution request. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
# Column 12: Cache Imposition Requests transmitted.
wfmpcStatTxMpoaImpRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatTxMpoaImpRequests.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatTxMpoaImpRequests.setDescription('The number of MPOA Cache Imposition Requests transmitted by this MPC. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
# Columns 13-19: Cache Imposition Replies received, by MPOA CIE code
# (0x00 success, 0x81-0x85 specific failures, 0x88 unspecified).
wfmpcStatRxMpoaImpReplyAcks = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyAcks.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyAcks.setDescription('The number of successful MPOA Cache Imposition Replies received by this MPC which contain an MPOA CIE Code of 0x00, Success. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatRxMpoaImpReplyInsufECResources = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyInsufECResources.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyInsufECResources.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyInsufECResources.setDescription('The number of MPOA Cache Imposition Replies received by this MPC which contain the MPOA CIE Code of 0x81, Insufficient resources to accept egress cache entry. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatRxMpoaImpReplyInsufSCResources = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyInsufSCResources.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyInsufSCResources.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyInsufSCResources.setDescription('The number of MPOA Cache Imposition Replies received by this MPC which contain the MPOA CIE Code of 0x82 - Insufficient resources to accept shortcut. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatRxMpoaImpReplyInsufEitherResources = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyInsufEitherResources.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyInsufEitherResources.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyInsufEitherResources.setDescription('The number of MPOA Cache Imposition Replies received by this MPC which contain the MPOA CIE Code of 0x83 - Insufficient resources to accept either shortcut or egress cache entry. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatRxMpoaImpReplyUnsupportedInetProt = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyUnsupportedInetProt.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyUnsupportedInetProt.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyUnsupportedInetProt.setDescription('The number of MPOA Cache Imposition Replies received by this MPC which contain the MPOA CIE Code of 0x84 - Unsupported Internetwork Layer protocol. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatRxMpoaImpReplyUnsupportedMacEncaps = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyUnsupportedMacEncaps.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyUnsupportedMacEncaps.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyUnsupportedMacEncaps.setDescription('The number of MPOA Cache Imposition Replies received by this MPC which contain the MPOA CIE Code of 0x85 - Unsupported MAC layer encapsulation. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatRxMpoaImpReplyUnspecifiedOther = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyUnspecifiedOther.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyUnspecifiedOther.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyUnspecifiedOther.setDescription('The number of MPOA Cache Imposition Replies received by this MPC which contain the MPOA CIE Code of 0x88 - Unspecified/Other. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatRxMpoaImpReplyOtherOther = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyOtherOther.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Page 62.')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyOtherOther.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatRxMpoaImpReplyOtherOther.setDescription('The number of MPOA Cache Imposition Replies received by this MPC which are not counted previously. NOTE - this would include NHRP errors. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatRxMpoaEgressCachePurgeRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatRxMpoaEgressCachePurgeRequests.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatRxMpoaEgressCachePurgeRequests.setDescription('The number of MPOA Egress Cache Purges Requests received by this MPC. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatTxMpoaEgressCachePurgeReplies = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatTxMpoaEgressCachePurgeReplies.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatTxMpoaEgressCachePurgeReplies.setDescription('The number of MPOA Egress Cache Purge Replies transmitted by this MPC. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatTxMpoaTriggers = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatTxMpoaTriggers.setReference('Multiprotocol Over ATM. AF-MPOA-0087.000. Section 4.7.2.1 MPOA Trigger')
if mibBuilder.loadTexts: wfmpcStatTxMpoaTriggers.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatTxMpoaTriggers.setDescription('The number of MPOA Trigger messages transmitted by this MPC. Discontinuities in the value of this counter can occur at re-initialization of the management system, and/or re-initialization of the MPC, and at other times, as indicated by the value of mpcDiscontinuityTime.')
wfmpcStatSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 9, 14, 2, 1, 24), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfmpcStatSlot.setStatus('mandatory')
if mibBuilder.loadTexts: wfmpcStatSlot.setDescription('The Slot number of the slot on which this MPC is instantiated.')
mibBuilder.exportSymbols("Wellfleet-WFMPC-MIB", wfmpcMPCAtmAddr=wfmpcMPCAtmAddr, wfmpcMsgNum=wfmpcMsgNum, wfmpcDelete=wfmpcDelete, wfmpcStatGiveupTimeExpireds=wfmpcStatGiveupTimeExpireds, wfmpcStatTxMpoaResolveReplyInsufECResources=wfmpcStatTxMpoaResolveReplyInsufECResources, wfmpcStatRxMpoaImpReplyInsufECResources=wfmpcStatRxMpoaImpReplyInsufECResources, wfmpcStatisticsTable=wfmpcStatisticsTable, wfmpcCct=wfmpcCct, wfmpcSlot=wfmpcSlot, wfmpcStatRxMpoaImpReplyUnsupportedInetProt=wfmpcStatRxMpoaImpReplyUnsupportedInetProt, wfmpcStatisticsEntry=wfmpcStatisticsEntry, wfmpcStatIndex=wfmpcStatIndex, wfmpcStatTxMpoaResolveReplyUnspecifiedOther=wfmpcStatTxMpoaResolveReplyUnspecifiedOther, wfmpcEntry=wfmpcEntry, wfmpcStatRxMpoaEgressCachePurgeRequests=wfmpcStatRxMpoaEgressCachePurgeRequests, wfmpcDisable=wfmpcDisable, wfmpcStatTxMpoaResolveReplyOtherOther=wfmpcStatTxMpoaResolveReplyOtherOther, wfmpcSetupVCtoMPS=wfmpcSetupVCtoMPS, wfmpcStatRxMpoaImpReplyAcks=wfmpcStatRxMpoaImpReplyAcks, wfmpcStatRxMpoaImpReplyOtherOther=wfmpcStatRxMpoaImpReplyOtherOther, wfmpcStatTxMpoaTriggers=wfmpcStatTxMpoaTriggers, wfmpcStatRxMpoaResolveRequests=wfmpcStatRxMpoaResolveRequests, wfmpcStatRxMpoaImpReplyInsufEitherResources=wfmpcStatRxMpoaImpReplyInsufEitherResources, wfmpcCIPNackFlag=wfmpcCIPNackFlag, wfmpcStatRxMpoaImpReplyUnsupportedMacEncaps=wfmpcStatRxMpoaImpReplyUnsupportedMacEncaps, wfmpcMsgSendEnable=wfmpcMsgSendEnable, wfmpcMPSAtmAddr=wfmpcMPSAtmAddr, wfmpcStatSlot=wfmpcStatSlot, wfmpcStatTxMpoaResolveReplyInsufSCResources=wfmpcStatTxMpoaResolveReplyInsufSCResources, wfmpcStatTxMpoaImpRequests=wfmpcStatTxMpoaImpRequests, wfmpcEntryTable=wfmpcEntryTable, wfmpcStatTxMpoaResolveReplyUnsupportedInetProt=wfmpcStatTxMpoaResolveReplyUnsupportedInetProt, wfmpcStatTxMpoaResolveReplyAcks=wfmpcStatTxMpoaResolveReplyAcks, wfmpcStatTxMpoaResolveReplyUnsupportedMacEncaps=wfmpcStatTxMpoaResolveReplyUnsupportedMacEncaps, 
wfmpcStatTxMpoaEgressCachePurgeReplies=wfmpcStatTxMpoaEgressCachePurgeReplies, wfmpcStatTxMpoaResolveReplyInsufEitherResources=wfmpcStatTxMpoaResolveReplyInsufEitherResources, wfmpcStatRxMpoaImpReplyInsufSCResources=wfmpcStatRxMpoaImpReplyInsufSCResources, wfmpcStatRxMpoaImpReplyUnspecifiedOther=wfmpcStatRxMpoaImpReplyUnspecifiedOther)
| [
"[email protected]"
] | |
44d5f850f17713244033a26d848e397da519eccd | af41ca2086f7da6ca036921b2e2cec89e0e5d522 | /src/Pyro4/utils/flameserver.py | 8d278f20be023c123746e028a09a4f2d8f4c0f03 | [
"MIT"
] | permissive | irmen/Pyro4 | 023830905bb0d8fc25aed8e990631268f7fbe52c | 8ec0db055d76ae1512239710b1e30883ee6bd74b | refs/heads/master | 2023-08-22T10:18:47.878310 | 2023-06-04T16:00:32 | 2023-06-04T16:00:32 | 11,037,154 | 667 | 105 | MIT | 2022-06-26T14:23:01 | 2013-06-28T20:25:58 | Python | UTF-8 | Python | false | false | 2,593 | py | """
Pyro FLAME: Foreign Location Automatic Module Exposer.
Easy but potentially very dangerous way of exposing remote modules and builtins.
This is the commandline server.
You can start this module as a script from the command line, to easily get a
flame server running:
:command:`python -m Pyro4.utils.flameserver`
or simply: :command:`pyro4-flameserver`
You have to explicitly enable Flame first though by setting the FLAME_ENABLED config item.
Pyro - Python Remote Objects. Copyright by Irmen de Jong ([email protected]).
"""
from __future__ import print_function
import sys
import os
import warnings
from Pyro4.configuration import config
from Pyro4 import core
from Pyro4.utils import flame
def main(args=None, returnWithoutLooping=False):
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-H", "--host", default="localhost", help="hostname to bind server on (default=%default)")
parser.add_option("-p", "--port", type="int", default=0, help="port to bind server on")
parser.add_option("-u", "--unixsocket", help="Unix domain socket name to bind server on")
parser.add_option("-q", "--quiet", action="store_true", default=False, help="don't output anything")
parser.add_option("-k", "--key", help="the HMAC key to use (deprecated)")
options, args = parser.parse_args(args)
if options.key:
warnings.warn("using -k to supply HMAC key on the command line is a security problem "
"and is deprecated since Pyro 4.72. See the documentation for an alternative.")
if "PYRO_HMAC_KEY" in os.environ:
if options.key:
raise SystemExit("error: don't use -k and PYRO_HMAC_KEY at the same time")
options.key = os.environ["PYRO_HMAC_KEY"]
if not options.quiet:
print("Starting Pyro Flame server.")
hmac = (options.key or "").encode("utf-8")
if not hmac and not options.quiet:
print("Warning: HMAC key not set. Anyone can connect to this server!")
config.SERIALIZERS_ACCEPTED = {"pickle"} # flame requires pickle serializer, doesn't work with the others.
daemon = core.Daemon(host=options.host, port=options.port, unixsocket=options.unixsocket)
if hmac:
daemon._pyroHmacKey = hmac
uri = flame.start(daemon)
if not options.quiet:
print("server uri: %s" % uri)
print("server is running.")
if returnWithoutLooping:
return daemon, uri # for unit testing
else:
daemon.requestLoop()
daemon.close()
return 0
if __name__ == "__main__":
sys.exit(main())
| [
"[email protected]"
] | |
d6a5eef6d6e045d62b9c8acd509ebff2eb2df38e | 1bd073f585706c31c406bceb81eb400f8ac27c1d | /tools/Polygraphy/examples/api/00_inference_with_tensorrt/load_and_run.py | e6a2a4542f7d1efcfc9ee553b1ebadc2d5df6384 | [
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | neevaco/TensorRT | 7b5e54c6a7cc6d0fc545e47ab7cf6656f23d5e19 | 650a4a6ed29403bec1a55663b48ef41a075d0b3c | refs/heads/neeva | 2023-05-29T19:20:26.431716 | 2022-08-19T23:09:26 | 2022-08-26T19:09:39 | 526,771,012 | 0 | 0 | Apache-2.0 | 2022-08-19T23:09:27 | 2022-08-19T22:49:25 | null | UTF-8 | Python | false | false | 1,848 | py | #!/usr/bin/env python3
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script loads the TensorRT engine built by `build_and_run.py` and runs inference.
"""
import numpy as np
from polygraphy.backend.common import BytesFromPath
from polygraphy.backend.trt import EngineFromBytes, TrtRunner
def main():
# Just as we did when building, we can compose multiple loaders together
# to achieve the behavior we want. Specifically, we want to load a serialized
# engine from a file, then deserialize it into a TensorRT engine.
load_engine = EngineFromBytes(BytesFromPath("identity.engine"))
# Inference remains virtually exactly the same as before:
with TrtRunner(load_engine) as runner:
inp_data = np.ones(shape=(1, 1, 2, 2), dtype=np.float32)
# NOTE: The runner owns the output buffers and is free to reuse them between `infer()` calls.
# Thus, if you want to store results from multiple inferences, you should use `copy.deepcopy()`.
outputs = runner.infer(feed_dict={"x": inp_data})
assert np.array_equal(outputs["y"], inp_data) # It's an identity model!
print("Inference succeeded!")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c8b052e45e49f5ed4a2b6da595120102c8858fdf | aef8eb6681e555ecb61ac67151e4c54d6fdd1023 | /plots/plotsTommy/reco/limit_from_plot.py | af021609fd4aa6c7cba619ecabb02483a7fbc6ee | [] | no_license | HephyAnalysisSW/TopEFT | 0e2dc89f7a43bacf50c77a042f56663e9d4f3404 | 53174807c96dffa6654e4dc63bef92f2b71706ee | refs/heads/master | 2022-11-07T02:41:53.120759 | 2020-03-31T08:08:27 | 2020-03-31T08:08:27 | 98,643,866 | 0 | 3 | null | 2019-10-14T09:02:09 | 2017-07-28T11:38:23 | Python | UTF-8 | Python | false | false | 6,928 | py | #!/usr/bin/env python
''' Make limit from plot
'''
#
# Standard imports and batch mode
#
import ROOT, os
ROOT.gROOT.SetBatch(True)
ROOT.gROOT.LoadMacro("$CMSSW_BASE/src/StopsDilepton/tools/scripts/tdrstyle.C")
ROOT.setTDRStyle()
import array
from RootTools.core.standard import *
from TopEFT.Tools.user import plot_directory
from TopEFT.Tools.cardFileWriter import cardFileWriter
#
# Arguments
#
import argparse
argParser = argparse.ArgumentParser(description = "Argument parser")
argParser.add_argument('--logLevel', action='store', default='INFO', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'], help="Log level for logging")
argParser.add_argument('--input', action='store', default='./mlp1.root', help="Input file.")
argParser.add_argument('--output', action='store', default='./mlp1.txt', help="Output card file.")
argParser.add_argument('--refLumi', action='store', type=float, default=300, help="Lumi used in the input file.")
args = argParser.parse_args()
# Logger
import TopEFT.Tools.logger as logger
import RootTools.core.logger as logger_rt
logger = logger.get_logger( args.logLevel, logFile = None)
logger_rt = logger_rt.get_logger(args.logLevel, logFile = None)
# load histos
gDir = ROOT.gDirectory
rFile = ROOT.TFile.Open( args.input )
if rFile:
# first & only TObject in file is the canvas
try:
canvas = rFile.Get(rFile.GetListOfKeys().At(0).GetName())
histos = [ canvas.GetListOfPrimitives().At(i) for i in range(canvas.GetListOfPrimitives().GetSize()) ]
histos = filter( lambda h: type(h)==ROOT.TH1F, histos)
except:
logger.error( "Could not load input file %s", args.input)
sys.exit(-1)
else:
logger.error( "Could not load input file %s", args.input)
sys.exit(-1)
# nicer name
histos[0].SetName( "signal" )
for i_histo, histo in enumerate(histos[1:]):
histo.SetName("bkg_%i"%i_histo)
# signal is first, the last histo is a copy
logger.info( "Loaded %i histos from file %s", len(histos), args.input)
histos = histos[:-1]
# un-stack
for i_histo, histo in enumerate(histos[:-1]):
histos[i_histo+1].Scale(-1)
histo.Add( histos[i_histo+1] )
histos[i_histo+1].Scale(-1)
# compute differences
h_signal, h_backgrounds = histos[0], histos[1:]
logger.info("Total signal %s %f", h_signal.GetName(), h_signal.Integral())
for i_h, h in enumerate(h_backgrounds):
logger.info( "Total bkg %i %s: %f", i_h, h.GetName(), h.Integral() )
result = {}
lumi_factor = 136./300.
signal_strengths = [0, 0.25, 0.5, 0.75, 1., 1.5, 2, 2.2]
for signal_strength in signal_strengths:
c = cardFileWriter.cardFileWriter()
bkg_sys = 1.1
bkg_shape_sys = 1.1
for i in range(len(h_backgrounds)):
c.addUncertainty('bkg_sys_%i'%i, 'lnN')
c.addUncertainty('bkg_shape_sys_%i'%i, 'lnN')
c.addUncertainty('sig_sys', 'lnN')
sig_sys = 1.25
for i_bin in range(1, 1+h_signal.GetNbinsX()):
c.addBin('Bin'+str(i_bin), [h.GetName() for h in h_backgrounds], 'Bin'+str(i_bin))
y_signal = h_signal.GetBinContent(i_bin)
y_backgrounds = [ h.GetBinContent(i_bin) for h in h_backgrounds ]
# Assume we observe the background
c.specifyObservation('Bin'+str(i_bin), int(round(lumi_factor*(y_signal+sum(y_backgrounds)))))
for i_h, h in enumerate(h_backgrounds):
c.specifyExpectation('Bin'+str(i_bin), h.GetName(), lumi_factor*h.GetBinContent(i_bin))
c.specifyUncertainty('bkg_sys_%i'%i_h, 'Bin'+str(i_bin),h.GetName(),bkg_sys)
c.specifyUncertainty('bkg_shape_sys_%i'%i_h, 'Bin'+str(i_bin),h.GetName(),1+(bkg_shape_sys-1)*(i_bin)/(h_signal.GetNbinsX()))
c.specifyExpectation('Bin'+str(i_bin), h_signal.GetName(), lumi_factor*signal_strength*h_signal.GetBinContent(i_bin))
c.specifyUncertainty('sig_sys','Bin'+str(i_bin),h_signal.GetName(),sig_sys)
c.addUncertainty('Lumi', 'lnN')
c.specifyFlatUncertainty('Lumi', 1.03)
c.writeToFile(args.output)
result[signal_strength] = c.calcNLL(rm=False)
def getIntersections(func, level, x_min=0, x_max=4, stepsize=0.001):
intersections = []
x_val = x_min
while x_val < x_max:
x_val += stepsize
intersection = func.GetX(level, x_val-stepsize, x_val)
if (x_val-stepsize+stepsize/10000.) < intersection < (x_val-stepsize/10000.):
intersections.append(intersection)
return intersections
c1 = ROOT.TCanvas()
ROOT.gPad.SetRightMargin(0.15)
x = array.array('d', signal_strengths )
y = array.array('d', [-2*result[strength]['nll'] for strength in signal_strengths] )
g = ROOT.TGraph(len(x),x,y)
#funStr = "[1]*(x-1)+[2]*(x-1)**2+[3]*(x-1)**4"
funStr = "[0]*(x-1)**2+[1]*(x-1)**3+[2]*(x-1)**4"
fun = ROOT.TF1("name", funStr, 0, signal_strengths[-1])
fun.SetTitle("")
g.Fit(fun)
parameters = [fun.GetParameter(i) for i in range(fun.GetNpar())]
fun.Draw("")
fun.SetLineColor(ROOT.kBlue-2)
fun.GetYaxis().SetRangeUser( 0, 50)
delta = 0.001
x_min, x_max = min(signal_strengths), max(signal_strengths)
# find TF1 segments under threshold level**2
levels = [1, 2, 5]
intervals = {}
for level in levels:
intersections = getIntersections(fun, level**2, x_min, x_max, delta/20.)
intervals[level] = []
for i,v in enumerate(intersections):
if i > len(intersections)-2: break
if fun.GetMinimum(intersections[i], intersections[i+1]) < 0.99:
#intervals.append((intersections[i], intersections[i+1]))
intervals[level].append(ROOT.TF1('', funStr, intersections[i], intersections[i+1]))
intervals[level][-1].SetParameters(*parameters)
for interval in intervals[2]:
interval.SetFillColorAlpha(ROOT.kYellow,0.9)
interval.SetLineColor(ROOT.kOrange-2)
interval.SetFillStyle(1111)
interval.Draw("f1same")
for interval in intervals[1]:
interval.SetFillColorAlpha(ROOT.kGreen+1,0.9)
interval.SetLineColor(ROOT.kCyan-3)
interval.SetFillStyle(1111)
interval.Draw("f1same")
stuff = []
tex = ROOT.TLatex()
#tex.SetNDC()
tex.SetTextSize(0.03)
tex.SetTextColor(ROOT.kGray+2)
#tex.SetTextAngle(90)
tex.SetTextAlign(12) # align right
for level in [ 1, 2, 3, 4, 5 ]:
l = ROOT.TLine(x_min, level**2, x_max, level**2)
l.SetLineStyle( ROOT.kDashed )
l.SetLineColor(ROOT.kGray+2)
stuff.append(l)
l.Draw("same")
tex.DrawLatex(x_max*2.2/2.5, level**2+1, "%i#sigma"%level)
tex.Draw()
stuff.append(tex)
fun.GetYaxis().SetTitle("q")
fun.GetXaxis().SetTitle("#mu_{tWZ}")
fun.GetXaxis().SetLabelSize(0.04)
fun.GetYaxis().SetLabelSize(0.04)
fun.SetLineWidth(2)
fun.Draw("same")
c1.Print("/afs/hephy.at/user/r/rschoefbeck/www/etc/ll.png")
c1.Print("/afs/hephy.at/user/r/rschoefbeck/www/etc/ll.pdf")
c1.Print("/afs/hephy.at/user/r/rschoefbeck/www/etc/ll.root")
| [
"[email protected]"
] | |
ce6d3961163e58a9f22bb9d51ea651701b5b7154 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/list_user_login_protects_request.py | 66abf386c7fd819e50d5432578b575a2dd82df3a | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,065 | py | # coding: utf-8
import re
import six
class ListUserLoginProtectsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
}
attribute_map = {
}
def __init__(self):
"""ListUserLoginProtectsRequest - a model defined in huaweicloud sdk"""
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListUserLoginProtectsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
c95b725df892a32e5f2b3b50a98bab78b289cd2f | be79025b3bc13fecc54d03db4786c83056bd0b41 | /pydir/Special.py | 316457ff4915b6c58a99669e6bb6c79a81085941 | [] | no_license | brainysmurf/pydir | 695875016094d72fe2b4a68d60510129940b664b | 35acb56f2f311407efbcfb0c403756425f18e723 | refs/heads/master | 2021-01-10T21:01:53.988603 | 2011-08-14T03:33:34 | 2011-08-14T03:33:34 | 2,203,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | def yo():
"""
Program {0}
""".format("Yes")
pass
print(yo.__doc__)
help(yo)
| [
"[email protected]"
] | |
17d31d1319d4f86626104931e9945e669c8ebd41 | ef41528b736f0ac7927fb110211f016f51362b9a | /Mux_src/aScript_Add_Songs_in_Path.py | de61edc3f4d8b63faeb42e08fd424baa9ff9b05d | [] | no_license | rduvalwa5/Mux | d243edb10121dcd991f5129c367bf918c4bd31e7 | 2dba11861f91e4bdc1ef28279132a6d8dd4ccf54 | refs/heads/master | 2023-05-10T06:53:20.249532 | 2023-04-26T23:53:21 | 2023-04-26T23:53:21 | 73,444,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | '''
Created on Mar 16, 2017
@author: rduvalwa2
'''
from Music_Get_Functions import musicGet_Functions
if __name__ == "__main__":
mux = musicGet_Functions()
myPath = "/Users/rduvalwa2/Music/Music/Media.localized/The Statler Brothers/The Best of the Statler Bros_"
album = "The Best of the Statler Bros_"
artist = "The Statler Brothers"
genre = "Country"
inType = "Itunes"
Medium = 'Download'
# mux.add_songs_in_path(myPath, album, artist, genre, inType, Medium)
mux.add_album(album, artist, genre, inType, Medium)
mux.add_artist(artist, genre)
| [
"[email protected]"
] | |
9176f63c57c1e41f0f2eda2e4230db2966440a0b | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv2/lib/python3.8/site-packages/ansible/plugins/test/mathstuff.py | b8251ad42ca082239b0a74e1ffec429da98f11f7 | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 1,327 | py | # (c) 2016, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import math
def issubset(a, b):
return set(a) <= set(b)
def issuperset(a, b):
return set(a) >= set(b)
def isnotanumber(x):
try:
return math.isnan(x)
except TypeError:
return False
class TestModule:
''' Ansible math jinja2 tests '''
def tests(self):
return {
# set theory
'issubset': issubset,
'subset': issubset,
'issuperset': issuperset,
'superset': issuperset,
'isnan': isnotanumber,
'nan': isnotanumber,
}
| [
"[email protected]"
] | |
4169ad39d56c6d5f14227d8946f7bbe55d190baf | c3432a248c8a7a43425c0fe1691557c0936ab380 | /2022/two_pointer/1806_부분합.py | b9e0e04008b3c43ba286237ccdafd8891c2652f6 | [] | no_license | Parkyunhwan/BaekJoon | 13cb3af1f45212d7c418ecc4b927f42615b14a74 | 9a882c568f991c9fed3df45277f091626fcc2c94 | refs/heads/master | 2022-12-24T21:47:47.052967 | 2022-12-20T16:16:59 | 2022-12-20T16:16:59 | 232,264,447 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | import sys
N, S = map(int, input().split())
arr = list(map(int, input().split()))
def solve():
left = 0
right = 0
sum_value = 0
min_length = sys.maxsize
while left < N:
if sum_value < S:
if right == N:
break
sum_value += arr[right]
right += 1
elif sum_value >= S:
sum_value -= arr[left]
min_length = min(min_length, right - left)
left += 1
return min_length
result = solve()
if result != sys.maxsize:
print(result)
else:
print(0)
| [
"[email protected]"
] | |
f8455f61ade57470067945166776dbd8bbb6fabf | bec402eb6b6ae6cecf53a6d3190568526584a18c | /coursera/models.py | cc8c7cf0c31d1d9e816567887ed6636eeebeb5ce | [] | no_license | Lozoniuk/bionic_python_django | c29ab730bbeacb7d80be935650a3e9b36be3b679 | 80755b7524d650d99effdf69cc243bd0cdf9b7f5 | refs/heads/master | 2020-04-01T21:25:09.823735 | 2014-11-20T12:23:27 | 2014-11-20T12:23:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | from django.db import models
from django.contrib.auth.models import User
class Student(User):
group = models.CharField(max_length=200)
def __unicode__(self):
return self.username | [
"[email protected]"
] | |
772b1aa1376317fe86b2851e740a3595c3ade06e | fba6a51f946e71a48c4f7564079ca6e718dbec15 | /documenters_aggregator/middlewares.py | 98b6425d8773422d676178b71d4fbbf8b684378f | [
"MIT"
] | permissive | milti/documenters-aggregator | 2eb87122d18240dbcc27f82561e98a8754009779 | 96ac8cd2f3d1192d414f1196567a0d4b154f7eda | refs/heads/master | 2020-12-30T09:27:28.384703 | 2017-08-16T00:44:43 | 2017-08-16T00:44:43 | 100,422,679 | 0 | 0 | null | 2017-08-15T21:58:38 | 2017-08-15T21:58:38 | null | UTF-8 | Python | false | false | 1,919 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class DocumentersAggregatorSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
] | |
acb51f21a28766d559c06b940092f26692e89267 | dd9a63a074384d465175ea6dc5343a2a5848d3c1 | /pyexcel/sheets/nominablesheet.py | 7dcada7cf79990eb4de1c77444cdeb68fac2c131 | [
"BSD-3-Clause"
] | permissive | nikolas/pyexcel | c95ef11b1f83156c563c04b4ee1957dd886c625f | 72bda1b3787584592243a7892817feadcdce210f | refs/heads/master | 2021-01-21T17:53:36.336541 | 2016-08-01T12:21:05 | 2016-08-01T12:21:05 | 64,692,577 | 1 | 0 | null | 2016-08-01T18:44:27 | 2016-08-01T18:44:27 | null | UTF-8 | Python | false | false | 25,410 | py | """
pyexcel.sheets.nominablesheet
~~~~~~~~~~~~~~~~~~~
Building on top of formattablesheet, adding named columns and rows support
:copyright: (c) 2014-2015 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
from .matrix import Row, Column, Matrix
from ..formatters import (
ColumnFormatter,
RowFormatter,
NamedColumnFormatter,
NamedRowFormatter)
from .._compact import is_string, OrderedDict, PY2, is_array_type
from .._compact import is_tuple_consists_of_strings
from ..iterators import (
ColumnIndexIterator,
RowIndexIterator,
NamedRowIterator,
NamedColumnIterator
)
from ..constants import (
MESSAGE_NOT_IMPLEMENTED_02,
MESSAGE_DATA_ERROR_ORDEREDDICT_IS_EXPECTED,
DEFAULT_NAME)
def names_to_indices(names, series):
    """Translate row/column name(s) into positional indices.

    :param names: a single name, a list of names, or anything else
        (returned unchanged, e.g. indices that need no translation)
    :param series: the ordered name series to look positions up in
    :returns: an int for a single name, a list of ints for a list of
        names, otherwise ``names`` untouched
    :raises ValueError: if a name is not present in ``series``
    """
    if isinstance(names, str):
        return series.index(names)
    # Only a list whose first element is a string is treated as a list
    # of names; mixed or numeric lists pass through unchanged.
    if isinstance(names, list) and isinstance(names[0], str):
        return [series.index(name) for name in names]
    return names
def make_names_unique(alist):
    """Return a list of names where duplicates get a ``-N`` suffix.

    Non-string entries are converted with :func:`str` first.  The first
    occurrence of a name keeps its original spelling; each subsequent
    duplicate becomes ``"name-1"``, ``"name-2"`` and so on.
    """
    seen = {}
    unique_names = []
    for name in alist:
        if not is_string(type(name)):
            name = str(name)
        if name in seen:
            # Already encountered: bump the counter and suffix the name.
            seen[name] += 1
            unique_names.append("%s-%d" % (name, seen[name]))
        else:
            seen[name] = 0
            unique_names.append(name)
    return unique_names
class NamedRow(Row):
    """Series Sheet would have Named Row instead of Row
    Here is an example to merge sheets. Suppose we have the
    following three files::
    >>> import pyexcel as pe
    >>> data = [[1,2,3],[4,5,6],[7,8,9]]
    >>> s = pe.Sheet(data)
    >>> s.save_as("1.csv")
    >>> data2 = [['a','b','c'],['d','e','f'],['g','h','i']]
    >>> s2 = pe.Sheet(data2)
    >>> s2.save_as("2.csv")
    >>> data3=[[1.1, 2.2, 3.3],[4.4, 5.5, 6.6],[7.7, 8.8, 9.9]]
    >>> s3=pe.Sheet(data3)
    >>> s3.save_as("3.csv")
    >>> merged = pe.Sheet()
    >>> for file in ["1.csv", "2.csv", "3.csv"]:
    ...     r = pe.get_sheet(file_name=file)
    ...     merged.row += r
    >>> merged.save_as("merged.csv")
    Now let's verify what we had::
    >>> sheet = pe.get_sheet(file_name="merged.csv")
    this is added to overcome doctest's inability to handle
    python 3's unicode::
    >>> sheet.format(lambda v: str(v))
    >>> sheet
    merged.csv:
    +-----+-----+-----+
    | 1   | 2   | 3   |
    +-----+-----+-----+
    | 4   | 5   | 6   |
    +-----+-----+-----+
    | 7   | 8   | 9   |
    +-----+-----+-----+
    | a   | b   | c   |
    +-----+-----+-----+
    | d   | e   | f   |
    +-----+-----+-----+
    | g   | h   | i   |
    +-----+-----+-----+
    | 1.1 | 2.2 | 3.3 |
    +-----+-----+-----+
    | 4.4 | 5.5 | 6.6 |
    +-----+-----+-----+
    | 7.7 | 8.8 | 9.9 |
    +-----+-----+-----+
    .. testcleanup::
    >>> import os
    >>> os.unlink("1.csv")
    >>> os.unlink("2.csv")
    >>> os.unlink("3.csv")
    >>> os.unlink("merged.csv")
    """

    def select(self, names):
        """Delete row indices other than specified
        Examples:
            >>> import pyexcel as pe
            >>> data = [[1],[2],[3],[4],[5],[6],[7],[9]]
            >>> sheet = pe.Sheet(data)
            >>> sheet
            pyexcel sheet:
            +---+
            | 1 |
            +---+
            | 2 |
            +---+
            | 3 |
            +---+
            | 4 |
            +---+
            | 5 |
            +---+
            | 6 |
            +---+
            | 7 |
            +---+
            | 9 |
            +---+
            >>> sheet.row.select([1,2,3,5])
            >>> sheet
            pyexcel sheet:
            +---+
            | 2 |
            +---+
            | 3 |
            +---+
            | 4 |
            +---+
            | 6 |
            +---+
            >>> data = [
            ...     ['a', 1],
            ...     ['b', 1],
            ...     ['c', 1]
            ... ]
            >>> sheet = pe.Sheet(data, name_rows_by_column=0)
            >>> sheet.row.select(['a', 'b'])
            >>> sheet
            pyexcel sheet:
            +---+---+
            | a | 1 |
            +---+---+
            | b | 1 |
            +---+---+
        """
        # translate row names to indices before delegating to Row
        if is_array_type(names, str):
            indices = names_to_indices(names, self.ref.rownames)
            Row.select(self, indices)
        else:
            Row.select(self, names)

    def __delitem__(self, row_name):
        """
        Examples::
            >>> import pyexcel as pe
            >>> data = [
            ...     ['a', 1],
            ...     ['b', 1],
            ...     ['c', 1]
            ... ]
            >>> sheet = pe.Sheet(data, name_rows_by_column=0)
            >>> del sheet.row['a', 'b']
            >>> sheet
            pyexcel sheet:
            +---+---+
            | c | 1 |
            +---+---+
        """
        # accepts a single row name, a tuple of row names, or whatever
        # Row.__delitem__ supports (index, slice, ...)
        if is_string(type(row_name)):
            self.ref.delete_named_row_at(row_name)
        elif is_tuple_consists_of_strings(row_name):
            indices = names_to_indices(list(row_name), self.ref.rownames)
            Row.__delitem__(self, indices)
        else:
            Row.__delitem__(self, row_name)

    def __setitem__(self, str_or_aslice, c):
        if is_string(type(str_or_aslice)):
            self.ref.set_named_row_at(str_or_aslice, c)
        else:
            Row.__setitem__(self, str_or_aslice, c)

    def __getitem__(self, str_or_aslice):
        if is_string(type(str_or_aslice)):
            return self.ref.named_row_at(str_or_aslice)
        else:
            return Row.__getitem__(self, str_or_aslice)

    def __iadd__(self, other):
        """Overload += sign
        :param list other: the row header must be the first element.
        :return: self
        """
        if isinstance(other, OrderedDict):
            self.ref.extend_rows(other)
        else:
            Row.__iadd__(self, other)
        return self

    def __add__(self, other):
        """Overload += sign
        :return: self
        """
        self.__iadd__(other)
        return self.ref

    def format(self,
               row_index=None, formatter=None,
               format_specs=None, on_demand=False):
        """Format a row
        :param row_index: a row name/index, or a list of them
        :param formatter: the formatter to apply to row_index
        :param format_specs: a list of (row, formatter) specs, used when
            row_index is not given
        :param on_demand: register the formatter for lazy application
            instead of applying it immediately
        """
        def handle_one_formatter(rows, theformatter, on_demand):
            # map row names onto indices when the sheet has named rows
            new_indices = rows
            if len(self.ref.rownames) > 0:
                new_indices = names_to_indices(rows, self.ref.rownames)
            aformatter = RowFormatter(new_indices, theformatter)
            if on_demand:
                self.ref.add_formatter(aformatter)
            else:
                self.ref.apply_formatter(aformatter)
        if row_index is not None:
            handle_one_formatter(row_index, formatter, on_demand)
        elif format_specs:
            for spec in format_specs:
                # The former ``if len(spec) == 3`` check had two identical
                # branches, so it was removed; a third spec element is
                # (and always was) ignored.
                handle_one_formatter(spec[0], spec[1], on_demand)
class NamedColumn(Column):
    """Series Sheet would have Named Column instead of Column
    example::
        import pyexcel as pe
        r = pe.SeriesReader("example.csv")
        print(r.column["column 1"])
    """

    def select(self, names):
        """Delete columns other than specified
        Examples:
            >>> import pyexcel as pe
            >>> data = [[1,2,3,4,5,6,7,9]]
            >>> sheet = pe.Sheet(data)
            >>> sheet
            pyexcel sheet:
            +---+---+---+---+---+---+---+---+
            | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 9 |
            +---+---+---+---+---+---+---+---+
            >>> sheet.column.select([1,2,3,5])
            >>> sheet
            pyexcel sheet:
            +---+---+---+---+
            | 2 | 3 | 4 | 6 |
            +---+---+---+---+
            >>> data = [
            ...     ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'],
            ...     [1,2,3,4,5,6,7,9],
            ... ]
            >>> sheet = pe.Sheet(data, name_columns_by_row=0)
            >>> sheet
            pyexcel sheet:
            +---+---+---+---+---+---+---+---+
            | a | b | c | d | e | f | g | h |
            +===+===+===+===+===+===+===+===+
            | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 9 |
            +---+---+---+---+---+---+---+---+
            >>> del sheet.column['a', 'b', 'i', 'f'] # doctest:+ELLIPSIS
            Traceback (most recent call last):
                ...
            ValueError: ...
            >>> sheet.column.select(['a', 'c', 'e', 'h'])
            >>> sheet
            pyexcel sheet:
            +---+---+---+---+
            | a | c | e | h |
            +===+===+===+===+
            | 1 | 3 | 5 | 9 |
            +---+---+---+---+
        """
        # translate column names to indices before delegating to Column
        if is_array_type(names, str):
            indices = names_to_indices(names, self.ref.colnames)
            Column.select(self, indices)
        else:
            Column.select(self, names)

    def __delitem__(self, str_or_aslice):
        """
        Example::
            >>> import pyexcel as pe
            >>> data = [
            ...     ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'],
            ...     [1,2,3,4,5,6,7,9],
            ... ]
            >>> sheet = pe.Sheet(data, name_columns_by_row=0)
            >>> sheet
            pyexcel sheet:
            +---+---+---+---+---+---+---+---+
            | a | b | c | d | e | f | g | h |
            +===+===+===+===+===+===+===+===+
            | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 9 |
            +---+---+---+---+---+---+---+---+
            >>> del sheet.column['a', 'b', 'i', 'f'] # doctest:+ELLIPSIS
            Traceback (most recent call last):
                ...
            ValueError: ...
            >>> del sheet.column['a', 'c', 'e', 'h']
            >>> sheet
            pyexcel sheet:
            +---+---+---+---+
            | b | d | f | g |
            +===+===+===+===+
            | 2 | 4 | 6 | 7 |
            +---+---+---+---+
        """
        # accepts a single column name, a tuple of column names, or
        # whatever Column.__delitem__ supports (index, slice, ...)
        if is_string(type(str_or_aslice)):
            self.ref.delete_named_column_at(str_or_aslice)
        elif is_tuple_consists_of_strings(str_or_aslice):
            indices = names_to_indices(list(str_or_aslice), self.ref.colnames)
            Column.__delitem__(self, indices)
        else:
            Column.__delitem__(self, str_or_aslice)

    def __setitem__(self, str_or_aslice, c):
        if is_string(type(str_or_aslice)):
            self.ref.set_named_column_at(str_or_aslice, c)
        else:
            Column.__setitem__(self, str_or_aslice, c)

    def __getitem__(self, str_or_aslice):
        if is_string(type(str_or_aslice)):
            return self.ref.named_column_at(str_or_aslice)
        else:
            return Column.__getitem__(self, str_or_aslice)

    def __iadd__(self, other):
        """Overload += sign
        :param list other: the column header must be the first element.
        :return: self
        """
        if isinstance(other, OrderedDict):
            self.ref.extend_columns(other)
        else:
            Column.__iadd__(self, other)
        return self

    def __add__(self, other):
        """Overload += sign
        :return: self
        """
        self.__iadd__(other)
        return self.ref

    def format(self,
               column_index=None, formatter=None,
               format_specs=None, on_demand=False):
        """Format a column
        :param column_index: a column name/index, or a list of them
        :param formatter: the formatter to apply to column_index
        :param format_specs: a list of (column, formatter) specs, used
            when column_index is not given
        :param on_demand: register the formatter for lazy application
            instead of applying it immediately.  Added for consistency
            with :meth:`NamedRow.format`; defaults to the old behaviour.
        """
        def handle_one_formatter(columns, aformatter, on_demand):
            # map column names onto indices when the sheet has named columns
            new_indices = columns
            if len(self.ref.colnames) > 0:
                new_indices = names_to_indices(columns, self.ref.colnames)
            theformatter = ColumnFormatter(new_indices, aformatter)
            if on_demand:
                self.ref.add_formatter(theformatter)
            else:
                self.ref.apply_formatter(theformatter)
        if column_index is not None:
            handle_one_formatter(column_index, formatter, on_demand)
        elif format_specs:
            for spec in format_specs:
                handle_one_formatter(spec[0], spec[1], on_demand)
# Keyword arguments accepted by NominableSheet.__init__ / init; callers
# can validate incoming **keywords against this list before constructing.
VALID_SHEET_PARAMETERS = ['name_columns_by_row',
                          'name_rows_by_column',
                          'colnames',
                          'rownames',
                          'transpose_before',
                          'transpose_after']
class NominableSheet(Matrix):
    """Allow dictionary group of the content
    A :class:`Matrix` whose rows and columns can optionally be addressed
    by name as well as by index.
    """

    def __init__(self, sheet=None, name=DEFAULT_NAME,
                 name_columns_by_row=-1,
                 name_rows_by_column=-1,
                 colnames=None,
                 rownames=None,
                 transpose_before=False,
                 transpose_after=False):
        """Constructor
        :param sheet: two dimensional array
        :param name: this becomes the sheet name.
        :param name_columns_by_row: use a row to name all columns
        :param name_rows_by_column: use a column to name all rows
        :param colnames: use an external list of strings to name the columns
        :param rownames: use an external list of strings to name the rows
        :param transpose_before: transpose the data before any naming
        :param transpose_after: transpose the data after naming is done
        """
        self.init(
            sheet=sheet,
            name=name,
            name_columns_by_row=name_columns_by_row,
            name_rows_by_column=name_rows_by_column,
            colnames=colnames,
            rownames=rownames,
            transpose_before=transpose_before,
            transpose_after=transpose_after
        )

    def init(self, sheet=None, name=DEFAULT_NAME,
             name_columns_by_row=-1,
             name_rows_by_column=-1,
             colnames=None,
             rownames=None,
             transpose_before=False,
             transpose_after=False):
        """(Re-)initialise the sheet; accepts the constructor parameters."""
        # this get rid of phatom data by not specifying sheet
        if sheet is None:
            sheet = []
        Matrix.__init__(self, sheet)
        if transpose_before:
            self.transpose()
        self.name = name
        self._column_names = []
        self._row_names = []
        self.named_row = NamedRow(self)
        self.named_column = NamedColumn(self)
        if name_columns_by_row != -1:
            # naming columns from a data row excludes external colnames
            if colnames:
                raise NotImplementedError(MESSAGE_NOT_IMPLEMENTED_02)
            self.name_columns_by_row(name_columns_by_row)
        else:
            if colnames:
                self._column_names = colnames
        if name_rows_by_column != -1:
            # naming rows from a data column excludes external rownames
            if rownames:
                raise NotImplementedError(MESSAGE_NOT_IMPLEMENTED_02)
            self.name_rows_by_column(name_rows_by_column)
        else:
            if rownames:
                self._row_names = rownames
        if transpose_after:
            self.transpose()

    @property
    def row(self):
        """Row representation. see :class:`NamedRow`
        examples::
            >>> import pyexcel as pe
            >>> data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
            >>> sheet = pe.Sheet(data)
            >>> sheet.row[1]
            [4, 5, 6]
            >>> sheet.row[0:3]
            [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
            >>> sheet.row += [11, 12, 13]
            >>> sheet.row[3]
            [11, 12, 13]
            >>> sheet.row[0:4] = [0, 0, 0] # set all to zero
            >>> sheet.row[3]
            [0, 0, 0]
            >>> sheet.row[0] = ['a', 'b', 'c'] # set one row
            >>> sheet.row[0]
            ['a', 'b', 'c']
            >>> del sheet.row[0] # delete first row
            >>> sheet.row[0] # now, second row becomes the first
            [0, 0, 0]
            >>> del sheet.row[0:]
            >>> sheet.row[0] # nothing left
            Traceback (most recent call last):
                ...
            IndexError
        """
        return self.named_row

    @row.setter
    def row(self, value):
        # dummy setter to enable self.row += ..
        pass

    @property
    def column(self):
        """Column representation. see :class:`NamedColumn`"""
        return self.named_column

    @column.setter
    def column(self, value):
        # dummy setter to enable self.column += ..
        pass

    def name_columns_by_row(self, row_index):
        """Use the elements of a specified row to represent individual columns
        The specified row will be deleted from the data
        :param int row_index: the index of the row that has the column names
        """
        # remember where the header row came from;
        # extend_columns_with_rows pops incoming headers from the same spot
        self.row_index = row_index
        self._column_names = make_names_unique(self.row_at(row_index))
        del self.row[row_index]

    def name_rows_by_column(self, column_index):
        """Use the elements of a specified column to represent individual rows
        The specified column will be deleted from the data
        :param int column_index: the index of the column that has the row names
        """
        self.column_index = column_index
        self._row_names = make_names_unique(self.column_at(column_index))
        del self.column[column_index]

    @property
    def colnames(self):
        """Return column names"""
        return self._column_names

    @colnames.setter
    def colnames(self, value):
        """Set column names"""
        self._column_names = make_names_unique(value)

    @property
    def rownames(self):
        """Return row names"""
        return self._row_names

    @rownames.setter
    def rownames(self, value):
        """Set row names"""
        self._row_names = make_names_unique(value)

    def named_column_at(self, name):
        """Get a column by its name """
        index = name
        if is_string(type(index)):
            index = self.colnames.index(name)
        column_array = self.column_at(index)
        return column_array

    def set_named_column_at(self, name, column_array):
        """
        Take the first row as column names
        Given name to identify the column index, set the column to
        the given array except the column name.
        """
        index = name
        if is_string(type(index)):
            index = self.colnames.index(name)
        self.set_column_at(index, column_array)

    def delete_columns(self, column_indices):
        """Delete one or more columns
        :param list column_indices: a list of column indices
        """
        Matrix.delete_columns(self, column_indices)
        if len(self._column_names) > 0:
            # drop the names of the deleted columns as well
            new_series = [self._column_names[i]
                          for i in range(0, len(self._column_names))
                          if i not in column_indices]
            self._column_names = new_series

    def delete_rows(self, row_indices):
        """Delete one or more rows
        :param list row_indices: a list of row indices
        """
        Matrix.delete_rows(self, row_indices)
        if len(self._row_names) > 0:
            # drop the names of the deleted rows as well
            new_series = [self._row_names[i]
                          for i in range(0, len(self._row_names))
                          if i not in row_indices]
            self._row_names = new_series

    def delete_named_column_at(self, name):
        """Works only after you named columns by a row
        Given name to identify the column index, set the column to
        the given array except the column name.
        :param str name: a column name
        """
        if isinstance(name, int):
            # NOTE(review): popping *rownames* while deleting a column
            # looks like a copy-paste slip (self.delete_columns below
            # already trims colnames); confirm against upstream pyexcel.
            if len(self.rownames) > 0:
                self.rownames.pop(name)
            self.delete_columns([name])
        else:
            index = self.colnames.index(name)
            self.colnames.pop(index)
            # bypass self.delete_columns: the name was popped above
            Matrix.delete_columns(self, [index])

    def named_row_at(self, name):
        """Get a row by its name """
        index = name
        if is_string(type(index)):
            # restored guard for symmetry with named_column_at(): an
            # integer index now passes straight through instead of being
            # looked up (which raised ValueError before this fix)
            index = self.rownames.index(name)
        row_array = self.row_at(index)
        return row_array

    def set_named_row_at(self, name, row_array):
        """
        Take the first column as row names
        Given name to identify the row index, set the row to
        the given array except the row name.
        """
        index = name
        if is_string(type(index)):
            index = self.rownames.index(name)
        self.set_row_at(index, row_array)

    def delete_named_row_at(self, name):
        """Take the first column as row names
        Given name to identify the row index, set the row to
        the given array except the row name.
        """
        if isinstance(name, int):
            if len(self.rownames) > 0:
                self.rownames.pop(name)
            self.delete_rows([name])
        else:
            index = self.rownames.index(name)
            self.rownames.pop(index)
            # bypass self.delete_rows: the name was popped above
            Matrix.delete_rows(self, [index])

    def apply_formatter(self, aformatter):
        """Apply the formatter immediately.
        :param Formatter aformatter: a custom formatter
        """
        aformatter = self._translate_named_formatter(aformatter)
        Matrix.apply_formatter(self, aformatter)

    def _translate_named_formatter(self, aformatter):
        # rewrite name-based formatter indices into positional indices
        if isinstance(aformatter, NamedColumnFormatter):
            series = self.colnames
        elif isinstance(aformatter, NamedRowFormatter):
            series = self.rownames
        else:
            series = None
        if series:
            indices = names_to_indices(aformatter.indices, series)
            aformatter.update_index(indices)
        return aformatter

    def extend_rows(self, rows):
        """Take ordereddict to extend named rows
        :param ordereddist/list rows: a list of rows.
        """
        incoming_data = []
        if isinstance(rows, OrderedDict):
            keys = rows.keys()
            for k in keys:
                self.rownames.append(k)
                incoming_data.append(rows[k])
            Matrix.extend_rows(self, incoming_data)
        elif len(self.rownames) > 0:
            # named rows can only be extended with an OrderedDict
            raise TypeError(MESSAGE_DATA_ERROR_ORDEREDDICT_IS_EXPECTED)
        else:
            Matrix.extend_rows(self, rows)

    def extend_columns_with_rows(self, rows):
        """Put rows on the right most side of the data"""
        if len(self.colnames) > 0:
            # NOTE(review): self.row_index only exists after
            # name_columns_by_row() has run; externally supplied colnames
            # would make this raise AttributeError -- confirm intent.
            headers = rows.pop(self.row_index)
            self._column_names += headers
        Matrix.extend_columns_with_rows(self, rows)

    def extend_columns(self, columns):
        """Take ordereddict to extend named columns
        :param ordereddist/list columns: a list of columns
        """
        incoming_data = []
        if isinstance(columns, OrderedDict):
            keys = columns.keys()
            for k in keys:
                self.colnames.append(k)
                incoming_data.append(columns[k])
            Matrix.extend_columns(self, incoming_data)
        elif len(self.colnames) > 0:
            # named columns can only be extended with an OrderedDict
            raise TypeError(MESSAGE_DATA_ERROR_ORDEREDDICT_IS_EXPECTED)
        else:
            Matrix.extend_columns(self, columns)

    def __iter__(self):
        """Pick the iterator according to which axis is named."""
        if len(self._column_names) > 0:
            return ColumnIndexIterator(self)
        elif len(self._row_names) > 0:
            return RowIndexIterator(self)
        else:
            return Matrix.__iter__(self)

    def to_array(self):
        """Returns an array after filtering"""
        from ..utils import to_array
        ret = []
        ret += to_array(self.rows())
        if len(self.rownames) > 0:
            # prepend each row's name to the row itself
            ret = map(lambda value: [value[0]] + value[1],
                      zip(self.rownames, ret))
            if not PY2:
                ret = list(ret)
        if len(self.colnames) > 0:
            if len(self.rownames) > 0:
                # empty corner cell above the row-name column
                ret.insert(0, [""] + self.colnames)
            else:
                ret.insert(0, self.colnames)
        return ret

    def to_records(self, custom_headers=None):
        """Returns the content as an array of dictionaries
        """
        from ..utils import to_records
        return to_records(self, custom_headers)

    def to_dict(self, row=False):
        """Returns a dictionary"""
        from ..utils import to_dict
        if row:
            return to_dict(RowIndexIterator(self))
        else:
            return to_dict(ColumnIndexIterator(self))

    def __getitem__(self, aset):
        """Support sheet[row, column] where either part may be a name."""
        if isinstance(aset, tuple):
            if isinstance(aset[0], str):
                row = self.rownames.index(aset[0])
            else:
                row = aset[0]
            if isinstance(aset[1], str):
                column = self.colnames.index(aset[1])
            else:
                column = aset[1]
            return self.cell_value(row, column)
        else:
            return Matrix.__getitem__(self, aset)

    def __setitem__(self, aset, c):
        """Support sheet[row, column] = value with named coordinates."""
        if isinstance(aset, tuple):
            if isinstance(aset[0], str):
                row = self.rownames.index(aset[0])
            else:
                row = aset[0]
            if isinstance(aset[1], str):
                column = self.colnames.index(aset[1])
            else:
                column = aset[1]
            self.cell_value(row, column, c)
        else:
            Matrix.__setitem__(self, aset, c)

    def named_rows(self):
        # presumably iterates rows keyed by name -- see NamedRowIterator
        return NamedRowIterator(self)

    def named_columns(self):
        # presumably iterates columns keyed by name -- see NamedColumnIterator
        return NamedColumnIterator(self)
| [
"[email protected]"
] | |
b243c1eb7fd87338027443c852b9034370f180e6 | cc81cc2e5b52bbfe4d13ed6b37859965f7e9408a | /resources/tests.py | 0bdef30d6c623104d15067e803172c8249d07683 | [] | no_license | youpiyoful/pythonclassmates | edb966e3cdf71d00277e49efb1e16aa4c16bbb5e | 08fae9c919b6376239c150697dc9719520c06b1c | refs/heads/master | 2023-02-14T17:22:09.349226 | 2021-01-14T02:56:16 | 2021-01-14T02:56:16 | 323,906,893 | 0 | 1 | null | 2021-01-14T01:21:34 | 2020-12-23T13:17:05 | Python | UTF-8 | Python | false | false | 2,896 | py | """Integration tests of the resources app pages."""
from wagtail.images.blocks import ImageChooserBlock
from wagtail.tests.utils import WagtailPageTests
from wagtail.tests.utils.form_data import nested_form_data, streamfield
from blog.models import BlogPage, PostPage
from home.models import HomePage
from events.models import EventPage, EventsPage
from resources.models import ResourcesPage, ResourcePage
# from flex.models import FlexPage
class ResourcesPageTests(WagtailPageTests):
    """Resources page behavior test."""

    def test_can_create_only_resource_page(self):
        """test we can create only ResourcePage from ResourcesPage"""
        self.assertCanNotCreateAt(ResourcesPage, HomePage)
        self.assertCanNotCreateAt(ResourcesPage, EventPage)
        self.assertCanNotCreateAt(ResourcesPage, EventsPage)
        self.assertCanNotCreateAt(ResourcesPage, BlogPage)
        # self.assertCanNotCreateAt(BlogPage, FlexPage)
        self.assertCanNotCreateAt(ResourcesPage, ResourcesPage)
        # Fixed a copy-paste from the blog tests: this test is about
        # ResourcesPage, so assert ITS allowed subpages (ResourcePage),
        # not BlogPage's.
        self.assertAllowedSubpageTypes(ResourcesPage, {ResourcePage})

    def test_can_create_resource_page(self):
        """
        We are testing that it's possible to create
        a resource page from the resources part.
        """
        self.assertCanCreateAt(ResourcesPage, ResourcePage)
class ResourcePageTests(WagtailPageTests):
    """Resource page behavior test."""
    def test_can_not_create_any_page(self):
        """
        we are testing that no child page
        can be created from resourcePage.
        """
        self.assertCanNotCreateAt(ResourcePage, HomePage)
        self.assertCanNotCreateAt(ResourcePage, EventPage)
        self.assertCanNotCreateAt(ResourcePage, EventsPage)
        self.assertCanNotCreateAt(ResourcePage, BlogPage)
        self.assertCanNotCreateAt(ResourcePage, PostPage)
        # self.assertCanNotCreateAt(PostPage, FlexPage)
        self.assertCanNotCreateAt(ResourcePage, ResourcesPage)
        # ResourcePage is a leaf: its allowed-subpage set must be empty
        self.assertAllowedSubpageTypes(ResourcePage, {})
    def test_can_only_be_created_in_resources_page_parent(self):
        """
        Test that the resource page cannot be
        created in a parent other than the resourcespage.
        """
        self.assertAllowedParentPageTypes(
            ResourcePage, {ResourcesPage}
        )
# def test_can_create_post_page(self):
# """ Test PostPageCreation are ok"""
# # Assert that a ContentPage can be made here, with this POST data
# self.assertCanCreate(BlogPage, PostPage, nested_form_data({
# 'custom_title': 'About us',
# 'content': streamfield([
# ('text', 'Lorem ipsum dolor sit amet'),
# ])
# # 'blog_image': ImageChooserBlock
# }))
# custom_title
# blog_image
# description
# content
# categories
# tags
# content_panels | [
"[email protected]"
] | |
2be30b19aaefb84da5148586d9978e42bae4eaf9 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/insights/get_activity_log_alert.py | d81b36925eeb8c51d68e403b066ae1877a85315e | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,196 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetActivityLogAlertResult',
'AwaitableGetActivityLogAlertResult',
'get_activity_log_alert',
'get_activity_log_alert_output',
]
@pulumi.output_type
class GetActivityLogAlertResult:
"""
An Activity Log Alert rule resource.
"""
def __init__(__self__, actions=None, condition=None, description=None, enabled=None, id=None, location=None, name=None, scopes=None, tags=None, type=None):
if actions and not isinstance(actions, dict):
raise TypeError("Expected argument 'actions' to be a dict")
pulumi.set(__self__, "actions", actions)
if condition and not isinstance(condition, dict):
raise TypeError("Expected argument 'condition' to be a dict")
pulumi.set(__self__, "condition", condition)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if enabled and not isinstance(enabled, bool):
raise TypeError("Expected argument 'enabled' to be a bool")
pulumi.set(__self__, "enabled", enabled)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if scopes and not isinstance(scopes, list):
raise TypeError("Expected argument 'scopes' to be a list")
pulumi.set(__self__, "scopes", scopes)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def actions(self) -> 'outputs.ActionListResponse':
"""
The actions that will activate when the condition is met.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter
def condition(self) -> 'outputs.AlertRuleAllOfConditionResponse':
"""
The condition that will cause this alert to activate.
"""
return pulumi.get(self, "condition")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
A description of this Activity Log Alert rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
Indicates whether this Activity Log Alert rule is enabled. If an Activity Log Alert rule is not enabled, then none of its actions will be activated.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource. Since Azure Activity Log Alerts is a global service, the location of the rules should always be 'global'.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def scopes(self) -> Sequence[str]:
"""
A list of resource IDs that will be used as prefixes. The alert will only apply to Activity Log events with resource IDs that fall under one of these prefixes. This list must include at least one item.
"""
return pulumi.get(self, "scopes")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetActivityLogAlertResult(GetActivityLogAlertResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetActivityLogAlertResult(
actions=self.actions,
condition=self.condition,
description=self.description,
enabled=self.enabled,
id=self.id,
location=self.location,
name=self.name,
scopes=self.scopes,
tags=self.tags,
type=self.type)
def get_activity_log_alert(activity_log_alert_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetActivityLogAlertResult:
"""
An Activity Log Alert rule resource.
API Version: 2020-10-01.
:param str activity_log_alert_name: The name of the Activity Log Alert rule.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['activityLogAlertName'] = activity_log_alert_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:insights:getActivityLogAlert', __args__, opts=opts, typ=GetActivityLogAlertResult).value
return AwaitableGetActivityLogAlertResult(
actions=__ret__.actions,
condition=__ret__.condition,
description=__ret__.description,
enabled=__ret__.enabled,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
scopes=__ret__.scopes,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_activity_log_alert)
def get_activity_log_alert_output(activity_log_alert_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetActivityLogAlertResult]:
"""
An Activity Log Alert rule resource.
API Version: 2020-10-01.
:param str activity_log_alert_name: The name of the Activity Log Alert rule.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
| [
"[email protected]"
] | |
e497ee7c6fc1d99e7cec72d1a9672e3eaba1d033 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04001/s666000652.py | 8febb46752a063e6eaf83e2495a6012824ec9336 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | # -*- coding: utf-8 -*-
S = input()
n = len(S)
sum_formula = 0
# Brute force over all 2**(n-1) ways of inserting '+' between adjacent
# digits: bit `shift` of `bit` decides whether a '+' is placed between
# S[shift] and S[shift+1].
for bit in range(2 ** (n-1)):
    nums = []
    # which gap between digits the current bit refers to
    num = S[0]
    for shift in range(n-1):
        if not ((bit >> shift)&1):
            num = num + S[shift+1]
        else:
            nums.append(int(num))
            num = S[shift+1]
    nums.append(int(num))
    sum_formula += sum(nums)
print(sum_formula)
# 再帰関数
def calc_all_formula(S, i, lists):
    """Recursively enumerate every '+'-insertion of the digit string S.

    Each element of *lists* is a pair ([finished numbers], pending digit
    string).  At gap i (between S[i] and S[i+1]) every partial state
    either closes the pending number or keeps accumulating digits; once
    the last gap is reached each state is finalised into a list of ints.
    """
    split_here = [(done + [int(pending)], S[i+1]) for done, pending in lists]
    keep_going = [(done, pending + S[i+1]) for done, pending in lists]
    candidates = split_here + keep_going
    # base case: last gap reached -- close every pending number
    if i == len(S) - 2:
        return [done + [int(pending)] for done, pending in candidates]
    return calc_all_formula(S, i + 1, candidates)
# base_cand = [([], S[0])]
# if len(S) >= 2:
# sum_formula = sum([sum(lst) for lst in calc_all_formula(S, 0, base_cand)])
# print(sum_formula)
# else:
# print(int(S)) | [
"[email protected]"
] | |
627bfc86c47a4d66c3aa8d88de13b29fe6301b04 | 2a171178942a19afe9891c2425dce208ae04348b | /kubernetes/test/test_v1beta1_job_status.py | cfc5fd57f823f4d33a882fee43bf94f753ee86de | [
"Apache-2.0"
] | permissive | ouccema/client-python | ac3f1dee1c5ad8d82f15aeecb87a2f5f219ca4f4 | d7f33ec53e302e66674df581904a3c5b1fcf3945 | refs/heads/master | 2021-01-12T03:17:54.274888 | 2017-01-03T22:13:14 | 2017-01-03T22:13:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.0-snapshot
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_job_status import V1beta1JobStatus
class TestV1beta1JobStatus(unittest.TestCase):
""" V1beta1JobStatus unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1JobStatus(self):
"""
Test V1beta1JobStatus
"""
model = kubernetes.client.models.v1beta1_job_status.V1beta1JobStatus()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
c24bb6886a48aaa9e47428e2df5af60d5ed73c55 | 63daf225819636397fda6ef7e52783331c27f295 | /taobao-sdk/top/api/rest/ItemcatsAuthorizeGetRequest.py | fd5db84b60c429ab09a6969487e4d598ee750905 | [] | no_license | cash2one/language-Python | e332ecfb4e9321a11407b29987ee64d44e552b15 | 8adb4f2fd2f023f9cc89b4edce1da5f71a3332ab | refs/heads/master | 2021-06-16T15:15:08.346420 | 2017-04-20T02:44:16 | 2017-04-20T02:44:16 | 112,173,361 | 1 | 0 | null | 2017-11-27T09:08:57 | 2017-11-27T09:08:57 | null | UTF-8 | Python | false | false | 310 | py | '''
Created by auto_sdk on 2014.03.04
'''
from top.api.base import RestApi
class ItemcatsAuthorizeGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.fields = None
def getapiname(self):
return 'taobao.itemcats.authorize.get'
| [
"[email protected]"
] | |
c8085220e7b6b96d11824d7bb4b34a4c007bc174 | 1cceb7c7e6d708dc9de0084739f582ccde2633a3 | /examples/test_no_sensors.py | 647a1dd10226b958ce9207ed3fbddf1dcae15cd9 | [
"BSD-3-Clause"
] | permissive | zjtomy/python-rpi-greenhouse | 5144d0267aecfa2de240832f3350ec67a5e23955 | e263f3b1c933e2d5a174f5b1de1cbb9fc3e8e56f | refs/heads/master | 2020-06-28T08:11:48.233302 | 2015-12-04T13:40:25 | 2015-12-04T13:40:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | from rpi_greenhouse import GreenhouseIndicator
from time import sleep
indicator = GreenhouseIndicator()
while True:
indicator.show_status_on_leds()
sleep(5)
| [
"[email protected]"
] | |
e261f7fae236ff20693fddc7323d3fc97bb5d8e5 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2851/47329/233364.py | 52a521838bcc0f87cedcdc749af9060f67225042 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | n = int(input())
print(max([sum(map(int, input().split())) for i in range(n)]))
| [
"[email protected]"
] | |
72763d657d04cba848de805950347bceacb614cb | 79aa4b99a48bb16a907916ad63c902443420541a | /0022.py | d6ffc37f8e6df753e4e3fb2d944ef83a56d7bdfd | [] | no_license | mach8686devops/leetcode-100 | 62dec66c719d7cfa120ca9505701df49d8d5b982 | f90526c9b073165b86b933cdf7d1dc496e68f2c6 | refs/heads/main | 2023-04-11T06:28:15.059587 | 2021-04-13T12:11:54 | 2021-04-13T12:11:54 | 329,346,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | from functools import lru_cache
from typing import List
class Solution:
    def generateParenthesis(self, n: int) -> List[str]:
        """Return all combinations of *n* pairs of balanced parentheses.

        Every valid string decomposes uniquely as "(A)B" where A and B
        are shorter valid strings; the helper recurses on that
        decomposition and memoises intermediate results.
        """
        return _all_parentheses(n)


@lru_cache(maxsize=None)
def _all_parentheses(n: int) -> List[str]:
    # Memoised on a module-level function rather than the bound method:
    # @lru_cache on an instance method keys the cache on `self` and keeps
    # every Solution instance alive for the cache's lifetime (ruff B019).
    if n == 0:
        return ['']
    ans = []
    for c in range(n):
        for left in _all_parentheses(c):
            for right in _all_parentheses(n - 1 - c):
                ans.append('({}){}'.format(left, right))
    return ans
print(Solution().generateParenthesis(n=3))
| [
"[email protected]"
] | |
988438f1bea17c5b2c089e07dac7e8713136fdc0 | d91f0dbfeb72e9069803efbe7d230d3836b6759b | /FormExam/FormExam/FormExam/wsgi.py | 53e80fb58b7662495aa858acdf1b1d75ff1263be | [] | no_license | daiki-0520/django_practice | 98efda419fb98fd6097e69cab4bb046c653082bf | 379622a962bcfe08b143c35b84c7e6f60d97df7c | refs/heads/master | 2023-03-22T05:41:01.857022 | 2021-03-19T06:24:03 | 2021-03-19T06:24:03 | 341,800,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for FormExam project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FormExam.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
3e2c7d114bfedc67dae2e3410fac2792652dc324 | a7b66311c2ce113789933ec3162f1128b2862f13 | /numeric/scripts/ode_ivp.py | 2441368ba7dc04cf56439a1acc9e817fc837164e | [
"MIT"
] | permissive | ChanJeunlam/geolearn | 214b2c42359ea1164b39117fad2d7470adeb6d35 | 791caa54eb70920823ea7d46714dc8a3e7fa7445 | refs/heads/master | 2023-07-16T04:13:15.526364 | 2021-08-16T05:24:18 | 2021-08-16T05:24:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | import numpy as np
import matplotlib.pyplot as plt
from numeric import ode
# ODE system using euler forwarding
def F(t):
return np.array([t*np.exp(-2*t), np.exp(-t)])
def dF(t, y):
return np.array([-y[0]*2+y[1]**2, y[0]-y[1]-t*y[1]**2])
ny = 2
t = np.arange(0, 10, 0.01)
y = np.ndarray([len(t), ny])
y[0, :] = [0, 1]
for k in range(1, len(t)):
yy = y[k-1, :]
tt = t[k-1]
dt = t[k]-t[k-1]
y[k, :] = ode.forward2(tt, yy, dt, dF)
fig, ax = plt.subplots(1, 1)
f = F(t)
ax.plot(t, f[0, :], 'b')
ax.plot(t, f[1, :], 'b')
ax.plot(t, y[:, 0], 'r')
ax.plot(t, y[:, 1], 'r')
fig.show()
# throw
g = 10
y0 = [10, 0]
def F(t):
return np.array([-g*t+y0[0], -1/2*g*t**2+y0[0]*t+y0[1]]).T
def dF(t, y):
return np.array([-g, y[0]])
def throw(y0, t1, dt=0.01):
ny = 2
t = np.arange(0, t1, 0.01)
y = np.ndarray([len(t), ny])
y[0, :] = y0
for k in range(1, len(t)):
yy = y[k-1, :]
tt = t[k-1]
dt = t[k]-t[k-1]
y[k, :] = ode.forward1(tt, yy, dt, dF)
return t, y
t, y = throw(y0, 1)
fig, ax = plt.subplots(1, 1)
f = F(t)
ax.plot(t, f, 'b')
ax.plot(t, y, 'r')
fig.show()
| [
"[email protected]"
] | |
efaa18429722ce4503c8ba19e35771809d9e0396 | e6e57bf7d4eda37f1188ab72ff249675f40029ee | /algorithms_and_data_structures/strings/Longest Substring Without Repeating Chars.py | eb64961b7d75fe8faebb2d570e7abbd1b9e84eb2 | [] | no_license | juanpedrovel/bomboclap | 4e186331ef1c26c8522e44c21d6a33358471786b | 99db02266c31dd14357ef6a575d35fcf55718617 | refs/heads/master | 2020-04-19T21:16:38.141830 | 2019-01-31T00:31:24 | 2019-01-31T00:31:24 | 168,436,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | class Solution:
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
if not s:
return 0
table = {}
max_lenght = 0
index = 0
for i in range(len(s)):
if s[i] not in table:
table[s[i]] = i
else:
if i - index > max_lenght:
max_lenght = i - index
index = max(table[s[i]] + 1, index)
table[s[i]] = i
max_lenght = max(max_lenght, len(s) - index)
return max_lenght
time = "abba"
k = 2
d = Solution()
print(d.lengthOfLongestSubstring(time)) | [
"[email protected]"
] | |
01365cc5d8a7bed6566b16093c7c394beaa19bc0 | 18b3d06a8a93839f7e7a1cf536a71bfc0adf8e20 | /devel/lib/python2.7/dist-packages/msgs_demo/msg/_AutoDockingActionFeedback.py | 6b0cdfd25c794b66c7f618c1b775e595acc632eb | [] | no_license | akingse/ros_tutorial_ws | dc52cbbf443f7823a0abd9223fef076cf959a24e | 7c776d2f62af0455a899c80e171d5210e0a8b382 | refs/heads/main | 2023-03-01T04:48:54.510004 | 2021-02-08T14:08:18 | 2021-02-08T14:09:30 | 337,094,532 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,247 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from msgs_demo/AutoDockingActionFeedback.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import actionlib_msgs.msg
import genpy
import msgs_demo.msg
import std_msgs.msg
class AutoDockingActionFeedback(genpy.Message):
_md5sum = "6f7132467beab5153587d9ebb8d8262d"
_type = "msgs_demo/AutoDockingActionFeedback"
_has_header = True # flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
AutoDockingFeedback feedback
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: actionlib_msgs/GoalStatus
GoalID goal_id
uint8 status
uint8 PENDING = 0 # The goal has yet to be processed by the action server
uint8 ACTIVE = 1 # The goal is currently being processed by the action server
uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
# and has since completed its execution (Terminal State)
uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
# to some failure (Terminal State)
uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
# because the goal was unattainable or invalid (Terminal State)
uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
# and has not yet completed execution
uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
# but the action server has not yet confirmed that the goal is canceled
uint8 RECALLED = 8 # The goal received a cancel request before it started executing
# and was successfully cancelled (Terminal State)
uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
# sent over the wire by an action server
#Allow for the user to associate a string with GoalStatus for debugging
string text
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: msgs_demo/AutoDockingFeedback
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
#feedback
string state
string text
"""
__slots__ = ['header','status','feedback']
_slot_types = ['std_msgs/Header','actionlib_msgs/GoalStatus','msgs_demo/AutoDockingFeedback']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,status,feedback
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(AutoDockingActionFeedback, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.feedback is None:
self.feedback = msgs_demo.msg.AutoDockingFeedback()
else:
self.header = std_msgs.msg.Header()
self.status = actionlib_msgs.msg.GoalStatus()
self.feedback = msgs_demo.msg.AutoDockingFeedback()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
_x = self.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.status.status
buff.write(_get_struct_B().pack(_x))
_x = self.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.feedback.state
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.feedback.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.feedback is None:
self.feedback = msgs_demo.msg.AutoDockingFeedback()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.goal_id.id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status.goal_id.id = str[start:end]
start = end
end += 1
(self.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.text = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status.text = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.feedback.state = str[start:end].decode('utf-8', 'rosmsg')
else:
self.feedback.state = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.feedback.text = str[start:end].decode('utf-8', 'rosmsg')
else:
self.feedback.text = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
_x = self.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.status.status
buff.write(_get_struct_B().pack(_x))
_x = self.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.feedback.state
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.feedback.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.feedback is None:
self.feedback = msgs_demo.msg.AutoDockingFeedback()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.goal_id.id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status.goal_id.id = str[start:end]
start = end
end += 1
(self.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.text = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status.text = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.feedback.state = str[start:end].decode('utf-8', 'rosmsg')
else:
self.feedback.state = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.feedback.text = str[start:end].decode('utf-8', 'rosmsg')
else:
self.feedback.text = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
| [
"[email protected]"
] | |
dd8ba826e8603a45322205319804915132768d87 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/SettleCardInfo.py | c423d25f0cd7537346ab3f05842c9ac0b29eede3 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 6,111 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class SettleCardInfo(object):
def __init__(self):
self._account_branch_name = None
self._account_holder_name = None
self._account_inst_city = None
self._account_inst_id = None
self._account_inst_name = None
self._account_inst_province = None
self._account_no = None
self._account_type = None
self._bank_code = None
self._usage_type = None
@property
def account_branch_name(self):
return self._account_branch_name
@account_branch_name.setter
def account_branch_name(self, value):
self._account_branch_name = value
@property
def account_holder_name(self):
return self._account_holder_name
@account_holder_name.setter
def account_holder_name(self, value):
self._account_holder_name = value
@property
def account_inst_city(self):
return self._account_inst_city
@account_inst_city.setter
def account_inst_city(self, value):
self._account_inst_city = value
@property
def account_inst_id(self):
return self._account_inst_id
@account_inst_id.setter
def account_inst_id(self, value):
self._account_inst_id = value
@property
def account_inst_name(self):
return self._account_inst_name
@account_inst_name.setter
def account_inst_name(self, value):
self._account_inst_name = value
@property
def account_inst_province(self):
return self._account_inst_province
@account_inst_province.setter
def account_inst_province(self, value):
self._account_inst_province = value
@property
def account_no(self):
return self._account_no
@account_no.setter
def account_no(self, value):
self._account_no = value
@property
def account_type(self):
return self._account_type
@account_type.setter
def account_type(self, value):
self._account_type = value
@property
def bank_code(self):
return self._bank_code
@bank_code.setter
def bank_code(self, value):
self._bank_code = value
@property
def usage_type(self):
return self._usage_type
@usage_type.setter
def usage_type(self, value):
self._usage_type = value
def to_alipay_dict(self):
params = dict()
if self.account_branch_name:
if hasattr(self.account_branch_name, 'to_alipay_dict'):
params['account_branch_name'] = self.account_branch_name.to_alipay_dict()
else:
params['account_branch_name'] = self.account_branch_name
if self.account_holder_name:
if hasattr(self.account_holder_name, 'to_alipay_dict'):
params['account_holder_name'] = self.account_holder_name.to_alipay_dict()
else:
params['account_holder_name'] = self.account_holder_name
if self.account_inst_city:
if hasattr(self.account_inst_city, 'to_alipay_dict'):
params['account_inst_city'] = self.account_inst_city.to_alipay_dict()
else:
params['account_inst_city'] = self.account_inst_city
if self.account_inst_id:
if hasattr(self.account_inst_id, 'to_alipay_dict'):
params['account_inst_id'] = self.account_inst_id.to_alipay_dict()
else:
params['account_inst_id'] = self.account_inst_id
if self.account_inst_name:
if hasattr(self.account_inst_name, 'to_alipay_dict'):
params['account_inst_name'] = self.account_inst_name.to_alipay_dict()
else:
params['account_inst_name'] = self.account_inst_name
if self.account_inst_province:
if hasattr(self.account_inst_province, 'to_alipay_dict'):
params['account_inst_province'] = self.account_inst_province.to_alipay_dict()
else:
params['account_inst_province'] = self.account_inst_province
if self.account_no:
if hasattr(self.account_no, 'to_alipay_dict'):
params['account_no'] = self.account_no.to_alipay_dict()
else:
params['account_no'] = self.account_no
if self.account_type:
if hasattr(self.account_type, 'to_alipay_dict'):
params['account_type'] = self.account_type.to_alipay_dict()
else:
params['account_type'] = self.account_type
if self.bank_code:
if hasattr(self.bank_code, 'to_alipay_dict'):
params['bank_code'] = self.bank_code.to_alipay_dict()
else:
params['bank_code'] = self.bank_code
if self.usage_type:
if hasattr(self.usage_type, 'to_alipay_dict'):
params['usage_type'] = self.usage_type.to_alipay_dict()
else:
params['usage_type'] = self.usage_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = SettleCardInfo()
if 'account_branch_name' in d:
o.account_branch_name = d['account_branch_name']
if 'account_holder_name' in d:
o.account_holder_name = d['account_holder_name']
if 'account_inst_city' in d:
o.account_inst_city = d['account_inst_city']
if 'account_inst_id' in d:
o.account_inst_id = d['account_inst_id']
if 'account_inst_name' in d:
o.account_inst_name = d['account_inst_name']
if 'account_inst_province' in d:
o.account_inst_province = d['account_inst_province']
if 'account_no' in d:
o.account_no = d['account_no']
if 'account_type' in d:
o.account_type = d['account_type']
if 'bank_code' in d:
o.bank_code = d['bank_code']
if 'usage_type' in d:
o.usage_type = d['usage_type']
return o
| [
"[email protected]"
] | |
79cb03ca3c03bf897ace62b065716c192a9ae22a | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/CrowdStrikeFalconX/Integrations/CrowdStrikeFalconX/test_data/context.py | 3a246853c6d4bb1965a56beadd3cab66b0c8e349 | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 44,910 | py | from CommonServerPython import DemistoException
SEND_UPLOADED_FILE_TO_SENDBOX_ANALYSIS_CONTEXT = {
'submitted_id': 'id',
'state': 'created',
'created_timestamp': '2020-05-12T15:34:11Z',
'environment_id': 160,
'sha256': 'sha256',
'file_name': 'test.pdf'
}
# Expected csfalconx.resource context entry for a sandbox analysis submission
# of a URL (cs-fx-submit-url in the tests).
SEND_URL_TO_SANDBOX_ANALYSIS_CONTEXT = dict(
    submitted_id="id",
    state="created",
    created_timestamp="2020-05-12T16:40:52Z",
    environment_id=160,
    url_name="https://www.google.com",
)
# Expected full command context for cs-fx-get-report-summary.
# Keys are Cortex XSOAR DT (context path) expressions: the File and DBotScore
# standard-context outputs plus the integration's own csfalconx.resource entry.
GET_REPORT_SUMMARY_CONTEXT = {
    # Standard File indicator output, keyed by any of its hash fields.
    'File(val.MD5 && val.MD5 == obj.MD5 || val.SHA1 && val.SHA1 == obj.SHA1 '
    '|| val.SHA256 && val.SHA256 == obj.SHA256 || val.SHA512 && val.SHA512 == obj.SHA512 '
    '|| val.CRC32 && val.CRC32 == obj.CRC32 || val.CTPH && val.CTPH == obj.CTPH'
    ' || val.SSDeep && val.SSDeep == obj.SSDeep)': [
        {'SHA256': 'sha256', 'Company': 'Microsoft Corporation', 'ProductName': 'Microsoft Windows Operating System',
         'Signature': {'Authentihash': '', 'Copyright': 'Microsoft Corporation. All rights reserved.',
                       'Description': 'Microsoft Smartcard Certificate Propagation Service',
                       'FileVersion': '10.0.19041.844 (WinBuild.160101.0800)', 'InternalName': 'certprop.dll',
                       'OriginalName': 'certprop.dll'},
         'Hashes': [{'type': 'SHA256', 'value': 'sha256'}]
         }
    ],
    # DBotScore for the same file hash; Score 2 means "suspicious".
    'DBotScore(val.Indicator && val.Indicator == obj.Indicator && val.Vendor == obj.Vendor && val.Type == obj.Type)': [
        {'Indicator': 'sha256', 'Type': 'file', 'Vendor': '', 'Score': 2, 'Reliability': 'B - Usually reliable'}],
    # Integration-specific report summary: environment, verdict, threat score,
    # and the artifact IDs of every generated IOC report format (CSV/JSON/MAEC/STIX).
    'csfalconx.resource(val.id && val.id == obj.id)': {
        'environment_description': 'Windows 10 64 bit',
        'environment_id': 160, 'sha256': 'sha256',
        'submission_type': 'page_url',
        'submit_url': 'hxxps://www.google.com', 'threat_score': 13,
        'created_timestamp': '2020-03-16T17:04:48Z', 'id': 'id',
        'ioc_report_broad_csv_artifact_id': 'ioc_report_broad_csv_artifact_id',
        'ioc_report_broad_json_artifact_id': 'ioc_report_broad_json_artifact_id',
        'ioc_report_broad_maec_artifact_id': 'ioc_report_broad_maec_artifact_id',
        'ioc_report_broad_stix_artifact_id': 'ioc_report_broad_stix_artifact_id',
        'ioc_report_strict_csv_artifact_id': 'ioc_report_strict_csv_artifact_id',
        'ioc_report_strict_json_artifact_id': 'ioc_report_strict_json_artifact_id',
        'ioc_report_strict_maec_artifact_id': 'ioc_report_strict_maec_artifact_id',
        'ioc_report_strict_stix_artifact_id': 'ioc_report_strict_stix_artifact_id',
        'verdict': 'suspicious'}}
# Expected context for cs-fx-get-analysis-status: a single submission that
# completed successfully in the Windows 10 64-bit environment (id 160).
GET_ANALYSIS_STATUS_CONTEXT = {
    "csfalconx.resource(val.id && val.id == obj.id)": {
        "id": "id",
        "state": "success",
        "created_timestamp": "2020-03-16T17:04:48Z",
        "environment_id": 160,
    },
}
# Expected context for cs-fx-check-quota: total allowance, submissions
# already used, and submissions currently in flight.
CHECK_QUOTA_STATUS_CONTEXT = {
    "csfalconx.resource(val.id && val.id == obj.id)": dict(
        total=100,
        used=47,
        in_progress=2,
    ),
}
# Expected context for cs-fx-find-reports: the flat list of matching
# sandbox-report resource IDs.
FIND_SANDBOX_REPORTS_CONTEXT = {
    "csfalconx.resource(val.id && val.id == obj.id)": {
        "resources": ["resources%d" % n for n in range(1, 5)],
    },
}
# Expected context for cs-fx-find-reports when queried by file hash and the
# hash resolves to reports: the same report IDs appear both in 'resources'
# and in the per-hash 'FindReport' mapping.
_HASH_REPORT_IDS = ["resources%d" % n for n in range(1, 5)]
FIND_SANDBOX_REPORTS_HASH_CONTEXT = {
    "csfalconx.resource(val.id && val.id == obj.id)": {
        "resources": list(_HASH_REPORT_IDS),
        "FindReport": [{"sha256": "hash1", "reportIds": list(_HASH_REPORT_IDS)}],
    },
}
# Expected context for cs-fx-find-reports when a hash lookup matches no
# sandbox reports: both the resource list and the per-hash ID list are empty.
FIND_SANDBOX_REPORTS_NOT_FOUND_HASH_CONTEXT = {
    "csfalconx.resource(val.id && val.id == obj.id)": {
        "resources": [],
        "FindReport": [dict(sha256="hash1", reportIds=[])],
    },
}
# Expected context for cs-fx-find-submission-id: the matching submission IDs.
FIND_SUBMISSION_ID_CONTEXT = {
    "csfalconx.resource(val.id && val.id == obj.id)": {
        "resources": ["resources" + str(n) for n in (1, 2, 3, 4)],
    },
}
GET_FULL_REPORT_CONTEXT_EXTENDED = {'environment_description': 'Windows 10 64 bit', 'environment_id': 160,
'sha256': 'sha256', 'submission_type': 'page_url',
'submit_url': 'hxxps://www.google.com', 'threat_score': 13,
'architecture': 'WINDOWS', 'classification': ['91.6% (.URL) Windows URL shortcut',
'8.3% (.INI) Generic INI configuration'],
'contacted_hosts': [{'address': '111.27.12.67',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428},
{'name': 'name.exe', 'pid': 9372}],
'country': 'United States', 'port': 443, 'protocol': 'TCP'},
{'address': '111.27.12.67',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428},
{'name': 'name.exe', 'pid': 9372}],
'country': 'United States', 'port': 80, 'protocol': 'TCP'},
{'address': '111.27.12.67',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
'country': 'United States', 'port': 443, 'protocol': 'TCP'},
{'address': '111.27.12.67',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
'country': 'United States', 'port': 443, 'protocol': 'TCP'},
{'address': '111.27.12.67',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
'country': 'United States', 'port': 443, 'protocol': 'TCP'},
{'address': '111.27.12.67',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
'country': 'United States', 'port': 443, 'protocol': 'TCP'},
{'address': '111.27.12.67',
'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
'country': 'United States', 'port': 443, 'protocol': 'TCP'}],
'dns_requests': [{'address': '111.111.1.1', 'country': 'United States',
'domain': 'googleads.g.doubleclick.net',
'registrar_creation_timestamp': '1996-01-16T00:00:00+00:00',
'registrar_name': 'registrar_name',
'registrar_organization': 'registrar_organization'},
{'address': '172.217.7.163', 'country': 'United States',
'domain': 'domain'},
{'address': '111.27.12.67', 'country': 'United States',
'domain': 'ssl.gstatic.com',
'registrar_creation_timestamp': '2008-02-11T00:00:00+00:00',
'registrar_name': 'registrar_name',
'registrar_organization': 'Google Inc.'},
{'address': '172.217.14.163', 'country': 'United States',
'domain': 'www.gstatic.com',
'registrar_creation_timestamp': '2008-02-11T00:00:00+00:00',
'registrar_name': 'registrar_name',
'registrar_organization': 'registrar_organization'}],
'http_requests': [
{'header': 'header', 'host': 'host', 'host_ip': '111.27.12.67', 'host_port': 80,
'method': 'GET', 'url': 'url'},
{'header': 'header', 'host': 'host', 'host_ip': '111.27.12.67', 'host_port': 80,
'method': 'GET', 'url': 'url'},
{'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
'host_port': 80, 'method': 'GET', 'url': 'url'},
{'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
'host_port': 80, 'method': 'GET', 'url': 'url'},
{'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
'host_port': 80, 'method': 'GET', 'url': 'url'},
{'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
'host_port': 80, 'method': 'GET', 'url': 'url'},
{'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
'host_port': 80, 'method': 'GET', 'url': 'url'},
{'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
'host_port': 80, 'method': 'GET', 'url': 'url'}], 'incidents': [
{'details': ['Contacts 4 domains and 4 hosts'], 'name': 'Network Behavior'}], 'processes': [
{'command_line': 'command_line', 'icon_artifact_id': 'icon_artifact_id', 'name': 'rundll32.exe',
'normalized_path': 'normalized_path.exe', 'pid': 6648, 'process_flags': [{'name': 'Reduced Monitoring'}],
'sha256': 'sha256', 'uid': '00074182-00006648'}], 'screenshots_artifact_ids': ['screenshots_artifact_ids1',
'screenshots_artifact_ids2',
'screenshots_artifact_ids3',
'screenshots_artifact_ids4'],
'signatures': [
{'category': 'General', 'description': 'description', 'identifier': 'network-0',
'name': 'Contacts domains', 'origin': 'Network Traffic', 'relevance': 1,
'threat_level_human': 'informative', 'type': 7},
{'category': 'General', 'description': 'description', 'identifier': 'network-1',
'name': 'Contacts server', 'origin': 'Network Traffic', 'relevance': 1,
'threat_level_human': 'informative', 'type': 7},
{'category': 'Network Related', 'description': 'description',
'identifier': 'string-3', 'name': 'Found potential URL in binary/memory',
'origin': 'String', 'relevance': 10, 'threat_level_human': 'informative',
'type': 2}, {'category': 'External Systems', 'description': 'description',
'identifier': 'suricata-0', 'name': 'Detected Suricata Alert',
'origin': 'Suricata Alerts', 'relevance': 10,
'threat_level_human': 'informative', 'type': 18},
{'category': 'Ransomware/Banking', 'description': 'description',
'identifier': 'string-12',
'name': 'Detected text artifact in screenshot that indicate file could be ransomware',
'origin': 'String', 'relevance': 10, 'threat_level': 1,
'threat_level_human': 'suspicious', 'type': 2},
{'category': 'Network Related', 'description': 'description',
'identifier': 'network-23',
'name': 'Sends traffic on typical HTTP outbound port, but without HTTP header',
'origin': 'Network Traffic', 'relevance': 5, 'threat_level': 1,
'threat_level_human': 'suspicious', 'type': 7}],
'created_timestamp': '2020-03-16T17:04:48Z', 'id': 'id',
'ioc_report_broad_csv_artifact_id': 'ioc_report_broad_csv_artifact_id',
'ioc_report_broad_json_artifact_id': 'ioc_report_broad_json_artifact_id',
'ioc_report_broad_maec_artifact_id': 'ioc_report_broad_maec_artifact_id',
'ioc_report_broad_stix_artifact_id': 'ioc_report_broad_stix_artifact_id',
'ioc_report_strict_csv_artifact_id': 'ioc_report_strict_csv_artifact_id',
'ioc_report_strict_json_artifact_id': 'ioc_report_strict_json_artifact_id',
'ioc_report_strict_maec_artifact_id': 'ioc_report_strict_maec_artifact_id',
'ioc_report_strict_stix_artifact_id': 'ioc_report_strict_stix_artifact_id',
'verdict': 'no specific threat',
'sandbox': {'architecture': 'WINDOWS',
'classification': [
'91.6% (.URL) Windows URL shortcut',
'8.3% (.INI) Generic INI configuration'],
'contacted_hosts': [
{'address': '111.27.12.67',
'associated_runtime': [
{'name': 'name.exe', 'pid': 6428},
{'name': 'name.exe', 'pid': 9372}],
'country': 'United States',
'port': 443, 'protocol': 'TCP'},
{'address': '111.27.12.67',
'associated_runtime': [
{'name': 'name.exe', 'pid': 6428},
{'name': 'name.exe', 'pid': 9372}],
'country': 'United States',
'port': 80, 'protocol': 'TCP'},
{'address': '111.27.12.67',
'associated_runtime': [
{'name': 'name.exe', 'pid': 6428}],
'country': 'United States',
'port': 443, 'protocol': 'TCP'},
{'address': '111.27.12.67',
'associated_runtime': [
{'name': 'name.exe', 'pid': 6428}],
'country': 'United States',
'port': 443, 'protocol': 'TCP'},
{'address': '111.27.12.67',
'associated_runtime': [
{'name': 'name.exe', 'pid': 6428}],
'country': 'United States',
'port': 443, 'protocol': 'TCP'},
{'address': '111.27.12.67',
'associated_runtime': [
{'name': 'name.exe', 'pid': 6428}],
'country': 'United States',
'port': 443, 'protocol': 'TCP'},
{'address': '111.27.12.67', 'associated_runtime': [
{'name': 'name.exe',
'pid': 6428}],
'country': 'United States',
'port': 443, 'protocol': 'TCP'}],
'dns_requests': [
{'address': '111.111.1.1',
'country': 'United States',
'domain': 'googleads.g.doubleclick.net',
'registrar_creation_timestamp': '1996-01-16T00:00:00+00:00',
'registrar_name': 'registrar_name',
'registrar_organization': 'registrar_organization'},
{'address': '172.217.7.163',
'country': 'United States',
'domain': 'domain'},
{'address': '111.27.12.67',
'country': 'United States',
'domain': 'ssl.gstatic.com',
'registrar_creation_timestamp': '2008-02-11T00:00:00+00:00',
'registrar_name': 'registrar_name',
'registrar_organization': 'Google Inc.'},
{'address': '172.217.14.163',
'country': 'United States',
'domain': 'www.gstatic.com',
'registrar_creation_timestamp': '2008-02-11T00:00:00+00:00',
'registrar_name': 'registrar_name',
'registrar_organization': 'registrar_organization'}],
'http_requests': [
{'header': 'header',
'host': 'host',
'host_ip': '111.27.12.67',
'host_port': 80,
'method': 'GET',
'url': 'url'},
{'header': 'header',
'host': 'host',
'host_ip': '111.27.12.67',
'host_port': 80,
'method': 'GET',
'url': 'url'},
{'header': 'header',
'host': 'ocsp.pki.goog',
'host_ip': '172.217.7.163',
'host_port': 80,
'method': 'GET',
'url': 'url'},
{'header': 'header',
'host': 'ocsp.pki.goog',
'host_ip': '172.217.7.163',
'host_port': 80,
'method': 'GET',
'url': 'url'},
{'header': 'header',
'host': 'ocsp.pki.goog',
'host_ip': '172.217.7.163',
'host_port': 80,
'method': 'GET',
'url': 'url'},
{'header': 'header',
'host': 'ocsp.pki.goog',
'host_ip': '172.217.7.163',
'host_port': 80,
'method': 'GET',
'url': 'url'},
{'header': 'header',
'host': 'ocsp.pki.goog',
'host_ip': '172.217.7.163',
'host_port': 80,
'method': 'GET',
'url': 'url'},
{'header': 'header',
'host': 'ocsp.pki.goog',
'host_ip': '172.217.7.163',
'host_port': 80,
'method': 'GET',
'url': 'url'}],
'incidents': [{'details': [
'Contacts 4 domains and 4 hosts'],
'name': 'Network Behavior'}],
'processes': [
{'command_line': 'command_line',
'icon_artifact_id': 'icon_artifact_id',
'name': 'rundll32.exe',
'normalized_path': 'normalized_path.exe',
'pid': 6648, 'process_flags': [{
'name': 'Reduced Monitoring'}],
'sha256': 'sha256',
'uid': '00074182-00006648'}],
'screenshots_artifact_ids': [
'screenshots_artifact_ids1',
'screenshots_artifact_ids2',
'screenshots_artifact_ids3',
'screenshots_artifact_ids4'],
'signatures': [{'category': 'General',
'description': 'description',
'identifier': 'network-0',
'name': 'Contacts domains',
'origin': 'Network Traffic',
'relevance': 1,
'threat_level_human': 'informative',
'type': 7},
{'category': 'General',
'description': 'description',
'identifier': 'network-1',
'name': 'Contacts server',
'origin': 'Network Traffic',
'relevance': 1,
'threat_level_human': 'informative',
'type': 7}, {
'category': 'Network Related',
'description': 'description',
'identifier': 'string-3',
'name': 'Found potential URL in binary/memory',
'origin': 'String',
'relevance': 10,
'threat_level_human': 'informative',
'type': 2}, {
'category': 'External Systems',
'description': 'description',
'identifier': 'suricata-0',
'name': 'Detected Suricata Alert',
'origin': 'Suricata Alerts',
'relevance': 10,
'threat_level_human': 'informative',
'type': 18}, {
'category': 'Ransomware/Banking',
'description': 'description',
'identifier': 'string-12',
'name': 'Detected text artifact in screenshot that indicate file could be ransomware',
'origin': 'String',
'relevance': 10,
'threat_level': 1,
'threat_level_human': 'suspicious',
'type': 2}, {
'category': 'Network Related',
'description': 'description',
'identifier': 'network-23',
'name': 'Sends traffic on typical HTTP outbound port, but without HTTP header',
'origin': 'Network Traffic',
'relevance': 5,
'threat_level': 1,
'threat_level_human': 'suspicious',
'type': 7}]}}
# Test fixture: expected war-room context for the "get full report" command.
# Same scan metadata and IOC artifact ids as the summary fixture, plus a
# nested 'sandbox' dict that repeats the behavioural details (hosts, DNS,
# HTTP, incidents, processes, screenshots).
GET_FULL_REPORT_CONTEXT = {'environment_description': 'Windows 10 64 bit', 'environment_id': 160, 'sha256': 'sha256',
                           'submission_type': 'page_url', 'submit_url': 'hxxps://www.google.com', 'threat_score': 13,
                           'architecture': 'WINDOWS',
                           'classification': ['91.6% (.URL) Windows URL shortcut',
                                              '8.3% (.INI) Generic INI configuration'],
                           # Seven contacted hosts; the first two carry two associated processes each.
                           'contacted_hosts': [{'address': '111.27.12.67',
                                                'associated_runtime': [{'name': 'name.exe', 'pid': 6428},
                                                                       {'name': 'name.exe', 'pid': 9372}],
                                                'country': 'United States', 'port': 443, 'protocol': 'TCP'},
                                               {'address': '111.27.12.67',
                                                'associated_runtime': [
                                                    {'name': 'name.exe',
                                                     'pid': 6428},
                                                    {'name': 'name.exe',
                                                     'pid': 9372}],
                                                'country': 'United States',
                                                'port': 80, 'protocol': 'TCP'},
                                               {'address': '111.27.12.67',
                                                'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
                                                'country': 'United States', 'port': 443, 'protocol': 'TCP'},
                                               {'address': '111.27.12.67',
                                                'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
                                                'country': 'United States', 'port': 443, 'protocol': 'TCP'},
                                               {'address': '111.27.12.67',
                                                'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
                                                'country': 'United States', 'port': 443, 'protocol': 'TCP'},
                                               {'address': '111.27.12.67',
                                                'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
                                                'country': 'United States', 'port': 443, 'protocol': 'TCP'},
                                               {'address': '111.27.12.67',
                                                'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
                                                'country': 'United States', 'port': 443, 'protocol': 'TCP'}],
                           'dns_requests': [
                               {'address': '111.111.1.1', 'country': 'United States',
                                'domain': 'googleads.g.doubleclick.net',
                                'registrar_creation_timestamp': '1996-01-16T00:00:00+00:00',
                                'registrar_name': 'registrar_name',
                                'registrar_organization': 'registrar_organization'},
                               {'address': '172.217.7.163', 'country': 'United States', 'domain': 'domain'},
                               {'address': '111.27.12.67', 'country': 'United States', 'domain': 'ssl.gstatic.com',
                                'registrar_creation_timestamp': '2008-02-11T00:00:00+00:00',
                                'registrar_name': 'registrar_name',
                                'registrar_organization': 'Google Inc.'},
                               {'address': '172.217.14.163', 'country': 'United States', 'domain': 'www.gstatic.com',
                                'registrar_creation_timestamp': '2008-02-11T00:00:00+00:00',
                                'registrar_name': 'registrar_name',
                                'registrar_organization': 'registrar_organization'}],
                           'http_requests': [
                               {'header': 'header', 'host': 'host', 'host_ip': '111.27.12.67', 'host_port': 80,
                                'method': 'GET', 'url': 'url'},
                               {'header': 'header', 'host': 'host', 'host_ip': '111.27.12.67', 'host_port': 80,
                                'method': 'GET', 'url': 'url'},
                               {'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
                                'host_port': 80, 'method': 'GET',
                                'url': 'url'},
                               {'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
                                'host_port': 80, 'method': 'GET',
                                'url': 'url'},
                               {'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
                                'host_port': 80, 'method': 'GET',
                                'url': 'url'},
                               {'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
                                'host_port': 80, 'method': 'GET',
                                'url': 'url'},
                               {'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
                                'host_port': 80, 'method': 'GET',
                                'url': 'url'},
                               {'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
                                'host_port': 80, 'method': 'GET',
                                'url': 'url'}],
                           'incidents': [{'details': ['Contacts 4 domains and 4 hosts'], 'name': 'Network Behavior'}],
                           'processes': [{'command_line': 'command_line', 'icon_artifact_id': 'icon_artifact_id',
                                          'name': 'rundll32.exe',
                                          'normalized_path': 'normalized_path.exe', 'pid': 6648,
                                          'process_flags': [{'name': 'Reduced Monitoring'}], 'sha256': 'sha256',
                                          'uid': '00074182-00006648'}],
                           'screenshots_artifact_ids': ['screenshots_artifact_ids1', 'screenshots_artifact_ids2',
                                                        'screenshots_artifact_ids3',
                                                        'screenshots_artifact_ids4'],
                           'created_timestamp': '2020-03-16T17:04:48Z', 'id': 'id',
                           # Artifact ids for the broad/strict IOC reports in each export format.
                           'ioc_report_broad_csv_artifact_id': 'ioc_report_broad_csv_artifact_id',
                           'ioc_report_broad_json_artifact_id': 'ioc_report_broad_json_artifact_id',
                           'ioc_report_broad_maec_artifact_id': 'ioc_report_broad_maec_artifact_id',
                           'ioc_report_broad_stix_artifact_id': 'ioc_report_broad_stix_artifact_id',
                           'ioc_report_strict_csv_artifact_id': 'ioc_report_strict_csv_artifact_id',
                           'ioc_report_strict_json_artifact_id': 'ioc_report_strict_json_artifact_id',
                           'ioc_report_strict_maec_artifact_id': 'ioc_report_strict_maec_artifact_id',
                           'ioc_report_strict_stix_artifact_id': 'ioc_report_strict_stix_artifact_id',
                           'verdict': 'no specific threat',
                           # Nested per-sandbox copy of the behavioural details above.
                           'sandbox': {'architecture': 'WINDOWS',
                                       'classification': ['91.6% (.URL) Windows URL shortcut',
                                                          '8.3% (.INI) Generic INI configuration'],
                                       'contacted_hosts': [
                                           {'address': '111.27.12.67',
                                            'associated_runtime': [{'name': 'name.exe', 'pid': 6428},
                                                                   {'name': 'name.exe', 'pid': 9372}],
                                            'country': 'United States', 'port': 443, 'protocol': 'TCP'},
                                           {'address': '111.27.12.67',
                                            'associated_runtime': [{'name': 'name.exe', 'pid': 6428},
                                                                   {'name': 'name.exe', 'pid': 9372}],
                                            'country': 'United States', 'port': 80, 'protocol': 'TCP'},
                                           {'address': '111.27.12.67',
                                            'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
                                            'country': 'United States', 'port': 443, 'protocol': 'TCP'},
                                           {'address': '111.27.12.67',
                                            'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
                                            'country': 'United States', 'port': 443, 'protocol': 'TCP'},
                                           {'address': '111.27.12.67',
                                            'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
                                            'country': 'United States', 'port': 443, 'protocol': 'TCP'},
                                           {'address': '111.27.12.67',
                                            'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
                                            'country': 'United States', 'port': 443, 'protocol': 'TCP'},
                                           {'address': '111.27.12.67',
                                            'associated_runtime': [{'name': 'name.exe', 'pid': 6428}],
                                            'country': 'United States', 'port': 443,
                                            'protocol': 'TCP'}],
                                       'dns_requests': [
                                           {'address': '111.111.1.1', 'country': 'United States',
                                            'domain': 'googleads.g.doubleclick.net',
                                            'registrar_creation_timestamp': '1996-01-16T00:00:00+00:00',
                                            'registrar_name': 'registrar_name',
                                            'registrar_organization': 'registrar_organization'},
                                           {'address': '172.217.7.163', 'country': 'United States', 'domain': 'domain'},
                                           {'address': '111.27.12.67', 'country': 'United States',
                                            'domain': 'ssl.gstatic.com',
                                            'registrar_creation_timestamp': '2008-02-11T00:00:00+00:00',
                                            'registrar_name': 'registrar_name',
                                            'registrar_organization': 'Google Inc.'},
                                           {'address': '172.217.14.163', 'country': 'United States',
                                            'domain': 'www.gstatic.com',
                                            'registrar_creation_timestamp': '2008-02-11T00:00:00+00:00',
                                            'registrar_name': 'registrar_name',
                                            'registrar_organization': 'registrar_organization'}],
                                       'http_requests': [
                                           {'header': 'header', 'host': 'host', 'host_ip': '111.27.12.67',
                                            'host_port': 80,
                                            'method': 'GET',
                                            'url': 'url'},
                                           {'header': 'header', 'host': 'host', 'host_ip': '111.27.12.67',
                                            'host_port': 80,
                                            'method': 'GET',
                                            'url': 'url'},
                                           {'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
                                            'host_port': 80, 'method': 'GET',
                                            'url': 'url'},
                                           {'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
                                            'host_port': 80, 'method': 'GET',
                                            'url': 'url'},
                                           {'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
                                            'host_port': 80, 'method': 'GET',
                                            'url': 'url'},
                                           {'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
                                            'host_port': 80, 'method': 'GET',
                                            'url': 'url'},
                                           {'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
                                            'host_port': 80, 'method': 'GET',
                                            'url': 'url'},
                                           {'header': 'header', 'host': 'ocsp.pki.goog', 'host_ip': '172.217.7.163',
                                            'host_port': 80, 'method': 'GET',
                                            'url': 'url'}],
                                       'incidents': [
                                           {'details': ['Contacts 4 domains and 4 hosts'], 'name': 'Network Behavior'}],
                                       'processes': [
                                           {'command_line': 'command_line', 'icon_artifact_id': 'icon_artifact_id',
                                            'name': 'rundll32.exe',
                                            'normalized_path': 'normalized_path.exe', 'pid': 6648,
                                            'process_flags': [{'name': 'Reduced Monitoring'}], 'sha256': 'sha256',
                                            'uid': '00074182-00006648'}],
                                       'screenshots_artifact_ids': ['screenshots_artifact_ids1',
                                                                    'screenshots_artifact_ids2',
                                                                    'screenshots_artifact_ids3',
                                                                    'screenshots_artifact_ids4']}}
# Expected aggregated exception when the API response carries several error
# entries: the individual "code: message" strings are joined with newlines.
MULTIPLE_ERRORS_RESULT = DemistoException(
    '403: access denied, authorization failed\n401: test error #1\n402: test error #2')
| [
"[email protected]"
] | |
e137ef7c38d70656f375646a2aa4195efccc728c | 6d54a7b26d0eb82152a549a6a9dfde656687752c | /src/controller/python/test/test_scripts/commissioning_window_test.py | 6a113aede20baf05365072828d9a20694ebdf7a0 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | project-chip/connectedhomeip | 81a123d675cf527773f70047d1ed1c43be5ffe6d | ea3970a7f11cd227ac55917edaa835a2a9bc4fc8 | refs/heads/master | 2023-09-01T11:43:37.546040 | 2023-09-01T08:01:32 | 2023-09-01T08:01:32 | 244,694,174 | 6,409 | 1,789 | Apache-2.0 | 2023-09-14T20:56:31 | 2020-03-03T17:05:10 | C++ | UTF-8 | Python | false | false | 4,141 | py | #!/usr/bin/env python3
#
# Copyright (c) 2022 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Commissioning test.
import asyncio
import os
import sys
from optparse import OptionParser
from base import BaseTestHelper, FailIfNot, TestFail, TestTimeout, logger
# The Thread network dataset TLV used for testing, split into its T-L-V parts.
TEST_THREAD_NETWORK_DATASET_TLV = "0e080000000000010000" + \
    "000300000c" + \
    "35060004001fffe0" + \
    "0208fedcba9876543210" + \
    "0708fd00000000001234" + \
    "0510ffeeddccbbaa99887766554433221100" + \
    "030e54657374696e674e6574776f726b" + \
    "0102d252" + \
    "041081cb3b2efa781cc778397497ff520fa50c0302a0ff"
# Network id for the Thread network. Currently a constant value; will later be
# derived from the XPANID of the Thread network.
TEST_THREAD_NETWORK_ID = "fedcba9876543210"
# Long discriminator advertised by the device under test.
TEST_DISCRIMINATOR = 3840
# Endpoint/group ids used by the test scenarios below.
ENDPOINT_ID = 0
LIGHTING_ENDPOINT_ID = 1
GROUP_ID = 0
async def main():
    """Run the commissioning-window test sequence against a CHIP device.

    Steps (order matters: later steps assume the device state produced by the
    earlier ones):
      1. Parse CLI options (timeout, device address, PAA trust store).
      2. Commission the device from a first controller (PASE + commission).
      3. Create a controller on a new fabric.
      4. Exercise RevokeCommissioning and the Enhanced Commissioning Window.
    """
    optParser = OptionParser()
    optParser.add_option(
        "-t",
        "--timeout",
        action="store",
        dest="testTimeout",
        default=75,
        type='int',
        help="The program will return with timeout after specified seconds.",
        metavar="<timeout-second>",
    )
    optParser.add_option(
        "--address",
        action="store",
        dest="deviceAddress",
        default='',
        type='str',
        help="Address of the first device",
    )
    optParser.add_option(
        "-p",
        "--paa-trust-store-path",
        action="store",
        dest="paaTrustStorePath",
        default='',
        type='str',
        help="Path that contains valid and trusted PAA Root Certificates.",
        metavar="<paa-trust-store-path>"
    )
    (options, remainingArgs) = optParser.parse_args(sys.argv[1:])
    # Watchdog: aborts the whole process if the test exceeds the timeout.
    timeoutTicker = TestTimeout(options.testTimeout)
    timeoutTicker.start()
    test = BaseTestHelper(
        nodeid=112233, paaTrustStorePath=options.paaTrustStorePath, testCommissioner=False)
    FailIfNot(test.SetNetworkCommissioningParameters(dataset=TEST_THREAD_NETWORK_DATASET_TLV),
              "Failed to finish network commissioning")
    logger.info("Commissioning DUT from first commissioner")
    # PASE session first (setup PIN 20202021 is the well-known test PIN),
    # then the actual commissioning flow.
    FailIfNot(test.TestPaseOnly(ip=options.deviceAddress, setuppin=20202021, nodeid=1),
              "Unable to establish PASE connection to device")
    FailIfNot(test.TestCommissionOnly(nodeid=1), "Unable to commission device")
    logger.info("Creating controller on a new fabric")
    FailIfNot(test.CreateNewFabricController(), "Unable to create new controller")
    logger.info("Testing RevokeCommissioning")
    FailIfNot(await test.TestRevokeCommissioningWindow(ip=options.deviceAddress,
                                                      setuppin=20202021,
                                                      nodeid=1),
              "RevokeCommissioning test failed")
    logger.info("Test Enhanced Commissioning Window")
    FailIfNot(test.TestEnhancedCommissioningWindow(ip=options.deviceAddress, nodeid=1), "EnhancedCommissioningWindow open failed")
    timeoutTicker.stop()
    logger.info("Test finished")
    # TODO: Python device controller cannot be shutdown clean sometimes and will block on AsyncDNSResolverSockets shutdown.
    # Call os._exit(0) to force close it.
    os._exit(0)
if __name__ == "__main__":
    try:
        # Drive the async test sequence to completion on the default loop.
        loop = asyncio.get_event_loop()
        loop.run_until_complete(main())
        loop.close()
    except Exception as ex:
        # Any uncaught error is logged and reported as a test failure.
        logger.exception(ex)
        TestFail("Exception occurred when running tests.")
| [
"[email protected]"
] | |
bf46a05ff7701f589104c9dbb546b75c67f08206 | 6e843cd6fa659696220fe5db8b20c14a4bae7465 | /rest/photograph/views.py | 033088113bfe0dc21cc479c1aece3a84f172faf0 | [
"MIT"
] | permissive | cmu-lib/campi | 024f178c00e6ff6a8e477622ec882cbfa9177c06 | c2c4bfbf2d5932dde1dde985d7b5c8786b9f2ab2 | refs/heads/master | 2023-01-13T19:05:01.151517 | 2020-10-09T13:37:27 | 2020-10-09T13:37:27 | 254,439,760 | 13 | 0 | MIT | 2020-11-25T04:12:18 | 2020-04-09T17:43:35 | Python | UTF-8 | Python | false | false | 12,932 | py | from django.db.models import (
Count,
Prefetch,
OuterRef,
Exists,
ExpressionWrapper,
BooleanField,
F,
)
from django.db.models.functions import Extract
from django.contrib.postgres.search import SearchRank, SearchQuery
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.pagination import LimitOffsetPagination
from photograph import serializers, models
import collection
from django_filters import rest_framework as filters
from campi.views import GetSerializerClassMixin
import tagging.models
from cv.models import CloseMatchSetMembership
class PhotographFilter(filters.FilterSet):
    """Query-parameter filters for photograph list endpoints.

    Supports filtering by directory, job, tag, Google Cloud Vision object /
    label annotations, close-match membership, path substring, date ranges,
    and full-text search over OCR'd image text.
    """

    directory = filters.ModelMultipleChoiceFilter(
        queryset=collection.models.Directory.objects.all()
    )
    all_directories = filters.ModelChoiceFilter(
        queryset=collection.models.Directory.objects.all()
    )
    date_taken_early = filters.DateFromToRangeFilter()
    date_taken_late = filters.DateFromToRangeFilter()
    digitized_date = filters.DateFromToRangeFilter()
    job = filters.ModelChoiceFilter(queryset=collection.models.Job.objects.all())
    tag = filters.ModelChoiceFilter(
        queryset=tagging.models.Tag.objects.all(), field_name="photograph_tags__tag"
    )
    image_path = filters.CharFilter(lookup_expr="icontains")
    # GCV object detections (bounding-box annotations) on the photograph.
    gcv_object = filters.ModelChoiceFilter(
        queryset=models.ObjectAnnotationLabel.objects.all(),
        field_name="objectannotation__label",
    )
    # GCV whole-image labels on the photograph.
    gcv_label = filters.ModelChoiceFilter(
        queryset=models.PhotoLabel.objects.all(), field_name="label_annotations__label"
    )
    # Relies on the `in_close_match_set` annotation added by prepare_photograph_qs.
    in_close_match_set = filters.BooleanFilter(field_name="in_close_match_set")
    image_text = filters.CharFilter(method="get_text")

    def get_text(self, queryset, name, value):
        """Full-text search against the photograph's image_search_text vector,
        ranked by relevance (best matches first)."""
        if value is None:
            return queryset
        else:
            return (
                queryset.annotate(
                    rank=SearchRank(F("image_search_text"), SearchQuery(value))
                )
                # Keep only actual matches, highest rank first.
                .filter(rank__gt=0)
                .order_by("-rank")
            )
def prepare_photograph_qs(qs):
    """Attach the relations needed to serialize photographs in list views.

    Adds select_related on directory/job, an ``in_close_match_set`` boolean
    annotation, and ordered prefetches for tags, tagging decisions, and GCV
    label annotations. Returns a distinct queryset.
    """
    tag_qs = (
        tagging.models.PhotographTag.objects
        .select_related("tag", "user_last_modified")
        .order_by("-last_updated")
    )
    decision_qs = tagging.models.TaggingDecision.objects.order_by("-created_on")
    label_qs = (
        models.PhotoLabelAnnotation.objects
        .select_related("label")
        .order_by("-score")
    )
    # Correlated subquery: is this photograph an accepted close-match member?
    accepted_membership = CloseMatchSetMembership.objects.filter(
        state=CloseMatchSetMembership.ACCEPTED, photograph=OuterRef("pk")
    )
    return (
        qs.select_related("directory", "job")
        .annotate(in_close_match_set=Exists(accepted_membership))
        .prefetch_related(
            Prefetch("photograph_tags", queryset=tag_qs),
            Prefetch("decisions", queryset=decision_qs),
            Prefetch("label_annotations", queryset=label_qs),
        )
        .distinct()
    )
def prepare_photograph_detail_qs(qs):
    """Attach the relations needed for the photograph detail view.

    Superset of ``prepare_photograph_qs``: additionally prefetches object
    annotations (with labels), face annotations, and sequence-ordered text
    annotations. Returns a distinct queryset.
    """
    object_qs = models.ObjectAnnotation.objects.select_related("label").all()
    tag_qs = (
        tagging.models.PhotographTag.objects
        .select_related("tag", "user_last_modified")
        .order_by("-last_updated")
    )
    decision_qs = tagging.models.TaggingDecision.objects.order_by("-created_on")
    label_qs = (
        models.PhotoLabelAnnotation.objects
        .select_related("label")
        .order_by("-score")
    )
    text_qs = models.TextAnnotation.objects.order_by("sequence")
    # Correlated subquery: is this photograph an accepted close-match member?
    accepted_membership = CloseMatchSetMembership.objects.filter(
        state=CloseMatchSetMembership.ACCEPTED, photograph=OuterRef("pk")
    )
    return (
        qs.select_related("directory", "job")
        .annotate(in_close_match_set=Exists(accepted_membership))
        .prefetch_related(
            Prefetch("photograph_tags", queryset=tag_qs),
            Prefetch("decisions", queryset=decision_qs),
            Prefetch("objectannotation", queryset=object_qs),
            Prefetch("label_annotations", queryset=label_qs),
            "faceannotation",
            Prefetch("textannotation", queryset=text_qs),
        )
        .distinct()
    )
class PhotographViewSet(GetSerializerClassMixin, viewsets.ModelViewSet):
    """CRUD endpoints for photographs.

    Uses a lighter serializer/queryset for `list` and a fully-prefetched one
    for `retrieve` (via GetSerializerClassMixin's action-class maps).
    """

    queryset = prepare_photograph_qs(models.Photograph.objects.all())
    ordering_fields = ["date_taken_early", "date_taken_late", "digitized_date"]
    filterset_class = PhotographFilter
    serializer_class = serializers.PhotographDetailSerializer
    serializer_action_classes = {
        "list": serializers.PhotographListSerializer,
        "retrieve": serializers.PhotographDetailSerializer,
    }
    queryset_action_classes = {
        "list": queryset,
        "retrieve": prepare_photograph_detail_qs(models.Photograph.objects.all()),
    }

    @action(detail=False, methods=["get"], name="Get range of years")
    def digitized_date_range(self, request):
        """Return per-year counts of digitized photographs, honoring the same
        query-parameter filters as the list endpoint."""
        years_array = (
            self.filterset_class(request.GET, queryset=self.get_queryset())
            .qs.annotate(year=Extract("digitized_date", "year"))
            .values("year")
            .order_by("year")
            .annotate(n=Count("year"))
        )
        return Response(years_array)
class FaceAnnotationFilter(filters.FilterSet):
    """Filters for face-annotation endpoints (GCV face detections)."""

    photograph = filters.ModelChoiceFilter(queryset=models.Photograph.objects.all())
    detection_confidence = filters.RangeFilter()
    # Likelihood filters act as lower bounds (gte) on the stored scores.
    joy_likelihood = filters.NumberFilter(lookup_expr="gte")
    sorrow_likelihood = filters.NumberFilter(lookup_expr="gte")
    anger_likelihood = filters.NumberFilter(lookup_expr="gte")
    surprise_likelihood = filters.NumberFilter(lookup_expr="gte")
    headwear_likelihood = filters.NumberFilter(lookup_expr="gte")
class FaceAnnotationViewset(viewsets.ModelViewSet):
    """CRUD endpoints for face annotations, with their photograph prefetched
    using the standard list-view relations."""

    queryset = models.FaceAnnotation.objects.prefetch_related(
        Prefetch("photograph", prepare_photograph_qs(models.Photograph.objects.all()))
    ).all()
    filterset_class = FaceAnnotationFilter
    serializer_class = serializers.FaceAnnotationSerializer
    ordering_fields = [
        "photograph",
        "detection_confidence",
        "joy_likelihood",
        "sorrow_likelihood",
        "anger_likelihood",
        "surprise_likelihood",
        "under_exposed_likelihood",
        "blurred_likelihood",
        "headwear_likelihood",
    ]
class ObjectAnnotationFilter(filters.FilterSet):
    """Filters for object-annotation endpoints (GCV object detections)."""

    photograph = filters.ModelChoiceFilter(queryset=models.Photograph.objects.all())
    score = filters.RangeFilter()
    # Case-insensitive substring match on the related label's text.
    label = filters.CharFilter(field_name="label__label", lookup_expr="icontains")
class ObjectAnnotationViewset(viewsets.ModelViewSet):
    """CRUD endpoints for object annotations, with label and photograph
    relations loaded up front."""

    queryset = (
        models.ObjectAnnotation.objects.select_related("label")
        .prefetch_related(
            Prefetch(
                "photograph", prepare_photograph_qs(models.Photograph.objects.all())
            )
        )
        .all()
    )
    filterset_class = ObjectAnnotationFilter
    serializer_class = serializers.ObjectAnnotationSerializer
    ordering_fields = ["photograph", "score", "photograph__date_taken_early"]
class ObjectAnnotationLabelViewset(viewsets.ModelViewSet):
    """Endpoints for object-annotation labels with annotation/image counts.

    Unpaginated; see PaginatedObjectAnnotationLabelViewset for the paginated,
    filterable variant.
    """

    queryset = models.ObjectAnnotationLabel.objects.order_by("label").annotate(
        n_annotations=Count("annotations", distinct=True),
        n_images=Count("annotations__photograph", distinct=True),
    )
    serializer_class = serializers.ObjectAnnotationLabelSerializer
    ordering_fields = ["label", "n_annotations", "n_images"]
    pagination_class = None
class ObjectAnnotationLabelFilter(filters.FilterSet):
    """Filters for object-annotation labels.

    Each relational filter keeps only labels whose object annotations appear
    on at least one photograph matching the selected job, directory, tag, or
    GCV photo label, using correlated Exists subqueries.
    """

    label = filters.CharFilter(field_name="label", lookup_expr="icontains")
    job = filters.ModelChoiceFilter(
        queryset=collection.models.Job.objects.all(), method="by_job"
    )

    def by_job(self, queryset, name, value):
        # Labels detected on at least one photograph belonging to this job.
        if value is None:
            return queryset
        else:
            return queryset.filter(
                Exists(
                    collection.models.Job.objects.filter(
                        id=value.id, photographs__objectannotation__label=OuterRef("pk")
                    )
                )
            )

    directory = filters.ModelChoiceFilter(
        queryset=collection.models.Directory.objects.all(), method="by_directory"
    )

    def by_directory(self, queryset, name, value):
        # Labels detected on photographs directly inside this directory.
        if value is None:
            return queryset
        else:
            return queryset.filter(
                Exists(
                    collection.models.Directory.objects.filter(
                        id=value.id,
                        immediate_photographs__objectannotation__label=OuterRef("pk"),
                    )
                )
            )

    tag = filters.ModelChoiceFilter(
        queryset=tagging.models.Tag.objects.all(), method="by_tag"
    )

    def by_tag(self, queryset, name, value):
        # Labels detected on photographs carrying this tag.
        if value is None:
            return queryset
        else:
            return queryset.filter(
                Exists(
                    tagging.models.Tag.objects.filter(
                        id=value.id,
                        photograph_tags__photograph__objectannotation__label=OuterRef(
                            "pk"
                        ),
                    )
                )
            )

    gcv_label = filters.ModelChoiceFilter(
        queryset=models.PhotoLabel.objects.all(), method="by_gcv_label"
    )

    def by_gcv_label(self, queryset, name, value):
        # Labels co-occurring with this GCV photo label on some photograph.
        if value is None:
            return queryset
        else:
            return queryset.filter(
                Exists(
                    models.PhotoLabel.objects.filter(
                        id=value.id,
                        annotations__photograph__objectannotation__label=OuterRef("pk"),
                    )
                )
            )
class PaginatedObjectAnnotationLabelViewset(ObjectAnnotationLabelViewset):
    """Paginated, filterable variant of ObjectAnnotationLabelViewset."""

    filterset_class = ObjectAnnotationLabelFilter
    pagination_class = LimitOffsetPagination
class PhotoLabelFilter(filters.FilterSet):
    """Filters for GCV photo labels.

    Mirrors ObjectAnnotationLabelFilter: each relational filter keeps only
    labels annotated on at least one photograph matching the selected job,
    directory, tag, or GCV object label, via correlated Exists subqueries.

    Fixes vs. the previous version:
    - `by_job` had no declared `job` filter, so the method was dead code and
      a `job` query parameter was silently ignored; the declaration is now
      present, matching ObjectAnnotationLabelFilter.
    - `gcv_object` was declared twice with identical definitions (the second
      simply shadowed the first); the duplicate has been removed.
    """

    label = filters.CharFilter(field_name="label", lookup_expr="icontains")
    job = filters.ModelChoiceFilter(
        queryset=collection.models.Job.objects.all(), method="by_job"
    )

    def by_job(self, queryset, name, value):
        # Labels annotated on at least one photograph belonging to this job.
        if value is None:
            return queryset
        else:
            return queryset.filter(
                Exists(
                    collection.models.Job.objects.filter(
                        id=value.id,
                        photographs__label_annotations__label=OuterRef("pk"),
                    )
                )
            )

    directory = filters.ModelChoiceFilter(
        queryset=collection.models.Directory.objects.all(), method="by_directory"
    )

    def by_directory(self, queryset, name, value):
        # Labels annotated on photographs directly inside this directory.
        if value is None:
            return queryset
        else:
            return queryset.filter(
                Exists(
                    collection.models.Directory.objects.filter(
                        id=value.id,
                        immediate_photographs__label_annotations__label=OuterRef("pk"),
                    )
                )
            )

    tag = filters.ModelChoiceFilter(
        queryset=tagging.models.Tag.objects.all(), method="by_tag"
    )

    def by_tag(self, queryset, name, value):
        # Labels annotated on photographs carrying this tag.
        if value is None:
            return queryset
        else:
            return queryset.filter(
                Exists(
                    tagging.models.Tag.objects.filter(
                        id=value.id,
                        photograph_tags__photograph__label_annotations__label=OuterRef(
                            "pk"
                        ),
                    )
                )
            )

    gcv_object = filters.ModelChoiceFilter(
        queryset=models.ObjectAnnotationLabel.objects.all(), method="by_gcv_object"
    )

    def by_gcv_object(self, queryset, name, value):
        # Labels co-occurring with this GCV object label on some photograph.
        if value is None:
            return queryset
        else:
            return queryset.filter(
                Exists(
                    models.ObjectAnnotationLabel.objects.filter(
                        id=value.id,
                        annotations__photograph__label_annotations__label=OuterRef(
                            "pk"
                        ),
                    )
                )
            )
class PhotoLabelViewset(viewsets.ModelViewSet):
    """CRUD endpoints for GCV photo labels, annotated with the number of
    distinct photographs each label appears on."""

    queryset = models.PhotoLabel.objects.annotate(
        n_images=Count("annotations__photograph", distinct=True)
    ).all()
    filterset_class = PhotoLabelFilter
    serializer_class = serializers.PhotoLabelSerializer
    ordering_fields = ["label", "n_images"]
| [
"[email protected]"
] | |
d13a948f9a23c48970fce20715c43920dd560cef | cb4cfcece4bc14f591b038adbc7fadccaf447a1d | /FFL.py | ca7c42027fa2499d1ac1f01774358f322ab53de3 | [] | no_license | psycho-pomp/CodeChef | ba88cc8e15b3e87d39ad0c4665c6892620c09d22 | 881edddded0bc8820d22f42b94b9959fd6912c88 | refs/heads/master | 2023-03-21T06:46:14.455055 | 2021-03-11T12:07:48 | 2021-03-11T12:07:48 | 275,214,989 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # cook your dish here
# For each test case, decide whether the remaining budget (out of 100) can
# cover the cheapest defender plus the cheapest forward.
total_cases = int(input())
for _ in range(total_cases):
    n, spent = map(int, input().split())
    prices = list(map(int, input().split()))
    roles = list(map(int, input().split()))
    # 101 acts as "no such player yet": every real price is at most 100.
    cheapest_defender = 101
    cheapest_forward = 101
    for i in range(n):
        price = prices[i]
        if roles[i] == 0:
            cheapest_defender = min(cheapest_defender, price)
        else:
            cheapest_forward = min(cheapest_forward, price)
    print("yes" if spent + cheapest_defender + cheapest_forward <= 100 else 'no')
| [
"[email protected]"
] | |
ae6f6eb11eddca39b273b09aef1c744440f99616 | a97fb0584709e292a475defc8506eeb85bb24339 | /source code/code/ch1713.py | 6b4765d3aa02f8de2a92852301a82b8975666146 | [] | no_license | AAQ6291/PYCATCH | bd297858051042613739819ed70c535901569079 | 27ec4094be785810074be8b16ef84c85048065b5 | refs/heads/master | 2020-03-26T13:54:57.051016 | 2018-08-17T09:05:19 | 2018-08-17T09:05:19 | 144,963,014 | 0 | 0 | null | null | null | null | BIG5 | Python | false | false | 1,037 | py | #!/usr/bin/env python
# -*- coding: cp950 -*-
# 載入wx模組
import wx
class myApp(wx.App):
    """wx application object: shows the progress-bar frame on startup."""

    def OnInit(self):
        window = myFrame()
        window.Show()
        return True
# Define myFrame, derived from wx.Frame.
class myFrame(wx.Frame):
    """Top-level window showing a horizontal gauge advanced on idle events."""

    def __init__(self):
        wx.Frame.__init__(
            self,
            None,
            -1,
            'Progress Bar',
            size=(320, 150))
        # Create the panel that hosts the gauge.
        panel = wx.Panel(self, -1)
        # Counter holding the current progress value.
        self.count = 0
        # Create the progress-bar (gauge) widget.
        # NOTE(review): the gauge range is 50 while count climbs to 99 —
        # presumably wx clamps out-of-range values; confirm this is intended.
        self.gauge = wx.Gauge(
            panel,
            -1,
            50,
            pos = (5, 50),
            size = (300, 20),
            style=wx.GA_HORIZONTAL)
        # Advance the bar whenever the event loop is idle.
        self.Bind(wx.EVT_IDLE, self.OnIdle)

    # Handler for wx.EVT_IDLE.
    def OnIdle(self, event):
        self.count += 1
        if self.count >= 100:
            self.count = 0
        # Push the new progress value to the gauge.
        self.gauge.SetValue(self.count)
def main():
    """Create the application object and enter the wx event loop."""
    myApp().MainLoop()
# Start the GUI only when executed as a script.
if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
86bd165356c0d04df9db767185f55bfd03bdff46 | 93684882400d0249ad733249f5b2c8dbd230110f | /ClassExercise & studio/chapter 10/ex.04.py | 4c70f122bd5ee54fdb03df271fb9de2af6b7e6e9 | [] | no_license | SmileShmily/LaunchCode-summerofcode-Unit1 | c492bbed966547cc8c1be7f15d7a23cb989d407b | 03474cf77b0dae2bcfaf8513711d3fec72bd4166 | refs/heads/master | 2021-01-16T23:19:23.413010 | 2017-06-29T02:49:19 | 2017-06-29T02:49:19 | 95,730,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | '''Write a function to count how many odd numbers are in a list.
'''
import random
def countOdd(lst):
    """Return the number of odd integers in lst.

    Handles negative values as well: Python's ``%`` with a positive modulus
    always yields a non-negative remainder, so ``-3 % 2 == 1``.
    """
    # Generator expression replaces the manual counter loop.
    return sum(1 for e in lst if e % 2 != 0)
# Exercise countOdd on 100 random integers drawn from [0, 1000].
lst = [random.randint(0, 1000) for _ in range(100)]
print(countOdd(lst))
| [
"[email protected]"
] | |
34dbac9104ddf3232cb07a538b70a5491f9efc58 | 0d86675d6f69836db9a7cd5244ebc7307d7f997a | /open_spiel/python/algorithms/psro_v2/utils.py | 6959c12d3adaa76b18373a5ea2d3617408b5e70c | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | sarahperrin/open_spiel | a7a4ecde1156b458d144989d3d7ef1814577741b | 6f3551fd990053cf2287b380fb9ad0b2a2607c18 | refs/heads/master | 2021-12-25T04:12:19.270095 | 2021-12-09T16:17:43 | 2021-12-09T16:17:43 | 235,547,820 | 3 | 0 | Apache-2.0 | 2020-01-22T10:17:41 | 2020-01-22T10:17:40 | null | UTF-8 | Python | false | false | 15,286 | py | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Various general utility functions."""
import random
import numpy as np
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.algorithms import policy_aggregator
from open_spiel.python.algorithms import policy_aggregator_joint
from open_spiel.python.egt import alpharank
from open_spiel.python.egt import utils as alpharank_utils
def empty_list_generator(number_dimensions):
  """Returns an empty list nested `number_dimensions` levels deep.

  e.g. 1 -> [], 2 -> [[]], 3 -> [[[]]]. Values below 2 all yield [].
  """
  if number_dimensions <= 1:
    return []
  return [empty_list_generator(number_dimensions - 1)]
def random_choice(outcomes, probabilities):
  """Draws one element of `outcomes` according to `probabilities`.

  Hand-rolled inverse-CDF sampling; repeated calls are faster than
  `numpy.random.choice`.

  Args:
    outcomes: List of categorical outcomes.
    probabilities: Discrete (possibly unnormalized) distribution, as a list
      of floats.

  Returns:
    The sampled entry of `outcomes`.
  """
  cdf = np.cumsum(probabilities)
  draw = random.random()
  index = np.searchsorted(cdf / cdf[-1], draw)
  return outcomes[index]
def sample_strategy(total_policies,
                    probabilities_of_playing_policies,
                    probs_are_marginal=True):
  """Samples a joint strategy given play probabilities.

  Dispatches to independent (per-player) sampling when `probs_are_marginal`
  is True, and to joint sampling otherwise.

  Args:
    total_policies: if probs_are_marginal, a list with one sub-list of
      policies per player; otherwise a list of joint policies. In both cases
      the policy order must match probabilities_of_playing_policies.
    probabilities_of_playing_policies: if probs_are_marginal, a list whose
      k-th element lists the play probabilities of the k-th player's
      policies; otherwise the play probabilities of the joint policies in
      total_policies.
    probs_are_marginal: whether player-wise marginal probabilities are
      provided. If False, a joint distribution is assumed.

  Returns:
    sampled_policies: A list specifying a single sampled joint strategy.
  """
  sampler = (sample_strategy_marginal if probs_are_marginal
             else sample_strategy_joint)
  return sampler(total_policies, probabilities_of_playing_policies)
def sample_strategy_marginal(total_policies, probabilities_of_playing_policies):
  """Samples a joint strategy from per-player marginal probabilities.

  Each player's policy is drawn independently from that player's marginal.

  Args:
    total_policies: A list, each element a list of each player's policies.
    probabilities_of_playing_policies: A list whose k-th element lists the
      play probabilities of the k-th player's policies.

  Returns:
    sampled_policies: A list specifying a single sampled joint strategy.
  """
  return [
      random_choice(total_policies[player],
                    probabilities_of_playing_policies[player])
      for player in range(len(total_policies))
  ]
def sample_random_tensor_index(probabilities_of_index_tensor):
  """Samples a multi-dimensional index proportionally to the tensor entries.

  Args:
    probabilities_of_index_tensor: np.ndarray of (unnormalized) index
      probabilities.

  Returns:
    A tuple of per-axis indices, as produced by np.unravel_index.
  """
  tensor_shape = probabilities_of_index_tensor.shape
  flat_probs = probabilities_of_index_tensor.reshape(-1)
  flat_outcomes = list(range(len(flat_probs)))
  flat_index = random_choice(flat_outcomes, flat_probs)
  return np.unravel_index(flat_index, tensor_shape)
def sample_strategy_joint(total_policies, probabilities_of_playing_policies):
  """Samples a joint strategy given joint play probabilities.

  Args:
    total_policies: A list, each element a list of each player's policies.
    probabilities_of_playing_policies: tensor of play probabilities of the
      joint policies specified by total_policies.

  Returns:
    sampled_policies: A list specifying a single sampled joint strategy.
  """
  joint_index = sample_random_tensor_index(probabilities_of_playing_policies)
  return [
      total_policies[player][strategy_index]
      for player, strategy_index in enumerate(joint_index)
  ]
def softmax(x):
  """Returns the softmax of `x`, computed in a numerically stable way.

  Subtracting the maximum before exponentiating prevents overflow for large
  logits without changing the result (softmax is shift-invariant).

  Args:
    x: array-like of logits.

  Returns:
    np.ndarray of probabilities summing to 1.
  """
  shifted_exp = np.exp(x - np.max(x))
  return shifted_exp / np.sum(shifted_exp)
def round_maintain_sum(x):
  """Rounds the vector `x` elementwise while preserving its (integer) sum.

  E.g., if x = array([3.37625333, 2.27920304, 4.34454364]), note sum(x) == 10,
  but naive np.round would yield a vector summing to 9. Here the missing mass
  is added back to the entries that lost the most when floored, so the
  rounded vector still sums to 10.

  Args:
    x: a vector.
  """
  rounded = np.floor(x)
  # Mass lost by flooring: original rounded sum minus floored sum.
  missing_mass = round(sum(x)) - sum(rounded)
  # Entries ordered by (floored - original): most-truncated entries first.
  most_truncated = np.argsort(rounded - x)[:int(missing_mass)]
  rounded[most_truncated] += 1
  return rounded
def get_alpharank_marginals(payoff_tables, pi):
  """Returns per-player marginal strategy rankings given joint rankings pi.

  Args:
    payoff_tables: List of meta-game payoff tables for a K-player game, where
      each table has dim [n_strategies_player_1 x ... x n_strategies_player_K].
      These payoff tables may be asymmetric.
    pi: The vector of joint rankings as computed by alpharank. Each element i
      corresponds to a unique integer ID representing a given strategy profile,
      with profile_to_id mappings provided by
      alpharank_utils.get_id_from_strat_profile().

  Returns:
    pi_marginals: List of np.arrays of player-wise marginal strategy masses,
      where the k-th player's np.array has shape [n_strategies_player_k].
  """
  num_populations = len(payoff_tables)
  if num_populations == 1:
    # Single-population (symmetric) case: the joint ranking is the marginal.
    return pi
  num_strats_per_population = alpharank_utils.get_num_strats_per_population(
      payoff_tables, payoffs_are_hpt_format=False)
  num_profiles = alpharank_utils.get_num_profiles(num_strats_per_population)
  pi_marginals = [np.zeros(n) for n in num_strats_per_population]
  for profile_id in range(num_profiles):
    profile = alpharank_utils.get_strat_profile_from_id(
        num_strats_per_population, profile_id)
    # Accumulate this profile's joint mass onto each player's chosen strategy.
    for player, strat in enumerate(profile):
      pi_marginals[player][strat] += pi[profile_id]
  return pi_marginals
def remove_epsilon_negative_probs(probs, epsilon=1e-9):
  """Removes tiny negative probabilities that occur due to precision errors.

  Negative entries must be within `epsilon` of zero; they are clipped to
  zero and the distribution is renormalized.

  Args:
    probs: np.array of probabilities, possibly with small negative entries.
    epsilon: largest tolerated magnitude for a negative entry.

  Returns:
    The cleaned probability vector.

  Raises:
    AssertionError: if a negative entry has magnitude larger than epsilon.
  """
  if np.any(probs < 0):
    # Ensures these negative probabilities aren't large in magnitude, as that
    # is unexpected and likely not due to numerical precision issues.
    print("Probabilities received were: {}".format(probs[probs < 0]))
    # np.all replaces np.alltrue, which was removed in NumPy 2.0.
    assert np.all(probs[probs < 0] > -1.*epsilon),\
        "Negative Probabilities received were: {}".format(probs[probs < 0])
    probs[probs < 0] = 0
    probs = probs / np.sum(probs)
  return probs
def get_joint_strategy_from_marginals(probabilities):
  """Returns a joint strategy tensor from a list of per-player marginals.

  Player k's marginal is reshaped so it broadcasts along axis k; multiplying
  the reshaped marginals yields the outer-product joint distribution with
  shape [n_strategies_player_1, ..., n_strategies_player_K].

  Args:
    probabilities: list of lists of probabilities, one for each player.

  Returns:
    A joint strategy ndarray built from the list of marginals.
  """
  num_players = len(probabilities)
  joint = None
  for player, marginal in enumerate(probabilities):
    shape = [1] * num_players
    shape[player] = -1
    axis_marginal = np.asarray(marginal).reshape(shape)
    # Explicit broadcasting product: np.product over a ragged list of arrays
    # relied on object-array behavior removed in recent NumPy versions.
    joint = axis_marginal if joint is None else joint * axis_marginal
  if joint is None:
    return np.float64(1.0)  # matches np.product([]) for an empty input list
  return joint
def alpharank_strategy(solver, return_joint=False, **unused_kwargs):
  """Returns AlphaRank distribution on meta game matrix.

  This method works for general games.

  Args:
    solver: GenPSROSolver instance.
    return_joint: a boolean specifying whether to also return the joint
      distribution alongside the player-wise marginals.

  Returns:
    marginals: a list, specifying for each player the alpharank marginal
      distributions on their strategies (only returned when return_joint).
    joint_distr: a list, specifying the joint alpharank distributions for all
      strategy profiles.
  """
  meta_games = solver.get_meta_game()
  meta_games = [np.asarray(x) for x in meta_games]
  if solver.symmetric_game:
    # Symmetric game: a single shared payoff table describes every player.
    meta_games = [meta_games[0]]
    # Get alpharank distribution via alpha-sweep
    joint_distr = alpharank.sweep_pi_vs_epsilon(
        meta_games)
    joint_distr = remove_epsilon_negative_probs(joint_distr)
    # NOTE(review): this hard-codes a 2-player game (2 * [joint_distr]);
    # confirm symmetric games with more players never reach this branch.
    marginals = 2 * [joint_distr]
    joint_distr = get_joint_strategy_from_marginals(marginals)
    if return_joint:
      return marginals, joint_distr
    else:
      return joint_distr
  else:
    joint_distr = alpharank.sweep_pi_vs_epsilon(meta_games)
    joint_distr = remove_epsilon_negative_probs(joint_distr)
    if return_joint:
      # Derive per-player marginals from the joint ranking.
      marginals = get_alpharank_marginals(meta_games, joint_distr)
      return marginals, joint_distr
    else:
      return joint_distr
def get_strategy_profile_ids(payoff_tables):
  """Returns a range over the integer IDs of all pure strategy profiles."""
  strats_per_population = alpharank_utils.get_num_strats_per_population(
      payoff_tables, payoffs_are_hpt_format=False)
  num_profiles = alpharank_utils.get_num_profiles(strats_per_population)
  return range(num_profiles)
def get_joint_policies_from_id_list(payoff_tables, policies, profile_id_list):
  """Returns a list of joint policies, given a list of integer IDs.

  Args:
    payoff_tables: List of payoff tables, one per player.
    policies: A list of policies, one per player.
    profile_id_list: list of integer IDs, each corresponding to a joint policy.
      These integers correspond to those in get_strategy_profile_ids().

  Returns:
    selected_joint_policies: A list, with each element being a joint policy
      instance (i.e., a list of policies, one per player).
  """
  num_strats_per_population =\
      alpharank_utils.get_num_strats_per_population(payoff_tables,
                                                    payoffs_are_hpt_format=False)
  # Sanity check: payoff tables and policy lists must agree on the number of
  # strategies available to each player.
  np.testing.assert_array_equal(num_strats_per_population,
                                [len(p) for p in policies])
  num_players = len(policies)
  selected_joint_policies = []
  for profile_id in profile_id_list:
    # Compute the profile associated with the integer profile_id
    policy_profile = alpharank_utils.get_strat_profile_from_id(
        num_strats_per_population, profile_id)
    # Append the joint policy corresponding to policy_profile
    selected_joint_policies.append(
        [policies[k][policy_profile[k]] for k in range(num_players)])
  return selected_joint_policies
def compute_states_and_info_states_if_none(game,
                                           all_states=None,
                                           state_to_information_state=None):
  """Returns all_states and/or state_to_information_state for the game.

  To recompute everything, pass in None for both all_states and
  state_to_information_state. Otherwise, this function will use the passed in
  values to reconstruct either of them.

  Args:
    game: The open_spiel game.
    all_states: The result of calling get_all_states.get_all_states. Cached for
      improved performance.
    state_to_information_state: A dict mapping str(state) to
      state.information_state for every state in the game. Cached for improved
      performance.

  Returns:
    The (all_states, state_to_information_state) pair, with any part passed
    in as None recomputed from the game.
  """
  if all_states is None:
    # Enumerate only non-terminal, non-chance states, with no depth limit.
    all_states = get_all_states.get_all_states(
        game,
        depth_limit=-1,
        include_terminals=False,
        include_chance_states=False)
  if state_to_information_state is None:
    state_to_information_state = {
        state: all_states[state].information_state_string()
        for state in all_states
    }
  return all_states, state_to_information_state
def aggregate_policies(game, total_policies, probabilities_of_playing_policies):
  """Aggregate the players' policies.

  Specifically, returns a single callable policy object that is
  realization-equivalent to playing total_policies with
  probabilities_of_playing_policies. I.e., aggr_policy is a joint policy that
  can be called at any information state [via
  action_probabilities(state, player_id)].

  Args:
    game: The open_spiel game.
    total_policies: A list of list of all policy.Policy strategies used for
      training, where the n-th entry of the main list is a list of policies
      available to the n-th player.
    probabilities_of_playing_policies: A list of arrays representing, per
      player, the probabilities of playing each policy in total_policies for the
      same player.
  Returns:
    A callable object representing the policy.
  """
  aggregator = policy_aggregator.PolicyAggregator(game)
  # Aggregate over every player id (one probability array per player).
  return aggregator.aggregate(
      range(len(probabilities_of_playing_policies)), total_policies,
      probabilities_of_playing_policies)
def marginal_to_joint(policies):
  """Enumerates all joint policies from per-player policy lists.

  Args:
    policies: List of lists of policies, one inner list per player.

  Returns:
    List of joint policies (each a list with one policy per player), ordered
    row-major so the result is np.reshape-compatible with the per-player
    shape.
  """
  shape = tuple(len(player_policies) for player_policies in policies)
  num_players = len(shape)
  flat_ids = np.array(list(range(int(np.prod(shape)))))
  per_player_ids = np.unravel_index(flat_ids, shape)
  return [
      [policies[player][profile[player]] for player in range(num_players)]
      for profile in zip(*per_player_ids)
  ]
def aggregate_joint_policies(game, total_policies,
                             probabilities_of_playing_policies):
  """Aggregate the players' joint policies.

  Specifically, returns a single callable policy object that is
  realization-equivalent to playing total_policies with
  probabilities_of_playing_policies. I.e., aggr_policy is a joint policy that
  can be called at any information state [via
  action_probabilities(state, player_id)].

  Args:
    game: The open_spiel game.
    total_policies: A list of list of all policy.Policy strategies used for
      training, where the n-th entry of the main list is a list of policies, one
      entry for each player.
    probabilities_of_playing_policies: A list of floats representing the
      probabilities of playing each joint strategy in total_policies.

  Returns:
    A callable object representing the policy.
  """
  aggregator = policy_aggregator_joint.JointPolicyAggregator(game)
  # Player ids are inferred from the length of the first joint policy.
  return aggregator.aggregate(
      range(len(total_policies[0])), total_policies,
      probabilities_of_playing_policies)
| [
"DeepMind technologies Ltd"
] | DeepMind technologies Ltd |
0b428bafc96d69a7ec8a727903cea42cf0da8fd4 | 3c0f93b707e94b8e1363588a1699ab7d663d8f98 | /bin/extract-extrema | 9c4d337bd4e25c292cd676ab2f7aee1be00d2d98 | [
"MIT"
] | permissive | reedessick/universality | 3b9c39dbf3d15fe6fe824ac124cbecec5d3f1836 | d4eaf5ee6ea1ecf79f0ec32ea5e62093c95d4b57 | refs/heads/master | 2023-07-13T19:31:38.277188 | 2023-06-25T23:03:14 | 2023-06-25T23:03:14 | 118,136,331 | 2 | 2 | MIT | 2023-04-25T17:29:33 | 2018-01-19T14:39:26 | Python | UTF-8 | Python | false | false | 3,553 | #!/usr/bin/env python3
"""an executable to extract extrema from a single CSV
"""
__author__ = "Reed Essick ([email protected])"
#-------------------------------------------------
import os
import numpy as np

from argparse import ArgumentParser

### non-standard libraries
from universality.utils import io
from universality.properties import extrema

#-------------------------------------------------

# Build the CLI; the module docstring doubles as the --help description.
# NOTE: this import/parser prologue previously appeared twice verbatim
# (copy/paste duplication); a single copy is behaviorally identical, since
# the second ArgumentParser simply discarded the first.
parser = ArgumentParser(description=__doc__)
# required arguments
rgroup = parser.add_argument_group('required arguments')
# Positional arguments: input CSV, output path, and the columns to process.
rgroup.add_argument('inpath', type=str)
rgroup.add_argument('outpath', type=str)
rgroup.add_argument('columns', nargs='+', type=str,
    help='columns to extract')

# Repeatable triple: column name, default max, default min.
rgroup.add_argument('--default-values', nargs=3, type=str, action='append', default=[],
    help='set a default value if no extrema are found within the specified ranges. \
eg, "--default-value column default_max_val default_min_val"')

# Repeatable triple: original column plus output names for its max and min.
rgroup.add_argument('--new-column', type=str, default=[], action='append', nargs=3,
    help='the name of the new column that will be added to each file. \
Supply the name of the column original and then the names of the new columns for max and min. \
eg, "--new-column old_column max_old_column min_old_column"')

# Repeatable triple: column name, lower bound, upper bound.
rgroup.add_argument('--column-range', nargs=3, type=str, default=[], action='append',
    help='restrict the extremization to within this range for this column. \
e.g.: "--column-range baryon_density 2.8e14 2.8e15". Can be repeated to specify multiple ranges')

# verbosity arguments
vgroup = parser.add_argument_group('verbosity arguments')
vgroup.add_argument('-v', '--verbose', default=False, action='store_true')
vgroup.add_argument('-V', '--Verbose', default=False, action='store_true')

args = parser.parse_args()
### finish parsing

# Number of columns whose extrema we will extract.
Ncol = len(args.columns)

# Map: original column name -> [max-column name, min-column name].
new_columns = dict((a, [b, c]) for a, b, c in args.new_column)

if args.default_values:
    # Parse "--default-values column max min" triples into float pairs.
    # (Previously written as a doubled "default_values = default_values =".)
    default_values = dict((a, (float(b), float(c))) for a, b, c in args.default_values)
    for col in args.columns:
        if col not in default_values:
            raise ValueError('must specify --default-values for either all columns or no columns! missing %s'%col)
    # Re-order the defaults to match the order of the requested columns.
    default_values = [default_values[col] for col in args.columns]
else:
    default_values = None

# Make sure the output directory exists before trying to write into it.
if os.path.dirname(args.outpath) and (not os.path.exists(os.path.dirname(args.outpath))):
    os.makedirs(os.path.dirname(args.outpath))

loadcolumns = [col for col in args.columns]
ranges = dict()
for column, m, M in args.column_range:
    # BUGFIX: this loop previously referenced an undefined name "key"
    # (NameError whenever --column-range was supplied); the unpacked
    # "column" is what was intended.
    if column not in loadcolumns:
        loadcolumns.append(column)
    ranges[loadcolumns.index(column)] = (float(m), float(M))

args.verbose |= args.Verbose
#-------------------------------------------------

if args.verbose:
    print('reading samples from: '+args.inpath)
data, _ = io.load(args.inpath, loadcolumns)
N = len(data)
# NOTE(review): N is computed but not used in the remainder of this script.

#------------------------

if args.verbose:
    print('extracting maxima and minima')

# Extract the max/min of each requested column, restricted to any
# --column-range windows and falling back to --default-values if supplied.
ans = extrema.data2extrema(
    data,
    Ncol,
    default_values=default_values,
    static_ranges=ranges,
)

# Column names for the output file; --new-column entries override the
# defaults produced by extrema.outputcolumns.
outcols = extrema.outputcolumns(
    args.columns,
    custom_names=new_columns,
)

#------------------------

if args.verbose:
    print('writing samples to: '+args.outpath)
io.write(args.outpath, ans, outcols)
| [
"[email protected]"
] | ||
add0946623b02c0745122b9f79576c4775ae89d3 | be0898ceaee2a7758ffe0365b976f597b2ad26dd | /rls/common/when.py | e669f55384cf6128b09564efcdf3616c92ce3403 | [
"Apache-2.0"
] | permissive | violet712/RLs | 1edaa6427108e3e36d513cb6038be771837ecca4 | 25cc97c96cbb19fe859c9387b7547cbada2c89f2 | refs/heads/master | 2023-08-25T12:04:24.174034 | 2021-10-03T15:37:32 | 2021-10-03T15:37:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | # Added from https://github.com/danijar/dreamerv2/blob/main/dreamerv2/common/when.py
class Every:
    """Callable gate that fires once every `every` steps.

    The first call primes the gate and fires immediately; subsequent calls
    fire only after at least `every` steps have elapsed since the last
    firing. A falsy `every` disables the gate entirely.
    """

    def __init__(self, every):
        self._every = every
        self._last = None  # step at which the gate last fired

    def __call__(self, step):
        step = int(step)
        if not self._every:
            return False
        # First invocation: remember the step and fire.
        if self._last is None:
            self._last = step
            return True
        if step < self._last + self._every:
            return False
        self._last += self._every
        return True
class Once:
    """Callable that returns True exactly once, then False forever after."""

    def __init__(self):
        self._pending = True

    def __call__(self):
        if not self._pending:
            return False
        self._pending = False
        return True
class Until:
    """Callable that is True while `step` is below a threshold.

    A falsy threshold (None or 0) means "never stop": always True.
    """

    def __init__(self, until=None):
        self._until = until

    def __call__(self, step):
        threshold = self._until
        if not threshold:
            return True
        return int(step) < threshold
if __name__ == '__main__':
    # Ad-hoc smoke test: prints 0, 10, ..., 90, then 'first', then 0.
    e = Every(10)
    for i in range(100):
        if e(i):
            print(i)
    o = Once()
    if o():
        print('first')
    if o():
        print('second')  # never reached: Once only fires a single time
    u = Until(1)
    for i in range(10):
        if u(i):
            print(i)
| [
"[email protected]"
] | |
1c8e159794e5cfb1b219726ba7ee4901f03e4a45 | e4ec5b6cf3cfe2568ef0b5654c019e398b4ecc67 | /azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/monitor/models/log_profile_resource.py | f9ecb885b923e3bdda411b695f0559bb08ac013c | [] | no_license | EnjoyLifeFund/macHighSierra-cellars | 59051e496ed0e68d14e0d5d91367a2c92c95e1fb | 49a477d42f081e52f4c5bdd39535156a2df52d09 | refs/heads/master | 2022-12-25T19:28:29.992466 | 2017-10-10T13:00:08 | 2017-10-10T13:00:08 | 96,081,471 | 3 | 1 | null | 2022-12-17T02:26:21 | 2017-07-03T07:17:34 | null | UTF-8 | Python | false | false | 3,481 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class LogProfileResource(Resource):
    """The log profile resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Azure resource Id
    :vartype id: str
    :ivar name: Azure resource name
    :vartype name: str
    :ivar type: Azure resource type
    :vartype type: str
    :param location: Resource location
    :type location: str
    :param tags: Resource tags
    :type tags: dict
    :param storage_account_id: the resource id of the storage account to which
     you would like to send the Activity Log.
    :type storage_account_id: str
    :param service_bus_rule_id: The service bus rule ID of the service bus
     namespace in which you would like to have Event Hubs created for streaming
     the Activity Log. The rule ID is of the format: '{service bus resource
     ID}/authorizationrules/{key name}'.
    :type service_bus_rule_id: str
    :param locations: List of regions for which Activity Log events should be
     stored or streamed. It is a comma separated list of valid ARM locations
     including the 'global' location.
    :type locations: list of str
    :param categories: the categories of the logs. These categories are
     created as is convenient to the user. Some values are: 'Write', 'Delete',
     and/or 'Action.'
    :type categories: list of str
    :param retention_policy: the retention policy for the events in the log.
    :type retention_policy: :class:`RetentionPolicy
     <azure.mgmt.monitor.models.RetentionPolicy>`
    """

    # Serialization constraints: 'readonly' fields are populated by the
    # service and must not be sent; 'required' fields must be supplied.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'locations': {'required': True},
        'categories': {'required': True},
        'retention_policy': {'required': True},
    }

    # Maps Python attribute names to their wire-format keys; 'properties.*'
    # keys are nested under the ARM 'properties' envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'storage_account_id': {'key': 'properties.storageAccountId', 'type': 'str'},
        'service_bus_rule_id': {'key': 'properties.serviceBusRuleId', 'type': 'str'},
        'locations': {'key': 'properties.locations', 'type': '[str]'},
        'categories': {'key': 'properties.categories', 'type': '[str]'},
        'retention_policy': {'key': 'properties.retentionPolicy', 'type': 'RetentionPolicy'},
    }

    def __init__(self, location, locations, categories, retention_policy, tags=None, storage_account_id=None, service_bus_rule_id=None):
        # Common Resource fields (location, tags) are handled by the base class.
        super(LogProfileResource, self).__init__(location=location, tags=tags)
        self.storage_account_id = storage_account_id
        self.service_bus_rule_id = service_bus_rule_id
        self.locations = locations
        self.categories = categories
        self.retention_policy = retention_policy
| [
"[email protected]"
] | |
1374e0b0e8afa74b17c2f850934167d58caa106d | 1ac87c808c33336338f359f0233593a8e603f45e | /allauth/socialaccount/providers/github/models.py | 9db7841b25cbd32da8ff47e04218751cb5eb7996 | [
"MIT"
] | permissive | chhabrakadabra/django-allauth | 140a10de87061f7d60e1fa692b98b64afb9f987e | 19c7070c1b777b7ad69c2c6fd59571cc5a18f4e4 | refs/heads/master | 2021-01-15T20:19:26.777224 | 2012-06-19T04:26:29 | 2012-06-19T04:26:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import Provider, ProviderAccount
class GitHubAccount(ProviderAccount):
    # No GitHub-specific account behavior yet; inherits everything from
    # ProviderAccount.
    pass


class GitHubProvider(Provider):
    id = 'github'  # stable provider key used in settings/URLs
    name = 'GitHub'  # human-readable provider name
    package = 'allauth.socialaccount.providers.github'
    account_class = GitHubAccount


# Make the provider discoverable via the global provider registry.
providers.registry.register(GitHubProvider)
| [
"[email protected]"
] | |
0ea1c176cc778405f57d3480c00c550dd52c912d | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /070_oop/001_classes/_exercises/_templates/ITDN Python RUS/002_Nasledovanie i Polimorfizm/07-complex-hierarchy-mro.py | fdcb6405dbda80c5c907ab54361512d72eeb6716 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,916 | py | # # -*- coding: utf-8 -*-
#
#
# """
# Ещё один пример использования super и построения интерпретатором линеаризации.
# Иерархия классов в данном примере:
# object
# / \
# / \
# A B
# \ /|
# \ / |
# \ / |
# \ / |
# C |
# \ |
# \ |
# \ |
# \ |
# \|
# D
# |
# E
# """
#
#
#
# ___ gen_init ___
# """
# Декоратор gen_init, который добавляет автоматически
# сгенерированный конструктор.
# Декоратор -- это функция, которая принимает функцию или класс
# и возвращает другой объект, который будет привязан к имени изначального.
# Обычно используется для изменения поведения фукнции (путём создания
# новой функции, которая вызывает изначальную) или модификации класса
# (и происходит в данном примере).
#
# :param cls: модифицируемый класс
# :return: класс с добавленным конструктором
# """
#
# ___ -
# print('Entered' ___. -n "constructor")
# s... ___ ____. -
# print('Quit', ___. -n "constructor")
# ___. - _ init
# r_ ___
#
#
# 0?
# c__ A o..
# p___
#
#
# 0?
# c_ B o..
# p___
#
#
# 0?
# c_ C A B
# p_
#
#
# 0?
# c_ D C B
# p_
#
#
# 0?
# c_ E D
# p_
#
#
# print E. -m
# obj _ E | [
"[email protected]"
] | |
3497af07bb08348af9c4922ec8bd53d41bfe1fa4 | 7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3 | /atcoder/other/lang_test_v1/d.py | 6fd5f61c90ff0b66f7c278d91505477cd77d5fe2 | [] | no_license | roiti46/Contest | c0c35478cd80f675965d10b1a371e44084f9b6ee | c4b850d76796c5388d2e0d2234f90dc8acfaadfa | refs/heads/master | 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | # -*- coding: utf-8 -*-
# Python 2 contest solution: reads N holiday dates ("month/day") and finds
# the longest run of consecutive days off in a 366-day (leap) year, where a
# holiday landing on a weekend banks a substitute day off ("hurikae") that
# is spent on the next ordinary weekday.
import sys,copy,math,heapq,itertools as it,fractions,re,bisect,collections as coll
# NOTE(review): most of these imports are unused contest-template boilerplate.

N = int(raw_input())
# Each holiday is given as "month/day".
md = [map(int, raw_input().split("/")) for i in xrange(N)]

# Days per month of a leap year (Feb = 29).
days = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
day = 1  # NOTE(review): immediately shadowed by the loop variable below
yobi = 6  # day-of-week index of Jan 1; values 5 and 6 are treated as weekend
hurikae = 0  # banked substitute holidays
seq = 0  # current streak of days off
ans = 0  # best streak seen so far
for month in xrange(1, 13):
    for day in xrange(1, days[month - 1] + 1):
        if [month, day] in md:
            if 5 <= yobi:
                # Holiday on a weekend: bank a substitute day off.
                hurikae += 1
            seq += 1
        elif 5 <= yobi:
            seq += 1
        else:
            if hurikae:
                # Ordinary weekday: spend a banked substitute day off.
                seq += 1
                hurikae -= 1
            else:
                seq = 0
        ans = max(ans, seq)
        yobi = (yobi + 1) % 7
print ans
| [
"[email protected]"
] | |
bfffbcc498ac8cd37e2b430fca1a96b35ea64a56 | 384d0be5ac54b306b945cf38c10d9b0a44c975ea | /devstack/tools/jenkins/jenkins_home/print_summary.py | 1d71a4a8ebc7b254f088afcd8a132aed0bbd2b19 | [] | no_license | ashokcse/openstack-bill | 05ae313637b3cfecba946d2a9b32e8c7609fc721 | 1a3d7575d4b341f64fa1764ed47e47a7504a9bcc | refs/heads/master | 2021-01-18T14:05:24.696165 | 2012-09-12T11:29:20 | 2012-09-12T11:29:20 | 5,424,267 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | #!/usr/bin/python
# Python 2 utility: walks a Jenkins server's JSON API and prints a summary
# of every matrix job's configurations (status, console log URL, health).
import urllib
import json
import sys


def print_usage():
    # Prints usage (with this script's name) and exits the process.
    print "Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"\
        % sys.argv[0]
    sys.exit()


def fetch_blob(url):
    # Fetches <url>/api/json and returns the decoded JSON object.
    return json.loads(urllib.urlopen(url + '/api/json').read())


if len(sys.argv) < 2:
    print_usage()

BASE_URL = sys.argv[1]

root = fetch_blob(BASE_URL)
results = {}
for job_url in root['jobs']:
    job = fetch_blob(job_url['url'])
    # Only matrix jobs expose 'activeConfigurations'.
    if job.get('activeConfigurations'):
        # NOTE(review): assumes job names contain exactly one '-' separating
        # tag and name; names with more hyphens raise ValueError here.
        (tag, name) = job['name'].split('-')
        if not results.get(tag):
            results[tag] = {}
        if not results[tag].get(name):
            results[tag][name] = []

        for config_url in job['activeConfigurations']:
            config = fetch_blob(config_url['url'])
            log_url = ''
            if config.get('lastBuild'):
                log_url = config['lastBuild']['url'] + 'console'
            results[tag][name].append({'test': config['displayName'],
                                       'status': config['color'],
                                       'logUrl': log_url,
                                       'healthReport': config['healthReport']})

print json.dumps(results)
| [
"[email protected]"
] | |
9f79fa06ef66ddb7cd1d963ace3346532d9816b1 | dfcaf26ef27684c7f1e69b7e90ac55094158115d | /paper/pnoise.py | d921844ebbea85d25dbd7430145b0a9781503021 | [] | no_license | msyriac/halofg | e038bbacf9332091087be657b39f274cb5507c01 | d1aaf54624a3d8bae9eeba667c6e895621a06f24 | refs/heads/master | 2021-01-21T06:10:04.264115 | 2018-04-02T02:26:23 | 2018-04-02T02:26:23 | 101,939,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,538 | py | from __future__ import print_function
# Noise-spectrum comparison plot: Planck SMICA vs LGMCA map noise against
# the theory C_ell^TT curve and reference white-noise-with-atmosphere models.
from orphics import maps,io,cosmology
from enlib import enmap
import numpy as np
import os,sys
import healpy as hp

proot = "/gpfs01/astro/workarea/msyriac/data/planck/"

import matplotlib as mpl
from cycler import cycler
mpl.rcParams['axes.prop_cycle'] = cycler(color=['#2424f0','#df6f0e','#3cc03c','#d62728','#b467bd','#ac866b','#e397d9','#9f9f9f','#ecdd72','#77becf'])

# Load cached noise spectra if present; otherwise compute them from the maps
# and cache the results for subsequent runs.
try:
    lss,smica_nls = np.loadtxt(proot+"smica_nls.txt",unpack=True)
    lsl,lgmca_nls = np.loadtxt(proot+"lgmca_nls.txt",unpack=True)
# NOTE(review): bare 'except:' also swallows KeyboardInterrupt/SystemExit;
# 'except (IOError, OSError):' would be safer here.
except:
    mask = hp.read_map(proot+"COM_Mask_Lensing_2048_R2.00.fits")
    fsky = mask.sum()*1./mask.size  # unmasked sky fraction
    # smica
    # Half-ring maps, scaled by 1e6 (presumably K -> micro-K; confirm units).
    smica1 = hp.read_map(proot+"COM_CMB_IQU-smica-field-Int_2048_R2.01_ringhalf-1.fits")*1e6
    smica2 = hp.read_map(proot+"COM_CMB_IQU-smica-field-Int_2048_R2.01_ringhalf-2.fits")*1e6
    # Mean auto-spectrum minus cross-spectrum isolates the noise power;
    # the fsky division corrects for masking.
    autos = (hp.anafast(smica1*mask)+hp.anafast(smica2*mask))/2.
    k12 = hp.anafast(smica1*mask,smica2*mask)
    smica_nls = ((autos-k12)/2.)/fsky
    print(smica_nls)
    lss = np.arange(len(smica_nls))
    # lgmca
    lgmcan = hp.read_map(proot+"WPR2_CMB_noise_muK.fits") * mask
    lgmca_nls = hp.anafast(lgmcan)/fsky
    lsl = np.arange(len(lgmca_nls))
    io.save_cols(proot+"smica_nls.txt",(lss,smica_nls))
    io.save_cols(proot+"lgmca_nls.txt",(lsl,lgmca_nls))

# Theory TT spectrum evaluated out to ell=3000.
cc = cosmology.Cosmology(lmax=6000,pickling=True,dimensionless=False)
ells = np.arange(0,3000,1)
cltt = cc.theory.lCl('TT',ells)
# Beams used to deconvolve the map-derived noise spectra (width 5.0 --
# presumably arcminutes; confirm maps.gauss_beam units).
spbeam = maps.gauss_beam(lss,5.0)
lpbeam = maps.gauss_beam(lsl,5.0)

pl = io.Plotter(yscale='log',xlabel="$\\ell$",ylabel="$\\ell(\\ell+1)C^{TT}_{\\ell}/2\\pi\ (\\mu K-\\mathrm{rad})^2$",ftsize=17)
pl.add(ells,cltt*ells*(ells+1.)/2./np.pi,color="k",lw=2)
pl.add(lss,smica_nls*lss*(lss+1.)/2./np.pi/spbeam**2.,label="SMICA")
pl.add(lsl,lgmca_nls*lsl*(lsl+1.)/2./np.pi/lpbeam**2.,label="LGMCA")

# Reference noise curves for several white-noise levels with a 1/f
# atmosphere component (lknee, alpha), shown deconvolved by a 1.5 beam.
abeam = maps.gauss_beam(ells,1.5)
for noise in [6.,10.,20.]:
    lknee = 3000
    alpha = -4.
    nls = cosmology.white_noise_with_atm_func(ells,noise,lknee,alpha,dimensionless=False,TCMB=2.7255e6)
    pl.add(ells,nls*ells*(ells+1.)/2./np.pi/abeam**2.,ls="--",lw=2,label=str(noise)+" $\\mu K$-arcmin")

# NOTE(review): the next three lines compute a 45 uK-arcmin estimate and
# rebind abeam/nls, but the matching pl.add is commented out below, so they
# are currently dead code.
noise = 45.
abeam = maps.gauss_beam(ells,5.0)
nls = cosmology.white_noise_with_atm_func(ells,noise,0,1,dimensionless=False,TCMB=2.7255e6)
# pl.add(ells,nls*ells*(ells+1.)/2./np.pi/abeam**2.,ls="--",lw=2,label="LGCMA estimate")

pl.legend(loc='lower right',labsize=12)
pl._ax.set_xlim(0,3000)
pl._ax.set_ylim(1,1e4)
pl.done(io.dout_dir+"smicalgmca.pdf")
| [
"[email protected]"
] | |
cb1f949112c1df4a50c10d67945f99d7b80e5ba8 | 448fd7b58f53b6b8394a2a4a8f6325c3b731afa8 | /EXE_RP/PROJECT.DailyControls/assetssum.py | 01820f1bebe0360217d12028334f0fb3798ecb9a | [] | no_license | webclinic017/TraderSoftwareRP | 7c4a5833226f54c84d941830adc26263e984f957 | 3996bb4b1add72901530079d0a2b7aa6a7b33680 | refs/heads/master | 2022-04-17T13:29:03.724522 | 2020-04-12T07:48:00 | 2020-04-12T07:48:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,830 | py |
# Legacy Python 2 billing script: summarizes Salesforce asset CSV exports.
import os, sys, glob, csv, subprocess, datetime, shutil, subprocess, time
# NOTE(review): 'subprocess' is imported twice and several of these modules
# (glob, shutil, time) are unused in the visible portion of the script.
###################
path = os.getcwd() + '/'
test = path + 'test/'
print path
#########
# NOTE(review): 'bconfig', 'countrytag', 'currency' and 'periodstart' are
# never defined in this file, so these open() calls raise NameError at
# runtime -- presumably leftovers from an earlier variant of the script.
footerfile = open(bconfig + 'footer.' + countrytag + '.' + currency + '.txt', 'r')
sfarea = path + 'DATA/SFDATA/'
outfile = open(test + '.assetsINV.' + periodstart + '.' + countrytag + '.html', 'w')
LineItemFooter = ''
billinglines = adetaillines = ' '
AssetProductName = 'bla'  # placeholder, overwritten in the product loop below
# Two date formats in use: dd-mm-YYYY (invoices) and YYYYmmdd (file names).
date_format = "%d-%m-%Y"
today = datetime.date.today()
todayf = today.strftime(date_format)
old_date_format = "%Y%m%d"
todayoldf = today.strftime(old_date_format)
todaystring = str(todayf)
todaystringold = str(todayoldf)
# NOTE(review): the computed "today" stamp is immediately overridden with a
# hard-coded date -- confirm this debug pin is intentional.
todaystringold = '20120503'
filedate = '20120503'
dateToCheck = todaystringold
prevdate = todaystring
month = dateToCheck[0:6]  # YYYYmm prefix selects the month folder
sfmonth = sfarea + month + '/'
###### dates set here #############################
from datetime import timedelta
from datetime import datetime
##########################
##invoiceharddate = todaystring
##invoice_sequence = '.' + periodstart + '.01'
##specialperiodend = '01-08-2012'
##
##paybyDAYS = 20
#### set quarterly monthly etc here with ndays ##
##ndays = numbermonths * 365/12
##
##pstart = datetime.strptime(periodstart, date_format)
##pendpartial = datetime.strptime(specialperiodend, date_format)
##
##cycleend = (pstart + timedelta(days=ndays)).strftime(date_format)
###payby_date = periodstart + 20
##payby_date = (pstart + timedelta(days=paybyDAYS)).strftime(date_format)
##delta = pendpartial - pstart
##partialmth = ('%3.1f' %((delta.days / 30)))
##print periodstart, numbermonths, ndays, cycleend, partialmth, payby_date
##periodend = cycleend
# Re-import of the module: restores the name 'datetime' to the module object
# after 'from datetime import datetime' above rebound it to the class.
import datetime
#################################
# Input CSVs exported from Salesforce for the pinned date.
filedate = dateToCheck
sfdata = path + 'DATA/SFDATA/' + month + '/' + filedate + '/' + filedate + '.sf.'
accountfile = open(sfdata + 'Accounts.csv', 'r')
assetfile = open(sfdata + 'assets.csv', 'r')
productfile = open(sfdata + 'products.csv', 'r')
contactsfile = open(sfdata + 'contacts.csv', 'r')
###############################################
def csvASSETSToLines():
    """
    Slurp every row of the module-level assets CSV handle into a list of
    row-lists, closing the file afterwards, and return the rows.
    """
    rows = [record for record in csv.reader(assetfile)]
    assetfile.close()
    return rows
# Load all asset rows once; keep the count for the control line at the bottom.
assetlines = csvASSETSToLines ()
totalines = str(len(assetlines))
#############################
# NOTE(review): the commented-out per-account loop below is the only place
# where count/total/totalClasses/puilevels/etc. were initialised -- the live
# loop further down still uses those names. Confirm intent before enabling.
##actcount = 0
##pui_tuples = []
##for line in accountlines:
## actcount += 1
## if actcount < 100:
## count = 0
## status = line[35]
## territory = line[34]
## sagecode = line[95]
## currency = line[23]
## nameacct = line[3]
## acctid = line[0]
## billrule = line[115]
## status = line[35]
## billingcycle = line[103]
## payabledate = line[124]
## currencyBillable = line[23]
## outstandings = line[117]
## slabillrule = line[123]
## ItemRate = 1
## ##########################
## assetlines = []
## for row in assetlinesCC:
## if row[2] == acctid :
## assetlines.append( row )
## totalines = str(len(assetlines))
## #################################
## total = rate = totalClasses = assetclasses = totalProdAssets = 0
## puilevels ={}
## totalClasses = 0
#################################
# Walk every asset row and accumulate the billing / asset-detail HTML rows.
# NOTE(review): indentation in this block was reconstructed during review --
# the nesting (especially where billinglines/adetaillines accumulate) should
# be confirmed against the original file. Several names used here (count,
# slabillrule, total, totalClasses, totalProdAssets, numbermonths, puilevels,
# contactlines, productlines) are only defined in the commented-out section
# above, so this block raises NameError as written.
for aline in assetlines:
    # Column indices follow the Salesforce assets export layout.
    AssetProductID = aline[3]
    count = count + 1
    assetstatus = aline[17]
    assetcontact = aline[1]
    assetName = aline[12]
    #################################
    machineid = aline[20]
    expiration = aline[21]
    exchanges = aline[27]
    license = aline[42]  # NOTE(review): shadows the `license` builtin
    assetrole = aline[41]
    name = aline[12]
    proddesc = aline[19]
    #product = aline[69] + proddesc
    startdate = aline[135]
    enddate = aline[136]
    rate = assetclasses = 0
    if aline[134] != '':
        rate = float(aline[134])
    if aline[62] != '':
        assetclasses = float(aline[62])
    # Resolve the product name from the products export.
    for bline in productlines:
        ProductProductID = bline[0]
        if AssetProductID == ProductProductID:
            AssetProductName = bline[1]
    # Only live ("Production") assets are billed.
    if assetstatus == 'Production':
        for contactline in contactlines:
            #print contactline[0], assetcontact, contactline[7]
            if contactline[0] == assetcontact:
                contactname = contactline[7]
                lastmodname = contactline[34]
                #print contactline[0], assetcontact, contactline[7], lastmodname
        totalprice = rate * numbermonths
        total += totalprice
        totalClasses = float(assetclasses) + totalClasses
        totalProdAssets += 1
        #totalClasses = 0
        empty = ' '
        if 'Classic_PUI' in slabillrule:
            # PUI (per-unit) billing: collect rate/break levels for the
            # scale table printed later instead of a normal billing row.
            puirate = rate
            puibreak = aline[138]
            bla = 'bla'
            AssetProductName = ' PUI Scale'
            if puirate > 1:
                #puilevels += [str(puirate) + ' ' + str(puibreak)]
                puilevels[puirate] = puibreak
                #pui_tuples.append(puirate, puibreak)
                format = "<tr><td>%-20s </td><td>%-20s </td><td>%-8s </td><td><div align=\"center\">%6d </td></tr>\n"
                adetailline = (format % (contactname, assetName, periodend, assetclasses))
            else:
                bla = bla
        else:
            # Standard billing: one priced row plus a description row.
            format = "<tr><td>%-20s </td><td>%-20s </td><td>%-8s </td><td>%-8s </td><td><div align=\"center\">%6d </td><td><div align=\"right\">%8.2f </td><td><div align=\"right\">%8.2f</td></tr>\n"
            billingline = (format % (AssetProductName, contactname, periodstart, periodend, numbermonths, rate, totalprice))
            format = "<tr><td>%-20s </td><td>%-20s </td><td>%-8s </td><td><div align=\"center\">%6d </td></tr>\n"
            adetailline = (format % (contactname, assetName, periodend, assetclasses))
            format = "<tr><td>%-20s </td><td>%-20s </td><td>%-8s </td><td>%-8s </td><td><div align=\"center\">%6s </td><td><div align=\"right\">%8s </td><td><div align=\"right\">%8s</td></tr>\n"
            billingline2 = (format % (proddesc, empty, empty, empty, empty, empty, empty))
        ### pui billinglines ####
        #billingline = (format % (AssetProductName, contactname, startdate, enddate, assetclasses, rate, totalprice))
        billinglines = billinglines + billingline +billingline2
        adetaillines = adetaillines + adetailline
print adetaillines
##### html blocks n tags ##############
# Reusable HTML snippets for composing the invoice letter.
logofile = '<div align=\"right\"><img src=\"logoactant.JPG\" alt=\"some_text\"/></div align=\"right\">'
#s2 = '\n\n'
s1 = '<br />\n'
s2 = '<br /><br />\n\n'
#s1 = '\n'
starthtml = ' <!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">\n <html lang=\"EN\" dir=\"ltr\">'
endhtml = ' </html>'
printa = '<p>'
printa_center= '<p align="center">'
printb = '</p>'
bolda = '<strong>'
boldb = '</strong>'
tablea = '<table width=\"730\" border=\"0\" cellspacing=\"0\" cellpadding=\"0\" frame=\"box\"> '
tablegraya = '<table width=\"730\" border=\"0\" cellspacing=\"0\" cellpadding=\"0\" frame=\"box\" bgcolor=\"#cccccc\"> '
tableacenter = '<table width=\"300\" border=\"0\" cellspacing=\"0\" cellpadding=\"0\" frame=\"box\" align=\"center\"> '
tableb = '</table>'
rowa = '<tr>'
rowb = '</tr>'
cola = '<td>'
colb = '</td>'
colab = colb + cola
tablenice = '<table="void" cellspacing="2" cellpadding="2" width="300" align="center">'
centercella = '<tr><td><i><strong><font face="arial, helvetica, sans-serif"></font><font size="4"><div align="center">'
centersmallcella = '<tr><td><i><strong><font face="arial, helvetica, sans-serif"></font><font size="1"><div align="center">'
centercellb = '</div></font></i></strong></td></tr>'
###############
######## create header in a table form ######
#tablenice = '<table border="2" cellspacing="2" cellpadding="2" width="300" bgcolor="#cc9999" align="center">'
# NOTE(review): hname/hstreet/hzip/hcountry/hvat are not defined in this
# chunk -- assumed to come from an earlier section; confirm.
headerEU = tablenice
for litem in [hname, hstreet, hzip, hcountry]:
    headerEU = headerEU + centercella
    headerEU = headerEU + litem
    headerEU = headerEU + centercellb
headerEU = headerEU + centersmallcella + hvat
headerEU = headerEU + centercellb
headerEU = headerEU + tableb
#################################
# NOTE(review): `line` below is a leftover from the commented-out account
# loop; as written this raises NameError. Same for invoice_sequence,
# payby_date, sagecode, nameacct, territory, currencyBillable.
custaddrblock = s1 + line[3] + s1 + line[7] + s1 + line[8] + s1+ line[9] + line[10] + ' ' + line[11] + s2
contactblock = 'CONTACT: ' + line[105] + s1 + 'email: ' + line[106] + s1
invoiceperiod = 'Invoice Period: ' + periodstart + ' to ' + periodend + s1
invoiceDateBlock = 'Invoice Number: ' + invoice_sequence + s1 + 'Invoice Date: ' + invoiceharddate
payableBlock = 'Invoice Due Date: ' + payby_date
customerBlock = 'Customer code: ' + line[95] + s1
customerBlock = s1  # NOTE(review): immediately overwrites the line above
controlline = 'Controlline' + sagecode + 'totalbill= ' + str(total) + ' ' + nameacct + ' ' + territory + ' |total assetlines ==' + totalines + ' |rundate = ' + filedate
payus_string = 'We kindly ask you to pay the oustanding balance in ' + bolda + currencyBillable +boldb + ' in full by:'
#############
#formathead = "%-20s %-20s %-8s %-8s %6s %8s %8s\n<br />%s\n<br />%s \n"
#formathead = "<tr><td>%-20s </td><td>%-20s </td><td>%-8s </td><td>%-8s </td><td>%6s </td><td>%10s </td><td>%10s</td></tr>\n"
formathead = "<b><tr bgcolor=\"grey\"><td><b>%-20s </b></td><td><b>%-20s </b></td><td><b><div align=\"center\">%-8s </div></b></td><td><b>%-8s </b></td><td><b>%6s </b></td><td><b><div align=\"center\">%10s </b></td><td><b><div align=\"right\">%10s</b></td></tr></b>\n"
LineItemHeader = (formathead % ('Item/Descript.','Username','Billing','Period','Months','Rate', 'SubTotl'))
#############
#tformat = "%-20s %-20s %-8s %-8s %6d %-8s %8.2f\n"
tformat = "<tr><td><b>%-20s </b></td><td>%-20s </td><td>%-8s </td><td>%-8s </td><td>%6s </td><td>%10s </td><td><b><div align=\"right\">%8.2f<b></td></tr>\n"
from operator import itemgetter, attrgetter
#print sorted(pui_tuples, key=itemgetter(1),reverse=True)
# Debug dump of the collected PUI rate/break levels.
print sorted(puilevels, reverse=True)
print puilevels.values()
# NOTE(review): loop body reconstructed; confirm which prints sat inside it.
for puir in puilevels:
    print puilevels[puir]
    print totalClasses
    stocks = totalClasses - float(puilevels[puir])
    print stocks
    print puir
print totalClasses
totallines = (tformat % ('Sub Total ', ' ', str(totalProdAssets), '', '', '', total))
totallines0VAT = (tformat % ('VAT ', ' ', ' ', '', '', '', 0.00))
totallinesVAT = (tformat % ('Total incl. vat ', ' ', ' ', '', '', '', total))
######################## start letter HTML ######
infolines = s1
allletters = s1
letter = s1 + starthtml ### take out the letter here to create indiv letters
###################
# Assemble the final HTML letter: header, address, line items, totals, footer.
letter = letter + headerEU +s1 + logofile
letter = letter + s2 + custaddrblock + contactblock + invoiceperiod + invoiceDateBlock + customerBlock + payableBlock + s2
#letter = letter + tablegraya + LineItemHeader + tableb + tablea + billinglines + s1 + tableb
letter = letter + tablea + LineItemHeader + billinglines + s1 + totallines + s1 + totallines0VAT + s1 + totallinesVAT + tableb
letter = letter + s1
letter = letter + s1 + payus_string + s1 + payby_date + s1
letter = letter + printa + bolda + str(footerfile.read()) + boldb + printb + s1
letter = letter + s1 + endhtml
#print letter
#### open file to write to #####
outfile.write('\n' + letter)
footerfile.close()
outfile.close()
infolist.close() ### need to remove these closers when createing indiv letters...
# Sanity check of date parsing/comparison (benchmark date vs today).
dateToCheck = '20120529'
benchmark = datetime.datetime.strptime(dateToCheck,"%Y%m%d")
print benchmark.date()
print datetime.date.today()
#if benchmark.date() > datetime.date.today() :
#print "later"
| [
"[email protected]"
] | |
c4833fd78cb4f1385717b3b9920ad922d0188f62 | b6b30fb06124883b074144c419b43d9182efcdff | /CV/optical_flow.py | 25910a23c86c039d5993508aecf7a546dac33369 | [] | no_license | JohnnySunkel/BlueSky | da9f5107034289bfbdd3ba40458f9b9bd8d01a13 | 5a20eba9ef7509a5a7b7af86e7be848242e1a72f | refs/heads/master | 2021-07-07T09:57:37.256950 | 2020-09-02T23:06:46 | 2020-09-02T23:06:46 | 166,883,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,754 | py | import cv2
import numpy as np
# Define a function to track the object
def start_tracking():
    """
    Track feature points from the webcam using pyramidal Lucas-Kanade
    optical flow (forward/backward consistency check), drawing the tracked
    paths on screen until the user presses Esc.

    NOTE(review): indentation reconstructed during review (it follows the
    standard OpenCV lk_track sample); confirm against the original file.
    """
    # Initialize the video capture object
    cap = cv2.VideoCapture(0)
    # Define the scaling factor for the frames
    scaling_factor = 0.5
    # Number of frames to track
    num_frames_to_track = 5
    # Number of frames to skip
    num_frames_jump = 2
    # Initialize variables
    tracking_paths = []
    frame_index = 0
    # Define tracking parameters
    tracking_params = dict(winSize = (11, 11), maxLevel = 2,
                           criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                                       10, 0.03))
    # Iterate until the user presses the 'Esc' key
    while True:
        # Capture the current frame
        _, frame = cap.read()
        # Resize the frame
        frame = cv2.resize(frame, None, fx = scaling_factor,
                           fy = scaling_factor,
                           interpolation = cv2.INTER_AREA)
        # Convert to grayscale
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Create a copy of the frame
        output_img = frame.copy()
        if len(tracking_paths) > 0:
            # Get images (prev_gray was stored at the end of the last pass)
            prev_img, current_img = prev_gray, frame_gray
            # Organize the feature points
            feature_points_0 = np.float32([tp[-1] for tp in \
                tracking_paths]).reshape(-1, 1, 2)
            # Compute optical flow
            feature_points_1, _, _ = cv2.calcOpticalFlowPyrLK(
                prev_img, current_img, feature_points_0,
                None, **tracking_params)
            # Compute reverse optical flow
            feature_points_0_rev, _, _ = cv2.calcOpticalFlowPyrLK(
                current_img, prev_img, feature_points_1,
                None, **tracking_params)
            # Compute the difference between forward and
            # reverse optical flow
            diff_feature_points = abs(feature_points_0 - \
                feature_points_0_rev).reshape(-1, 2).max(-1)
            # Extract the good feature points (round-trip error < 1 px)
            good_points = diff_feature_points < 1
            # Initialize the variable for the new tracking paths
            new_tracking_paths = []
            # Iterate through all the good feature points
            for tp, (x, y), good_points_flag in zip(tracking_paths,
                    feature_points_1.reshape(-1, 2), good_points):
                # If the flag is not True, then continue
                if not good_points_flag:
                    continue
                # Append the x and y coordinates and check if
                # its length greater than the threshold
                tp.append((x, y))
                if len(tp) > num_frames_to_track:
                    del tp[0]
                new_tracking_paths.append(tp)
                # Draw a circle around the feature points
                cv2.circle(output_img, (x, y), 3, (0, 255, 0), -1)
            # Update the tracking paths
            tracking_paths = new_tracking_paths
            # Draw lines
            cv2.polylines(output_img, [np.int32(tp) for tp in \
                tracking_paths], False, (0, 150, 0))
        # Go into this 'if' condition after skipping the
        # right number of frames
        if not frame_index % num_frames_jump:
            # Create a mask and draw the circles
            mask = np.zeros_like(frame_gray)
            mask[:] = 255
            for x, y in [np.int32(tp[-1]) for tp in tracking_paths]:
                cv2.circle(mask, (x, y), 6, 0, -1)
            # Compute good features to track
            feature_points = cv2.goodFeaturesToTrack(frame_gray,
                mask = mask, maxCorners = 500, qualityLevel = 0.3,
                minDistance = 7, blockSize = 7)
            # Check if the feature points exist. If so, then append
            # them to the tracking paths
            if feature_points is not None:
                for x, y in np.float32(feature_points).reshape(-1, 2):
                    tracking_paths.append([(x, y)])
        # Update variables
        frame_index += 1
        prev_gray = frame_gray
        # Display output
        cv2.imshow('Optical Flow', output_img)
        # Check if the user pressed the 'Esc' key
        c = cv2.waitKey(1)
        if c == 27:
            break
if __name__ == '__main__':
    # Start the tracker (blocks until the user presses Esc)
    start_tracking()
    # Close all windows
    cv2.destroyAllWindows()
| [
"[email protected]"
] | |
a0f701624c8c135034af750da087fe9d8ef8866d | 54985d2bc6649272d877bfb10df0572a97fb934e | /tests/record_expert_demonstrations.py | 9fcc1349c43f65519a88ba2dd37a699007a2a190 | [
"MIT"
] | permissive | panlybero/gym-novel-gridworlds | d28a9b282c37327d98bfb19e87a9b1d35bf0aae9 | b375cb674337b6cd3c33b165d323cf98b46095e3 | refs/heads/master | 2022-12-04T23:48:10.462777 | 2020-07-19T23:15:12 | 2020-07-19T23:15:12 | 283,211,610 | 0 | 0 | MIT | 2020-07-28T12:52:44 | 2020-07-28T12:52:43 | null | UTF-8 | Python | false | false | 1,929 | py | import time
import gym
import gym_novel_gridworlds
import keyboard
from stable_baselines import DQN
from stable_baselines.gail import generate_expert_traj
from constant import ENV_KEY
"""
Generate Expert Trajectories from a model
"""
# env_id = 'NovelGridworld-v2'
# model = DQN('MlpPolicy', env_id, verbose=1)
#
# # Train a DQN agent for 1e5 timesteps and generate 10 trajectories
# # data will be saved in a numpy archive named `expert_+env_id.npz`
# generate_expert_traj(model, 'expert_'+env_id, n_timesteps=int(10), n_episodes=5)
"""
Generate Expert Trajectories from a human expert player
"""
env_id = 'NovelGridworld-v5'
env = gym.make(env_id)
KEY_ACTION_DICT = ENV_KEY[env_id]
def print_play_keys(action_str):
    """
    Show the player which keyboard key maps to which environment action,
    using the module-level KEY_ACTION_DICT mapping.
    """
    print("Press a key to play: ")
    for pressed_key, action_index in KEY_ACTION_DICT.items():
        print(pressed_key, ": ", action_str[action_index])
def human_expert(_obs):
    """
    Random agent. It samples actions randomly
    from the action space of the environment.
    :param _obs: (np.ndarray) Current observation
    :return: (np.ndarray) action taken by the expert

    NOTE(review): despite the docstring, this actually reads the keyboard
    and returns the action chosen by a human player; indentation below was
    reconstructed during review.
    """
    while True:
        env.render()
        print_play_keys(env.action_str)
        time.sleep(0.2)
        key_pressed = keyboard.read_key()
        # return index of action if valid key is pressed
        if key_pressed:
            if key_pressed in KEY_ACTION_DICT:
                return KEY_ACTION_DICT[key_pressed]
            elif key_pressed == "esc":
                # NOTE(review): breaking here makes the function return
                # None, which generate_expert_traj may not handle; confirm.
                print("You pressed esc, exiting!!")
                break
            else:
                print("You pressed wrong key. Press Esc key to exit, OR:")
# Data will be saved in a numpy archive named `expert_+env_id.npz`
# when using something different than an RL expert,
# you must pass the environment object explicitly
# Record 50 human-played episodes and save them as an expert dataset.
env.render()
episodes = 50
generate_expert_traj(human_expert, 'expert_' + env_id+'_'+str(episodes)+'demos', env, n_episodes=episodes)
| [
"[email protected]"
] | |
1d9396075c10dcb708d2f88d7167d560926b1b1b | ae101caf03a373e3ad6a2ea0fddd99038574e73f | /Original.__init__.py | 0b4c2cc912389abe4e056e8a1d7be94cc1fcf062 | [] | no_license | internetimagery/todo | df9be99c34761191c4322eb315681fcd4e9a1cf8 | ed09c14f231da5613ec86a2a6ff6683d9a663eb1 | refs/heads/master | 2020-05-30T22:48:07.645472 | 2015-12-29T10:44:40 | 2015-12-29T10:44:40 | 34,398,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,390 | py | # coding: utf-8
# Todo script for Maya
# Created by Jason Dixon.
# 02.05.15
# https://github.com/internetimagery/todo
import maya.utils as utils
import maya.cmds as cmds
import webbrowser
import subprocess
import threading
import traceback
import urlparse
import difflib
import random
import addons
import base64
import todo
import json
import time
import sys
import os
import re
def unique(item):
    """
    Factory wrapper that keeps at most one live instance of *item* at a
    time. A fresh instance is built only when none is cached yet, or when
    the cached one is no longer referenced anywhere outside the cache
    (detected via sys.getrefcount); otherwise the cached instance is
    returned unchanged.
    """
    cache = {}
    def one_at_a_time(*args, **kwargs):
        # refcount < 3 means: only the cache dict plus the getrefcount
        # argument hold the instance, i.e. nobody else is using it.
        stale = item in cache and sys.getrefcount(cache[item]) < 3
        if stale or item not in cache:
            cache[item] = item(*args, **kwargs)
        return cache[item]
    return one_at_a_time
def embedImage():
    """
    Pick a random PNG from the bundled "images" folder next to this module
    and return a snippet of Maya UI code that embeds it (base64 data URI).
    Falls back to a stock Maya icon snippet when no images are available.

    Fix: previously os.listdir raised OSError when the "images" folder was
    missing; we now guard with os.path.isdir and use the fallback instead.
    """
    path = os.path.join(os.path.dirname(__file__), "images")
    images = []
    if os.path.isdir(path):  # missing folder -> fall through to the stock icon
        images = [os.path.join(path, f) for f in os.listdir(path) if f.endswith(".png")]
    if images:
        image = random.choice(images)
        with open(image, "rb") as f:
            image = "<img src=\\\"data:image/png;base64,%s\\\">" % base64.b64encode(f.read())
        return "cmds.text(hl=True, l=\"%s\", h=100, w=100)" % image
    else:
        return "cmds.iconTextStaticLabel(image=\"envChrome.svg\", h=100, w=100) # file.svg looks nice too..."
def SaveAs():
    """
    Prompt the user for a save location with Maya's native file dialog.
    Returns a list containing the chosen path, or None if cancelled.
    """
    return cmds.fileDialog2(ds=2, sff="Maya ASCII", ff="Maya Files (*.ma *.mb);;Maya ASCII (*.ma);;Maya Binary (*.mb);;")
class FileInfo(dict):
    """
    Dict-like view of Maya's per-scene fileInfo storage. Reads load the
    current scene values; writes/deletes persist straight to the scene via
    cmds.fileInfo and mirror into the in-memory dict.
    """
    def __init__(s):
        # cmds.fileInfo(q=True) returns a flat [key, value, key, value, ...]
        # list; pair it up and unescape the stored values.
        s.update(dict((k, v.decode("unicode_escape")) for k, v in (lambda x: zip(x[::2], x[1::2]))(cmds.fileInfo(q=True))))
    def __setitem__(s, k, v):
        # Write through to the scene first, then cache locally.
        cmds.fileInfo(k, v)
        super(FileInfo, s).__setitem__(k, v)
    def __delitem__(s, k):
        cmds.fileInfo(rm=k)
        super(FileInfo, s).__delitem__(k)
def FileOpen(path):
    """
    Open *path*. Maya scenes (.ma/.mb) are opened in Maya -- prompting to
    save unsaved changes first; anything else is handed to the OS default
    application via universalOpen.

    NOTE(review): indentation reconstructed during review; confirm nesting
    against the original file.
    """
    def savePrompt():
        # Build the "save your changes?" layoutDialog contents.
        p = cmds.setParent(q=True)
        cmds.columnLayout(adj=True, p=p)
        cmds.rowLayout(nc=2)
        cmds.columnLayout()
        eval(embedImage())  # embedImage returns a cmds.* snippet to evaluate
        cmds.setParent("..")
        cmds.columnLayout(adj=True)
        cmds.text(al="left", hl=True, l="""
<h3>There are unsaved changes in your scene.</h3>
<div>Would you like to save before leaving?</div>""", h=70)
        cmds.rowLayout(nc=3, h=30)
        cmds.button(l="Yes please!".center(20), c="cmds.layoutDialog(dismiss=\"yes\")")
        cmds.button(l="No Thanks".center(20), c="cmds.layoutDialog(dismiss=\"no\")")
        cmds.button(l="Cancel".center(20), c="cmds.layoutDialog(dismiss=\"cancel\")")
        cmds.setParent("..")
        cmds.setParent("..")
    if os.path.isfile(path):
        if path[-3:] in [".ma", ".mb"]: # Make a special exception for maya files.
            if cmds.file(mf=True, q=True): # File is modified. Need to make some changes.
                answer = cmds.layoutDialog(ui=savePrompt, t="Excuse me one moment...")
                if answer == "yes":
                    # Untitled scene: ask where to save it first.
                    if not cmds.file(q=True, sn=True):
                        loc = SaveAs()
                        if loc:
                            cmds.file(rn=loc[0])
                        else:
                            return  # user cancelled the save dialog
                    cmds.file(save=True)
                elif answer == "no":
                    pass  # discard changes
                else:
                    return  # cancelled: do not open the new file
            cmds.file(path, o=True, f=True)
        else:
            universalOpen(path)
def universalOpen(command):
    """
    Launch *command* (a file path or URL) with the platform's default
    handler: os.startfile on Windows, then the usual *nix opener commands,
    and finally the webbrowser module as a last resort.
    """
    try:
        os.startfile(command) # Open file on windows
        return
    except AttributeError:
        pass # Not on Windows; fall through to the *nix openers
    for opener in (["open"], ["xdg-open"], ["gnome-open"], ["kde-open"], ["exo-open"]):
        try:
            return subprocess.Popen(opener + [command])
        except OSError:
            continue
    webbrowser.open(command)
class Settings(object):
    """
    Script settings persisted with the Maya scene: stored as a JSON blob
    in fileInfo under the "TODO_SETTINGS" key.
    """
    def __init__(s):
        s.info = FileInfo()
        s.update = None # command to update on settings change (fired deferred)
        try:
            s.data = json.loads(s.info["TODO_SETTINGS"])
        except (ValueError, KeyError):
            # Missing or corrupt settings blob -> start with empty settings.
            s.data = {}
    def get(s, k, d=None):
        # Dict-style lookup with a default.
        return s.data.get(k, d)
    def set(s, k, v):
        # Persist immediately, then notify any registered update callback.
        s.data[k] = v
        s.info["TODO_SETTINGS"] = json.dumps(s.data)
        if s.update:
            utils.executeDeferred(s.update)
class Popup(object):
    """
    Context manager that plants a one-shot scriptNode popup in the scene:
    the next time the scene is opened, a "Welcome Back" dialog shows
    *message*, then the scriptNode cleans itself up. Leaving the context
    removes the scriptNode and its fileInfo flag again.
    """
    def __init__(s, message):
        s.uid = "TODO_POPUP_%s" % int((time.time() * 100)) # Generate unique ID
        s.message = message
    def stringify(s, data):
        # Wrap python source in a MEL python("...") call, escaping for MEL.
        return "python(\"%s\");" % data.replace("\\", "\\\\").replace("\"", "\\\"").replace("\n", r"\n")
    def __enter__(s):
        # NOTE(review): the embedded script below appears to have lost its
        # internal indentation in this copy of the file; it is preserved
        # verbatim here (it is runtime data) but should be checked against
        # the original source.
        s.job = cmds.scriptNode(n=s.uid, st=2, bs="")
        s.code = """
import maya.cmds as cmds
uid = "%(uid)s"
job = "%(job)s"
if cmds.fileInfo(uid, q=True) == ["ok"]:
def makepopup():
p = cmds.setParent(q=True)
cmds.rowLayout(nc=2, ad2=2, p=p)
cmds.columnLayout()
%(image)s
cmds.setParent("..")
cmds.columnLayout(adj=True)
cmds.text(al="left", hl=True, l=\"\"\"%(message)s\"\"\", h=70)
cmds.button(l="Thanks", c="cmds.layoutDialog(dismiss=\\"gone\\")", h=30)
cmds.setParent("..")
cmds.layoutDialog(ui=makepopup, t="Welcome Back")
if cmds.objExists(job):
cmds.delete(job)
cmds.fileInfo(rm=uid)
""" % {"uid": s.uid, "job": s.job, "image": embedImage(), "message": s.message}
        cmds.scriptNode(s.job, e=True, bs=s.stringify(s.code))
        cmds.fileInfo(s.uid, "ok")
        return s
    def __exit__(s, err, val, trace):
        """
        Remove those things from the scene
        """
        cmds.fileInfo(rm=s.uid)
        if cmds.objExists(s.job):
            cmds.delete(s.job)
class safeOut(object):
    """
    Context manager that redirects sys.stdout so writes from worker
    threads are marshalled onto Maya's main thread (executeDeferred).
    On exit, the original stdout is restored and any exception is printed
    instead of propagating.
    """
    def __init__(s):
        s.oldOut = sys.stdout
        sys.stdout = s
    def write(s, *t):
        t = "".join(t)
        if len(t.rstrip()):
            # Defer actual writing to the main thread; append a newline.
            utils.executeDeferred(lambda: s.oldOut.write("%s\n" % t))
    def __enter__(s):
        return s
    def __exit__(s, errType, errVal, trace):
        sys.stdout = s.oldOut
        if errType:
            s.write("Uh oh... there was a problem. :(")
            s.write("%s :: %s" % (errType.__name__, errVal))
            for t in traceback.format_tb(trace):
                s.write(t)
        # Returning True swallows the exception after reporting it.
        return True
class safeCMDS(object):
    """
    Proxy around maya.cmds that marshals every call onto Maya's main
    thread (executeInMainThreadWithResult), making cmds safe to use from
    worker threads. Unknown attribute names raise AttributeError.
    """
    def __getattr__(s, n):
        if not hasattr(cmds, n):
            raise AttributeError
        target = getattr(cmds, n)
        def main_thread_call(*args, **kwargs):
            return utils.executeInMainThreadWithResult(lambda: target(*args, **kwargs))
        return main_thread_call
class TimeSlider(object):
    """
    Thin convenience wrapper around Maya's timeslider commands.
    """
    def frame(s, frame):
        # Jump the playhead to the given frame.
        cmds.currentTime(frame)
    def range(s, start, end):
        """
        Set the visible playback range to [start, end].
        """
        cmds.playbackOptions(e=True, min=start, max=end)
class Todo(object):
    """
    Single Todo
    """
    def __init__(s, name):
        # Label text of this todo item.
        s.name = name
    """
    Add to the GUI
    """
    def attach(s, parent):
        # Build the settings button + "new todo" text field under *parent*.
        # NOTE(review): this method references s._buildSettings, s.createTodo
        # and a free name `clear`, none of which Todo defines -- it appears
        # to be copied from MainWindow._buildTodo and looks unused/dead;
        # confirm before relying on it.
        cmds.columnLayout(adj=True, p=parent)
        cmds.iconTextButton(
            h=30,
            ann="Click to view the Todo scripts settings. Settings are saved with the Maya scene, so you will need to set them for each scene.",
            image="attributes.png",
            label="Settings ->",
            style="iconAndTextHorizontal",
            c=s._buildSettings)
        cmds.separator()
        text = cmds.textField(
            aie=True,
            ed=True,
            h=30,
            ann="Type a task into the box.",
            ec=lambda x: not s.createTodo(x) or clear())
        cmds.button(
            label="Create a new TODO",
            h=20,
            ann="Type a task into the box.",
            c=lambda x: not s.createTodo(cmds.textField(text, q=True, tx=True)) or clear())
        cmds.setParent("..")
        pass
#@unique
class MainWindow(object):
"""
Main GUI Window
"""
def __init__(s):
s.registerHooks() # Load our hooks
if cmds.dockControl("TODO_WINDOW", ex=True):
cmds.deleteUI("TODO_WINDOW")
s.fireHook("app.end")
print "Window exists. Closing and opening a new window."
s.page = "" # Page we are on.
s._refresh() # Initialize saved data
s.fireHook("app.start")
s.regex = {} # Compiled regexes
title = "Todo"
try:
with open(os.path.join(os.path.dirname(__file__), "quotes.json"), "r") as f: # Motivation!
title = random.choice(json.load(f))
except (IOError, ValueError):
print "No inspirational quotes loaded."
window = cmds.window(title=title, rtf=True)
s.container = cmds.columnLayout(adj=True)
s.wrapper = ""
allowed_areas = ['right', 'left']
s.dock = cmds.dockControl("TODO_WINDOW", a='left', content=window, aa=allowed_areas, fl=True, l=title, fcc=s.moveDock, vcc=s.closeDock)
s._buildTodo()
if s.location == 'float':
cmds.dockControl(s.dock, e=True, fl=True)
elif s.location in allowed_areas:
cmds.dockControl(s.dock, e=True, a=s.location, fl=False)
cmds.scriptJob(e=["PostSceneRead", s._refresh], p=s.dock)
cmds.scriptJob(e=["NewSceneOpened", s._refresh], p=s.dock)
def _clear(s):
"""
Clear the layout ready to be refreshed
"""
if cmds.layout(s.wrapper, ex=True):
cmds.deleteUI(s.wrapper)
s.wrapper = cmds.columnLayout(adj=True, p=s.container)
def _refresh(s):
"""
Refresh the data
"""
s.data = FileInfo() # scene stored data
s.settings = Settings() # Settings wrapper for data
if s.page == "todo":
s._buildTodo()
if s.page == "settings":
s._buildSettings()
def _buildTodo(s, *args):
"""
Load up the TODO layout
"""
s.page = "todo"
s._clear()
cmds.columnLayout(adj=True)
cmds.columnLayout(adj=True)
cmds.iconTextButton(
h=30,
ann="Click to view the Todo scripts settings. Settings are saved with the Maya scene, so you will need to set them for each scene.",
image="attributes.png",
label="Settings ->",
style="iconAndTextHorizontal",
c=s._buildSettings)
cmds.separator()
text = cmds.textField(
aie=True,
ed=True,
h=30,
ann="Type a task into the box.",
ec=lambda x: not s.createTodo(x) or clear())
cmds.button(
label="Create a new TODO",
h=20,
ann="Type a task into the box.",
c=lambda x: not s.createTodo(cmds.textField(text, q=True, tx=True)) or clear())
cmds.setParent("..")
def clear(): # Clear the text field
cmds.textField(text, e=True, tx="")
s.todowrap = cmds.columnLayout(adj=True)
# Todo items in here!
s.todoContainer = ""
cmds.setParent("..")
cmds.setParent(s.wrapper)
s._buidTodoTasks()
def _buidTodoTasks(s):
"""
Refresh the todo task section of the window (fixes bug with lambda never returning)
"""
if cmds.scrollLayout(s.todoContainer, ex=True):
cmds.deleteUI(s.todoContainer)
s.regex["uid"] = s.regex.get("uid", re.compile("^TODO_\d+"))
s.todoContainer = cmds.scrollLayout(bgc=[0.2, 0.2, 0.2], cr=True, p=s.todowrap)
sorter = cmds.columnLayout(adj=True, p=s.todoContainer)
unsort = cmds.columnLayout(adj=True, p=s.todoContainer)
sort_data = {}
def stateChange(section, state): # Save state of sections
s.fireHook("app.changeSection", settings=s._buidTodoTasks)
data = s.settings.get("Todo.SectionState", {})
data[section] = state
s.settings.set("Todo.SectionState", data)
def section(title, state): # Build a section for each piece
title = title.strip()
if title in sort_data:
return sort_data[title]
else:
sort_data[title] = cmds.frameLayout(l=title, p=sorter, cll=True, cl=state, cc=lambda: stateChange(title, True), ec=lambda: stateChange(title, False))
return sort_data[title]
s.fireHook("app.buildList")
currState = s.settings.get("Todo.SectionState", {})
state = {}
for v in sorted([s._parseTodo(s.data[k], uid=k) for k in s.data.keys() if k and s.regex["uid"].match(k)], key=lambda x: x["label"]):
if v["token"] or v["hashtag"]:
if v["token"]:
state[v["token"]] = currState[v["token"]] if v["token"] in currState else False
s.addTodo(v, section(v["token"], state[v["token"]]))
if v["hashtag"]:
for h in v["hashtag"]:
state[h] = currState[h] if h in currState else False
s.addTodo(v, section(h, state[h]))
else: # Unsorted todos
s.addTodo(v, unsort)
s.settings.set("Todo.SectionState", state)
def _buildSettings(s, *args):
"""
Load the settings page
"""
s.page = "settings"
s._clear()
cmds.columnLayout(adj=True, p=s.wrapper)
cmds.iconTextButton(
h=30,
ann="Click to return to your Todo list.",
image="revealSelected.png",
label="<- Todo",
style="iconAndTextHorizontal",
c=s._buildTodo)
cmds.separator()
cmds.text(label="Settings are unique to each Maya scene.", h=50)
frame = cmds.frameLayout(l="Archive options:")
# Settings module
s.fireHook("settings.archive", settings=s._buildSettings, callback=lambda x: cmds.setParent(frame))
cmds.setParent("..")
cmds.frameLayout(l="Feedback:")
cmds.iconTextButton(
image="party.png",
ann="Have any feedback? Good or bad. Love to hear it! :)",
l="Leave Feedback...",
style="iconAndTextHorizontal",
c=lambda: universalOpen("mailto:[email protected]?subject=Todo Feedback")) # TODO errors when no folder is chosen because of 0 index
def _parseTodo(s, label, **kwargs):
"""
Parse out metadata from Todo
"""
def build_reg():
reg = "(?P<token>\A\w+(?=:\s))?" # Token
reg += "(?P<hashtag>(?<=#)\s?\w+)?" # Hashtag
reg += "(?P<url>https?://[^\s]+)?" # Url
frr = "(?:(?P<range1>\d+)\s*(?:[^\d\s]|to|and|through)\s*(?P<range2>\d+))" # Frame range
fr = "(?P<frame>\d+)" # Frame
reg += "(?:%s|%s)?" % (frr, fr)
reg += "(?P<file>(?:[a-zA-Z]:|\\.{1,2})?[^\t\r\n:|]+\.\w+)?" # Filename?
return re.compile(reg)
s.regex["label"] = s.regex.get("label", build_reg())
parse = s.regex["label"].finditer(label)
result = kwargs # Add extra additional custom arguments
result["token"] = ""
result["hashtag"] = []
result["url"] = ""
result["file"] = "" # Default
result["frame"] = None
result["framerange"] = []
replace = {} # Make the output nicer by removing certain tags
if parse:
for p in parse:
m = p.groupdict()
if m["token"]: # Match tokens
result["token"] = m["token"]
replace[m["token"] + ":"] = ""
if m["hashtag"]: # Grab all hashtags, avoiding duplicates
if m["hashtag"] not in result["hashtag"]:
result["hashtag"].append(m["hashtag"].strip())
replace["#" + m["hashtag"]] = ""
if m["url"]: # Looking for a website?
result["url"] = m["url"]
replace[m["url"]] = urlparse.urlparse(m["url"]).netloc
if m["range1"] and m["range2"]: # Frame range?
result["framerange"] = sorted([m["range1"], m["range2"]])
if m["frame"]:
result["frame"] = m["frame"]
if m["file"] and not result["file"]:
path = m["file"].split(" ")
scene = os.path.dirname(cmds.file(q=True, sn=True))
refPaths = dict((os.path.basename(f), f) for f in cmds.file(l=True, q=True)[1:]) # Listing of all files
refNames = refPaths.keys()
for i in range(len(path)): # Try figure out if a path is being requested
p = " ".join(path[i:])
closeMatch = difflib.get_close_matches(p, refNames, 1, 0.9) # Fuzzy search filenames
if closeMatch: # Have we found a reference file?
rpath = os.path.realpath(refPaths[closeMatch[0]])
else: # ... or perhaps another file somewhere else on the system?
rpath = os.path.realpath(os.path.join(scene, p))
if os.path.isfile(rpath):
result["file"] = rpath
replace[p] = os.path.basename(p)
result["label"] = label.strip()
if replace:
for r in replace:
result["label"] = result["label"].replace(r, replace[r])
return result
def addTodo(s, todo, parent):
"""
Insert a todo
"""
wrapper = cmds.rowLayout(nc=4, ad4=1, p=parent)
cmds.iconTextButton(
image="fileSave.png",
h=30,
style="iconAndTextHorizontal",
label=todo["label"],
fn="fixedWidthFont",
ann="Click to check off and save.\nTODO: %s" % todo["label"],
c=lambda: s.activateTodo(todo["uid"], wrapper))
if todo["file"]:
cmds.iconTextButton(
image="openScript.png",
style="iconOnly",
w=30,
ann="Open file: %s" % todo["file"],
c=lambda: FileOpen(todo["file"]))
elif todo["url"]:
cmds.iconTextButton(
image="SP_ComputerIcon.png",
style="iconOnly",
w=30,
ann="Open url: %s" % todo["url"],
c=lambda: webbrowser.open(todo["url"], new=2))
elif todo["framerange"]:
cmds.iconTextButton(
image="traxFrameRange.png",
style="iconOnly",
w=30,
ann="Jump to frame range (%s to %s)." % (todo["framerange"][0], todo["framerange"][1]),
c=lambda: TimeSlider().range(todo["framerange"][0], todo["framerange"][1]))
elif todo["frame"] or todo["frame"] is 0: # Extra convenience button
cmds.iconTextButton(
image="centerCurrentTime.png",
style="iconOnly",
w=30,
ann="Go to frame %s." % todo["frame"],
c=lambda: TimeSlider().frame(todo["frame"]))
cmds.iconTextButton(
image="setEdEditMode.png",
style="iconOnly",
w=30,
ann="Edit Todo.",
c=lambda: s.editTodo(todo["uid"], wrapper))
cmds.iconTextButton(
image="removeRenderable.png",
style="iconOnly",
w=30,
ann="Delete Todo without saving.",
c=lambda: s.removeTodo(todo["uid"]))
cmds.setParent("..")
def editTodo(s, uid, gui):
"""
Change a todos information
"""
def update(uid, label):
meta = s._parseTodo(label, uid=uid)
if meta["label"]:
s.data[uid] = label
s.fireHook("todo.edit", meta, faf=True)
s._buidTodoTasks()
else:
cmds.confirmDialog(title="Whoops...", message="You need to add some text for your Todo.")
for ui in cmds.rowLayout(gui, q=True, ca=True):
cmds.deleteUI(ui)
cmds.rowLayout(gui, e=True, nc=2)
text = cmds.textField(p=gui, tx=s.data[uid])
cmds.button(l="Ok", p=gui, c=lambda x: update(uid, cmds.textField(text, q=True, tx=True)))
def createTodo(s, txt):
"""
Create a new Todo
"""
f = cmds.file(q=True, sn=True)
name = "%(name)s_%(stamp)s" % {"name": "TODO", "stamp": int(time.time() * 100)}
meta = s._parseTodo(txt, uid=name)
if meta["label"]:
s.data[name] = txt
s.fireHook("todo.create", meta, faf=True)
s._buidTodoTasks()
return True # Return True to retain input
else:
cmds.confirmDialog(title="Whoops...", message="You need to add some text for your Todo.")
return False
def removeTodo(s, uid):
"""
Remove a Todo
"""
meta = s._parseTodo(s.data[uid], uid=uid)
s.fireHook("todo.delete", meta, faf=True)
del s.data[uid]
s._buidTodoTasks()
    def activateTodo(s, uid, gui):
        """
        Trigger the todo archive process.

        Removes the todo from memory, saves the scene so archive hooks see a
        clean file, then fires "todo.complete" once the save has happened.
        If the save is cancelled or fails, the todo is restored unchanged.
        """
        cmds.rowLayout(gui, e=True, en=False)  # disable the row while archiving
        def performArchive():
            # Fire-and-forget the completion hooks, then collapse the row.
            s.fireHook("todo.complete", todo=tempmeta, faf=True)
            closeTodo()
        def closeTodo():  # Animate todo closed. Fancy.
            if cmds.layout(gui, ex=True):
                height = cmds.layout(gui, q=True, h=True)
                for i in range(20):
                    # Shrink in 5% steps: 1.0, 0.95, ... down to 0.05.
                    i = (100 - i*5) / 100.0
                    cmds.layout(gui, e=True, h=height * i)
                    cmds.refresh()
                    time.sleep(0.01)
            s._buidTodoTasks()  # rebuild the task list without this todo
        temp = s.data[uid]  # hold onto todo data (for rollback on failure)
        tempmeta = s._parseTodo(temp, uid=uid)
        del s.data[uid]  # Remove todo from memory
        if os.path.isfile(cmds.file(q=True, sn=True)):  # Check the scene is not untitled and still exists
            # Run-once job: archive only after the save actually completes.
            process = cmds.scriptJob(e=['SceneSaved', performArchive], ro=True)
            try:
                message = """
                    <div>- This Scene was last saved on <em>%(time)s</em>.</div>
                    <div>- Completing the task: <code>%(todo)s</code></div>
                    <div>- The file <strong>has not been modified since.</strong></div><br>
                    """ % {"time": time.ctime(), "todo": tempmeta["label"]}
                with Popup(message):
                    cmds.file(save=True)  # Save the scene
            except RuntimeError:  # If scene save was canceled or failed. Reset everything
                if cmds.scriptJob(ex=process):
                    cmds.scriptJob(kill=process)
                s.data[uid] = temp  # restore the todo untouched
                s._buidTodoTasks()
        else:
            # Untitled / missing scene: nothing to save, archive immediately.
            performArchive()
def moveDock(s): # Update dock location information
"""
Track dock movement
"""
if cmds.dockControl(s.dock, q=True, fl=True):
s.location = "float"
print "Floating Dock."
else:
area = cmds.dockControl(s.dock, q=True, a=True)
s.location = area
print "Docking %s." % area
def closeDock(s, *loop):
"""
Gracefully close window
"""
visible = cmds.dockControl(s.dock, q=True, vis=True)
if not visible and loop:
cmds.scriptJob(ie=s.closeDock, p=s.dock, ro=True)
elif not visible:
cmds.deleteUI(s.dock, ctl=True)
s.fireHook("app.end")
print "Window closed."
def registerHooks(s):
"""
Grab any hooks
"""
s.hooks = {}
if addons.modules:
for name in addons.modules:
mod = addons.modules[name]
mod.cmds = safeCMDS()
try:
hooks = mod.hooks()
for hook in hooks:
s.hooks[hook] = s.hooks.get(hook, []) + [hooks[hook]]
except (AttributeError, TypeError):
print "Module %s is misisng a \"hooks\" function." % name
    def fireHook(s, hook, todo=None, faf=False, settings=None, callback=None):
        """
        Run every addon callback registered for *hook*.

        hook     -- hook name, e.g. "todo.create" / "todo.complete".
        todo     -- todo metadata passed through to each callback.
        faf      -- fire and forget: run each callback on a daemon thread.
        settings -- callback stored on s.settings for settings changes.
        callback -- invoked with each task's result as it completes.

        Returns a list of results; when faf is True the threads may still
        be appending to it after this function returns.
        """
        def fire(func):
            # Run one hook callback (stdout safely redirected) and forward
            # its result to *callback* if one was given.
            result = None
            with safeOut():
                result = func(mayaFile, todo, s.settings)
            if callback:
                callback(result)
            return result
        result = []
        threads = []
        s.settings.update = settings  # NOTE(review): stores the settings callback on the settings object — confirm Settings API
        if hook in s.hooks:
            path = os.path.realpath(cmds.file(q=True, sn=True))  # Scene name
            # None when the scene is untitled or the file no longer exists.
            mayaFile = os.path.realpath(path) if os.path.isfile(path) else None
            for h in s.hooks[hook]:
                if faf:
                    th = threading.Thread(
                        target=lambda x: result.append(fire(x)),
                        args=(h,))
                    th.daemon = True
                    th.start()
                    threads.append(th)
                else:
                    result.append(fire(h))
            # Deliberately disabled via "and False": joining here crashed Maya.
            if threads and False:  # Block threads? TODO: stop maya from crashing...
                for th in threads:
                    th.join()
        return result
def location():
"""
Window location
"""
def fget(s):
if cmds.optionVar(ex="todo_window_location"):
return cmds.optionVar(q="todo_window_location")
else:
return "float"
def fset(s, value):
cmds.optionVar(sv=["todo_window_location", value])
def fdel(s):
if cmds.optionVar(ex="todo_window_location"):
cmds.optionVar(rm="todo_window_location")
return locals()
location = property(**location())
| [
"[email protected]"
] | |
b8f64295f2228f7b120165c28fa4d3c69a7d8e41 | 31780af7a5558523def1aae5f25df3e0b084be9b | /reg1.py | 71aae85831ae6835718ea909d9d9ae25a68bd9aa | [] | no_license | sevilaybayatli/PYTHS19 | 1796615ff939f2e98ce657feeaa3efd47a2e66c6 | ae0607e215a0d8205475d124c0362c39881e5eda | refs/heads/master | 2020-07-23T16:12:17.922548 | 2020-03-23T22:03:00 | 2020-03-23T22:03:00 | 207,624,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | import re
def checkOfString(string):
    """Return True if *string* contains only ASCII letters, digits and dots.

    An empty string passes (there is no forbidden character to find).
    """
    # Match any character OUTSIDE the whitelist; no match means the
    # whole string is clean. Avoids rebinding the parameter name.
    forbidden = re.compile(r'[^a-zA-Z0-9.]')
    return forbidden.search(string) is None
# Quick demo: one clean sample, one with forbidden characters.
for sample in ('AgfsrWCB12.', '*"q@aQ'):
    print(checkOfString(sample))
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.