ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3–1.04M chars)
---|---|---|
py | 1a2ea122f01cb5680dc97307e94fd9cddb4075fa | '''
Created on 05.01.2014
@author: root
'''
from org.askalon.jlibcloud.compute.wrapperInterfaces.base import StorageVolume as JStorageVolume
from javaimpl.compute.utils import none_check
class StorageVolumeImpl(JStorageVolume):
'''
classdocs
'''
def __init__(self, volume):
'''
Constructor
'''
#keep a reference to access in jython
self.volume = volume
self.obj = volume
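# Each wrapped property below falls back to a neutral default ("" for strings,
# -1 for size, {} for extra) when the underlying libcloud volume lacks the
# attribute or returns None.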
if hasattr(volume, 'uuid'):
self.uuidp = none_check(volume.uuid, "")
else:
self.uuidp = ""
if hasattr(volume, 'id'):
self.idp = none_check(volume.id, "")
else:
self.idp = ""
if hasattr(volume, 'name'):
self.namep = none_check(volume.name, "")
else:
self.namep = ""
if hasattr(volume, 'size'):
self.sizep = none_check(volume.size, -1)
else:
self.sizep = -1
if hasattr(volume, 'extra'):
self.extrap = volume.extra
else:
self.extrap = {}
if hasattr(volume, '__repr__'):
self.reprp = volume.__repr__()
else:
self.reprp = str(volume)
def getUUID(self):
return self.uuidp
def getId(self):
return self.idp
def getName(self):
return self.namep
def getSizeGB(self):
return self.sizep
def getExtra(self):
return self.extrap
def attach(self, node, device=None):
return self.volume.attach(node.node, device)
def detach(self):
return self.volume.detach()
def destroy(self):
return self.volume.destroy()
def listSnapshots(self):
return self.volume.list_snapshots()
def createSnapshot(self, name):
return self.volume.snapshot(name)
def toString(self):
return self.reprp |
py | 1a2ea2af726cdd1dd54a38cdc9d44649d4b224ba | from __future__ import annotations
import copy as _copy
import enum
import logging as _logging
import os
import pathlib
import typing
from dataclasses import dataclass
from datetime import datetime
from inspect import getfullargspec as _getargspec
import six as _six
from flytekit.common import constants as _constants
from flytekit.common import interface as _interface
from flytekit.common import sdk_bases as _sdk_bases
from flytekit.common import utils as _common_utils
from flytekit.common.core.identifier import WorkflowExecutionIdentifier
from flytekit.common.exceptions import scopes as _exception_scopes
from flytekit.common.exceptions import user as _user_exceptions
from flytekit.common.tasks import output as _task_output
from flytekit.common.tasks import task as _base_task
from flytekit.common.types import helpers as _type_helpers
from flytekit.configuration import internal as _internal_config
from flytekit.configuration import resources as _resource_config
from flytekit.configuration import sdk as _sdk_config
from flytekit.configuration import secrets
from flytekit.engines import loader as _engine_loader
from flytekit.interfaces.stats import taggable
from flytekit.models import literals as _literal_models
from flytekit.models import task as _task_models
class SecretsManager(object):
"""
This provides secrets resolution logic at runtime.
The resolution order is:
- Try the environment variable first. The env var name carries the configuration.SECRETS_ENV_PREFIX and is all upper-cased.
- If not found, try the file whose lower-cased name matches
``configuration.SECRETS_DEFAULT_DIR/<group>/configuration.SECRETS_FILE_PREFIX<key>``
All configuration values can always be overridden by injecting an environment variable
"""
def __init__(self):
self._base_dir = str(secrets.SECRETS_DEFAULT_DIR.get()).strip()
self._file_prefix = str(secrets.SECRETS_FILE_PREFIX.get()).strip()
self._env_prefix = str(secrets.SECRETS_ENV_PREFIX.get()).strip()
def get(self, group: str, key: str) -> str:
"""
Retrieves a secret using the resolution order -> Env followed by file. If not found raises a ValueError
"""
self.check_group_key(group, key)
env_var = self.get_secrets_env_var(group, key)
fpath = self.get_secrets_file(group, key)
v = os.environ.get(env_var)
if v is not None:
return v
if os.path.exists(fpath):
with open(fpath, "r") as f:
return f.read().strip()
raise ValueError(
f"Unable to find secret for key {key} in group {group} " f"in Env Var:{env_var} and FilePath: {fpath}"
)
def get_secrets_env_var(self, group: str, key: str) -> str:
"""
Returns a string that matches the ENV Variable to look for the secrets
"""
self.check_group_key(group, key)
return f"{self._env_prefix}{group.upper()}_{key.upper()}"
def get_secrets_file(self, group: str, key: str) -> str:
"""
Returns a path that matches the file to look for the secrets
"""
self.check_group_key(group, key)
return os.path.join(self._base_dir, group.lower(), f"{self._file_prefix}{key.lower()}")
@staticmethod
def check_group_key(group: str, key: str):
if group is None or group == "":
raise ValueError("secrets group is a mandatory field.")
if key is None or key == "":
raise ValueError("secrets key is a mandatory field.")
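# Illustrative usage (a minimal sketch, not part of the original module; the
# group/key names below are hypothetical):
#
#   sm = SecretsManager()
#   token = sm.get("mygroup", "apikey")
#   # Looks up <env_prefix>MYGROUP_APIKEY in the environment first, then the file
#   # <base_dir>/mygroup/<file_prefix>apikey, and raises ValueError if neither exists.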
# TODO: Clean up working dir name
class ExecutionParameters(object):
"""
This is a run-time user-centric context object that is accessible to every @task method. It can be accessed using
.. code-block:: python
flytekit.current_context()
This object provides the following
* a statsd handler
* a logging handler
* the execution ID as an :py:class:`flytekit.models.core.identifier.WorkflowExecutionIdentifier` object
* a working directory for the user to write arbitrary files to
Please do not confuse this object with the :py:class:`flytekit.FlyteContext` object.
"""
@dataclass(init=False)
class Builder(object):
stats: taggable.TaggableStats
execution_date: datetime
logging: _logging
execution_id: str
attrs: typing.Dict[str, typing.Any]
working_dir: typing.Union[os.PathLike, _common_utils.AutoDeletingTempDir]
def __init__(self, current: typing.Optional[ExecutionParameters] = None):
self.stats = current.stats if current else None
self.execution_date = current.execution_date if current else None
self.working_dir = current.working_directory if current else None
self.execution_id = current.execution_id if current else None
self.logging = current.logging if current else None
self.attrs = current._attrs if current else {}
def add_attr(self, key: str, v: typing.Any) -> ExecutionParameters.Builder:
self.attrs[key] = v
return self
def build(self) -> ExecutionParameters:
if not isinstance(self.working_dir, _common_utils.AutoDeletingTempDir):
pathlib.Path(self.working_dir).mkdir(parents=True, exist_ok=True)
return ExecutionParameters(
execution_date=self.execution_date,
stats=self.stats,
tmp_dir=self.working_dir,
execution_id=self.execution_id,
logging=self.logging,
**self.attrs,
)
@staticmethod
def new_builder(current: ExecutionParameters = None) -> Builder:
return ExecutionParameters.Builder(current=current)
def builder(self) -> Builder:
return ExecutionParameters.Builder(current=self)
def __init__(self, execution_date, tmp_dir, stats, execution_id, logging, **kwargs):
"""
Args:
execution_date: Date when the execution is running
tmp_dir: temporary directory for the execution
stats: handle to emit stats
execution_id: Identifier for the execution
logging: handle to logging
"""
self._stats = stats
self._execution_date = execution_date
self._working_directory = tmp_dir
self._execution_id = execution_id
self._logging = logging
# AutoDeletingTempDirs should be used within a `with` block, which creates the directory upon entry
self._attrs = kwargs
# It is safe to recreate the Secrets Manager
self._secrets_manager = SecretsManager()
@property
def stats(self) -> taggable.TaggableStats:
"""
A handle to a special statsd object that provides usefully tagged stats.
TODO: Usage examples and better comments
"""
return self._stats
@property
def logging(self) -> _logging:
"""
A handle to a useful logging object.
TODO: Usage examples
"""
return self._logging
@property
def working_directory(self) -> _common_utils.AutoDeletingTempDir:
"""
A handle to a special working directory for easily producing temporary files.
TODO: Usage examples
TODO: This does not always return an AutoDeletingTempDir
"""
return self._working_directory
@property
def execution_date(self) -> datetime:
"""
This is a datetime representing the time at which a workflow was started. This is consistent across all tasks
executed in a workflow or sub-workflow.
.. note::
Do NOT use this execution_date to drive any production logic. It might be useful as a tag for data to help
in debugging.
"""
return self._execution_date
@property
def execution_id(self) -> str:
"""
This is the identifier of the workflow execution within the underlying engine. It will be consistent across all
task executions in a workflow or sub-workflow execution.
.. note::
Do NOT use this execution_id to drive any production logic. This execution ID should only be used as a tag
on output data to link back to the workflow run that created it.
"""
return self._execution_id
@property
def secrets(self) -> SecretsManager:
return self._secrets_manager
def __getattr__(self, attr_name: str) -> typing.Any:
"""
This houses certain task-specific context. For example, in Spark it houses the SparkSession, etc.
"""
attr_name = attr_name.upper()
if self._attrs and attr_name in self._attrs:
return self._attrs[attr_name]
raise AssertionError(f"{attr_name} not available as a parameter in Flyte context - are you in the right task type?")
def has_attr(self, attr_name: str) -> bool:
attr_name = attr_name.upper()
if self._attrs and attr_name in self._attrs:
return True
return False
def get(self, key: str) -> typing.Any:
"""
Returns task specific context if present else raise an error. The returned context will match the key
"""
return self.__getattr__(attr_name=key)
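# Illustrative usage inside a task body (a minimal sketch, not part of the
# original module; it only touches what the class docstring above promises):
#
#   ctx = flytekit.current_context()
#   ctx.logging.info("running in execution %s", ctx.execution_id)
#   tmp_dir = ctx.working_directory          # scratch space for arbitrary files
#   secret = ctx.secrets.get("mygroup", "apikey")  # hypothetical group/key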
class SdkRunnableContainer(_task_models.Container, metaclass=_sdk_bases.ExtendedSdkType):
"""
This is not necessarily a local-only Container object. So long as configuration is present, you can use this object
"""
def __init__(
self,
command,
args,
resources,
env,
config,
):
super(SdkRunnableContainer, self).__init__("", command, args, resources, env or {}, config)
@property
def args(self):
"""
:rtype: list[Text]
"""
return _sdk_config.SDK_PYTHON_VENV.get() + self._args
@property
def image(self):
"""
:rtype: Text
"""
return _internal_config.IMAGE.get()
@property
def env(self):
"""
:rtype: dict[Text,Text]
"""
env = super(SdkRunnableContainer, self).env.copy()
env.update(
{
_internal_config.CONFIGURATION_PATH.env_var: _internal_config.CONFIGURATION_PATH.get(),
_internal_config.IMAGE.env_var: _internal_config.IMAGE.get(),
# TODO: Phase out the below. Propeller will set these and these are not SDK specific
_internal_config.PROJECT.env_var: _internal_config.PROJECT.get(),
_internal_config.DOMAIN.env_var: _internal_config.DOMAIN.get(),
_internal_config.NAME.env_var: _internal_config.NAME.get(),
_internal_config.VERSION.env_var: _internal_config.VERSION.get(),
}
)
return env
@classmethod
def get_resources(
cls,
storage_request=None,
cpu_request=None,
gpu_request=None,
memory_request=None,
storage_limit=None,
cpu_limit=None,
gpu_limit=None,
memory_limit=None,
):
"""
:param Text storage_request:
:param Text cpu_request:
:param Text gpu_request:
:param Text memory_request:
:param Text storage_limit:
:param Text cpu_limit:
:param Text gpu_limit:
:param Text memory_limit:
"""
requests = []
if storage_request:
requests.append(
_task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.STORAGE, storage_request)
)
if cpu_request:
requests.append(_task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.CPU, cpu_request))
if gpu_request:
requests.append(_task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.GPU, gpu_request))
if memory_request:
requests.append(
_task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.MEMORY, memory_request)
)
limits = []
if storage_limit:
limits.append(
_task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.STORAGE, storage_limit)
)
if cpu_limit:
limits.append(_task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.CPU, cpu_limit))
if gpu_limit:
limits.append(_task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.GPU, gpu_limit))
if memory_limit:
limits.append(
_task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.MEMORY, memory_limit)
)
return _task_models.Resources(limits=limits, requests=requests)
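# Illustrative call (a minimal sketch, not part of the original module): only the
# values actually provided end up in the requests/limits lists.
#
#   resources = SdkRunnableContainer.get_resources(cpu_request="1", memory_limit="2Gi")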
class SdkRunnableTaskStyle(enum.Enum):
V0 = 0
V1 = 1
class SdkRunnableTask(_base_task.SdkTask, metaclass=_sdk_bases.ExtendedSdkType):
"""
This class includes the additional logic for building a task that executes in Python code. It has even more
validation checks to ensure proper behavior than its superclasses.
Since an SdkRunnableTask is assumed to run by hooking into Python code, we will provide additional shortcuts and
methods on this object.
"""
def __init__(
self,
task_function,
task_type,
discovery_version,
retries,
interruptible,
deprecated,
storage_request,
cpu_request,
gpu_request,
memory_request,
storage_limit,
cpu_limit,
gpu_limit,
memory_limit,
discoverable,
timeout,
environment,
custom,
):
"""
:param task_function: Function containing user code. This will be executed via the SDK's engine.
:param Text task_type: string describing the task type
:param Text discovery_version: string describing the version for task discovery purposes
:param int retries: Number of retries to attempt
:param bool interruptible: Specify whether task is interruptible
:param Text deprecated:
:param Text storage_request:
:param Text cpu_request:
:param Text gpu_request:
:param Text memory_request:
:param Text storage_limit:
:param Text cpu_limit:
:param Text gpu_limit:
:param Text memory_limit:
:param bool discoverable:
:param datetime.timedelta timeout:
:param dict[Text, Text] environment:
:param dict[Text, T] custom:
"""
# Circular dependency
from flytekit import __version__
self._task_function = task_function
super(SdkRunnableTask, self).__init__(
task_type,
_task_models.TaskMetadata(
discoverable,
_task_models.RuntimeMetadata(
_task_models.RuntimeMetadata.RuntimeType.FLYTE_SDK,
__version__,
"python",
),
timeout,
_literal_models.RetryStrategy(retries),
interruptible,
discovery_version,
deprecated,
),
# TODO: If we end up using SdkRunnableTask for the new code, make sure this is set correctly.
_interface.TypedInterface({}, {}),
custom,
container=self._get_container_definition(
storage_request=storage_request,
cpu_request=cpu_request,
gpu_request=gpu_request,
memory_request=memory_request,
storage_limit=storage_limit,
cpu_limit=cpu_limit,
gpu_limit=gpu_limit,
memory_limit=memory_limit,
environment=environment,
),
)
self.id._name = "{}.{}".format(self.task_module, self.task_function_name)
self._has_fast_registered = False
# TODO: Remove this in the future, I don't think we'll be using this.
self._task_style = SdkRunnableTaskStyle.V0
_banned_inputs = {}
_banned_outputs = {}
@_exception_scopes.system_entry_point
def add_inputs(self, inputs):
"""
Adds the inputs to this task. This can be called multiple times, but it will fail if an input with a given
name is added more than once, a name collides with an output, or if the name doesn't exist as an arg name in
the wrapped function.
:param dict[Text, flytekit.models.interface.Variable] inputs: names and variables
"""
self._validate_inputs(inputs)
self.interface.inputs.update(inputs)
@classmethod
def promote_from_model(cls, base_model):
# TODO: If the task exists in this container, we should be able to retrieve it.
raise _user_exceptions.FlyteAssertion("Cannot promote a base object to a runnable task.")
@property
def task_style(self):
return self._task_style
@property
def task_function(self):
return self._task_function
@property
def task_function_name(self):
"""
:rtype: Text
"""
return self.task_function.__name__
@property
def task_module(self):
"""
:rtype: Text
"""
return self._task_function.__module__
def validate(self):
super(SdkRunnableTask, self).validate()
missing_args = self._missing_mapped_inputs_outputs()
if len(missing_args) > 0:
raise _user_exceptions.FlyteAssertion(
"The task {} is invalid because not all inputs and outputs in the "
"task function definition were specified in @outputs and @inputs. "
"We are missing definitions for {}.".format(self, missing_args)
)
@_exception_scopes.system_entry_point
def unit_test(self, **input_map):
"""
:param dict[Text, T] input_map: Python Std input from users. We will cast these to the appropriate Flyte
literals.
:returns: Depends on the behavior of the specific task in the unit engine.
"""
return (
_engine_loader.get_engine("unit")
.get_task(self)
.execute(
_type_helpers.pack_python_std_map_to_literal_map(
input_map,
{
k: _type_helpers.get_sdk_type_from_literal_type(v.type)
for k, v in _six.iteritems(self.interface.inputs)
},
)
)
)
@_exception_scopes.system_entry_point
def local_execute(self, **input_map):
"""
:param dict[Text, T] input_map: Python Std input from users. We will cast these to the appropriate Flyte
literals.
:rtype: dict[Text, T]
:returns: The output produced by this task in Python standard format.
"""
return (
_engine_loader.get_engine("local")
.get_task(self)
.execute(
_type_helpers.pack_python_std_map_to_literal_map(
input_map,
{
k: _type_helpers.get_sdk_type_from_literal_type(v.type)
for k, v in _six.iteritems(self.interface.inputs)
},
)
)
)
def _execute_user_code(self, context, inputs):
"""
:param flytekit.engines.common.EngineContext context:
:param dict[Text, T] inputs: This variable is a bit of a misnomer, since it's both inputs and outputs. The
dictionary passed here will be passed to the user-defined function, and will have values that are a
variety of types. The T's here are Python std values for inputs. If there isn't a native Python type for
something (like Schema or Blob), they are the Flyte classes. For outputs they are OutputReferences.
(Note that these are not the same OutputReferences as in BindingData's)
:rtype: Any: the returned object from user code.
:returns: This function must return a dictionary mapping 'filenames' to Flyte Interface Entities. These
entities will be used by the engine to pass data from node to node, populate metadata, etc. etc.. Each
engine will have different behavior. For instance, the Flyte engine will upload the entities to a remote
working directory (with the names provided), which will in turn allow Flyte Propeller to push along the
workflow, whereas the local engine will merely feed the outputs directly into the next node.
"""
if self.task_style == SdkRunnableTaskStyle.V0:
return _exception_scopes.user_entry_point(self.task_function)(
ExecutionParameters(
execution_date=context.execution_date,
# TODO: it might be better to consider passing the full struct
execution_id=_six.text_type(WorkflowExecutionIdentifier.promote_from_model(context.execution_id)),
stats=context.stats,
logging=context.logging,
tmp_dir=context.working_directory,
),
**inputs,
)
@_exception_scopes.system_entry_point
def execute(self, context, inputs):
"""
:param flytekit.engines.common.EngineContext context:
:param flytekit.models.literals.LiteralMap inputs:
:rtype: dict[Text, flytekit.models.common.FlyteIdlEntity]
:returns: This function must return a dictionary mapping 'filenames' to Flyte Interface Entities. These
entities will be used by the engine to pass data from node to node, populate metadata, etc. etc.. Each
engine will have different behavior. For instance, the Flyte engine will upload the entities to a remote
working directory (with the names provided), which will in turn allow Flyte Propeller to push along the
workflow, whereas the local engine will merely feed the outputs directly into the next node.
"""
inputs_dict = _type_helpers.unpack_literal_map_to_sdk_python_std(
inputs, {k: _type_helpers.get_sdk_type_from_literal_type(v.type) for k, v in self.interface.inputs.items()}
)
outputs_dict = {
name: _task_output.OutputReference(_type_helpers.get_sdk_type_from_literal_type(variable.type))
for name, variable in _six.iteritems(self.interface.outputs)
}
# Old style - V0: If annotations are used to define outputs, do not append outputs to the inputs dict
if not self.task_function.__annotations__ or "return" not in self.task_function.__annotations__:
inputs_dict.update(outputs_dict)
self._execute_user_code(context, inputs_dict)
return {
_constants.OUTPUT_FILE_NAME: _literal_models.LiteralMap(
literals={k: v.sdk_value for k, v in _six.iteritems(outputs_dict)}
)
}
@_exception_scopes.system_entry_point
def fast_register(self, project, domain, name, digest, additional_distribution, dest_dir) -> str:
"""
The fast register call essentially hijacks the task container commandline.
Say an existing task container definition had a commandline like so:
flyte_venv pyflyte-execute --task-module app.workflows.my_workflow --task-name my_task
The fast register command introduces a wrapper call to fast-execute the original commandline like so:
flyte_venv pyflyte-fast-execute --additional-distribution s3://my-s3-bucket/foo/bar/12345.tar.gz --
flyte_venv pyflyte-execute --task-module app.workflows.my_workflow --task-name my_task
At execution time pyflyte-fast-execute will ensure the additional distribution (i.e. the fast-registered code)
exists before calling the original task commandline.
:param Text project: The project in which to register this task.
:param Text domain: The domain in which to register this task.
:param Text name: The name to give this task.
:param Text digest: The version in which to register this task.
:param Text additional_distribution: User-specified location for remote source code distribution.
:param Text dest_dir: The optional location where to install the additional distribution at runtime
:rtype: Text: Registered identifier.
"""
original_container = self.container
container = _copy.deepcopy(original_container)
args = ["pyflyte-fast-execute", "--additional-distribution", additional_distribution]
if dest_dir:
args += ["--dest-dir", dest_dir]
args += ["--"] + container.args
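# everything after "--" is the untouched original pyflyte-execute command line,
# so the wrapper only prepends the fast-execute bootstrap step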
container._args = args
self._container = container
try:
registered_id = self.register(project, domain, name, digest)
except Exception:
self._container = original_container
raise
self._has_fast_registered = True
self._container = original_container
return str(registered_id)
@property
def has_fast_registered(self) -> bool:
return self._has_fast_registered
def _get_container_definition(
self,
storage_request=None,
cpu_request=None,
gpu_request=None,
memory_request=None,
storage_limit=None,
cpu_limit=None,
gpu_limit=None,
memory_limit=None,
environment=None,
cls=None,
):
"""
:param Text storage_request:
:param Text cpu_request:
:param Text gpu_request:
:param Text memory_request:
:param Text storage_limit:
:param Text cpu_limit:
:param Text gpu_limit:
:param Text memory_limit:
:param dict[Text,Text] environment:
:param cls Optional[type]: Type of container to instantiate. Generally should subclass SdkRunnableContainer.
:rtype: flytekit.models.task.Container
"""
storage_limit = storage_limit or _resource_config.DEFAULT_STORAGE_LIMIT.get()
storage_request = storage_request or _resource_config.DEFAULT_STORAGE_REQUEST.get()
cpu_limit = cpu_limit or _resource_config.DEFAULT_CPU_LIMIT.get()
cpu_request = cpu_request or _resource_config.DEFAULT_CPU_REQUEST.get()
gpu_limit = gpu_limit or _resource_config.DEFAULT_GPU_LIMIT.get()
gpu_request = gpu_request or _resource_config.DEFAULT_GPU_REQUEST.get()
memory_limit = memory_limit or _resource_config.DEFAULT_MEMORY_LIMIT.get()
memory_request = memory_request or _resource_config.DEFAULT_MEMORY_REQUEST.get()
resources = SdkRunnableContainer.get_resources(
storage_request, cpu_request, gpu_request, memory_request, storage_limit, cpu_limit, gpu_limit, memory_limit
)
return (cls or SdkRunnableContainer)(
command=[],
args=[
"pyflyte-execute",
"--task-module",
self.task_module,
"--task-name",
self.task_function_name,
"--inputs",
"{{.input}}",
"--output-prefix",
"{{.outputPrefix}}",
"--raw-output-data-prefix",
"{{.rawOutputDataPrefix}}",
],
resources=resources,
env=environment,
config={},
)
def _validate_inputs(self, inputs):
"""
This method should be overridden in sub-classes that intend to do additional checks on inputs. If validation
fails, this function should raise an informative exception.
:param dict[Text, flytekit.models.interface.Variable] inputs: Input variables to validate
:raises: flytekit.common.exceptions.user.FlyteValidationException
"""
super(SdkRunnableTask, self)._validate_inputs(inputs)
for k, v in _six.iteritems(inputs):
if not self._is_argname_in_function_definition(k):
raise _user_exceptions.FlyteValidationException(
"The input named '{}' was not specified in the task function. Therefore, this input cannot be "
"provided to the task.".format(k)
)
if _type_helpers.get_sdk_type_from_literal_type(v.type) in type(self)._banned_inputs:
raise _user_exceptions.FlyteValidationException(
"The input '{}' is not an accepted input type.".format(v)
)
def _validate_outputs(self, outputs):
"""
This method should be overridden in sub-classes that intend to do additional checks on outputs. If validation
fails, this function should raise an informative exception.
:param dict[Text, flytekit.models.interface.Variable] outputs: Output variables to validate
:raises: flytekit.common.exceptions.user.FlyteValidationException
"""
super(SdkRunnableTask, self)._validate_outputs(outputs)
for k, v in _six.iteritems(outputs):
if not self._is_argname_in_function_definition(k):
raise _user_exceptions.FlyteValidationException(
"The output named '{}' was not specified in the task function. Therefore, this output cannot be "
"provided to the task.".format(k)
)
if _type_helpers.get_sdk_type_from_literal_type(v.type) in type(self)._banned_outputs:
raise _user_exceptions.FlyteValidationException(
"The output '{}' is not an accepted output type.".format(v)
)
def _get_kwarg_inputs(self):
# Trim off first parameter as it is reserved for workflow_parameters
return set(_getargspec(self.task_function).args[1:])
def _is_argname_in_function_definition(self, key):
return key in self._get_kwarg_inputs()
def _missing_mapped_inputs_outputs(self):
# Trim off first parameter as it is reserved for workflow_parameters
args = self._get_kwarg_inputs()
inputs_and_outputs = set(self.interface.outputs.keys()) | set(self.interface.inputs.keys())
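# symmetric difference: names that appear in the function signature or in the
# declared interface, but not in both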
return args ^ inputs_and_outputs
|
py | 1a2ea3181e94fd94bc50e28da8a2d0ae58cf4fe6 | # A sample recurrent neural network (RNN) for text classification
# @Time: 8/13/2020
# @Author: lnblanke
# @Email: [email protected]
# @File: cnn.py
import numpy as np
import tensorflow as tf
from blocks import RNN, Dense
from model import Model
import os
path = os.path.join("glove.6B.100d.txt")
embedding_indices = {}
with open(path) as f:
for line in f:
word, coef = line.split(maxsplit = 1)
coef = np.fromstring(coef, "f", sep = " ")
embedding_indices[word] = coef
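# embedding() below maps each IMDB integer word index back to its word (offset by 3
# for the reserved <pad>/<sos>/<unk> tokens) and looks up the 100-d GloVe vector,
# leaving zeros for out-of-vocabulary words.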
def embedding(x):
word_idx = tf.keras.datasets.imdb.get_word_index()
embedding_dim = 100
l, w = x.shape
embed = np.zeros((l, w, embedding_dim))
vec_to_word = {vec + 3: ww for ww, vec in word_idx.items()}
vec_to_word[0] = "<pad>"
vec_to_word[1] = "<sos>"
vec_to_word[2] = "<unk>"
for i in range(l):
for j in range(w):
embedding_vec = embedding_indices.get(vec_to_word[x[i][j]])
if embedding_vec is not None:
embed[i][j] = embedding_vec
return embed
word_size = 15000
(train_x, train_y), (test_x, test_y) = tf.keras.datasets.imdb.load_data(num_words = word_size)
max_len = 300
train_x = tf.keras.preprocessing.sequence.pad_sequences(train_x, max_len)[:1000]
train_y = train_y[:1000]
test_x = tf.keras.preprocessing.sequence.pad_sequences(test_x, max_len)[:200]
test_y = test_y[:200]
train_x_embed = embedding(train_x)
test_x_embed = embedding(test_x)
rate = 1e-2 # Learning rate
epoch = 100 # Learning epochs
patience = 10 # Early stop patience
model = Model("RNN")
model.add(RNN(input_size = 100, output_size = 64, units = 128))
model.add(Dense(64, 2, activation = "softmax"))
if __name__ == '__main__':
model.fit(train_x_embed, train_y, loss_func = "cross entropy loss", epochs = epoch, learning_rate = rate,
patience = patience)
pred = model.predict(test_x_embed)
print("Accuracy: %.2f" % (np.sum(pred == test_y) / len(test_y) * 100) + "%")
|
py | 1a2ea31bf03207358eb5c02a59efac22ead89f6f | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateDocument
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_Documents_CreateDocument_sync]
from google.cloud import dialogflow_v2
def sample_create_document():
# Create a client
client = dialogflow_v2.DocumentsClient()
# Initialize request argument(s)
document = dialogflow_v2.Document()
document.content_uri = "content_uri_value"
document.display_name = "display_name_value"
document.mime_type = "mime_type_value"
document.knowledge_types = ["AGENT_FACING_SMART_REPLY"]
request = dialogflow_v2.CreateDocumentRequest(
parent="parent_value",
document=document,
)
# Make the request
operation = client.create_document(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END dialogflow_v2_generated_Documents_CreateDocument_sync]
|
py | 1a2ea33be5b1c12b6b57d8e9ec275581e3599fc4 | #Download Data and Save Data
import urllib
print "downloading with urllib"
dl_url = "http://s3.amazonaws.com/open_data/opendata_projects000.gz"
urllib.urlretrieve(dl_url, "opendata_projects000.gz")
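# A sketch (not part of the original script) for fetching every file named in the
# comment list below, assuming the same bucket layout:
# for fname in ["opendata_giftcards000.gz", "opendata_essays000.gz", "opendata_donations000.gz"]:
#     urllib.urlretrieve("http://s3.amazonaws.com/open_data/" + fname, fname)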
# file1 = opendata_giftcards000.gz
# file2 = opendata_giving_page_projects000.gz
# file3 = opendata_giving_pages000.gz
# file4 = opendata_essays000.gz
# file5 = opendata_resources000.gz
# file6 = opendata_donations000.gz
# file7 = opendata_projects000.gz
# http://s3.amazonaws.com/open_data/opendata_giftcards000.gz
# http://s3.amazonaws.com/open_data/opendata_giving_page_projects000.gz
# http://s3.amazonaws.com/open_data/opendata_giving_pages000.gz
# http://s3.amazonaws.com/open_data/opendata_essays000.gz
# http://s3.amazonaws.com/open_data/opendata_resources000.gz
# http://s3.amazonaws.com/open_data/opendata_donations000.gz
# http://s3.amazonaws.com/open_data/opendata_projects000.gz |
py | 1a2ea363968078399a54b10dc83f806c9fe801d3 | """Initial migration
Revision ID: e4ef83148109
Revises:
Create Date: 2021-11-25 12:45:30.514576
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e4ef83148109'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
py | 1a2ea3684f76dacee9cee3bfdd0a4cf2e4102106 | import os
from setuptools import setup, find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django_admin_monitoring',
version='0.1.3',
packages=find_packages(),
include_package_data=True,
license='MIT License',
description='A simple Django app that provides ability to monitor such things as user feedback in admin',
long_description=README,
url='https://github.com/eternalfame/django_admin_monitoring',
author='Vyacheslav Sukhenko',
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
) |
py | 1a2ea48516f54a1066414fdda5b0d25b2432255c | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import warnings
from abc import ABCMeta, abstractmethod
from joblib import Parallel
from ..base import clone, is_classifier
from ._base import LinearClassifierMixin, SparseCoefMixin
from ._base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..utils import check_array, check_random_state, check_X_y
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.validation import _deprecate_positional_args
from ..utils.fixes import delayed
from ..exceptions import ConvergenceWarning
from ..model_selection import StratifiedShuffleSplit, ShuffleSplit
from ._sgd_fast import _plain_sgd
from ..utils import compute_class_weight
from ._sgd_fast import Hinge
from ._sgd_fast import SquaredHinge
from ._sgd_fast import Log
from ._sgd_fast import ModifiedHuber
from ._sgd_fast import SquaredLoss
from ._sgd_fast import Huber
from ._sgd_fast import EpsilonInsensitive
from ._sgd_fast import SquaredEpsilonInsensitive
from ..utils.fixes import _joblib_parallel_args
from ..utils import deprecated
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"adaptive": 4, "pa1": 5, "pa2": 6}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
MAX_INT = np.iinfo(np.int32).max
class _ValidationScoreCallback:
"""Callback for early stopping based on validation score"""
def __init__(self, estimator, X_val, y_val, sample_weight_val,
classes=None):
self.estimator = clone(estimator)
self.estimator.t_ = 1 # to pass check_is_fitted
if classes is not None:
self.estimator.classes_ = classes
self.X_val = X_val
self.y_val = y_val
self.sample_weight_val = sample_weight_val
def __call__(self, coef, intercept):
est = self.estimator
est.coef_ = coef.reshape(1, -1)
est.intercept_ = np.atleast_1d(intercept)
return est.score(self.X_val, self.y_val, self.sample_weight_val)
class BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for SGD classification and regression."""
@_deprecate_positional_args
def __init__(self, loss, *, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
shuffle=True, verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
early_stopping=False, validation_fraction=0.1,
n_iter_no_change=5, warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.early_stopping = early_stopping
self.validation_fraction = validation_fraction
self.n_iter_no_change = n_iter_no_change
self.warm_start = warm_start
self.average = average
self.max_iter = max_iter
self.tol = tol
# current tests expect init to do parameter validation
# but we are not allowed to set attributes
self._validate_params()
def set_params(self, **kwargs):
"""Set and validate the parameters of estimator.
Parameters
----------
**kwargs : dict
Estimator parameters.
Returns
-------
self : object
Estimator instance.
"""
super().set_params(**kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self, for_partial_fit=False):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if not isinstance(self.early_stopping, bool):
raise ValueError("early_stopping must be either True or False")
if self.early_stopping and for_partial_fit:
raise ValueError("early_stopping should be False with partial_fit")
if self.max_iter is not None and self.max_iter <= 0:
raise ValueError("max_iter must be > zero. Got %f" % self.max_iter)
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.n_iter_no_change < 1:
raise ValueError("n_iter_no_change must be >= 1")
if not (0.0 < self.validation_fraction < 1.0):
raise ValueError("validation_fraction must be in range (0, 1)")
if self.learning_rate in ("constant", "invscaling", "adaptive"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError as e:
raise ValueError("The loss %s is not supported. " % loss) from e
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError as e:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate) from e
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError as e:
raise ValueError("Penalty %s is not supported. " % penalty) from e
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self._standard_coef = self.coef_
self._standard_intercept = self.intercept_
self._average_coef = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self._average_intercept = np.zeros(self._standard_intercept.shape,
dtype=np.float64,
order="C")
def _make_validation_split(self, y):
"""Split the dataset between training set and validation set.
Parameters
----------
y : ndarray of shape (n_samples, )
Target values.
Returns
-------
validation_mask : ndarray of shape (n_samples, )
Equal to 1 on the validation set, 0 on the training set.
"""
n_samples = y.shape[0]
validation_mask = np.zeros(n_samples, dtype=np.uint8)
if not self.early_stopping:
# use the full set for training, with an empty validation set
return validation_mask
if is_classifier(self):
splitter_type = StratifiedShuffleSplit
else:
splitter_type = ShuffleSplit
cv = splitter_type(test_size=self.validation_fraction,
random_state=self.random_state)
idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))
if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:
raise ValueError(
"Splitting %d samples into a train set and a validation set "
"with validation_fraction=%r led to an empty set (%d and %d "
"samples). Please either change validation_fraction, increase "
"number of samples, or disable early_stopping."
% (n_samples, self.validation_fraction, idx_train.shape[0],
idx_val.shape[0]))
validation_mask[idx_val] = 1
return validation_mask
def _make_validation_score_cb(self, validation_mask, X, y, sample_weight,
classes=None):
if not self.early_stopping:
return None
return _ValidationScoreCallback(
self, X[validation_mask], y[validation_mask],
sample_weight[validation_mask], classes=classes)
# mypy error: Decorated property not supported
@deprecated("Attribute standard_coef_ was deprecated " # type: ignore
"in version 0.23 and will be removed in 1.0 "
"(renaming of 0.25).")
@property
def standard_coef_(self):
return self._standard_coef
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute standard_intercept_ was deprecated "
"in version 0.23 and will be removed in 1.0 (renaming of 0.25)."
)
@property
def standard_intercept_(self):
return self._standard_intercept
# mypy error: Decorated property not supported
@deprecated("Attribute average_coef_ was deprecated " # type: ignore
"in version 0.23 and will be removed in 1.0 "
"(renaming of 0.25).")
@property
def average_coef_(self):
return self._average_coef
# mypy error: Decorated property not supported
@deprecated("Attribute average_intercept_ was deprecated " # type: ignore
"in version 0.23 and will be removed in 1.0 "
"(renaming of 0.25).")
@property
def average_intercept_(self):
return self._average_intercept
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept, average_coef, average_intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
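# one-vs-rest target encoding: the i-th class becomes +1, every other class -1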
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est._standard_coef.ravel()
intercept = est._standard_intercept[0]
average_coef = est._average_coef.ravel()
average_intercept = est._average_intercept[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est._standard_coef[i]
intercept = est._standard_intercept[i]
average_coef = est._average_coef[i]
average_intercept = est._average_intercept[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, max_iter,
pos_weight, neg_weight, sample_weight, validation_mask=None,
random_state=None):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
Parameters
----------
est : Estimator object
The estimator to fit
i : int
Index of the positive class
X : numpy array or sparse matrix of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples, ]
Target values
alpha : float
The regularization parameter
C : float
Maximum step size for passive aggressive
learning_rate : string
The learning rate. Accepted values are 'constant', 'optimal',
'invscaling', 'pa1' and 'pa2'.
max_iter : int
The maximum number of iterations (epochs)
pos_weight : float
The weight of the positive class
neg_weight : float
The weight of the negative class
sample_weight : numpy array of shape [n_samples, ]
The weight of each sample
validation_mask : numpy array of shape [n_samples, ], default=None
Precomputed validation mask in case _fit_binary is called in the
context of a one-vs-rest reduction.
random_state : int, RandomState instance, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
random_state = check_random_state(random_state)
dataset, intercept_decay = make_dataset(
X, y_i, sample_weight, random_state=random_state)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
if validation_mask is None:
validation_mask = est._make_validation_split(y_i)
classes = np.array([-1, 1], dtype=y_i.dtype)
validation_score_cb = est._make_validation_score_cb(
validation_mask, X, y_i, sample_weight, classes=classes)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(MAX_INT)
tol = est.tol if est.tol is not None else -np.inf
coef, intercept, average_coef, average_intercept, n_iter_ = _plain_sgd(
coef, intercept, average_coef, average_intercept, est.loss_function_,
penalty_type, alpha, C, est.l1_ratio, dataset, validation_mask,
est.early_stopping, validation_score_cb, int(est.n_iter_no_change),
max_iter, tol, int(est.fit_intercept), int(est.verbose),
int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type,
est.eta0, est.power_t, est.t_, intercept_decay, est.average)
if est.average:
if len(est.classes_) == 2:
est._average_intercept[0] = average_intercept
else:
est._average_intercept[i] = average_intercept
return coef, intercept, n_iter_
class BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
@_deprecate_positional_args
def __init__(self, loss="hinge", *, penalty='l2', alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,
random_state=None, learning_rate="optimal", eta0=0.0,
power_t=0.5, early_stopping=False,
validation_fraction=0.1, n_iter_no_change=5,
class_weight=None, warm_start=False, average=False):
super().__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.n_jobs = n_jobs
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, max_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C", accept_large_sparse=False)
n_samples, n_features = X.shape
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(
self.class_weight, classes=self.classes_, y=y)
sample_weight = _check_sample_weight(sample_weight, X)
if getattr(self, "coef_", None) is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function_ = self._get_loss_function(loss)
if not hasattr(self, "t_"):
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight,
max_iter=max_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight,
max_iter=max_iter)
else:
raise ValueError(
"The number of classes has to be greater than one;"
" got %d class" % n_classes)
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
self._validate_params()
if hasattr(self, "classes_"):
self.classes_ = None
X, y = self._validate_data(X, y, accept_sparse='csr',
dtype=np.float64, order="C",
accept_large_sparse=False)
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and hasattr(self, "coef_"):
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self._standard_coef = self.coef_
self._standard_intercept = self.intercept_
self._average_coef = None
self._average_intercept = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.max_iter,
classes, sample_weight, coef_init, intercept_init)
if (self.tol is not None and self.tol > -np.inf
and self.n_iter_ == self.max_iter):
warnings.warn("Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit.",
ConvergenceWarning)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, max_iter):
"""Fit a binary classifier on X and y. """
coef, intercept, n_iter_ = fit_binary(self, 1, X, y, alpha, C,
learning_rate, max_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight,
random_state=self.random_state)
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self._average_coef.reshape(1, -1)
self.intercept_ = self._average_intercept
else:
self.coef_ = self._standard_coef.reshape(1, -1)
self._standard_intercept = np.atleast_1d(intercept)
self.intercept_ = self._standard_intercept
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, max_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OvA (One versus All) or OvR (One versus Rest).
"""
# Precompute the validation split using the multiclass labels
# to ensure proper balancing of the classes.
validation_mask = self._make_validation_split(y)
# Use joblib to fit OvA in parallel.
# Pick the random seed for each job outside of fit_binary to avoid
# sharing the estimator random state between threads which could lead
# to non-deterministic behavior
random_state = check_random_state(self.random_state)
seeds = random_state.randint(MAX_INT, size=len(self.classes_))
result = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(require="sharedmem"))(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
max_iter, self._expanded_class_weight[i],
1., sample_weight,
validation_mask=validation_mask,
random_state=seed)
for i, seed in enumerate(seeds))
# take the maximum of n_iter_ over every binary fit
n_iter_ = 0.
for i, (_, intercept, n_iter_i) in enumerate(result):
self.intercept_[i] = intercept
n_iter_ = max(n_iter_, n_iter_i)
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self._average_coef
self.intercept_ = self._average_intercept
else:
self.coef_ = self._standard_coef
self._standard_intercept = np.atleast_1d(self.intercept_)
self.intercept_ = self._standard_intercept
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Perform one epoch of stochastic gradient descent on given samples.
Internally, this method uses ``max_iter = 1``. Therefore, it is not
guaranteed that a minimum of the cost function is reached after calling
it once. Matters such as objective convergence and early stopping
should be handled by the user.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data.
y : ndarray of shape (n_samples,)
Subset of the target values.
classes : ndarray of shape (n_classes,), default=None
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self :
Returns an instance of self.
"""
self._validate_params(for_partial_fit=True)
if self.class_weight in ['balanced']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', "
"classes=classes, y=y). "
"In place of y you can use a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, max_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
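# Illustrative partial_fit loop (a minimal sketch, not part of the original module;
# the array and batch names are hypothetical):
#
#   clf = SGDClassifier(loss="log")
#   classes = np.unique(y_all)
#   for X_batch, y_batch in batches:      # `classes` is required only on the first call
#       clf.partial_fit(X_batch, y_batch, classes=classes)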
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
coef_init : ndarray of shape (n_classes, n_features), default=None
The initial coefficients to warm-start the optimization.
intercept_init : ndarray of shape (n_classes,), default=None
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified.
Returns
-------
self :
Returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier):
"""Linear classifiers (SVM, logistic regression, etc.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning via the `partial_fit` method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, default='hinge'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The possible options are 'hinge', 'log', 'modified_huber',
'squared_hinge', 'perceptron', or a regression loss: 'squared_loss',
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see
:class:`~sklearn.linear_model.SGDRegressor` for a description.
More details about the losses formulas can be found in the
:ref:`User Guide <sgd_mathematical_formulation>`.
penalty : {'l2', 'l1', 'elasticnet'}, default='l2'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float, default=0.0001
Constant that multiplies the regularization term. The higher the
value, the stronger the regularization.
        Also used to compute the learning rate when `learning_rate` is
        set to 'optimal'.
l1_ratio : float, default=0.15
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Only used if `penalty` is 'elasticnet'.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
:meth:`partial_fit` method.
.. versionadded:: 0.19
tol : float, default=1e-3
The stopping criterion. If it is not None, training will stop
when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive
epochs.
.. versionadded:: 0.19
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
verbose : int, default=0
The verbosity level.
epsilon : float, default=0.1
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : int, default=None
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance, default=None
Used for shuffling the data, when ``shuffle`` is set to ``True``.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
learning_rate : str, default='optimal'
The learning rate schedule:
- 'constant': `eta = eta0`
- 'optimal': `eta = 1.0 / (alpha * (t + t0))`
where t0 is chosen by a heuristic proposed by Leon Bottou.
- 'invscaling': `eta = eta0 / pow(t, power_t)`
        - 'adaptive': eta = eta0, as long as the training loss keeps decreasing.
Each time n_iter_no_change consecutive epochs fail to decrease the
training loss by tol or fail to increase validation score by tol if
early_stopping is True, the current learning rate is divided by 5.
.. versionadded:: 0.20
Added 'adaptive' option
eta0 : double, default=0.0
The initial learning rate for the 'constant', 'invscaling' or
'adaptive' schedules. The default value is 0.0 as eta0 is not used by
the default schedule 'optimal'.
power_t : double, default=0.5
The exponent for inverse scaling learning rate [default 0.5].
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set aside
a stratified fraction of training data as validation and terminate
training when validation score returned by the `score` method is not
improving by at least tol for n_iter_no_change consecutive epochs.
.. versionadded:: 0.20
Added 'early_stopping' option
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if `early_stopping` is True.
.. versionadded:: 0.20
Added 'validation_fraction' option
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before early stopping.
.. versionadded:: 0.20
Added 'n_iter_no_change' option
class_weight : dict, {class_label: weight} or "balanced", default=None
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
If a dynamic learning rate is used, the learning rate is adapted
depending on the number of samples already seen. Calling ``fit`` resets
this counter, while ``partial_fit`` will result in increasing the
existing counter.
average : bool or int, default=False
        When set to True, computes the averaged SGD weights across all
updates and stores the result in the ``coef_`` attribute. If set to
an int greater than 1, averaging will begin once the total number of
samples seen reaches `average`. So ``average=10`` will begin
averaging after seeing 10 samples.
Attributes
----------
coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
(n_classes, n_features)
Weights assigned to the features.
intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
n_iter_ : int
The actual number of iterations before reaching the stopping criterion.
For multiclass fits, it is the maximum over every binary fit.
loss_function_ : concrete ``LossFunction``
classes_ : array of shape (n_classes,)
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples)``.
See Also
--------
sklearn.svm.LinearSVC : Linear support vector classification.
LogisticRegression : Logistic regression.
Perceptron : Inherits from SGDClassifier. ``Perceptron()`` is equivalent to
``SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant",
penalty=None)``.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import SGDClassifier
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.pipeline import make_pipeline
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> # Always scale the input. The most convenient way is to use a pipeline.
>>> clf = make_pipeline(StandardScaler(),
... SGDClassifier(max_iter=1000, tol=1e-3))
>>> clf.fit(X, Y)
Pipeline(steps=[('standardscaler', StandardScaler()),
('sgdclassifier', SGDClassifier())])
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
@_deprecate_positional_args
def __init__(self, loss="hinge", *, penalty='l2', alpha=0.0001,
l1_ratio=0.15,
fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,
random_state=None, learning_rate="optimal", eta0=0.0,
power_t=0.5, early_stopping=False, validation_fraction=0.1,
n_iter_no_change=5, class_weight=None, warm_start=False,
average=False):
super().__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, class_weight=class_weight,
warm_start=warm_start, average=average)
def _check_proba(self):
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`~sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data for prediction.
Returns
-------
ndarray of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
check_is_fitted(self)
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data for prediction.
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
'zero sample_weight is not equivalent to removing samples',
}
}
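# Hedged illustration (not part of scikit-learn): a tiny end-to-end check of the
# probability mapping described in the predict_proba docstring above for
# loss="modified_huber", i.e. (clip(decision_function(X), -1, 1) + 1) / 2 in the
# binary case. The toy data is an assumption for demonstration only.
def _demo_modified_huber_probabilities():
    import numpy as np
    X = np.array([[-2.0], [-1.0], [1.0], [2.0]])
    y = np.array([0, 0, 1, 1])
    clf = SGDClassifier(loss="modified_huber", random_state=0).fit(X, y)
    manual = (np.clip(clf.decision_function(X), -1, 1) + 1) / 2
    proba = clf.predict_proba(X)[:, 1]  # probability of the positive class
    return np.allclose(manual, proba)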
class BaseSGDRegressor(RegressorMixin, BaseSGD):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
@_deprecate_positional_args
def __init__(self, loss="squared_loss", *, penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
random_state=None, learning_rate="invscaling", eta0=0.01,
power_t=0.25, early_stopping=False, validation_fraction=0.1,
n_iter_no_change=5, warm_start=False, average=False):
super().__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
max_iter, sample_weight, coef_init, intercept_init):
X, y = self._validate_data(X, y, accept_sparse="csr", copy=False,
order='C', dtype=np.float64,
accept_large_sparse=False)
y = y.astype(np.float64, copy=False)
n_samples, n_features = X.shape
sample_weight = _check_sample_weight(sample_weight, X)
# Allocate datastructures from input arguments
if getattr(self, "coef_", None) is None:
self._allocate_parameter_mem(1, n_features, coef_init,
intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and getattr(self, "_average_coef", None) is None:
self._average_coef = np.zeros(n_features,
dtype=np.float64,
order="C")
self._average_intercept = np.zeros(1, dtype=np.float64, order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, max_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Perform one epoch of stochastic gradient descent on given samples.
Internally, this method uses ``max_iter = 1``. Therefore, it is not
guaranteed that a minimum of the cost function is reached after calling
it once. Matters such as objective convergence and early stopping
should be handled by the user.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
self._validate_params(for_partial_fit=True)
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, max_iter=1,
sample_weight=sample_weight, coef_init=None,
intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
self._validate_params()
if self.warm_start and getattr(self, "coef_", None) is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.max_iter, sample_weight, coef_init,
intercept_init)
if (self.tol is not None and self.tol > -np.inf
and self.n_iter_ == self.max_iter):
warnings.warn("Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit.",
ConvergenceWarning)
return self
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : ndarray of shape (n_samples,)
Target values
coef_init : ndarray of shape (n_features,), default=None
The initial coefficients to warm-start the optimization.
intercept_init : ndarray of shape (1,), default=None
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
ndarray of shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
ndarray of shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, max_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if not hasattr(self, "t_"):
self.t_ = 1.0
validation_mask = self._make_validation_split(y)
validation_score_cb = self._make_validation_score_cb(
validation_mask, X, y, sample_weight)
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
tol = self.tol if self.tol is not None else -np.inf
if self.average:
coef = self._standard_coef
intercept = self._standard_intercept
average_coef = self._average_coef
average_intercept = self._average_intercept
else:
coef = self.coef_
intercept = self.intercept_
average_coef = None # Not used
average_intercept = [0] # Not used
coef, intercept, average_coef, average_intercept, self.n_iter_ = \
_plain_sgd(coef,
intercept[0],
average_coef,
average_intercept[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
validation_mask, self.early_stopping,
validation_score_cb,
int(self.n_iter_no_change),
max_iter, tol,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.t_ += self.n_iter_ * X.shape[0]
if self.average > 0:
self._average_intercept = np.atleast_1d(average_intercept)
self._standard_intercept = np.atleast_1d(intercept)
if self.average <= self.t_ - 1.0:
# made enough updates for averaging to be taken into account
self.coef_ = average_coef
self.intercept_ = np.atleast_1d(average_intercept)
else:
self.coef_ = coef
self.intercept_ = np.atleast_1d(intercept)
else:
self.intercept_ = np.atleast_1d(intercept)
class SGDRegressor(BaseSGDRegressor):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, default='squared_loss'
The loss function to be used. The possible values are 'squared_loss',
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'
The 'squared_loss' refers to the ordinary least squares fit.
'huber' modifies 'squared_loss' to focus less on getting outliers
correct by switching from squared to linear loss past a distance of
epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is
linear past that; this is the loss function used in SVR.
'squared_epsilon_insensitive' is the same but becomes squared loss past
a tolerance of epsilon.
More details about the losses formulas can be found in the
:ref:`User Guide <sgd_mathematical_formulation>`.
penalty : {'l2', 'l1', 'elasticnet'}, default='l2'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float, default=0.0001
Constant that multiplies the regularization term. The higher the
value, the stronger the regularization.
        Also used to compute the learning rate when `learning_rate` is
        set to 'optimal'.
l1_ratio : float, default=0.15
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Only used if `penalty` is 'elasticnet'.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
:meth:`partial_fit` method.
.. versionadded:: 0.19
tol : float, default=1e-3
The stopping criterion. If it is not None, training will stop
when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive
epochs.
.. versionadded:: 0.19
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
verbose : int, default=0
The verbosity level.
epsilon : float, default=0.1
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
random_state : int, RandomState instance, default=None
Used for shuffling the data, when ``shuffle`` is set to ``True``.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
learning_rate : string, default='invscaling'
The learning rate schedule:
- 'constant': `eta = eta0`
- 'optimal': `eta = 1.0 / (alpha * (t + t0))`
where t0 is chosen by a heuristic proposed by Leon Bottou.
- 'invscaling': `eta = eta0 / pow(t, power_t)`
        - 'adaptive': eta = eta0, as long as the training loss keeps decreasing.
Each time n_iter_no_change consecutive epochs fail to decrease the
training loss by tol or fail to increase validation score by tol if
early_stopping is True, the current learning rate is divided by 5.
.. versionadded:: 0.20
Added 'adaptive' option
eta0 : double, default=0.01
The initial learning rate for the 'constant', 'invscaling' or
'adaptive' schedules. The default value is 0.01.
power_t : double, default=0.25
The exponent for inverse scaling learning rate.
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set aside
a fraction of training data as validation and terminate
training when validation score returned by the `score` method is not
improving by at least `tol` for `n_iter_no_change` consecutive
epochs.
.. versionadded:: 0.20
Added 'early_stopping' option
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if `early_stopping` is True.
.. versionadded:: 0.20
Added 'validation_fraction' option
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before early stopping.
.. versionadded:: 0.20
Added 'n_iter_no_change' option
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
If a dynamic learning rate is used, the learning rate is adapted
depending on the number of samples already seen. Calling ``fit`` resets
this counter, while ``partial_fit`` will result in increasing the
existing counter.
average : bool or int, default=False
        When set to True, computes the averaged SGD weights across all
updates and stores the result in the ``coef_`` attribute. If set to
an int greater than 1, averaging will begin once the total number of
samples seen reaches `average`. So ``average=10`` will begin
averaging after seeing 10 samples.
Attributes
----------
coef_ : ndarray of shape (n_features,)
Weights assigned to the features.
intercept_ : ndarray of shape (1,)
The intercept term.
average_coef_ : ndarray of shape (n_features,)
Averaged weights assigned to the features. Only available
if ``average=True``.
.. deprecated:: 0.23
Attribute ``average_coef_`` was deprecated
in version 0.23 and will be removed in 1.0 (renaming of 0.25).
average_intercept_ : ndarray of shape (1,)
The averaged intercept term. Only available if ``average=True``.
.. deprecated:: 0.23
Attribute ``average_intercept_`` was deprecated
in version 0.23 and will be removed in 1.0 (renaming of 0.25).
n_iter_ : int
The actual number of iterations before reaching the stopping criterion.
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples)``.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import SGDRegressor
>>> from sklearn.pipeline import make_pipeline
>>> from sklearn.preprocessing import StandardScaler
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> # Always scale the input. The most convenient way is to use a pipeline.
>>> reg = make_pipeline(StandardScaler(),
... SGDRegressor(max_iter=1000, tol=1e-3))
>>> reg.fit(X, y)
Pipeline(steps=[('standardscaler', StandardScaler()),
('sgdregressor', SGDRegressor())])
See Also
--------
Ridge, ElasticNet, Lasso, sklearn.svm.SVR
"""
@_deprecate_positional_args
def __init__(self, loss="squared_loss", *, penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
random_state=None, learning_rate="invscaling", eta0=0.01,
power_t=0.25, early_stopping=False, validation_fraction=0.1,
n_iter_no_change=5, warm_start=False, average=False):
super().__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, warm_start=warm_start,
average=average)
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
'zero sample_weight is not equivalent to removing samples',
}
}
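# Hedged illustration (not part of scikit-learn): a minimal out-of-core loop using
# partial_fit, which (as documented above) performs a single epoch per call and
# leaves convergence checks to the caller. The synthetic batches are assumptions
# for demonstration only.
def _demo_out_of_core_regression(n_batches=5, batch_size=32):
    import numpy as np
    rng = np.random.RandomState(0)
    reg = SGDRegressor(learning_rate="invscaling", eta0=0.01)
    for _ in range(n_batches):
        X_batch = rng.randn(batch_size, 3)
        y_batch = X_batch @ np.array([1.0, -2.0, 0.5]) + 0.01 * rng.randn(batch_size)
        reg.partial_fit(X_batch, y_batch)  # one pass over this mini-batch only
    return reg.coef_, reg.intercept_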
|
py | 1a2ea4a95b867ca5afe641e37e6aa7e734f024ea | """
Compai core functions
"""
from functools import partial, reduce, wraps
from operator import itemgetter
from typing import Callable, List, TypeVar
T = TypeVar('T')
def compose(*F: List[Callable]):
"""Compose the list of functions in F from right to left
Arguments:
F: List of functions
Examples:
```pycon
>>> compose(lambda x: x + 1, lambda x: x * 2)(5)
11
>>>
```
"""
return reduce(lambda f, g: lambda x: f(g(x)), F)
def fmap(f):
return partial(map, f)
def ffilter(f):
return partial(filter, f)
def none_map(func, if_none=None):
"""Returns a function that will call func if the argument is not none, and return if_none otherwise.
Examples:
```pycon
>>> f = none_map(str, if_none=const(1))
>>> f(1)
'1'
>>> f(None)
1
>>>
```
"""
@wraps(func)
def _func(x):
if x is not None:
return func(x)
return if_none()
return _func
def tupled(func):
"""Returns a tupled version of the function.
Examples:
```pycon
>>> tupled(lambda a, b: a + b)((2, 3))
5
>>>
```
"""
@wraps(func)
def _func(x):
return func(*x)
return _func
def tuple_map(*fs):
"""Returns a function that will apply every f_i for evey element of the tuple argument.
Examples:
```pycon
>>> inc = lambda x: x + 1
>>> tuple_map(None, inc)((1, 2))
(1, 3)
>>> tuple_map(inc)((1, 2))
(2, 2)
>>>
```
"""
return compose(
tuple,
fmap(
tupled(lambda i, v: fs[i](v) if i < len(fs) and fs[i] else v),
),
enumerate,
)
def dict_map(**fs):
"""Map especific elements in a dict.
Examples:
```pycon
>>> dict_map(a=int, b=str)(dict(a='1', b=123, c=True))
{'a': 1, 'b': '123', 'c': True}
>>>
```
"""
def _change_dict(d):
d = d.copy()
for k, f in fs.items():
if k in d:
d[k] = f(d[k])
return d
return _change_dict
def identity(x):
return x
def apply(f, *args):
return f(*args)
def const(x: T) -> Callable[..., T]:
"""Returns a function that will always return `x`.
Arguments:
x: Any value
Examples:
```pycon
>>> f = const('foo')
>>> f()
'foo'
>>> f(1, a='brr')
'foo'
>>>
```
"""
return lambda *_, **__: x
def length(xs):
"""Returns the length of xs.
Examples:
```pycon
>>> length([1, 2, 3])
3
>>> length(range(10))
10
>>> length(None for _ in range(10))
10
>>>
```
"""
len_ = getattr(xs, '__len__', None)
def default_len():
return sum(1 for _ in xs)
return compose(
apply,
none_map(identity, if_none=const(default_len))
)(len_)
def swap(x):
return itemgetter(1, 0)(x)
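# Hedged usage sketch (an addition, not part of the original module): shows how the
# helpers above compose together. The sample data is illustrative only.
def _usage_example():
    # Double every value, keep only the even results, and count them
    # (compose applies right to left: fmap, then ffilter, then length).
    count_even_doubles = compose(
        length,
        ffilter(lambda x: x % 2 == 0),
        fmap(lambda x: x * 2),
    )
    assert count_even_doubles([1, 2, 3]) == 3
    # Swap a (key, value) pair and coerce selected dict fields.
    assert swap(('k', 1)) == (1, 'k')
    assert dict_map(a=int)({'a': '7', 'b': 'x'}) == {'a': 7, 'b': 'x'}
    return count_even_doubles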
|
py | 1a2ea62077698c22303df4b8b2247d3b05a6f55f | from datetime import datetime, timedelta
import pytest
import pytz
from kaffepause.breaks.selectors import get_pending_break_invitations
from kaffepause.breaks.test.factories import BreakFactory, BreakInvitationFactory
pytestmark = pytest.mark.django_db
def test_get_break_invitations_awaiting_reply_returns_unanswered_invitations(user):
"""Should return all non-expired break invitations the user has not replied to."""
unanswered_break_invitation = BreakInvitationFactory()
unanswered_break_invitation.subject.connect(BreakFactory())
unanswered_break_invitation.addressees.connect(user)
    ten_hours_ago = datetime.now(pytz.utc) - timedelta(hours=10)
    expired_break = BreakFactory()
    expired_break.starting_at = ten_hours_ago
expired_break.save()
expired_break_invitation = BreakInvitationFactory()
expired_break_invitation.subject.connect(expired_break)
expired_break_invitation.addressees.connect(user)
accepted_break_invitation = BreakInvitationFactory()
accepted_break_invitation.subject.connect(BreakFactory())
accepted_break_invitation.addressees.connect(user)
accepted_break_invitation.acceptees.connect(user)
declined_break_invitation = BreakInvitationFactory()
declined_break_invitation.subject.connect(BreakFactory())
declined_break_invitation.addressees.connect(user)
declined_break_invitation.declinees.connect(user)
actual_break_invitations = get_pending_break_invitations(actor=user)
assert unanswered_break_invitation in actual_break_invitations
assert expired_break_invitation not in actual_break_invitations
assert accepted_break_invitation not in actual_break_invitations
assert declined_break_invitation not in actual_break_invitations
def test_get_break_invitations_awaiting_reply_returns_unanswered_invitations_expired_five_minutes_ago(
user,
):
"""Should return unanswered invitations who's break has started within 5 minutes ago."""
two_minutes_ago = datetime.now(pytz.utc) - timedelta(minutes=2)
non_expired_break = BreakFactory()
non_expired_break.starting_at = two_minutes_ago
non_expired_break.save()
non_expired_break_invitation = BreakInvitationFactory()
non_expired_break_invitation.subject.connect(non_expired_break)
non_expired_break_invitation.addressees.connect(user)
ten_minutes_ago = datetime.now(pytz.utc) - timedelta(minutes=10)
expired_break = BreakFactory()
expired_break.starting_at = ten_minutes_ago
expired_break.save()
expired_break_invitation = BreakInvitationFactory()
expired_break_invitation.subject.connect(expired_break)
expired_break_invitation.addressees.connect(user)
actual_break_invitations = get_pending_break_invitations(actor=user)
assert non_expired_break_invitation in actual_break_invitations
assert expired_break_invitation not in actual_break_invitations
|
py | 1a2ea6f5dd39841efa40a1e50731abfd2df8685c | import numpy as np
from scipy.sparse import diags
from scipy.sparse import kron
from scipy.sparse import eye
from .two_particles import TwoParticles
from ..util.constants import *
from .. import Eigenstates
class TwoFermions(TwoParticles):
def get_eigenstates(self, H, max_states, eigenvalues, eigenvectors):
eigenvectors = eigenvectors.T.reshape(( max_states, *[H.N]*H.ndim) )
# Normalize the eigenvectors
eigenvectors = eigenvectors/np.sqrt(H.dx**H.ndim)
energies = []
eigenstates_array = []
        # Antisymmetrize eigenvectors: this is done by applying (𝜓(r1, s1, r2, s2) - 𝜓(r2, s2, r1, s1))/sqrt(2) to each state.
for i in range(max_states):
eigenstate_tmp = (eigenvectors[i] - eigenvectors[i].swapaxes(0,1))/np.sqrt(2)
norm = np.sum(eigenstate_tmp*eigenstate_tmp)*H.dx**H.ndim
TOL = 0.02
            # check whether eigenstate_tmp is a normalizable eigenstate (norm shouldn't be zero)
if norm > TOL :
                # for some reason, when the eigenstate is degenerate it isn't normalized
#print("norm",norm)
eigenstate_tmp = eigenstate_tmp/np.sqrt(norm)
if eigenstates_array != []: #check if it's the first eigenstate
inner_product = np.sum(eigenstates_array[-1]* eigenstate_tmp)*H.dx**H.ndim
#print("inner_product",inner_product)
else:
inner_product = 0
            if np.abs(inner_product) < TOL:  # check whether eigenstate_tmp is a repeated state (inner_product should be zero)
eigenstates_array += [eigenstate_tmp]
energies += [eigenvalues[i]]
if H.spatial_ndim == 1:
type = "TwoIdenticalParticles1D"
elif H.spatial_ndim == 2:
type = "TwoIdenticalParticles2D"
eigenstates = Eigenstates(energies, eigenstates_array, H.extent, H.N, type)
return eigenstates |
py | 1a2ea70915788a9b5c94a3368918c20c33a3fba8 | from .Helper import StudyManage,StageControl
from .Helper.StageControl import CStageControl
from .DataStruct.DataSet import CFlowDict
|
py | 1a2ea73f55442ae5ef4c6061276deb17c954c9ee | #!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
## don't do this unless you want a globally visible script
# scripts=['bin/myscript'],
packages=['rqt_smach'],
package_dir={'': 'src'},
scripts=['scripts/rqt_smach']
)
setup(**d)
|
py | 1a2ea7930e307d1b563ec44c435e5d27b999a6bf | """
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" Activation generator helper classes for TCAV"""
'''
The following class was modified to enable numeric class labels
'''
from abc import ABCMeta
from abc import abstractmethod
from multiprocessing import dummy as multiprocessing
import os.path
import numpy as np
import PIL.Image
import tensorflow as tf
class ActivationGeneratorInterface(object):
"""Interface for an activation generator for a model"""
__metaclass__ = ABCMeta
@abstractmethod
def process_and_load_activations(self, bottleneck_names, concepts):
pass
@abstractmethod
def get_model(self):
pass
class ActivationGeneratorBase(ActivationGeneratorInterface):
"""Basic abstract activation generator for a model"""
def __init__(self, model, acts_dir, max_examples=500):
self.model = model
self.acts_dir = acts_dir
self.max_examples = max_examples
def get_model(self):
return self.model
@abstractmethod
def get_examples_for_concept(self, concept):
pass
def get_activations_for_concept(self, concept, bottleneck):
examples = self.get_examples_for_concept(concept)
return self.get_activations_for_examples(examples, bottleneck)
def get_activations_for_examples(self, examples, bottleneck):
acts = self.model.run_examples(examples, bottleneck)
return self.model.reshape_activations(acts).squeeze()
def process_and_load_activations(self, bottleneck_names, concepts):
acts = {}
if self.acts_dir and not tf.gfile.Exists(self.acts_dir):
tf.gfile.MakeDirs(self.acts_dir)
for concept in concepts:
if concept not in acts:
acts[concept] = {}
for bottleneck_name in bottleneck_names:
acts_path = os.path.join(self.acts_dir, 'acts_{}_{}'.format(
concept, bottleneck_name)) if self.acts_dir else None
if acts_path and tf.gfile.Exists(acts_path):
with tf.gfile.Open(acts_path, 'rb') as f:
acts[concept][bottleneck_name] = np.load(f).squeeze()
tf.logging.info('Loaded {} shape {}'.format(
acts_path, acts[concept][bottleneck_name].shape))
else:
acts[concept][bottleneck_name] = self.get_activations_for_concept(
concept, bottleneck_name)
if acts_path:
tf.logging.info('{} does not exist, Making one...'.format(
acts_path))
with tf.gfile.Open(acts_path, 'wb') as f:
np.save(f, acts[concept][bottleneck_name], allow_pickle=False)
return acts
class ImageActivationGenerator(ActivationGeneratorBase):
"""Activation generator for a basic image model"""
def __init__(self, model, source_dir, acts_dir, max_examples=10):
self.source_dir = source_dir
super(ImageActivationGenerator, self).__init__(
model, acts_dir, max_examples)
def get_examples_for_concept(self, concept):
concept_dir = os.path.join(self.source_dir, concept)
print(concept_dir, concept)
img_paths = [os.path.join(concept_dir, d)
for d in tf.gfile.ListDirectory(concept_dir)]
imgs = self.load_images_from_files(img_paths, self.max_examples,
shape=self.model.get_image_shape()[:2])
return imgs
def load_image_from_file(self, filename, shape):
"""Given a filename, try to open the file. If failed, return None.
Args:
filename: location of the image file
shape: the shape of the image file to be scaled
Returns:
the image if succeeds, None if fails.
    Raises:
exception if the image was not the right shape.
"""
if not tf.gfile.Exists(filename):
tf.logging.error('Cannot find file: {}'.format(filename))
return None
try:
img = np.array(PIL.Image.open(tf.gfile.Open(filename, 'rb')).resize(
shape, PIL.Image.BILINEAR))
# Normalize pixel values to between 0 and 1.
img = np.float32(img) / 255.0
if not (len(img.shape) == 3 and img.shape[2] == 3):
return None
else:
return img
except Exception as e:
tf.logging.info(e)
return None
def load_images_from_files(self, filenames, max_imgs=500,
do_shuffle=True, run_parallel=True,
shape=(299, 299),
num_workers=100):
"""Return image arrays from filenames.
Args:
filenames: locations of image files.
max_imgs: maximum number of images from filenames.
do_shuffle: before getting max_imgs files, shuffle the names or not
run_parallel: get images in parallel or not
shape: desired shape of the image
num_workers: number of workers in parallelization.
Returns:
image arrays
"""
imgs = []
# First shuffle a copy of the filenames.
filenames = filenames[:]
if do_shuffle:
np.random.shuffle(filenames)
if run_parallel:
pool = multiprocessing.Pool(num_workers)
imgs = pool.map(
lambda filename: self.load_image_from_file(filename, shape),
filenames[:max_imgs])
imgs = [img for img in imgs if img is not None]
else:
for filename in filenames:
img = self.load_image_from_file(filename, shape)
if img is not None:
imgs.append(img)
if len(imgs) >= max_imgs:
break
return np.array(imgs)
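# Hedged usage sketch (an addition, not part of the original TCAV helpers): a stub
# model wrapper implementing only the methods this module calls, to show how
# process_and_load_activations computes and caches per-concept activations.
# The directory paths and concept names below are hypothetical.
class _StubModel(object):
    def get_image_shape(self):
        return (299, 299, 3)
    def run_examples(self, examples, bottleneck_name):
        # Pretend the bottleneck output is a flat 8-dim activation per example.
        return np.random.rand(len(examples), 8)
    def reshape_activations(self, acts):
        return acts

def _demo_activation_generator(source_dir='/tmp/concepts', acts_dir='/tmp/acts'):
    # Expects source_dir/<concept>/ folders of images; writes acts_<concept>_<bottleneck>.
    gen = ImageActivationGenerator(_StubModel(), source_dir, acts_dir, max_examples=5)
    return gen.process_and_load_activations(['bottleneck_1'], ['striped', 'random_0'])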
|
py | 1a2ea9756620b796158af7da3296c4fe6127c95d | import base64
import logging
from urllib import urlencode
from dateutil.tz import tzutc
import httplib2
from sharpy.exceptions import AccessDenied
from sharpy.exceptions import BadRequest
from sharpy.exceptions import CheddarError
from sharpy.exceptions import CheddarFailure
from sharpy.exceptions import NaughtyGateway
from sharpy.exceptions import NotFound
from sharpy.exceptions import PreconditionFailed
from sharpy.exceptions import UnprocessableEntity
client_log = logging.getLogger('SharpyClient')
class Client(object):
default_endpoint = 'https://cheddargetter.com/xml'
def __init__(self, username, password, product_code, cache=None,
timeout=None, endpoint=None):
'''
username - Your cheddargetter username (probably an email address)
password - Your cheddargetter password
product_code - The product code for the product you want to work with
cache - A file system path or an object which implements the httplib2
cache API (optional)
        timeout - Socket level timeout in seconds (optional)
endpoint - An alternate API endpoint (optional)
'''
self.username = username
self.password = password
self.product_code = product_code
self.endpoint = endpoint or self.default_endpoint
self.cache = cache
self.timeout = timeout
super(Client, self).__init__()
def build_url(self, path, params=None):
'''
Constructs the url for a cheddar API resource
'''
url = u'%s/%s/productCode/%s' % (
self.endpoint,
path,
self.product_code,
)
if params:
for key, value in params.items():
url = u'%s/%s/%s' % (url, key, value)
return url
def format_datetime(self, to_format):
if to_format == 'now':
str_dt = to_format
else:
if getattr(to_format, 'tzinfo', None) is not None:
utc_value = to_format.astimezone(tzutc())
else:
utc_value = to_format
str_dt = utc_value.strftime('%Y-%m-%dT%H:%M:%S+00:00')
return str_dt
def format_date(self, to_format):
if to_format == 'now':
str_dt = to_format
else:
if getattr(to_format, 'tzinfo', None) is not None:
utc_value = to_format.astimezone(tzutc())
else:
utc_value = to_format
str_dt = utc_value.strftime('%Y-%m-%d')
return str_dt
def make_request(self, path, params=None, data=None, method=None):
'''
Makes a request to the cheddar api using the authentication and
configuration settings available.
'''
# Setup values
url = self.build_url(path, params)
client_log.debug('Requesting: %s' % url)
method = method or 'GET'
body = None
headers = {}
cleaned_data = None
if data:
method = 'POST'
body = urlencode(data)
headers = {
'content-type':
'application/x-www-form-urlencoded; charset=UTF-8',
}
# Clean credit card info from when the request gets logged
            # (remove the CVV and only show the last four digits of the card number)
cleaned_data = data.copy()
if 'subscription[ccCardCode]' in cleaned_data:
del cleaned_data['subscription[ccCardCode]']
if 'subscription[ccNumber]' in cleaned_data:
ccNum = cleaned_data['subscription[ccNumber]']
cleaned_data['subscription[ccNumber]'] = ccNum[-4:]
client_log.debug('Request Method: %s' % method)
client_log.debug('Request Body (Cleaned Data): %s' % cleaned_data)
# Setup http client
h = httplib2.Http(cache=self.cache, timeout=self.timeout)
# Skip the normal http client behavior and send auth headers
# immediately to save an http request.
headers['Authorization'] = "Basic %s" % base64.standard_b64encode(
self.username + ':' + self.password).strip()
# Make request
response, content = h.request(url, method, body=body, headers=headers)
status = response.status
client_log.debug('Response Status: %d' % status)
client_log.debug('Response Content: %s' % content)
if status != 200 and status != 302:
exception_class = CheddarError
if status == 401:
exception_class = AccessDenied
elif status == 400:
exception_class = BadRequest
elif status == 404:
exception_class = NotFound
elif status == 412:
exception_class = PreconditionFailed
elif status == 500:
exception_class = CheddarFailure
elif status == 502:
exception_class = NaughtyGateway
elif status == 422:
exception_class = UnprocessableEntity
raise exception_class(response, content)
response.content = content
return response
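# Hedged usage sketch (an addition, not part of sharpy): shows how the client builds
# CheddarGetter URLs and formats datetimes; no HTTP request is made here. The
# credentials and product code are placeholders.
def _demo_client_usage():
    from datetime import datetime
    client = Client('user@example.com', 'secret', 'MY_PRODUCT')
    url = client.build_url('customers/get', params={'code': 'customer_1'})
    # -> https://cheddargetter.com/xml/customers/get/productCode/MY_PRODUCT/code/customer_1
    stamp = client.format_datetime(datetime(2020, 1, 2, 3, 4, 5))
    # -> '2020-01-02T03:04:05+00:00'
    return url, stamp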
|
py | 1a2eaa285371140e1b57d39a53c34529b0e71209 | from fastbook import *
from fastai.vision.widgets import *
def create_dataloader(path):
print(" Creating dataloader.. ")
db = DataBlock(
blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(valid_pct=0.2, seed=42),
get_y=parent_label,
item_tfms=Resize(128))
db = db.new(
item_tfms=RandomResizedCrop(224, min_scale=0.5),
batch_tfms=aug_transforms())
dls = db.dataloaders(path)
return dls
def train_model(dls, save_model_name="animals_prediction.pkl"):
print(" Training Model .. ")
learn = cnn_learner(dls, resnet18, metrics=error_rate)
learn.fine_tune(4)
learn.export(save_model_name)
return learn
if __name__ == "__main__":
path = Path("DATA")
animals_path = (path/"animals")
dls = create_dataloader(animals_path)
    model = train_model(dls, "animals_prediction.pkl")
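    # Hedged follow-up sketch (assumption): run a single prediction with the trained
    # learner. The sample image path below is hypothetical.
    sample = (animals_path/"cat"/"example.jpg")
    if sample.exists():
        pred_class, pred_idx, probs = model.predict(PILImage.create(sample))
        print(f"Predicted {pred_class} with probability {probs[pred_idx]:.3f}")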
|
py | 1a2eaa6573349bf9471d42646a7167c9f0a978c9 | # -*- coding: utf-8 -*-
import argparse
def parse_opts():
parser = argparse.ArgumentParser()
parser.add_argument(
'-videos_path',
nargs='+',
type=str,
        help='Folder containing the input videos')
parser.add_argument(
'-target_path',
nargs='+',
type=str,
        help='Destination folder to write the output to')
parser.add_argument(
'-cut_start',
nargs='+',
default=0,
type=int,
        help='Frame index at which to start cutting')
parser.add_argument(
'-cut_end',
nargs='+',
default=0,
type=int,
        help='Frame index at which to stop cutting')
args = parser.parse_args()
return args
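if __name__ == '__main__':
    # Hedged usage sketch (assumption): parse and echo the CLI options, e.g.
    #   python cut_video_frames.py -videos_path ./videos -target_path ./out \
    #       -cut_start 10 -cut_end 200
    # The script name above is hypothetical.
    opts = parse_opts()
    print(opts)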
|
py | 1a2eaaea2d5e98378645ecfb7ae2cc544a174dad | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import copy
import json
import logging
import re
from collections import OrderedDict
from datetime import timedelta
# OAuth2
from oauthlib import oauth2
from oauthlib.common import generate_token
# Django
from django.conf import settings
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist, ValidationError as DjangoValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.timezone import now
from django.utils.functional import cached_property
# Django REST Framework
from rest_framework.exceptions import ValidationError, PermissionDenied
from rest_framework.relations import ManyRelatedField
from rest_framework import fields
from rest_framework import serializers
from rest_framework import validators
from rest_framework.utils.serializer_helpers import ReturnList
# Django-Polymorphic
from polymorphic.models import PolymorphicModel
# AWX
from awx.main.access import get_user_capabilities
from awx.main.constants import (
SCHEDULEABLE_PROVIDERS,
ANSI_SGR_PATTERN,
ACTIVE_STATES,
CENSOR_VALUE,
)
from awx.main.models import (
ActivityStream, AdHocCommand, AdHocCommandEvent, Credential, CredentialInputSource,
CredentialType, CustomInventoryScript, Group, Host, Instance,
InstanceGroup, Inventory, InventorySource, InventoryUpdate,
InventoryUpdateEvent, Job, JobEvent, JobHostSummary, JobLaunchConfig,
JobTemplate, Label, Notification, NotificationTemplate,
OAuth2AccessToken, OAuth2Application, Organization, Project,
ProjectUpdate, ProjectUpdateEvent, RefreshToken, Role, Schedule,
SystemJob, SystemJobEvent, SystemJobTemplate, Team, UnifiedJob,
UnifiedJobTemplate, WorkflowJob, WorkflowJobNode,
WorkflowJobTemplate, WorkflowJobTemplateNode, StdoutMaxBytesExceeded
)
from awx.main.models.base import VERBOSITY_CHOICES, NEW_JOB_TYPE_CHOICES
from awx.main.models.rbac import (
get_roles_on_resource, role_summary_fields_generator
)
from awx.main.fields import ImplicitRoleField, JSONBField
from awx.main.utils import (
get_type_for_model, get_model_for_type,
camelcase_to_underscore, getattrd, parse_yaml_or_json,
has_model_field_prefetched, extract_ansible_vars, encrypt_dict,
prefetch_page_capabilities, get_external_account)
from awx.main.utils.filters import SmartFilter
from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.validators import vars_validate_or_raise
from awx.api.versioning import reverse
from awx.api.fields import (BooleanNullField, CharNullField, ChoiceNullField,
VerbatimField, DeprecatedCredentialField)
logger = logging.getLogger('awx.api.serializers')
# Fields that should be summarized regardless of object type.
DEFAULT_SUMMARY_FIELDS = ('id', 'name', 'description')# , 'created_by', 'modified_by')#, 'type')
# Keys are fields (foreign keys) where, if found on an instance, summary info
# should be added to the serialized data. Values are a tuple of field names on
# the related object to include in the summary data (if the field is present on
# the related object).
SUMMARIZABLE_FK_FIELDS = {
'organization': DEFAULT_SUMMARY_FIELDS,
'user': ('id', 'username', 'first_name', 'last_name'),
'application': ('id', 'name'),
'team': DEFAULT_SUMMARY_FIELDS,
'inventory': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'groups_with_active_failures',
'has_inventory_sources',
'total_inventory_sources',
'inventory_sources_with_failures',
'organization_id',
'kind',
'insights_credential_id',),
'host': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'has_inventory_sources'),
'group': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'groups_with_active_failures',
'has_inventory_sources'),
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type'),
'job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job': DEFAULT_SUMMARY_FIELDS,
'schedule': DEFAULT_SUMMARY_FIELDS + ('next_run',),
'unified_job_template': DEFAULT_SUMMARY_FIELDS + ('unified_job_type',),
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error'),
'last_job_host_summary': DEFAULT_SUMMARY_FIELDS + ('failed',),
'last_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'current_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'current_job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'inventory_source': ('source', 'last_updated', 'status'),
'custom_inventory_script': DEFAULT_SUMMARY_FIELDS,
'source_script': ('name', 'description'),
'role': ('id', 'role_field'),
'notification_template': DEFAULT_SUMMARY_FIELDS,
'instance_group': {'id', 'name', 'controller_id'},
'insights_credential': DEFAULT_SUMMARY_FIELDS,
'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
}
def reverse_gfk(content_object, request):
'''
Computes a reverse for a GenericForeignKey field.
Returns a dictionary of the form
{ '<type>': reverse(<type detail>) }
for example
{ 'organization': '/api/v2/organizations/1/' }
'''
if content_object is None or not hasattr(content_object, 'get_absolute_url'):
return {}
return {
camelcase_to_underscore(content_object.__class__.__name__): content_object.get_absolute_url(request=request)
}
class CopySerializer(serializers.Serializer):
name = serializers.CharField()
def validate(self, attrs):
name = attrs.get('name')
view = self.context.get('view', None)
obj = view.get_object()
if name == obj.name:
raise serializers.ValidationError(_(
                'The original object is already named {}, a copy of'
                ' it cannot have the same name.'.format(name)
))
return attrs
class BaseSerializerMetaclass(serializers.SerializerMetaclass):
'''
Custom metaclass to enable attribute inheritance from Meta objects on
serializer base classes.
Also allows for inheriting or updating field lists from base class(es):
class Meta:
# Inherit all fields from base class.
fields = ('*',)
# Inherit all fields from base class and add 'foo'.
fields = ('*', 'foo')
# Inherit all fields from base class except 'bar'.
fields = ('*', '-bar')
# Define fields as 'foo' and 'bar'; ignore base class fields.
fields = ('foo', 'bar')
# Extra field kwargs dicts are also merged from base classes.
extra_kwargs = {
'foo': {'required': True},
'bar': {'read_only': True},
}
# If a subclass were to define extra_kwargs as:
extra_kwargs = {
'foo': {'required': False, 'default': ''},
'bar': {'label': 'New Label for Bar'},
}
# The resulting value of extra_kwargs would be:
extra_kwargs = {
'foo': {'required': False, 'default': ''},
'bar': {'read_only': True, 'label': 'New Label for Bar'},
}
# Extra field kwargs cannot be removed in subclasses, only replaced.
'''
@staticmethod
def _is_list_of_strings(x):
return isinstance(x, (list, tuple)) and all([isinstance(y, str) for y in x])
@staticmethod
def _is_extra_kwargs(x):
return isinstance(x, dict) and all([isinstance(k, str) and isinstance(v, dict) for k,v in x.items()])
@classmethod
def _update_meta(cls, base, meta, other=None):
for attr in dir(other):
if attr.startswith('_'):
continue
val = getattr(other, attr)
meta_val = getattr(meta, attr, None)
# Special handling for lists/tuples of strings (field names).
if cls._is_list_of_strings(val) and cls._is_list_of_strings(meta_val or []):
meta_val = meta_val or []
new_vals = []
except_vals = []
if base: # Merge values from all bases.
new_vals.extend([x for x in meta_val])
for v in val:
if not base and v == '*': # Inherit all values from previous base(es).
new_vals.extend([x for x in meta_val])
elif not base and v.startswith('-'): # Except these values.
except_vals.append(v[1:])
else:
new_vals.append(v)
val = []
for v in new_vals:
if v not in except_vals and v not in val:
val.append(v)
val = tuple(val)
# Merge extra_kwargs dicts from base classes.
elif cls._is_extra_kwargs(val) and cls._is_extra_kwargs(meta_val or {}):
meta_val = meta_val or {}
new_val = {}
if base:
for k,v in meta_val.items():
new_val[k] = copy.deepcopy(v)
for k,v in val.items():
new_val.setdefault(k, {}).update(copy.deepcopy(v))
val = new_val
# Any other values are copied in case they are mutable objects.
else:
val = copy.deepcopy(val)
setattr(meta, attr, val)
def __new__(cls, name, bases, attrs):
meta = type('Meta', (object,), {})
for base in bases[::-1]:
cls._update_meta(base, meta, getattr(base, 'Meta', None))
cls._update_meta(None, meta, attrs.get('Meta', meta))
attrs['Meta'] = meta
return super(BaseSerializerMetaclass, cls).__new__(cls, name, bases, attrs)
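# Hedged illustration (an addition, not part of AWX): shows the field-list merging
# rules documented in BaseSerializerMetaclass above, using throwaway classes that
# declare no serializer fields.
class _MetaMergeExampleBase(metaclass=BaseSerializerMetaclass):
    class Meta:
        fields = ('id', 'name')

class _MetaMergeExampleChild(_MetaMergeExampleBase):
    class Meta:
        # '*' inherits the base fields, '-name' removes one, 'description' adds one.
        fields = ('*', '-name', 'description')

# After class creation, _MetaMergeExampleChild.Meta.fields == ('id', 'description')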
class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetaclass):
class Meta:
fields = ('id', 'type', 'url', 'related', 'summary_fields', 'created',
'modified', 'name', 'description')
summary_fields = ()
summarizable_fields = ()
# add the URL and related resources
type = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
related = serializers.SerializerMethodField('_get_related')
summary_fields = serializers.SerializerMethodField('_get_summary_fields')
# make certain fields read only
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
def __init__(self, *args, **kwargs):
super(BaseSerializer, self).__init__(*args, **kwargs)
# The following lines fix the problem of being able to pass JSON dict into PrimaryKeyRelatedField.
data = kwargs.get('data', False)
if data:
for field_name, field_instance in self.fields.items():
if isinstance(field_instance, ManyRelatedField) and not field_instance.read_only:
if isinstance(data.get(field_name, False), dict):
raise serializers.ValidationError(_('Cannot use dictionary for %s' % field_name))
@property
def version(self):
return 2
def get_type(self, obj):
return get_type_for_model(self.Meta.model)
def get_types(self):
return [self.get_type(None)]
def get_type_choices(self):
type_name_map = {
'job': _('Playbook Run'),
'ad_hoc_command': _('Command'),
'project_update': _('SCM Update'),
'inventory_update': _('Inventory Sync'),
'system_job': _('Management Job'),
'workflow_job': _('Workflow Job'),
'workflow_job_template': _('Workflow Template'),
'job_template': _('Job Template')
}
choices = []
for t in self.get_types():
name = _(type_name_map.get(t, force_text(get_model_for_type(t)._meta.verbose_name).title()))
choices.append((t, name))
return choices
def get_url(self, obj):
if obj is None or not hasattr(obj, 'get_absolute_url'):
return ''
elif isinstance(obj, User):
return self.reverse('api:user_detail', kwargs={'pk': obj.pk})
else:
return obj.get_absolute_url(request=self.context.get('request'))
def filter_field_metadata(self, fields, method):
"""
Filter field metadata based on the request method.
        This is intended to be extended by subclasses.
"""
return fields
def _get_related(self, obj):
return {} if obj is None else self.get_related(obj)
def _generate_named_url(self, url_path, obj, node):
url_units = url_path.split('/')
named_url = node.generate_named_url(obj)
url_units[4] = named_url
return '/'.join(url_units)
def get_related(self, obj):
res = OrderedDict()
view = self.context.get('view', None)
if view and (hasattr(view, 'retrieve') or view.request.method == 'POST') and \
type(obj) in settings.NAMED_URL_GRAPH:
original_url = self.get_url(obj)
res['named_url'] = self._generate_named_url(
original_url, obj, settings.NAMED_URL_GRAPH[type(obj)]
)
if getattr(obj, 'created_by', None):
res['created_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.created_by.pk})
if getattr(obj, 'modified_by', None):
res['modified_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.modified_by.pk})
return res
def _get_summary_fields(self, obj):
return {} if obj is None else self.get_summary_fields(obj)
def get_summary_fields(self, obj):
# Return values for certain fields on related objects, to simplify
# displaying lists of items without additional API requests.
summary_fields = OrderedDict()
for fk, related_fields in SUMMARIZABLE_FK_FIELDS.items():
try:
# A few special cases where we don't want to access the field
# because it results in additional queries.
if fk == 'job' and isinstance(obj, UnifiedJob):
continue
if fk == 'project' and (isinstance(obj, InventorySource) or
isinstance(obj, Project)):
continue
try:
fkval = getattr(obj, fk, None)
except ObjectDoesNotExist:
continue
if fkval is None:
continue
if fkval == obj:
continue
summary_fields[fk] = OrderedDict()
for field in related_fields:
fval = getattr(fkval, field, None)
if fval is None and field == 'type':
if isinstance(fkval, PolymorphicModel):
fkval = fkval.get_real_instance()
fval = get_type_for_model(fkval)
elif fval is None and field == 'unified_job_type' and isinstance(fkval, UnifiedJobTemplate):
fkval = fkval.get_real_instance()
fval = get_type_for_model(fkval._get_unified_job_class())
if fval is not None:
summary_fields[fk][field] = fval
# Can be raised by the reverse accessor for a OneToOneField.
except ObjectDoesNotExist:
pass
if getattr(obj, 'created_by', None):
summary_fields['created_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['created_by'][field] = getattr(obj.created_by, field)
if getattr(obj, 'modified_by', None):
summary_fields['modified_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['modified_by'][field] = getattr(obj.modified_by, field)
# RBAC summary fields
roles = {}
for field in obj._meta.get_fields():
if type(field) is ImplicitRoleField:
roles[field.name] = role_summary_fields_generator(obj, field.name)
if len(roles) > 0:
summary_fields['object_roles'] = roles
        # Advanced display of RBAC capabilities
if hasattr(self, 'show_capabilities'):
user_capabilities = self._obj_capability_dict(obj)
if user_capabilities:
summary_fields['user_capabilities'] = user_capabilities
return summary_fields
def _obj_capability_dict(self, obj):
"""
        Returns the user_capabilities dictionary for a single item.
        If called from a list view, it runs the prefetching algorithm for
        the entire current page and saves the result into the context.
"""
view = self.context.get('view', None)
parent_obj = None
if view and hasattr(view, 'parent_model') and hasattr(view, 'get_parent_object'):
parent_obj = view.get_parent_object()
if view and view.request and view.request.user:
capabilities_cache = {}
            # If the serializer has a parent, this is a list view; apply the page-level capabilities prefetch.
if self.parent and hasattr(self, 'capabilities_prefetch') and self.capabilities_prefetch:
qs = self.parent.instance
if 'capability_map' not in self.context:
if hasattr(self, 'polymorphic_base'):
model = self.polymorphic_base.Meta.model
prefetch_list = self.polymorphic_base._capabilities_prefetch
else:
model = self.Meta.model
prefetch_list = self.capabilities_prefetch
self.context['capability_map'] = prefetch_page_capabilities(
model, qs, prefetch_list, view.request.user
)
if obj.id in self.context['capability_map']:
capabilities_cache = self.context['capability_map'][obj.id]
return get_user_capabilities(
view.request.user, obj, method_list=self.show_capabilities, parent_obj=parent_obj,
capabilities_cache=capabilities_cache
)
else:
# Contextual information to produce user_capabilities doesn't exist
return {}
def get_created(self, obj):
if obj is None:
return None
elif isinstance(obj, User):
return obj.date_joined
elif hasattr(obj, 'created'):
return obj.created
return None
def get_modified(self, obj):
if obj is None:
return None
elif isinstance(obj, User):
return obj.last_login # Not actually exposed for User.
elif hasattr(obj, 'modified'):
return obj.modified
return None
def get_extra_kwargs(self):
extra_kwargs = super(BaseSerializer, self).get_extra_kwargs()
if self.instance:
read_only_on_update_fields = getattr(self.Meta, 'read_only_on_update_fields', tuple())
for field_name in read_only_on_update_fields:
kwargs = extra_kwargs.get(field_name, {})
kwargs['read_only'] = True
extra_kwargs[field_name] = kwargs
return extra_kwargs
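    # A minimal sketch of how 'read_only_on_update_fields' is consumed above:
    # once an instance exists (i.e. an update request), each listed field is
    # forced to read_only=True. For example, OAuth2ApplicationSerializer below
    # declares
    #
    #     read_only_on_update_fields = ('user', 'authorization_grant_type')
    #
    # so those fields can be set on create but not changed afterwards.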
def build_standard_field(self, field_name, model_field):
# DRF 3.3 serializers.py::build_standard_field() -> utils/field_mapping.py::get_field_kwargs() short circuits
# when a Model's editable field is set to False. The short circuit skips choice rendering.
#
        # This logic forces rendering of choices on an uneditable field.
        # Note: Consider expanding this rendering for more than just choices fields.
        # Note: This logic works in conjunction with the editable toggling below.
if hasattr(model_field, 'choices') and model_field.choices:
was_editable = model_field.editable
model_field.editable = True
field_class, field_kwargs = super(BaseSerializer, self).build_standard_field(field_name, model_field)
if hasattr(model_field, 'choices') and model_field.choices:
model_field.editable = was_editable
if was_editable is False:
field_kwargs['read_only'] = True
# Pass model field default onto the serializer field if field is not read-only.
if model_field.has_default() and not field_kwargs.get('read_only', False):
field_kwargs['default'] = field_kwargs['initial'] = model_field.get_default()
# Enforce minimum value of 0 for PositiveIntegerFields.
if isinstance(model_field, (models.PositiveIntegerField, models.PositiveSmallIntegerField)) and 'choices' not in field_kwargs:
field_kwargs['min_value'] = 0
# Use custom boolean field that allows null and empty string as False values.
if isinstance(model_field, models.BooleanField) and not field_kwargs.get('read_only', False):
field_class = BooleanNullField
# Use custom char or choice field that coerces null to an empty string.
if isinstance(model_field, (models.CharField, models.TextField)) and not field_kwargs.get('read_only', False):
if 'choices' in field_kwargs:
field_class = ChoiceNullField
else:
field_class = CharNullField
# Update the message used for the unique validator to use capitalized
# verbose name; keeps unique message the same as with DRF 2.x.
opts = self.Meta.model._meta.concrete_model._meta
for validator in field_kwargs.get('validators', []):
if isinstance(validator, validators.UniqueValidator):
unique_error_message = model_field.error_messages.get('unique', None)
if unique_error_message:
unique_error_message = unique_error_message % {
'model_name': capfirst(opts.verbose_name),
'field_label': capfirst(model_field.verbose_name),
}
validator.message = unique_error_message
return field_class, field_kwargs
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(BaseSerializer, self).build_relational_field(field_name, relation_info)
# Don't include choices for foreign key fields.
field_kwargs.pop('choices', None)
return field_class, field_kwargs
def get_unique_together_validators(self):
# Allow the model's full_clean method to handle the unique together validation.
return []
def run_validation(self, data=fields.empty):
try:
return super(BaseSerializer, self).run_validation(data)
except ValidationError as exc:
            # Work around a possible DRF bug when exc.detail happens to be a list instead of a dict.
raise ValidationError(detail=serializers.as_serializer_error(exc))
def get_validation_exclusions(self, obj=None):
# Borrowed from DRF 2.x - return model fields that should be excluded
# from model validation.
cls = self.Meta.model
opts = cls._meta.concrete_model._meta
exclusions = [field.name for field in opts.fields]
for field_name, field in self.fields.items():
field_name = field.source or field_name
if field_name not in exclusions:
continue
if field.read_only:
continue
if isinstance(field, serializers.Serializer):
continue
exclusions.remove(field_name)
        # The clean_ methods cannot be run on many-to-many fields.
exclusions.extend([field.name for field in opts.many_to_many])
return exclusions
def validate(self, attrs):
attrs = super(BaseSerializer, self).validate(attrs)
try:
            # Create/update a model instance and run its full_clean() method to
# do any validation implemented on the model class.
exclusions = self.get_validation_exclusions(self.instance)
obj = self.instance or self.Meta.model()
for k,v in attrs.items():
if k not in exclusions:
setattr(obj, k, v)
obj.full_clean(exclude=exclusions)
# full_clean may modify values on the instance; copy those changes
# back to attrs so they are saved.
for k in attrs.keys():
if k not in exclusions:
attrs[k] = getattr(obj, k)
except DjangoValidationError as exc:
# DjangoValidationError may contain a list or dict; normalize into a
# dict where the keys are the field name and the values are a list
# of error messages, then raise as a DRF ValidationError. DRF would
# normally convert any DjangoValidationError to a non-field specific
# error message; here we preserve field-specific errors raised from
# the model's full_clean method.
d = exc.update_error_dict({})
for k,v in d.items():
v = v if isinstance(v, list) else [v]
v2 = []
for e in v:
if isinstance(e, DjangoValidationError):
v2.extend(list(e))
elif isinstance(e, list):
v2.extend(e)
else:
v2.append(e)
d[k] = list(map(force_text, v2))
raise ValidationError(d)
return attrs
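    # Illustrative shape of the normalization above (hypothetical field name):
    # an error dict such as {'name': DjangoValidationError('Name is required.')}
    # is flattened into {'name': ['Name is required.']} before being re-raised
    # as a DRF ValidationError, keeping errors tied to their fields.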
def reverse(self, *args, **kwargs):
kwargs['request'] = self.context.get('request')
return reverse(*args, **kwargs)
@property
def is_detail_view(self):
if 'view' in self.context:
if 'pk' in self.context['view'].kwargs:
return True
return False
class EmptySerializer(serializers.Serializer):
pass
class UnifiedJobTemplateSerializer(BaseSerializer):
# As a base serializer, the capabilities prefetch is not used directly
_capabilities_prefetch = [
'admin', 'execute',
{'copy': ['jobtemplate.project.use', 'jobtemplate.inventory.use',
'workflowjobtemplate.organization.workflow_admin']}
]
class Meta:
model = UnifiedJobTemplate
fields = ('*', 'last_job_run', 'last_job_failed',
'next_job_run', 'status')
def get_related(self, obj):
res = super(UnifiedJobTemplateSerializer, self).get_related(obj)
if obj.current_job:
res['current_job'] = obj.current_job.get_absolute_url(request=self.context.get('request'))
if obj.last_job:
res['last_job'] = obj.last_job.get_absolute_url(request=self.context.get('request'))
if obj.next_schedule:
res['next_schedule'] = obj.next_schedule.get_absolute_url(request=self.context.get('request'))
return res
def get_types(self):
if type(self) is UnifiedJobTemplateSerializer:
return ['project', 'inventory_source', 'job_template', 'system_job_template', 'workflow_job_template',]
else:
return super(UnifiedJobTemplateSerializer, self).get_types()
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobTemplateSerializer:
if isinstance(obj, Project):
serializer_class = ProjectSerializer
elif isinstance(obj, InventorySource):
serializer_class = InventorySourceSerializer
elif isinstance(obj, JobTemplate):
serializer_class = JobTemplateSerializer
elif isinstance(obj, SystemJobTemplate):
serializer_class = SystemJobTemplateSerializer
elif isinstance(obj, WorkflowJobTemplate):
serializer_class = WorkflowJobTemplateSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
# preserve links for list view
if self.parent:
serializer.parent = self.parent
serializer.polymorphic_base = self
# capabilities prefetch is only valid for these models
if isinstance(obj, (JobTemplate, WorkflowJobTemplate)):
serializer.capabilities_prefetch = self._capabilities_prefetch
else:
serializer.capabilities_prefetch = None
return serializer.to_representation(obj)
else:
return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
class UnifiedJobSerializer(BaseSerializer):
show_capabilities = ['start', 'delete']
event_processing_finished = serializers.BooleanField(
help_text=_('Indicates whether all of the events generated by this '
'unified job have been saved to the database.'),
read_only=True
)
class Meta:
model = UnifiedJob
fields = ('*', 'unified_job_template', 'launch_type', 'status',
'failed', 'started', 'finished', 'elapsed', 'job_args',
'job_cwd', 'job_env', 'job_explanation',
'execution_node', 'controller_node',
'result_traceback', 'event_processing_finished')
extra_kwargs = {
'unified_job_template': {
'source': 'unified_job_template_id',
'label': 'unified job template',
},
'job_env': {
'read_only': True,
'label': 'job_env',
}
}
def get_types(self):
if type(self) is UnifiedJobSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job',]
else:
return super(UnifiedJobSerializer, self).get_types()
def get_related(self, obj):
res = super(UnifiedJobSerializer, self).get_related(obj)
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(request=self.context.get('request'))
if obj.schedule:
res['schedule'] = obj.schedule.get_absolute_url(request=self.context.get('request'))
if isinstance(obj, ProjectUpdate):
res['stdout'] = self.reverse('api:project_update_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, InventoryUpdate):
res['stdout'] = self.reverse('api:inventory_update_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, Job):
res['stdout'] = self.reverse('api:job_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, AdHocCommand):
res['stdout'] = self.reverse('api:ad_hoc_command_stdout', kwargs={'pk': obj.pk})
if obj.workflow_job_id:
res['source_workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job_id})
return res
def get_summary_fields(self, obj):
summary_fields = super(UnifiedJobSerializer, self).get_summary_fields(obj)
if obj.spawned_by_workflow:
summary_fields['source_workflow_job'] = {}
try:
summary_obj = obj.unified_job_node.workflow_job
except UnifiedJob.unified_job_node.RelatedObjectDoesNotExist:
return summary_fields
for field in SUMMARIZABLE_FK_FIELDS['job']:
val = getattr(summary_obj, field, None)
if val is not None:
summary_fields['source_workflow_job'][field] = val
return summary_fields
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobSerializer:
if isinstance(obj, ProjectUpdate):
serializer_class = ProjectUpdateSerializer
elif isinstance(obj, InventoryUpdate):
serializer_class = InventoryUpdateSerializer
elif isinstance(obj, Job):
serializer_class = JobSerializer
elif isinstance(obj, AdHocCommand):
serializer_class = AdHocCommandSerializer
elif isinstance(obj, SystemJob):
serializer_class = SystemJobSerializer
elif isinstance(obj, WorkflowJob):
serializer_class = WorkflowJobSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
# preserve links for list view
if self.parent:
serializer.parent = self.parent
serializer.polymorphic_base = self
# TODO: restrict models for capabilities prefetch, when it is added
ret = serializer.to_representation(obj)
else:
ret = super(UnifiedJobSerializer, self).to_representation(obj)
if 'elapsed' in ret:
if obj and obj.pk and obj.started and not obj.finished:
td = now() - obj.started
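                    # Recompute elapsed for a job that is still running; this
                    # expression is equivalent to td.total_seconds().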
ret['elapsed'] = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
ret['elapsed'] = float(ret['elapsed'])
return ret
class UnifiedJobListSerializer(UnifiedJobSerializer):
class Meta:
fields = ('*', '-job_args', '-job_cwd', '-job_env', '-result_traceback', '-event_processing_finished')
def get_field_names(self, declared_fields, info):
field_names = super(UnifiedJobListSerializer, self).get_field_names(declared_fields, info)
# Meta multiple inheritance and -field_name options don't seem to be
# taking effect above, so remove the undesired fields here.
return tuple(x for x in field_names if x not in ('job_args', 'job_cwd', 'job_env', 'result_traceback', 'event_processing_finished'))
def get_types(self):
if type(self) is UnifiedJobListSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job']
else:
return super(UnifiedJobListSerializer, self).get_types()
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobListSerializer:
if isinstance(obj, ProjectUpdate):
serializer_class = ProjectUpdateListSerializer
elif isinstance(obj, InventoryUpdate):
serializer_class = InventoryUpdateListSerializer
elif isinstance(obj, Job):
serializer_class = JobListSerializer
elif isinstance(obj, AdHocCommand):
serializer_class = AdHocCommandListSerializer
elif isinstance(obj, SystemJob):
serializer_class = SystemJobListSerializer
elif isinstance(obj, WorkflowJob):
serializer_class = WorkflowJobListSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
ret = serializer.to_representation(obj)
else:
ret = super(UnifiedJobListSerializer, self).to_representation(obj)
if 'elapsed' in ret:
ret['elapsed'] = float(ret['elapsed'])
return ret
class UnifiedJobStdoutSerializer(UnifiedJobSerializer):
result_stdout = serializers.SerializerMethodField()
class Meta:
fields = ('result_stdout',)
def get_types(self):
if type(self) is UnifiedJobStdoutSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job']
else:
return super(UnifiedJobStdoutSerializer, self).get_types()
class UserSerializer(BaseSerializer):
password = serializers.CharField(required=False, default='', write_only=True,
help_text=_('Write-only field used to change the password.'))
ldap_dn = serializers.CharField(source='profile.ldap_dn', read_only=True)
external_account = serializers.SerializerMethodField(help_text=_('Set if the account is managed by an external service'))
is_system_auditor = serializers.BooleanField(default=False)
show_capabilities = ['edit', 'delete']
class Meta:
model = User
fields = ('*', '-name', '-description', '-modified',
'username', 'first_name', 'last_name',
'email', 'is_superuser', 'is_system_auditor', 'password', 'ldap_dn', 'last_login', 'external_account')
def to_representation(self, obj):
ret = super(UserSerializer, self).to_representation(obj)
ret.pop('password', None)
if obj and type(self) is UserSerializer:
ret['auth'] = obj.social_auth.values('provider', 'uid')
return ret
def get_validation_exclusions(self, obj=None):
ret = super(UserSerializer, self).get_validation_exclusions(obj)
ret.extend(['password', 'is_system_auditor'])
return ret
def validate_password(self, value):
if not self.instance and value in (None, ''):
raise serializers.ValidationError(_('Password required for new User.'))
return value
def _update_password(self, obj, new_password):
# For now we're not raising an error, just not saving password for
# users managed by LDAP who already have an unusable password set.
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
try:
if obj.pk and obj.profile.ldap_dn and not obj.has_usable_password():
new_password = None
except AttributeError:
pass
if (getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)) and obj.social_auth.all():
new_password = None
if (getattr(settings, 'RADIUS_SERVER', None) or
getattr(settings, 'TACACSPLUS_HOST', None)) and obj.enterprise_auth.all():
new_password = None
if new_password:
obj.set_password(new_password)
obj.save(update_fields=['password'])
# Cycle the session key, but if the requesting user is the same
# as the modified user then inject a session key derived from
# the updated user to prevent logout. This is the logic used by
# the Django admin's own user_change_password view.
update_session_auth_hash(self.context['request'], obj)
elif not obj.password:
obj.set_unusable_password()
obj.save(update_fields=['password'])
def get_external_account(self, obj):
return get_external_account(obj)
def create(self, validated_data):
new_password = validated_data.pop('password', None)
is_system_auditor = validated_data.pop('is_system_auditor', None)
obj = super(UserSerializer, self).create(validated_data)
self._update_password(obj, new_password)
if is_system_auditor is not None:
obj.is_system_auditor = is_system_auditor
return obj
def update(self, obj, validated_data):
new_password = validated_data.pop('password', None)
is_system_auditor = validated_data.pop('is_system_auditor', None)
obj = super(UserSerializer, self).update(obj, validated_data)
self._update_password(obj, new_password)
if is_system_auditor is not None:
obj.is_system_auditor = is_system_auditor
return obj
def get_related(self, obj):
res = super(UserSerializer, self).get_related(obj)
res.update(dict(
teams = self.reverse('api:user_teams_list', kwargs={'pk': obj.pk}),
organizations = self.reverse('api:user_organizations_list', kwargs={'pk': obj.pk}),
admin_of_organizations = self.reverse('api:user_admin_of_organizations_list', kwargs={'pk': obj.pk}),
projects = self.reverse('api:user_projects_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:user_credentials_list', kwargs={'pk': obj.pk}),
roles = self.reverse('api:user_roles_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:user_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:user_access_list', kwargs={'pk': obj.pk}),
tokens = self.reverse('api:o_auth2_token_list', kwargs={'pk': obj.pk}),
authorized_tokens = self.reverse('api:user_authorized_token_list', kwargs={'pk': obj.pk}),
personal_tokens = self.reverse('api:user_personal_token_list', kwargs={'pk': obj.pk}),
))
return res
def _validate_ldap_managed_field(self, value, field_name):
if not getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
return value
try:
is_ldap_user = bool(self.instance and self.instance.profile.ldap_dn)
except AttributeError:
is_ldap_user = False
if is_ldap_user:
ldap_managed_fields = ['username']
ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_ATTR_MAP', {}).keys())
ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {}).keys())
if field_name in ldap_managed_fields:
if value != getattr(self.instance, field_name):
raise serializers.ValidationError(_('Unable to change %s on user managed by LDAP.') % field_name)
return value
def validate_username(self, value):
return self._validate_ldap_managed_field(value, 'username')
def validate_first_name(self, value):
return self._validate_ldap_managed_field(value, 'first_name')
def validate_last_name(self, value):
return self._validate_ldap_managed_field(value, 'last_name')
def validate_email(self, value):
return self._validate_ldap_managed_field(value, 'email')
def validate_is_superuser(self, value):
return self._validate_ldap_managed_field(value, 'is_superuser')
class UserActivityStreamSerializer(UserSerializer):
"""Changes to system auditor status are shown as separate entries,
so by excluding it from fields here we avoid duplication, which
would carry some unintended consequences.
"""
class Meta:
model = User
fields = ('*', '-is_system_auditor')
class BaseOAuth2TokenSerializer(BaseSerializer):
refresh_token = serializers.SerializerMethodField()
token = serializers.SerializerMethodField()
ALLOWED_SCOPES = ['read', 'write']
class Meta:
model = OAuth2AccessToken
fields = (
'*', '-name', 'description', 'user', 'token', 'refresh_token',
'application', 'expires', 'scope',
)
read_only_fields = ('user', 'token', 'expires', 'refresh_token')
extra_kwargs = {
'scope': {'allow_null': False, 'required': False},
'user': {'allow_null': False, 'required': True}
}
def get_token(self, obj):
request = self.context.get('request', None)
try:
if request.method == 'POST':
return obj.token
else:
return CENSOR_VALUE
except ObjectDoesNotExist:
return ''
def get_refresh_token(self, obj):
request = self.context.get('request', None)
try:
if not obj.refresh_token:
return None
elif request.method == 'POST':
return getattr(obj.refresh_token, 'token', '')
else:
return CENSOR_VALUE
except ObjectDoesNotExist:
return None
def get_related(self, obj):
ret = super(BaseOAuth2TokenSerializer, self).get_related(obj)
if obj.user:
ret['user'] = self.reverse('api:user_detail', kwargs={'pk': obj.user.pk})
if obj.application:
ret['application'] = self.reverse(
'api:o_auth2_application_detail', kwargs={'pk': obj.application.pk}
)
ret['activity_stream'] = self.reverse(
'api:o_auth2_token_activity_stream_list', kwargs={'pk': obj.pk}
)
return ret
def _is_valid_scope(self, value):
if not value or (not isinstance(value, str)):
return False
words = value.split()
for word in words:
if words.count(word) > 1:
return False # do not allow duplicates
if word not in self.ALLOWED_SCOPES:
return False
return True
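    # Illustrative scope strings for the check above: 'read', 'write' and
    # 'read write' pass, while 'read read' (duplicate) and 'admin'
    # (not in ALLOWED_SCOPES) are rejected.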
def validate_scope(self, value):
if not self._is_valid_scope(value):
raise serializers.ValidationError(_(
'Must be a simple space-separated string with allowed scopes {}.'
).format(self.ALLOWED_SCOPES))
return value
def create(self, validated_data):
validated_data['user'] = self.context['request'].user
try:
return super(BaseOAuth2TokenSerializer, self).create(validated_data)
except oauth2.AccessDeniedError as e:
raise PermissionDenied(str(e))
class UserAuthorizedTokenSerializer(BaseOAuth2TokenSerializer):
class Meta:
extra_kwargs = {
'scope': {'allow_null': False, 'required': False},
'user': {'allow_null': False, 'required': True},
'application': {'allow_null': False, 'required': True}
}
def create(self, validated_data):
current_user = self.context['request'].user
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
obj = super(UserAuthorizedTokenSerializer, self).create(validated_data)
obj.save()
if obj.application:
RefreshToken.objects.create(
user=current_user,
token=generate_token(),
application=obj.application,
access_token=obj
)
return obj
class OAuth2TokenSerializer(BaseOAuth2TokenSerializer):
def create(self, validated_data):
current_user = self.context['request'].user
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
obj = super(OAuth2TokenSerializer, self).create(validated_data)
if obj.application and obj.application.user:
obj.user = obj.application.user
obj.save()
if obj.application:
RefreshToken.objects.create(
user=current_user,
token=generate_token(),
application=obj.application,
access_token=obj
)
return obj
class OAuth2TokenDetailSerializer(OAuth2TokenSerializer):
class Meta:
read_only_fields = ('*', 'user', 'application')
class UserPersonalTokenSerializer(BaseOAuth2TokenSerializer):
class Meta:
read_only_fields = ('user', 'token', 'expires', 'application')
def create(self, validated_data):
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
validated_data['application'] = None
obj = super(UserPersonalTokenSerializer, self).create(validated_data)
obj.save()
return obj
class OAuth2ApplicationSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = OAuth2Application
fields = (
'*', 'description', '-user', 'client_id', 'client_secret', 'client_type',
'redirect_uris', 'authorization_grant_type', 'skip_authorization', 'organization'
)
read_only_fields = ('client_id', 'client_secret')
read_only_on_update_fields = ('user', 'authorization_grant_type')
extra_kwargs = {
'user': {'allow_null': True, 'required': False},
'organization': {'allow_null': False},
'authorization_grant_type': {'allow_null': False, 'label': _('Authorization Grant Type')},
'client_secret': {
'label': _('Client Secret')
},
'client_type': {
'label': _('Client Type')
},
'redirect_uris': {
'label': _('Redirect URIs')
},
'skip_authorization': {
'label': _('Skip Authorization')
},
}
def to_representation(self, obj):
ret = super(OAuth2ApplicationSerializer, self).to_representation(obj)
request = self.context.get('request', None)
if request.method != 'POST' and obj.client_type == 'confidential':
ret['client_secret'] = CENSOR_VALUE
if obj.client_type == 'public':
ret.pop('client_secret', None)
return ret
def get_related(self, obj):
res = super(OAuth2ApplicationSerializer, self).get_related(obj)
res.update(dict(
tokens = self.reverse('api:o_auth2_application_token_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse(
'api:o_auth2_application_activity_stream_list', kwargs={'pk': obj.pk}
)
))
return res
def get_modified(self, obj):
if obj is None:
return None
return obj.updated
def _summary_field_tokens(self, obj):
token_list = [{'id': x.pk, 'token': CENSOR_VALUE, 'scope': x.scope} for x in obj.oauth2accesstoken_set.all()[:10]]
if has_model_field_prefetched(obj, 'oauth2accesstoken_set'):
token_count = len(obj.oauth2accesstoken_set.all())
else:
if len(token_list) < 10:
token_count = len(token_list)
else:
token_count = obj.oauth2accesstoken_set.count()
return {'count': token_count, 'results': token_list}
def get_summary_fields(self, obj):
ret = super(OAuth2ApplicationSerializer, self).get_summary_fields(obj)
ret['tokens'] = self._summary_field_tokens(obj)
return ret
class OrganizationSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Organization
fields = ('*', 'max_hosts', 'custom_virtualenv',)
def get_related(self, obj):
res = super(OrganizationSerializer, self).get_related(obj)
res.update(dict(
projects = self.reverse('api:organization_projects_list', kwargs={'pk': obj.pk}),
inventories = self.reverse('api:organization_inventories_list', kwargs={'pk': obj.pk}),
workflow_job_templates = self.reverse('api:organization_workflow_job_templates_list', kwargs={'pk': obj.pk}),
users = self.reverse('api:organization_users_list', kwargs={'pk': obj.pk}),
admins = self.reverse('api:organization_admins_list', kwargs={'pk': obj.pk}),
teams = self.reverse('api:organization_teams_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:organization_credential_list', kwargs={'pk': obj.pk}),
applications = self.reverse('api:organization_applications_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:organization_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates = self.reverse('api:organization_notification_templates_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:organization_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:organization_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:organization_notification_templates_error_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:organization_object_roles_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:organization_access_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:organization_instance_groups_list', kwargs={'pk': obj.pk}),
))
return res
def get_summary_fields(self, obj):
summary_dict = super(OrganizationSerializer, self).get_summary_fields(obj)
counts_dict = self.context.get('related_field_counts', None)
if counts_dict is not None and summary_dict is not None:
if obj.id not in counts_dict:
summary_dict['related_field_counts'] = {
'inventories': 0, 'teams': 0, 'users': 0,
'job_templates': 0, 'admins': 0, 'projects': 0}
else:
summary_dict['related_field_counts'] = counts_dict[obj.id]
return summary_dict
def validate(self, attrs):
obj = self.instance
view = self.context['view']
obj_limit = getattr(obj, 'max_hosts', None)
api_limit = attrs.get('max_hosts')
if not view.request.user.is_superuser:
if api_limit is not None and api_limit != obj_limit:
# Only allow superusers to edit the max_hosts field
raise serializers.ValidationError(_('Cannot change max_hosts.'))
return super(OrganizationSerializer, self).validate(attrs)
class ProjectOptionsSerializer(BaseSerializer):
class Meta:
fields = ('*', 'local_path', 'scm_type', 'scm_url', 'scm_branch',
'scm_clean', 'scm_delete_on_update', 'credential', 'timeout',)
def get_related(self, obj):
res = super(ProjectOptionsSerializer, self).get_related(obj)
if obj.credential:
res['credential'] = self.reverse('api:credential_detail',
kwargs={'pk': obj.credential.pk})
return res
def validate(self, attrs):
errors = {}
# Don't allow assigning a local_path used by another project.
# Don't allow assigning a local_path when scm_type is set.
valid_local_paths = Project.get_local_path_choices()
if self.instance:
scm_type = attrs.get('scm_type', self.instance.scm_type) or u''
else:
scm_type = attrs.get('scm_type', u'') or u''
if self.instance and not scm_type:
valid_local_paths.append(self.instance.local_path)
if scm_type:
attrs.pop('local_path', None)
if 'local_path' in attrs and attrs['local_path'] not in valid_local_paths:
errors['local_path'] = _('This path is already being used by another manual project.')
if errors:
raise serializers.ValidationError(errors)
return super(ProjectOptionsSerializer, self).validate(attrs)
def to_representation(self, obj):
ret = super(ProjectOptionsSerializer, self).to_representation(obj)
if obj is not None and 'credential' in ret and not obj.credential:
ret['credential'] = None
return ret
class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
status = serializers.ChoiceField(choices=Project.PROJECT_STATUS_CHOICES, read_only=True)
last_update_failed = serializers.BooleanField(read_only=True)
last_updated = serializers.DateTimeField(read_only=True)
show_capabilities = ['start', 'schedule', 'edit', 'delete', 'copy']
capabilities_prefetch = [
'admin', 'update',
{'copy': 'organization.project_admin'}
]
class Meta:
model = Project
fields = ('*', 'organization', 'scm_update_on_launch',
'scm_update_cache_timeout', 'scm_revision', 'custom_virtualenv',) + \
('last_update_failed', 'last_updated') # Backwards compatibility
def get_related(self, obj):
res = super(ProjectSerializer, self).get_related(obj)
res.update(dict(
teams = self.reverse('api:project_teams_list', kwargs={'pk': obj.pk}),
playbooks = self.reverse('api:project_playbooks', kwargs={'pk': obj.pk}),
inventory_files = self.reverse('api:project_inventories', kwargs={'pk': obj.pk}),
update = self.reverse('api:project_update_view', kwargs={'pk': obj.pk}),
project_updates = self.reverse('api:project_updates_list', kwargs={'pk': obj.pk}),
scm_inventory_sources = self.reverse('api:project_scm_inventory_sources', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:project_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:project_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:project_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:project_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:project_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:project_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:project_object_roles_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:project_copy', kwargs={'pk': obj.pk})
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail',
kwargs={'pk': obj.organization.pk})
# Backwards compatibility.
if obj.current_update:
res['current_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.current_update.pk})
if obj.last_update:
res['last_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.last_update.pk})
return res
def to_representation(self, obj):
ret = super(ProjectSerializer, self).to_representation(obj)
if 'scm_revision' in ret and obj.scm_type == '':
ret['scm_revision'] = ''
return ret
def validate(self, attrs):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
organization = None
if 'organization' in attrs:
organization = attrs['organization']
elif self.instance:
organization = self.instance.organization
view = self.context.get('view', None)
if not organization and not view.request.user.is_superuser:
            # Only allow superusers to create projects without an organization.
raise serializers.ValidationError(_('Organization is missing'))
elif get_field_from_model_or_attrs('scm_type') == '':
for fd in ('scm_update_on_launch', 'scm_delete_on_update', 'scm_clean'):
if get_field_from_model_or_attrs(fd):
raise serializers.ValidationError({fd: _('Update options must be set to false for manual projects.')})
return super(ProjectSerializer, self).validate(attrs)
class ProjectPlaybooksSerializer(ProjectSerializer):
playbooks = serializers.SerializerMethodField(help_text=_('Array of playbooks available within this project.'))
class Meta:
model = Project
fields = ('playbooks',)
def get_playbooks(self, obj):
return obj.playbook_files if obj.scm_type else obj.playbooks
@property
def data(self):
ret = super(ProjectPlaybooksSerializer, self).data
ret = ret.get('playbooks', [])
return ReturnList(ret, serializer=self)
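    # The overridden 'data' property flattens the normal serializer payload into
    # a bare list; a hypothetical project would render simply as
    # ["deploy.yml", "site.yml"] instead of a dict keyed by 'playbooks'.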
class ProjectInventoriesSerializer(ProjectSerializer):
inventory_files = serializers.ReadOnlyField(help_text=_(
'Array of inventory files and directories available within this project, '
'not comprehensive.'))
class Meta:
model = Project
fields = ('inventory_files',)
@property
def data(self):
ret = super(ProjectInventoriesSerializer, self).data
ret = ret.get('inventory_files', [])
return ReturnList(ret, serializer=self)
class ProjectUpdateViewSerializer(ProjectSerializer):
can_update = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_update',)
class ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer):
class Meta:
model = ProjectUpdate
fields = ('*', 'project', 'job_type', '-controller_node')
def get_related(self, obj):
res = super(ProjectUpdateSerializer, self).get_related(obj)
try:
res.update(dict(
project = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk}),
))
except ObjectDoesNotExist:
pass
res.update(dict(
cancel = self.reverse('api:project_update_cancel', kwargs={'pk': obj.pk}),
scm_inventory_updates = self.reverse('api:project_update_scm_inventory_updates', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:project_update_notifications_list', kwargs={'pk': obj.pk}),
events = self.reverse('api:project_update_events_list', kwargs={'pk': obj.pk}),
))
return res
class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
playbook_counts = serializers.SerializerMethodField(
help_text=_('A count of all plays and tasks for the job run.'),
)
class Meta:
model = ProjectUpdate
fields = ('*', 'host_status_counts', 'playbook_counts',)
def get_playbook_counts(self, obj):
task_count = obj.project_update_events.filter(event='playbook_on_task_start').count()
play_count = obj.project_update_events.filter(event='playbook_on_play_start').count()
data = {'play_count': play_count, 'task_count': task_count}
return data
def get_host_status_counts(self, obj):
try:
counts = obj.project_update_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except ProjectUpdateEvent.DoesNotExist:
counts = {}
return counts
class ProjectUpdateListSerializer(ProjectUpdateSerializer, UnifiedJobListSerializer):
class Meta:
model = ProjectUpdate
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class ProjectUpdateCancelSerializer(ProjectUpdateSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class BaseSerializerWithVariables(BaseSerializer):
def validate_variables(self, value):
return vars_validate_or_raise(value)
class InventorySerializer(BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete', 'adhoc', 'copy']
capabilities_prefetch = [
'admin', 'adhoc',
{'copy': 'organization.inventory_admin'}
]
groups_with_active_failures = serializers.IntegerField(
read_only=True,
min_value=0,
help_text=_('This field has been deprecated and will be removed in a future release')
)
class Meta:
model = Inventory
fields = ('*', 'organization', 'kind', 'host_filter', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources',
'total_inventory_sources', 'inventory_sources_with_failures',
'insights_credential', 'pending_deletion',)
def get_related(self, obj):
res = super(InventorySerializer, self).get_related(obj)
res.update(dict(
hosts = self.reverse('api:inventory_hosts_list', kwargs={'pk': obj.pk}),
groups = self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk}),
root_groups = self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk}),
variable_data = self.reverse('api:inventory_variable_data', kwargs={'pk': obj.pk}),
script = self.reverse('api:inventory_script_view', kwargs={'pk': obj.pk}),
tree = self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk}),
update_inventory_sources = self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:inventory_activity_stream_list', kwargs={'pk': obj.pk}),
job_templates = self.reverse('api:inventory_job_template_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:inventory_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:inventory_object_roles_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:inventory_instance_groups_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:inventory_copy', kwargs={'pk': obj.pk})
))
if obj.insights_credential:
res['insights_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.insights_credential.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def to_representation(self, obj):
ret = super(InventorySerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
def validate_host_filter(self, host_filter):
if host_filter:
try:
for match in JSONBField.get_lookups().keys():
if match == 'exact':
# __exact is allowed
continue
match = '__{}'.format(match)
if re.match(
'ansible_facts[^=]+{}='.format(match),
host_filter
):
raise models.base.ValidationError({
'host_filter': 'ansible_facts does not support searching with {}'.format(match)
})
SmartFilter().query_from_string(host_filter)
except RuntimeError as e:
raise models.base.ValidationError(e)
return host_filter
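    # Hypothetical host_filter values for the check above: an exact match such
    # as 'ansible_facts__ansible_distribution="CentOS"' is accepted (the __exact
    # lookup is skipped), while 'ansible_facts__ansible_distribution__contains="Cent"'
    # is rejected because non-exact lookups against ansible_facts are not supported.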
def validate(self, attrs):
kind = None
if 'kind' in attrs:
kind = attrs['kind']
elif self.instance:
kind = self.instance.kind
host_filter = None
if 'host_filter' in attrs:
host_filter = attrs['host_filter']
elif self.instance:
host_filter = self.instance.host_filter
if kind == 'smart' and not host_filter:
raise serializers.ValidationError({'host_filter': _(
'Smart inventories must specify host_filter')})
return super(InventorySerializer, self).validate(attrs)
class InventoryScriptSerializer(InventorySerializer):
class Meta:
fields = ()
class HostSerializer(BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete']
capabilities_prefetch = ['inventory.admin']
class Meta:
model = Host
fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables',
'has_active_failures', 'has_inventory_sources', 'last_job',
'last_job_host_summary', 'insights_system_id', 'ansible_facts_modified',)
read_only_fields = ('last_job', 'last_job_host_summary', 'insights_system_id',
'ansible_facts_modified',)
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(HostSerializer, self).build_relational_field(field_name, relation_info)
# Inventory is read-only unless creating a new host.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def get_related(self, obj):
res = super(HostSerializer, self).get_related(obj)
res.update(dict(
variable_data = self.reverse('api:host_variable_data', kwargs={'pk': obj.pk}),
groups = self.reverse('api:host_groups_list', kwargs={'pk': obj.pk}),
all_groups = self.reverse('api:host_all_groups_list', kwargs={'pk': obj.pk}),
job_events = self.reverse('api:host_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:host_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:host_activity_stream_list', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:host_inventory_sources_list', kwargs={'pk': obj.pk}),
smart_inventories = self.reverse('api:host_smart_inventories_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:host_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
ad_hoc_command_events = self.reverse('api:host_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
insights = self.reverse('api:host_insights', kwargs={'pk': obj.pk}),
ansible_facts = self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),
))
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.last_job:
res['last_job'] = self.reverse('api:job_detail', kwargs={'pk': obj.last_job.pk})
if obj.last_job_host_summary:
res['last_job_host_summary'] = self.reverse('api:job_host_summary_detail', kwargs={'pk': obj.last_job_host_summary.pk})
return res
def get_summary_fields(self, obj):
d = super(HostSerializer, self).get_summary_fields(obj)
try:
d['last_job']['job_template_id'] = obj.last_job.job_template.id
d['last_job']['job_template_name'] = obj.last_job.job_template.name
except (KeyError, AttributeError):
pass
if has_model_field_prefetched(obj, 'groups'):
group_list = sorted([{'id': g.id, 'name': g.name} for g in obj.groups.all()], key=lambda x: x['id'])[:5]
else:
group_list = [{'id': g.id, 'name': g.name} for g in obj.groups.all().order_by('id')[:5]]
group_cnt = obj.groups.count()
d.setdefault('groups', {'count': group_cnt, 'results': group_list})
d.setdefault('recent_jobs', [{
'id': j.job.id,
'name': j.job.job_template.name if j.job.job_template is not None else "",
'status': j.job.status,
'finished': j.job.finished,
} for j in obj.job_host_summaries.select_related('job__job_template').order_by('-created')[:5]])
return d
def _get_host_port_from_name(self, name):
        # Allow the hostname (except IPv6 for now) to specify the port number inline.
port = None
if name.count(':') == 1:
name, port = name.split(':')
try:
port = int(port)
if port < 1 or port > 65535:
raise ValueError
except ValueError:
raise serializers.ValidationError(_(u'Invalid port specification: %s') % force_text(port))
return name, port
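    # Illustrative behaviour of the helper above (hypothetical host names):
    # 'db01.example.com:2222' parses to ('db01.example.com', 2222), while
    # 'db01:99999' raises a ValidationError for the out-of-range port. The
    # validate() method below then strips the port from 'name' and stores it
    # as 'ansible_ssh_port' in the host's variables.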
def validate_name(self, value):
name = force_text(value or '')
        # Validate the name/port here only; the actual update happens in the main validate() method.
host, port = self._get_host_port_from_name(name)
return value
def validate_inventory(self, value):
if value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Host for Smart Inventory")})
return value
def validate_variables(self, value):
return vars_validate_or_raise(value)
def validate(self, attrs):
name = force_text(attrs.get('name', self.instance and self.instance.name or ''))
host, port = self._get_host_port_from_name(name)
if port:
attrs['name'] = host
variables = force_text(attrs.get('variables', self.instance and self.instance.variables or ''))
vars_dict = parse_yaml_or_json(variables)
vars_dict['ansible_ssh_port'] = port
attrs['variables'] = json.dumps(vars_dict)
return super(HostSerializer, self).validate(attrs)
def to_representation(self, obj):
ret = super(HostSerializer, self).to_representation(obj)
if not obj:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'last_job' in ret and not obj.last_job:
ret['last_job'] = None
if 'last_job_host_summary' in ret and not obj.last_job_host_summary:
ret['last_job_host_summary'] = None
return ret
class AnsibleFactsSerializer(BaseSerializer):
class Meta:
model = Host
def to_representation(self, obj):
return obj.ansible_facts
class GroupSerializer(BaseSerializerWithVariables):
show_capabilities = ['copy', 'edit', 'delete']
capabilities_prefetch = ['inventory.admin', 'inventory.adhoc']
groups_with_active_failures = serializers.IntegerField(
read_only=True,
min_value=0,
help_text=_('This field has been deprecated and will be removed in a future release')
)
class Meta:
model = Group
fields = ('*', 'inventory', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources')
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(GroupSerializer, self).build_relational_field(field_name, relation_info)
# Inventory is read-only unless creating a new group.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def get_related(self, obj):
res = super(GroupSerializer, self).get_related(obj)
res.update(dict(
variable_data = self.reverse('api:group_variable_data', kwargs={'pk': obj.pk}),
hosts = self.reverse('api:group_hosts_list', kwargs={'pk': obj.pk}),
potential_children = self.reverse('api:group_potential_children_list', kwargs={'pk': obj.pk}),
children = self.reverse('api:group_children_list', kwargs={'pk': obj.pk}),
all_hosts = self.reverse('api:group_all_hosts_list', kwargs={'pk': obj.pk}),
job_events = self.reverse('api:group_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:group_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:group_activity_stream_list', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:group_inventory_sources_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:group_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
))
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
return res
def validate_name(self, value):
if value in ('all', '_meta'):
raise serializers.ValidationError(_('Invalid group name.'))
return value
def validate_inventory(self, value):
if value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Group for Smart Inventory")})
return value
def to_representation(self, obj):
ret = super(GroupSerializer, self).to_representation(obj)
if obj is not None and 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
return ret
class GroupTreeSerializer(GroupSerializer):
children = serializers.SerializerMethodField()
class Meta:
model = Group
fields = ('*', 'children')
def get_children(self, obj):
if obj is None:
return {}
children_qs = obj.children
children_qs = children_qs.select_related('inventory')
children_qs = children_qs.prefetch_related('inventory_source')
return GroupTreeSerializer(children_qs, many=True).data
class BaseVariableDataSerializer(BaseSerializer):
class Meta:
fields = ('variables',)
def to_representation(self, obj):
if obj is None:
return {}
ret = super(BaseVariableDataSerializer, self).to_representation(obj)
return parse_yaml_or_json(ret.get('variables', '') or '{}')
def to_internal_value(self, data):
data = {'variables': json.dumps(data)}
return super(BaseVariableDataSerializer, self).to_internal_value(data)
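    # Rough sketch of the round trip implemented above (hypothetical payload):
    # on read, a stored variables string such as 'ansible_user: admin' is parsed
    # and returned as {"ansible_user": "admin"}; on write, a posted dict is
    # dumped back to a JSON string and saved in the 'variables' field.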
class InventoryVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Inventory
class HostVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Host
class GroupVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Group
class CustomInventoryScriptSerializer(BaseSerializer):
script = serializers.CharField(trim_whitespace=False)
show_capabilities = ['edit', 'delete', 'copy']
capabilities_prefetch = [
{'edit': 'admin'}
]
class Meta:
model = CustomInventoryScript
fields = ('*', "script", "organization")
def validate_script(self, value):
if not value.startswith("#!"):
            raise serializers.ValidationError(_('Script must begin with a hashbang sequence, e.g. #!/usr/bin/env python'))
return value
def to_representation(self, obj):
ret = super(CustomInventoryScriptSerializer, self).to_representation(obj)
if obj is None:
return ret
request = self.context.get('request', None)
if request.user not in obj.admin_role and \
not request.user.is_superuser and \
not request.user.is_system_auditor and \
not (obj.organization is not None and request.user in obj.organization.auditor_role):
ret['script'] = None
return ret
def get_related(self, obj):
res = super(CustomInventoryScriptSerializer, self).get_related(obj)
res.update(dict(
object_roles = self.reverse('api:inventory_script_object_roles_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:inventory_script_copy', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
class InventorySourceOptionsSerializer(BaseSerializer):
credential = DeprecatedCredentialField(
help_text=_('Cloud credential to use for inventory updates.')
)
class Meta:
fields = ('*', 'source', 'source_path', 'source_script', 'source_vars', 'credential',
'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',
'custom_virtualenv', 'timeout', 'verbosity')
def get_related(self, obj):
res = super(InventorySourceOptionsSerializer, self).get_related(obj)
if obj.credential: # TODO: remove when 'credential' field is removed
res['credential'] = self.reverse('api:credential_detail',
kwargs={'pk': obj.credential})
if obj.source_script:
res['source_script'] = self.reverse('api:inventory_script_detail', kwargs={'pk': obj.source_script.pk})
return res
def validate_source_vars(self, value):
ret = vars_validate_or_raise(value)
for env_k in parse_yaml_or_json(value):
if env_k in settings.INV_ENV_VARIABLE_BLACKLIST:
                raise serializers.ValidationError(_("`{}` is a prohibited environment variable").format(env_k))
return ret
def validate(self, attrs):
# TODO: Validate source, validate source_regions
errors = {}
source = attrs.get('source', self.instance and self.instance.source or '')
source_script = attrs.get('source_script', self.instance and self.instance.source_script or '')
if source == 'custom':
if source_script is None or source_script == '':
errors['source_script'] = _("If 'source' is 'custom', 'source_script' must be provided.")
else:
try:
if not self.instance:
dest_inventory = attrs.get('inventory', None)
if not dest_inventory:
errors['inventory'] = _("Must provide an inventory.")
else:
dest_inventory = self.instance.inventory
if dest_inventory and source_script.organization != dest_inventory.organization:
errors['source_script'] = _("The 'source_script' does not belong to the same organization as the inventory.")
except Exception:
errors['source_script'] = _("'source_script' doesn't exist.")
logger.exception('Problem processing source_script validation.')
if errors:
raise serializers.ValidationError(errors)
return super(InventorySourceOptionsSerializer, self).validate(attrs)
class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOptionsSerializer):
status = serializers.ChoiceField(choices=InventorySource.INVENTORY_SOURCE_STATUS_CHOICES, read_only=True)
last_update_failed = serializers.BooleanField(read_only=True)
last_updated = serializers.DateTimeField(read_only=True)
show_capabilities = ['start', 'schedule', 'edit', 'delete']
capabilities_prefetch = [
{'admin': 'inventory.admin'},
{'start': 'inventory.update'}
]
class Meta:
model = InventorySource
fields = ('*', 'name', 'inventory', 'update_on_launch', 'update_cache_timeout',
'source_project', 'update_on_project_update') + \
('last_update_failed', 'last_updated') # Backwards compatibility.
def get_related(self, obj):
res = super(InventorySourceSerializer, self).get_related(obj)
res.update(dict(
update = self.reverse('api:inventory_source_update_view', kwargs={'pk': obj.pk}),
inventory_updates = self.reverse('api:inventory_source_updates_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:inventory_source_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:inventory_source_activity_stream_list', kwargs={'pk': obj.pk}),
hosts = self.reverse('api:inventory_source_hosts_list', kwargs={'pk': obj.pk}),
groups = self.reverse('api:inventory_source_groups_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:inventory_source_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:inventory_source_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:inventory_source_notification_templates_error_list', kwargs={'pk': obj.pk}),
))
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.source_project_id is not None:
res['source_project'] = self.reverse('api:project_detail', kwargs={'pk': obj.source_project.pk})
# Backwards compatibility.
if obj.current_update:
res['current_update'] = self.reverse('api:inventory_update_detail',
kwargs={'pk': obj.current_update.pk})
        if obj.last_update:
            res['last_update'] = self.reverse('api:inventory_update_detail',
                                              kwargs={'pk': obj.last_update.pk})
        # The credentials sublist link does not depend on update history.
        res['credentials'] = self.reverse('api:inventory_source_credentials_list', kwargs={'pk': obj.pk})
return res
def get_group(self, obj): # TODO: remove in 3.3
if obj.deprecated_group:
return obj.deprecated_group.id
return None
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(InventorySourceSerializer, self).build_relational_field(field_name, relation_info)
# SCM Project and inventory are read-only unless creating a new inventory.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
# TODO: remove when old 'credential' fields are removed
def build_field(self, field_name, info, model_class, nested_depth):
# have to special-case the field so that DRF will not automagically make it
# read-only because it's a property on the model.
if field_name == 'credential':
return self.build_standard_field(field_name, self.credential)
return super(InventorySourceOptionsSerializer, self).build_field(field_name, info, model_class, nested_depth)
def to_representation(self, obj):
ret = super(InventorySourceSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
return ret
def validate_source_project(self, value):
if value and value.scm_type == '':
raise serializers.ValidationError(_("Cannot use manual project for SCM-based inventory."))
return value
def validate_update_on_project_update(self, value):
if value and self.instance and self.instance.schedules.exists():
raise serializers.ValidationError(_("Setting not compatible with existing schedules."))
return value
def validate_inventory(self, value):
if value and value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
return value
# TODO: remove when old 'credential' fields are removed
def create(self, validated_data):
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(InventorySourceSerializer, self).create(validated_data)
if deprecated_fields:
self._update_deprecated_fields(deprecated_fields, obj)
return obj
# TODO: remove when old 'credential' fields are removed
def update(self, obj, validated_data):
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(InventorySourceSerializer, self).update(obj, validated_data)
if deprecated_fields:
self._update_deprecated_fields(deprecated_fields, obj)
return obj
# TODO: remove when old 'credential' fields are removed
def _update_deprecated_fields(self, fields, obj):
if 'credential' in fields:
new_cred = fields['credential']
existing = obj.credentials.all()
if new_cred not in existing:
for cred in existing:
# Remove all other cloud credentials
obj.credentials.remove(cred)
if new_cred:
# Add new credential
obj.credentials.add(new_cred)
def validate(self, attrs):
deprecated_fields = {}
if 'credential' in attrs: # TODO: remove when 'credential' field removed
deprecated_fields['credential'] = attrs.pop('credential')
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
if get_field_from_model_or_attrs('source') != 'scm':
redundant_scm_fields = list(filter(
lambda x: attrs.get(x, None),
['source_project', 'source_path', 'update_on_project_update']
))
if redundant_scm_fields:
raise serializers.ValidationError(
{"detail": _("Cannot set %s if not SCM type." % ' '.join(redundant_scm_fields))}
)
attrs = super(InventorySourceSerializer, self).validate(attrs)
# Check type consistency of source and cloud credential, if provided
if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated
cred = deprecated_fields['credential']
attrs['credential'] = cred
if cred is not None:
cred = Credential.objects.get(pk=cred)
view = self.context.get('view', None)
if (not view) or (not view.request) or (view.request.user not in cred.use_role):
raise PermissionDenied()
cred_error = InventorySource.cloud_credential_validation(
get_field_from_model_or_attrs('source'),
cred
)
if cred_error:
raise serializers.ValidationError({"credential": cred_error})
return attrs
class InventorySourceUpdateSerializer(InventorySourceSerializer):
can_update = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_update',)
class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSerializer):
custom_virtualenv = serializers.ReadOnlyField()
class Meta:
model = InventoryUpdate
fields = ('*', 'inventory', 'inventory_source', 'license_error', 'org_host_limit_error',
'source_project_update', 'custom_virtualenv', '-controller_node',)
def get_related(self, obj):
res = super(InventoryUpdateSerializer, self).get_related(obj)
try:
res.update(dict(
inventory_source = self.reverse(
'api:inventory_source_detail', kwargs={'pk': obj.inventory_source.pk}
),
))
except ObjectDoesNotExist:
pass
res.update(dict(
cancel = self.reverse('api:inventory_update_cancel', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:inventory_update_notifications_list', kwargs={'pk': obj.pk}),
events = self.reverse('api:inventory_update_events_list', kwargs={'pk': obj.pk}),
))
if obj.source_project_update_id:
res['source_project_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.source_project_update.pk})
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
res['credentials'] = self.reverse('api:inventory_update_credentials_list', kwargs={'pk': obj.pk})
return res
class InventoryUpdateDetailSerializer(InventoryUpdateSerializer):
source_project = serializers.SerializerMethodField(
help_text=_('The project used for this job.'),
method_name='get_source_project_id'
)
class Meta:
model = InventoryUpdate
fields = ('*', 'source_project',)
def get_source_project(self, obj):
return getattrd(obj, 'source_project_update.unified_job_template', None)
def get_source_project_id(self, obj):
return getattrd(obj, 'source_project_update.unified_job_template.id', None)
def get_related(self, obj):
res = super(InventoryUpdateDetailSerializer, self).get_related(obj)
source_project_id = self.get_source_project_id(obj)
if source_project_id:
res['source_project'] = self.reverse('api:project_detail', kwargs={'pk': source_project_id})
return res
def get_summary_fields(self, obj):
summary_fields = super(InventoryUpdateDetailSerializer, self).get_summary_fields(obj)
source_project = self.get_source_project(obj)
if source_project:
summary_fields['source_project'] = {}
for field in SUMMARIZABLE_FK_FIELDS['project']:
value = getattr(source_project, field, None)
if value is not None:
summary_fields['source_project'][field] = value
cred = obj.credentials.first()
if cred:
summary_fields['credential'] = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'cloud': cred.credential_type.kind == 'cloud'
}
return summary_fields
class InventoryUpdateListSerializer(InventoryUpdateSerializer, UnifiedJobListSerializer):
class Meta:
model = InventoryUpdate
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class InventoryUpdateCancelSerializer(InventoryUpdateSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class TeamSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Team
fields = ('*', 'organization')
def get_related(self, obj):
res = super(TeamSerializer, self).get_related(obj)
res.update(dict(
projects = self.reverse('api:team_projects_list', kwargs={'pk': obj.pk}),
users = self.reverse('api:team_users_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:team_credentials_list', kwargs={'pk': obj.pk}),
roles = self.reverse('api:team_roles_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:team_object_roles_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:team_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:team_access_list', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def to_representation(self, obj):
ret = super(TeamSerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
class RoleSerializer(BaseSerializer):
class Meta:
model = Role
fields = ('*', '-created', '-modified')
read_only_fields = ('id', 'role_field', 'description', 'name')
def to_representation(self, obj):
ret = super(RoleSerializer, self).to_representation(obj)
if obj.object_id:
content_object = obj.content_object
if hasattr(content_object, 'username'):
ret['summary_fields']['resource_name'] = obj.content_object.username
if hasattr(content_object, 'name'):
ret['summary_fields']['resource_name'] = obj.content_object.name
content_model = obj.content_type.model_class()
ret['summary_fields']['resource_type'] = get_type_for_model(content_model)
ret['summary_fields']['resource_type_display_name'] = content_model._meta.verbose_name.title()
return ret
def get_related(self, obj):
ret = super(RoleSerializer, self).get_related(obj)
ret['users'] = self.reverse('api:role_users_list', kwargs={'pk': obj.pk})
ret['teams'] = self.reverse('api:role_teams_list', kwargs={'pk': obj.pk})
try:
if obj.content_object:
ret.update(reverse_gfk(obj.content_object, self.context.get('request')))
except AttributeError:
# AttributeError's happen if our content_object is pointing at
# a model that no longer exists. This is dirty data and ideally
# doesn't exist, but in case it does, let's not puke.
pass
return ret
class RoleSerializerWithParentAccess(RoleSerializer):
show_capabilities = ['unattach']
class ResourceAccessListElementSerializer(UserSerializer):
show_capabilities = [] # Clear fields from UserSerializer parent class
def to_representation(self, user):
        '''
        Derive the "direct" and "indirect" access lists for this user.

        The direct access list contains all of the roles the user is a member
        of, plus all of the roles that are directly granted to any team the
        user is a member of.

        The indirect access list contains all of the roles the user is a member
        of that are ancestors of any role granting permissions on the resource.
        '''
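        # Illustrative sketch of the resulting summary_fields (all values below are
        # hypothetical and only show the shape produced by format_role_perm /
        # format_team_role_perm further down):
        #
        #   "summary_fields": {
        #       "direct_access": [
        #           {"role": {"id": 42, "name": "Admin", "resource_name": "Demo Project", ...},
        #            "descendant_roles": ["admin_role", "execute_role", "read_role"]}
        #       ],
        #       "indirect_access": [...]
        #   }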
ret = super(ResourceAccessListElementSerializer, self).to_representation(user)
obj = self.context['view'].get_parent_object()
if self.context['view'].request is not None:
requesting_user = self.context['view'].request.user
else:
requesting_user = None
if 'summary_fields' not in ret:
ret['summary_fields'] = {}
def format_role_perm(role):
role_dict = { 'id': role.id, 'name': role.name, 'description': role.description}
try:
role_dict['resource_name'] = role.content_object.name
role_dict['resource_type'] = get_type_for_model(role.content_type.model_class())
role_dict['related'] = reverse_gfk(role.content_object, self.context.get('request'))
except AttributeError:
pass
if role.content_type is not None:
role_dict['user_capabilities'] = {'unattach': requesting_user.can_access(
Role, 'unattach', role, user, 'members', data={}, skip_sub_obj_read_check=False)}
else:
# Singleton roles should not be managed from this view, as per copy/edit rework spec
role_dict['user_capabilities'] = {'unattach': False}
return { 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, role)}
def format_team_role_perm(naive_team_role, permissive_role_ids):
ret = []
team_role = naive_team_role
if naive_team_role.role_field == 'admin_role':
team_role = naive_team_role.content_object.member_role
for role in team_role.children.filter(id__in=permissive_role_ids).all():
role_dict = {
'id': role.id,
'name': role.name,
'description': role.description,
'team_id': team_role.object_id,
'team_name': team_role.content_object.name,
'team_organization_name': team_role.content_object.organization.name,
}
if role.content_type is not None:
role_dict['resource_name'] = role.content_object.name
role_dict['resource_type'] = get_type_for_model(role.content_type.model_class())
role_dict['related'] = reverse_gfk(role.content_object, self.context.get('request'))
role_dict['user_capabilities'] = {'unattach': requesting_user.can_access(
Role, 'unattach', role, team_role, 'parents', data={}, skip_sub_obj_read_check=False)}
else:
# Singleton roles should not be managed from this view, as per copy/edit rework spec
role_dict['user_capabilities'] = {'unattach': False}
ret.append({ 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, team_role)})
return ret
team_content_type = ContentType.objects.get_for_model(Team)
content_type = ContentType.objects.get_for_model(obj)
direct_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('id', flat=True)
all_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('ancestors__id', flat=True)
direct_access_roles = user.roles \
.filter(id__in=direct_permissive_role_ids).all()
direct_team_roles = Role.objects \
.filter(content_type=team_content_type,
members=user,
children__in=direct_permissive_role_ids)
if content_type == team_content_type:
# When looking at the access list for a team, exclude the entries
# for that team. This exists primarily so we don't list the read role
# as a direct role when a user is a member or admin of a team
direct_team_roles = direct_team_roles.exclude(
children__content_type=team_content_type,
children__object_id=obj.id
)
indirect_team_roles = Role.objects \
.filter(content_type=team_content_type,
members=user,
children__in=all_permissive_role_ids) \
.exclude(id__in=direct_team_roles)
indirect_access_roles = user.roles \
.filter(id__in=all_permissive_role_ids) \
.exclude(id__in=direct_permissive_role_ids) \
.exclude(id__in=direct_team_roles) \
.exclude(id__in=indirect_team_roles)
ret['summary_fields']['direct_access'] \
= [format_role_perm(r) for r in direct_access_roles.distinct()] \
+ [y for x in (format_team_role_perm(r, direct_permissive_role_ids) for r in direct_team_roles.distinct()) for y in x] \
+ [y for x in (format_team_role_perm(r, all_permissive_role_ids) for r in indirect_team_roles.distinct()) for y in x]
ret['summary_fields']['indirect_access'] \
= [format_role_perm(r) for r in indirect_access_roles.distinct()]
return ret
class CredentialTypeSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
managed_by_tower = serializers.ReadOnlyField()
class Meta:
model = CredentialType
fields = ('*', 'kind', 'namespace', 'name', 'managed_by_tower', 'inputs',
'injectors')
def validate(self, attrs):
if self.instance and self.instance.managed_by_tower:
raise PermissionDenied(
detail=_("Modifications not allowed for managed credential types")
)
if self.instance and self.instance.credentials.exists():
if 'inputs' in attrs and attrs['inputs'] != self.instance.inputs:
raise PermissionDenied(
detail= _("Modifications to inputs are not allowed for credential types that are in use")
)
ret = super(CredentialTypeSerializer, self).validate(attrs)
if 'kind' in attrs and attrs['kind'] not in ('cloud', 'net'):
raise serializers.ValidationError({
"kind": _("Must be 'cloud' or 'net', not %s") % attrs['kind']
})
fields = attrs.get('inputs', {}).get('fields', [])
for field in fields:
if field.get('ask_at_runtime', False):
raise serializers.ValidationError({"inputs": _("'ask_at_runtime' is not supported for custom credentials.")})
return ret
def get_related(self, obj):
res = super(CredentialTypeSerializer, self).get_related(obj)
res['credentials'] = self.reverse(
'api:credential_type_credential_list',
kwargs={'pk': obj.pk}
)
res['activity_stream'] = self.reverse(
'api:credential_type_activity_stream_list',
kwargs={'pk': obj.pk}
)
return res
def to_representation(self, data):
value = super(CredentialTypeSerializer, self).to_representation(data)
# translate labels and help_text for credential fields "managed by Tower"
if value.get('managed_by_tower'):
value['name'] = _(value['name'])
for field in value.get('inputs', {}).get('fields', []):
field['label'] = _(field['label'])
if 'help_text' in field:
field['help_text'] = _(field['help_text'])
return value
def filter_field_metadata(self, fields, method):
# API-created/modified CredentialType kinds are limited to
# `cloud` and `net`
if method in ('PUT', 'POST'):
fields['kind']['choices'] = list(filter(
lambda choice: choice[0] in ('cloud', 'net'),
fields['kind']['choices']
))
return fields
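    # Rough illustration of the restriction above (choices are hypothetical): if the
    # model offers kinds such as ('ssh', 'cloud', 'net'), an OPTIONS response for
    # PUT/POST only advertises ('cloud', 'net'), mirroring the check in validate().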
class CredentialSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete', 'copy', 'use']
capabilities_prefetch = ['admin', 'use']
class Meta:
model = Credential
fields = ('*', 'organization', 'credential_type', 'inputs', 'kind', 'cloud')
extra_kwargs = {
'credential_type': {
'label': _('Credential Type'),
},
}
def to_representation(self, data):
value = super(CredentialSerializer, self).to_representation(data)
if 'inputs' in value:
value['inputs'] = data.display_inputs()
return value
def get_related(self, obj):
res = super(CredentialSerializer, self).get_related(obj)
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
res.update(dict(
activity_stream = self.reverse('api:credential_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:credential_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:credential_object_roles_list', kwargs={'pk': obj.pk}),
owner_users = self.reverse('api:credential_owner_users_list', kwargs={'pk': obj.pk}),
owner_teams = self.reverse('api:credential_owner_teams_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:credential_copy', kwargs={'pk': obj.pk}),
input_sources = self.reverse('api:credential_input_source_sublist', kwargs={'pk': obj.pk}),
credential_type = self.reverse('api:credential_type_detail', kwargs={'pk': obj.credential_type.pk}),
))
parents = [role for role in obj.admin_role.parents.all() if role.object_id is not None]
if parents:
res.update({parents[0].content_type.name:parents[0].content_object.get_absolute_url(self.context.get('request'))})
elif len(obj.admin_role.members.all()) > 0:
user = obj.admin_role.members.all()[0]
res.update({'user': self.reverse('api:user_detail', kwargs={'pk': user.pk})})
return res
def get_summary_fields(self, obj):
summary_dict = super(CredentialSerializer, self).get_summary_fields(obj)
summary_dict['owners'] = []
for user in obj.admin_role.members.all():
summary_dict['owners'].append({
'id': user.pk,
'type': 'user',
'name': user.username,
'description': ' '.join([user.first_name, user.last_name]),
'url': self.reverse('api:user_detail', kwargs={'pk': user.pk}),
})
for parent in [role for role in obj.admin_role.parents.all() if role.object_id is not None]:
summary_dict['owners'].append({
'id': parent.content_object.pk,
'type': camelcase_to_underscore(parent.content_object.__class__.__name__),
'name': parent.content_object.name,
'description': parent.content_object.description,
'url': parent.content_object.get_absolute_url(self.context.get('request')),
})
return summary_dict
def get_validation_exclusions(self, obj=None):
ret = super(CredentialSerializer, self).get_validation_exclusions(obj)
for field in ('credential_type', 'inputs'):
if field in ret:
ret.remove(field)
return ret
def validate_credential_type(self, credential_type):
if self.instance and credential_type.pk != self.instance.credential_type.pk:
for rel in (
'ad_hoc_commands',
'insights_inventories',
'unifiedjobs',
'unifiedjobtemplates',
'projects',
'projectupdates',
'workflowjobnodes'
):
if getattr(self.instance, rel).count() > 0:
raise ValidationError(
_('You cannot change the credential type of the credential, as it may break the functionality'
' of the resources using it.'),
)
return credential_type
class CredentialSerializerCreate(CredentialSerializer):
user = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(),
required=False, default=None, write_only=True, allow_null=True,
help_text=_('Write-only field used to add user to owner role. If provided, '
'do not give either team or organization. Only valid for creation.'))
team = serializers.PrimaryKeyRelatedField(
queryset=Team.objects.all(),
required=False, default=None, write_only=True, allow_null=True,
help_text=_('Write-only field used to add team to owner role. If provided, '
'do not give either user or organization. Only valid for creation.'))
organization = serializers.PrimaryKeyRelatedField(
queryset=Organization.objects.all(),
required=False, default=None, allow_null=True,
help_text=_('Inherit permissions from organization roles. If provided on creation, '
'do not give either user or team.'))
class Meta:
model = Credential
fields = ('*', 'user', 'team')
def validate(self, attrs):
owner_fields = set()
for field in ('user', 'team', 'organization'):
if field in attrs:
if attrs[field]:
owner_fields.add(field)
else:
attrs.pop(field)
if not owner_fields:
raise serializers.ValidationError({"detail": _("Missing 'user', 'team', or 'organization'.")})
if attrs.get('team'):
attrs['organization'] = attrs['team'].organization
return super(CredentialSerializerCreate, self).validate(attrs)
def create(self, validated_data):
user = validated_data.pop('user', None)
team = validated_data.pop('team', None)
credential = super(CredentialSerializerCreate, self).create(validated_data)
if user:
credential.admin_role.members.add(user)
if team:
if not credential.organization or team.organization.id != credential.organization.id:
raise serializers.ValidationError({"detail": _("Credential organization must be set and match before assigning to a team")})
credential.admin_role.parents.add(team.admin_role)
credential.use_role.parents.add(team.member_role)
return credential
class CredentialInputSourceSerializer(BaseSerializer):
show_capabilities = ['delete']
class Meta:
model = CredentialInputSource
fields = (
'*',
'input_field_name',
'metadata',
'target_credential',
'source_credential',
'-name',
)
extra_kwargs = {
'input_field_name': {'required': True},
'target_credential': {'required': True},
'source_credential': {'required': True},
}
def get_related(self, obj):
res = super(CredentialInputSourceSerializer, self).get_related(obj)
res['source_credential'] = obj.source_credential.get_absolute_url(request=self.context.get('request'))
res['target_credential'] = obj.target_credential.get_absolute_url(request=self.context.get('request'))
return res
class UserCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-team', '-organization')
class TeamCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-user', '-organization')
class OrganizationCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-user', '-team')
class LabelsListMixin(object):
def _summary_field_labels(self, obj):
label_list = [{'id': x.id, 'name': x.name} for x in obj.labels.all()[:10]]
if has_model_field_prefetched(obj, 'labels'):
label_ct = len(obj.labels.all())
else:
if len(label_list) < 10:
label_ct = len(label_list)
else:
label_ct = obj.labels.count()
return {'count': label_ct, 'results': label_list}
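    # Example of the structure returned above (label names are hypothetical), as it
    # appears under summary_fields['labels']:
    #
    #   {"count": 2, "results": [{"id": 1, "name": "prod"}, {"id": 7, "name": "web"}]}
    #
    # Only the first 10 labels are listed in 'results', while 'count' reflects the
    # full total.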
def get_summary_fields(self, obj):
res = super(LabelsListMixin, self).get_summary_fields(obj)
res['labels'] = self._summary_field_labels(obj)
return res
class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
class Meta:
fields = ('*', 'job_type', 'inventory', 'project', 'playbook',
'forks', 'limit', 'verbosity', 'extra_vars', 'job_tags',
'force_handlers', 'skip_tags', 'start_at_task', 'timeout',
'use_fact_cache',)
def get_related(self, obj):
res = super(JobOptionsSerializer, self).get_related(obj)
res['labels'] = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk})
try:
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
except ObjectDoesNotExist:
setattr(obj, 'inventory', None)
try:
if obj.project:
res['project'] = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk})
except ObjectDoesNotExist:
setattr(obj, 'project', None)
if isinstance(obj, UnifiedJobTemplate):
res['extra_credentials'] = self.reverse(
'api:job_template_extra_credentials_list',
kwargs={'pk': obj.pk}
)
res['credentials'] = self.reverse(
'api:job_template_credentials_list',
kwargs={'pk': obj.pk}
)
elif isinstance(obj, UnifiedJob):
res['extra_credentials'] = self.reverse('api:job_extra_credentials_list', kwargs={'pk': obj.pk})
res['credentials'] = self.reverse('api:job_credentials_list', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(JobOptionsSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'project' in ret and not obj.project:
ret['project'] = None
if 'playbook' in ret:
ret['playbook'] = ''
return ret
def validate(self, attrs):
if 'project' in self.fields and 'playbook' in self.fields:
project = attrs.get('project', self.instance and self.instance.project or None)
playbook = attrs.get('playbook', self.instance and self.instance.playbook or '')
if not project:
raise serializers.ValidationError({'project': _('This field is required.')})
if project and project.scm_type and playbook and force_text(playbook) not in project.playbook_files:
raise serializers.ValidationError({'playbook': _('Playbook not found for project.')})
if project and not project.scm_type and playbook and force_text(playbook) not in project.playbooks:
raise serializers.ValidationError({'playbook': _('Playbook not found for project.')})
if project and not playbook:
raise serializers.ValidationError({'playbook': _('Must select playbook for project.')})
ret = super(JobOptionsSerializer, self).validate(attrs)
return ret
class JobTemplateMixin(object):
'''
Provide recent jobs and survey details in summary_fields
'''
def _recent_jobs(self, obj):
# Exclude "joblets", jobs that ran as part of a sliced workflow job
uj_qs = obj.unifiedjob_unified_jobs.exclude(job__job_slice_count__gt=1).order_by('-created')
# Would like to apply an .only, but does not play well with non_polymorphic
# .only('id', 'status', 'finished', 'polymorphic_ctype_id')
optimized_qs = uj_qs.non_polymorphic()
return [{
'id': x.id, 'status': x.status, 'finished': x.finished,
# Make type consistent with API top-level key, for instance workflow_job
'type': x.get_real_instance_class()._meta.verbose_name.replace(' ', '_')
} for x in optimized_qs[:10]]
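    # Sketch of a single entry in summary_fields['recent_jobs'] (values hypothetical):
    #
    #   {"id": 123, "status": "successful", "finished": "2019-01-01T00:00:00Z", "type": "job"}
    #
    # Sliced-workflow "joblets" are excluded above, and 'type' matches the API's
    # top-level key for the underlying unified job class (e.g. "workflow_job").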
def get_summary_fields(self, obj):
d = super(JobTemplateMixin, self).get_summary_fields(obj)
if obj.survey_spec is not None and ('name' in obj.survey_spec and 'description' in obj.survey_spec):
d['survey'] = dict(title=obj.survey_spec['name'], description=obj.survey_spec['description'])
d['recent_jobs'] = self._recent_jobs(obj)
return d
class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobOptionsSerializer):
show_capabilities = ['start', 'schedule', 'copy', 'edit', 'delete']
capabilities_prefetch = [
'admin', 'execute',
{'copy': ['project.use', 'inventory.use']}
]
status = serializers.ChoiceField(choices=JobTemplate.JOB_TEMPLATE_STATUS_CHOICES, read_only=True, required=False)
class Meta:
model = JobTemplate
fields = ('*', 'host_config_key', 'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode',
'allow_simultaneous', 'custom_virtualenv', 'job_slice_count')
def get_related(self, obj):
res = super(JobTemplateSerializer, self).get_related(obj)
res.update(dict(
jobs = self.reverse('api:job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:job_template_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:job_template_activity_stream_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:job_template_launch', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:job_template_access_list', kwargs={'pk': obj.pk}),
survey_spec = self.reverse('api:job_template_survey_spec', kwargs={'pk': obj.pk}),
labels = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:job_template_object_roles_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:job_template_instance_groups_list', kwargs={'pk': obj.pk}),
slice_workflow_jobs = self.reverse('api:job_template_slice_workflow_jobs_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:job_template_copy', kwargs={'pk': obj.pk}),
))
if obj.host_config_key:
res['callback'] = self.reverse('api:job_template_callback', kwargs={'pk': obj.pk})
return res
def validate(self, attrs):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
inventory = get_field_from_model_or_attrs('inventory')
project = get_field_from_model_or_attrs('project')
if get_field_from_model_or_attrs('host_config_key') and not inventory:
raise serializers.ValidationError({'host_config_key': _(
"Cannot enable provisioning callback without an inventory set."
)})
prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
if project is None:
raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
raise serializers.ValidationError({'inventory': prompting_error_message})
return super(JobTemplateSerializer, self).validate(attrs)
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def get_summary_fields(self, obj):
summary_fields = super(JobTemplateSerializer, self).get_summary_fields(obj)
all_creds = []
# Organize credential data into multitude of deprecated fields
extra_creds = []
if obj.pk:
for cred in obj.credentials.all():
summarized_cred = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'cloud': cred.credential_type.kind == 'cloud'
}
all_creds.append(summarized_cred)
if cred.credential_type.kind in ('cloud', 'net'):
extra_creds.append(summarized_cred)
if self.is_detail_view:
summary_fields['extra_credentials'] = extra_creds
summary_fields['credentials'] = all_creds
return summary_fields
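    # Sketch of the credential summaries built above (data hypothetical): every
    # associated credential lands in summary_fields['credentials'], while only
    # cloud/net credentials are duplicated into the deprecated 'extra_credentials'
    # list on detail views, e.g.
    #
    #   "credentials":       [{"id": 1, "kind": "ssh", "cloud": false},
    #                         {"id": 2, "kind": "aws", "cloud": true}]
    #   "extra_credentials": [{"id": 2, "kind": "aws", "cloud": true}]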
class JobTemplateWithSpecSerializer(JobTemplateSerializer):
'''
Used for activity stream entries.
'''
class Meta:
model = JobTemplate
fields = ('*', 'survey_spec')
class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
passwords_needed_to_start = serializers.ReadOnlyField()
artifacts = serializers.SerializerMethodField()
class Meta:
model = Job
fields = ('*', 'job_template', 'passwords_needed_to_start',
'allow_simultaneous', 'artifacts', 'scm_revision',
'instance_group', 'diff_mode', 'job_slice_number', 'job_slice_count')
def get_related(self, obj):
res = super(JobSerializer, self).get_related(obj)
res.update(dict(
job_events = self.reverse('api:job_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:job_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:job_activity_stream_list', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:job_notifications_list', kwargs={'pk': obj.pk}),
labels = self.reverse('api:job_label_list', kwargs={'pk': obj.pk}),
create_schedule = self.reverse('api:job_create_schedule', kwargs={'pk': obj.pk}),
))
try:
if obj.job_template:
res['job_template'] = self.reverse('api:job_template_detail',
kwargs={'pk': obj.job_template.pk})
except ObjectDoesNotExist:
setattr(obj, 'job_template', None)
        # NOTE: `or True` makes this unconditional; the cancel URL is always exposed
        # here and the cancel view performs its own permission/state checks.
        if obj.can_cancel or True:
res['cancel'] = self.reverse('api:job_cancel', kwargs={'pk': obj.pk})
try:
if obj.project_update:
res['project_update'] = self.reverse(
'api:project_update_detail', kwargs={'pk': obj.project_update.pk}
)
except ObjectDoesNotExist:
pass
res['relaunch'] = self.reverse('api:job_relaunch', kwargs={'pk': obj.pk})
return res
def get_artifacts(self, obj):
if obj:
return obj.display_artifacts()
return {}
def to_internal_value(self, data):
# When creating a new job and a job template is specified, populate any
# fields not provided in data from the job template.
if not self.instance and isinstance(data, dict) and data.get('job_template', False):
try:
job_template = JobTemplate.objects.get(pk=data['job_template'])
except JobTemplate.DoesNotExist:
raise serializers.ValidationError({'job_template': _('Invalid job template.')})
data.setdefault('name', job_template.name)
data.setdefault('description', job_template.description)
data.setdefault('job_type', job_template.job_type)
if job_template.inventory:
data.setdefault('inventory', job_template.inventory.pk)
if job_template.project:
data.setdefault('project', job_template.project.pk)
data.setdefault('playbook', job_template.playbook)
if job_template.credential:
data.setdefault('credential', job_template.credential)
data.setdefault('forks', job_template.forks)
data.setdefault('limit', job_template.limit)
data.setdefault('verbosity', job_template.verbosity)
data.setdefault('extra_vars', job_template.extra_vars)
data.setdefault('job_tags', job_template.job_tags)
data.setdefault('force_handlers', job_template.force_handlers)
data.setdefault('skip_tags', job_template.skip_tags)
data.setdefault('start_at_task', job_template.start_at_task)
return super(JobSerializer, self).to_internal_value(data)
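    # Minimal sketch of the defaulting above (values hypothetical): a POST body such
    # as {"job_template": 5, "limit": "webservers"} is expanded so that name,
    # description, job_type, inventory, project, playbook, forks, verbosity,
    # extra_vars, tags, etc. fall back to the values defined on job template 5,
    # while the explicitly supplied "limit" is kept as-is.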
def to_representation(self, obj):
ret = super(JobSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'job_template' in ret and not obj.job_template:
ret['job_template'] = None
if 'extra_vars' in ret:
ret['extra_vars'] = obj.display_extra_vars()
return ret
def get_summary_fields(self, obj):
summary_fields = super(JobSerializer, self).get_summary_fields(obj)
all_creds = []
# Organize credential data into multitude of deprecated fields
extra_creds = []
if obj.pk:
for cred in obj.credentials.all():
summarized_cred = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'cloud': cred.credential_type.kind == 'cloud'
}
all_creds.append(summarized_cred)
if cred.credential_type.kind in ('cloud', 'net'):
extra_creds.append(summarized_cred)
if self.is_detail_view:
summary_fields['extra_credentials'] = extra_creds
summary_fields['credentials'] = all_creds
return summary_fields
class JobDetailSerializer(JobSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
playbook_counts = serializers.SerializerMethodField(
help_text=_('A count of all plays and tasks for the job run.'),
)
custom_virtualenv = serializers.ReadOnlyField()
class Meta:
model = Job
fields = ('*', 'host_status_counts', 'playbook_counts', 'custom_virtualenv')
def get_playbook_counts(self, obj):
task_count = obj.job_events.filter(event='playbook_on_task_start').count()
play_count = obj.job_events.filter(event='playbook_on_play_start').count()
data = {'play_count': play_count, 'task_count': task_count}
return data
def get_host_status_counts(self, obj):
try:
counts = obj.job_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except JobEvent.DoesNotExist:
counts = {}
return counts
class JobCancelSerializer(BaseSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
model = Job
fields = ('can_cancel',)
class JobRelaunchSerializer(BaseSerializer):
passwords_needed_to_start = serializers.SerializerMethodField()
retry_counts = serializers.SerializerMethodField()
hosts = serializers.ChoiceField(
required=False, allow_null=True, default='all',
choices=[
('all', _('No change to job limit')),
('failed', _('All failed and unreachable hosts'))
],
write_only=True
)
credential_passwords = VerbatimField(required=True, write_only=True)
class Meta:
model = Job
fields = ('passwords_needed_to_start', 'retry_counts', 'hosts', 'credential_passwords',)
def validate_credential_passwords(self, value):
pnts = self.instance.passwords_needed_to_start
missing = set(pnts) - set(key for key in value if value[key])
if missing:
            raise serializers.ValidationError(
                _('Missing passwords needed to start: {}').format(', '.join(missing))
            )
return value
def to_representation(self, obj):
res = super(JobRelaunchSerializer, self).to_representation(obj)
view = self.context.get('view', None)
if hasattr(view, '_raw_data_form_marker'):
password_keys = dict([(p, u'') for p in self.get_passwords_needed_to_start(obj)])
res.update(password_keys)
return res
def get_passwords_needed_to_start(self, obj):
if obj:
return obj.passwords_needed_to_start
return ''
def get_retry_counts(self, obj):
if obj.status in ACTIVE_STATES:
return _('Relaunch by host status not available until job finishes running.')
data = OrderedDict([])
for status in self.fields['hosts'].choices.keys():
data[status] = obj.retry_qs(status).count()
return data
def get_validation_exclusions(self, *args, **kwargs):
r = super(JobRelaunchSerializer, self).get_validation_exclusions(*args, **kwargs)
r.append('credential_passwords')
return r
def validate(self, attrs):
obj = self.instance
if obj.project is None:
raise serializers.ValidationError(dict(errors=[_("Job Template Project is missing or undefined.")]))
if obj.inventory is None or obj.inventory.pending_deletion:
raise serializers.ValidationError(dict(errors=[_("Job Template Inventory is missing or undefined.")]))
attrs = super(JobRelaunchSerializer, self).validate(attrs)
return attrs
class JobCreateScheduleSerializer(BaseSerializer):
can_schedule = serializers.SerializerMethodField()
prompts = serializers.SerializerMethodField()
class Meta:
model = Job
fields = ('can_schedule', 'prompts',)
def get_can_schedule(self, obj):
'''
Need both a job template and job prompts to schedule
'''
return obj.can_schedule
@staticmethod
def _summarize(res_name, obj):
summary = {}
for field in SUMMARIZABLE_FK_FIELDS[res_name]:
summary[field] = getattr(obj, field, None)
return summary
def get_prompts(self, obj):
try:
config = obj.launch_config
ret = config.prompts_dict(display=True)
if 'inventory' in ret:
ret['inventory'] = self._summarize('inventory', ret['inventory'])
if 'credentials' in ret:
all_creds = [self._summarize('credential', cred) for cred in ret['credentials']]
ret['credentials'] = all_creds
return ret
except JobLaunchConfig.DoesNotExist:
            return {'all': _('Unknown, job may have been run before launch configurations were saved.')}
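    # Rough shape of the 'prompts' value above (data hypothetical): saved launch-time
    # prompts with related objects summarized, e.g.
    #
    #   {"limit": "webservers", "verbosity": 1,
    #    "inventory": {"id": 3, "name": "Demo Inventory", ...},
    #    "credentials": [{"id": 2, "name": "AWS", "kind": "aws", ...}]}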
class AdHocCommandSerializer(UnifiedJobSerializer):
class Meta:
model = AdHocCommand
fields = ('*', 'job_type', 'inventory', 'limit', 'credential',
'module_name', 'module_args', 'forks', 'verbosity', 'extra_vars',
'become_enabled', 'diff_mode', '-unified_job_template', '-description')
extra_kwargs = {
'name': {
'read_only': True,
},
}
def get_field_names(self, declared_fields, info):
field_names = super(AdHocCommandSerializer, self).get_field_names(declared_fields, info)
# Meta multiple inheritance and -field_name options don't seem to be
# taking effect above, so remove the undesired fields here.
return tuple(x for x in field_names if x not in ('unified_job_template', 'description'))
def build_standard_field(self, field_name, model_field):
field_class, field_kwargs = super(AdHocCommandSerializer, self).build_standard_field(field_name, model_field)
# Load module name choices dynamically from DB settings.
if field_name == 'module_name':
field_class = serializers.ChoiceField
module_name_choices = [(x, x) for x in settings.AD_HOC_COMMANDS]
module_name_default = 'command' if 'command' in [x[0] for x in module_name_choices] else ''
field_kwargs['choices'] = module_name_choices
field_kwargs['required'] = bool(not module_name_default)
field_kwargs['default'] = module_name_default or serializers.empty
field_kwargs['allow_blank'] = bool(module_name_default)
field_kwargs.pop('max_length', None)
return field_class, field_kwargs
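    # Illustration of the dynamic choices above (setting value hypothetical): with
    # settings.AD_HOC_COMMANDS = ['command', 'shell'], module_name becomes a
    # ChoiceField over those values, defaults to 'command' and is therefore optional;
    # if 'command' were absent from the setting, the field would become required.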
def get_related(self, obj):
res = super(AdHocCommandSerializer, self).get_related(obj)
if obj.inventory_id:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
if obj.credential_id:
res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential_id})
res.update(dict(
events = self.reverse('api:ad_hoc_command_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:ad_hoc_command_activity_stream_list', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:ad_hoc_command_notifications_list', kwargs={'pk': obj.pk}),
))
res['cancel'] = self.reverse('api:ad_hoc_command_cancel', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:ad_hoc_command_relaunch', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(AdHocCommandSerializer, self).to_representation(obj)
if 'inventory' in ret and not obj.inventory_id:
ret['inventory'] = None
if 'credential' in ret and not obj.credential_id:
ret['credential'] = None
# For the UI, only module_name is returned for name, instead of the
# longer module name + module_args format.
if 'name' in ret:
ret['name'] = obj.module_name
return ret
def validate(self, attrs):
ret = super(AdHocCommandSerializer, self).validate(attrs)
return ret
def validate_extra_vars(self, value):
redacted_extra_vars, removed_vars = extract_ansible_vars(value)
if removed_vars:
raise serializers.ValidationError(_(
"{} are prohibited from use in ad hoc commands."
).format(", ".join(sorted(removed_vars, reverse=True))))
return vars_validate_or_raise(value)
class AdHocCommandDetailSerializer(AdHocCommandSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
class Meta:
model = AdHocCommand
fields = ('*', 'host_status_counts',)
def get_host_status_counts(self, obj):
try:
counts = obj.ad_hoc_command_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except AdHocCommandEvent.DoesNotExist:
counts = {}
return counts
class AdHocCommandCancelSerializer(AdHocCommandSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class AdHocCommandRelaunchSerializer(AdHocCommandSerializer):
class Meta:
fields = ()
def to_representation(self, obj):
if obj:
return dict([(p, u'') for p in obj.passwords_needed_to_start])
else:
return {}
class SystemJobTemplateSerializer(UnifiedJobTemplateSerializer):
class Meta:
model = SystemJobTemplate
fields = ('*', 'job_type',)
def get_related(self, obj):
res = super(SystemJobTemplateSerializer, self).get_related(obj)
res.update(dict(
jobs = self.reverse('api:system_job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:system_job_template_schedules_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:system_job_template_launch', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:system_job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:system_job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:system_job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
))
return res
class SystemJobSerializer(UnifiedJobSerializer):
result_stdout = serializers.SerializerMethodField()
class Meta:
model = SystemJob
fields = ('*', 'system_job_template', 'job_type', 'extra_vars', 'result_stdout', '-controller_node',)
def get_related(self, obj):
res = super(SystemJobSerializer, self).get_related(obj)
if obj.system_job_template:
res['system_job_template'] = self.reverse('api:system_job_template_detail',
kwargs={'pk': obj.system_job_template.pk})
res['notifications'] = self.reverse('api:system_job_notifications_list', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:system_job_cancel', kwargs={'pk': obj.pk})
res['events'] = self.reverse('api:system_job_events_list', kwargs={'pk': obj.pk})
return res
def get_result_stdout(self, obj):
try:
return obj.result_stdout
except StdoutMaxBytesExceeded as e:
return _(
"Standard Output too large to display ({text_size} bytes), "
"only download supported for sizes over {supported_size} bytes.").format(
text_size=e.total, supported_size=e.supported
)
class SystemJobCancelSerializer(SystemJobSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJobTemplateSerializer):
show_capabilities = ['start', 'schedule', 'edit', 'copy', 'delete']
capabilities_prefetch = [
'admin', 'execute',
{'copy': 'organization.workflow_admin'}
]
class Meta:
model = WorkflowJobTemplate
fields = ('*', 'extra_vars', 'organization', 'survey_enabled', 'allow_simultaneous',
'ask_variables_on_launch', 'inventory', 'ask_inventory_on_launch',)
def get_related(self, obj):
res = super(WorkflowJobTemplateSerializer, self).get_related(obj)
res.update(dict(
workflow_jobs = self.reverse('api:workflow_job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:workflow_job_template_schedules_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:workflow_job_template_launch', kwargs={'pk': obj.pk}),
workflow_nodes = self.reverse('api:workflow_job_template_workflow_nodes_list', kwargs={'pk': obj.pk}),
labels = self.reverse('api:workflow_job_template_label_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:workflow_job_template_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:workflow_job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:workflow_job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:workflow_job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:workflow_job_template_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:workflow_job_template_object_roles_list', kwargs={'pk': obj.pk}),
survey_spec = self.reverse('api:workflow_job_template_survey_spec', kwargs={'pk': obj.pk}),
copy = self.reverse('api:workflow_job_template_copy', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
class WorkflowJobTemplateWithSpecSerializer(WorkflowJobTemplateSerializer):
'''
Used for activity stream entries.
'''
class Meta:
model = WorkflowJobTemplate
fields = ('*', 'survey_spec')
class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
class Meta:
model = WorkflowJob
fields = ('*', 'workflow_job_template', 'extra_vars', 'allow_simultaneous',
'job_template', 'is_sliced_job',
'-execution_node', '-event_processing_finished', '-controller_node',
'inventory',)
def get_related(self, obj):
res = super(WorkflowJobSerializer, self).get_related(obj)
if obj.workflow_job_template:
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail',
kwargs={'pk': obj.workflow_job_template.pk})
res['notifications'] = self.reverse('api:workflow_job_notifications_list', kwargs={'pk': obj.pk})
if obj.job_template_id:
res['job_template'] = self.reverse('api:job_template_detail', kwargs={'pk': obj.job_template_id})
res['workflow_nodes'] = self.reverse('api:workflow_job_workflow_nodes_list', kwargs={'pk': obj.pk})
res['labels'] = self.reverse('api:workflow_job_label_list', kwargs={'pk': obj.pk})
res['activity_stream'] = self.reverse('api:workflow_job_activity_stream_list', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:workflow_job_relaunch', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:workflow_job_cancel', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(WorkflowJobSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'extra_vars' in ret:
ret['extra_vars'] = obj.display_extra_vars()
return ret
class WorkflowJobListSerializer(WorkflowJobSerializer, UnifiedJobListSerializer):
class Meta:
fields = ('*', '-execution_node', '-controller_node',)
class WorkflowJobCancelSerializer(WorkflowJobSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class LaunchConfigurationBaseSerializer(BaseSerializer):
job_type = serializers.ChoiceField(allow_blank=True, allow_null=True, required=False, default=None,
choices=NEW_JOB_TYPE_CHOICES)
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
diff_mode = serializers.NullBooleanField(required=False, default=None)
verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None,
choices=VERBOSITY_CHOICES)
exclude_errors = ()
class Meta:
fields = ('*', 'extra_data', 'inventory', # Saved launch-time config fields
                  'job_type', 'job_tags', 'skip_tags', 'limit', 'diff_mode', 'verbosity')
def get_related(self, obj):
res = super(LaunchConfigurationBaseSerializer, self).get_related(obj)
if obj.inventory_id:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
res['credentials'] = self.reverse(
'api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)),
kwargs={'pk': obj.pk}
)
return res
def _build_mock_obj(self, attrs):
mock_obj = self.Meta.model()
if self.instance:
for field in self.instance._meta.fields:
setattr(mock_obj, field.name, getattr(self.instance, field.name))
field_names = set(field.name for field in self.Meta.model._meta.fields)
for field_name, value in list(attrs.items()):
setattr(mock_obj, field_name, value)
if field_name not in field_names:
attrs.pop(field_name)
return mock_obj
def to_representation(self, obj):
ret = super(LaunchConfigurationBaseSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'extra_data' in ret and obj.survey_passwords:
ret['extra_data'] = obj.display_extra_vars()
return ret
def get_summary_fields(self, obj):
summary_fields = super(LaunchConfigurationBaseSerializer, self).get_summary_fields(obj)
# Credential would be an empty dictionary in this case
summary_fields.pop('credential', None)
return summary_fields
def validate(self, attrs):
db_extra_data = {}
if self.instance:
db_extra_data = parse_yaml_or_json(self.instance.extra_data)
attrs = super(LaunchConfigurationBaseSerializer, self).validate(attrs)
ujt = None
if 'unified_job_template' in attrs:
ujt = attrs['unified_job_template']
elif self.instance:
ujt = self.instance.unified_job_template
# build additional field survey_passwords to track redacted variables
password_dict = {}
extra_data = parse_yaml_or_json(attrs.get('extra_data', {}))
if hasattr(ujt, 'survey_password_variables'):
# Prepare additional field survey_passwords for save
for key in ujt.survey_password_variables():
if key in extra_data:
password_dict[key] = REPLACE_STR
# Replace $encrypted$ submissions with db value if exists
if 'extra_data' in attrs:
if password_dict:
if not self.instance or password_dict != self.instance.survey_passwords:
attrs['survey_passwords'] = password_dict.copy()
# Force dict type (cannot preserve YAML formatting if passwords are involved)
# Encrypt the extra_data for save, only current password vars in JT survey
# but first, make a copy or else this is referenced by request.data, and
# user could get encrypted string in form data in API browser
attrs['extra_data'] = extra_data.copy()
encrypt_dict(attrs['extra_data'], password_dict.keys())
# For any raw $encrypted$ string, either
# - replace with existing DB value
# - raise a validation error
# - ignore, if default present
for key in password_dict.keys():
if attrs['extra_data'].get(key, None) == REPLACE_STR:
if key not in db_extra_data:
element = ujt.pivot_spec(ujt.survey_spec)[key]
# NOTE: validation _of_ the default values of password type
# questions not done here or on launch, but doing so could
# leak info about values, so it should not be added
if not ('default' in element and element['default']):
raise serializers.ValidationError(
{"extra_data": _('Provided variable {} has no database value to replace with.').format(key)})
else:
attrs['extra_data'][key] = db_extra_data[key]
# Build unsaved version of this config, use it to detect prompts errors
mock_obj = self._build_mock_obj(attrs)
accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(
_exclude_errors=self.exclude_errors, **mock_obj.prompts_dict())
# Remove all unprocessed $encrypted$ strings, indicating default usage
if 'extra_data' in attrs and password_dict:
for key, value in attrs['extra_data'].copy().items():
if value == REPLACE_STR:
if key in password_dict:
attrs['extra_data'].pop(key)
attrs.get('survey_passwords', {}).pop(key, None)
else:
errors.setdefault('extra_vars', []).append(
_('"$encrypted$ is a reserved keyword, may not be used for {var_name}."'.format(key))
)
# Launch configs call extra_vars extra_data for historical reasons
if 'extra_vars' in errors:
errors['extra_data'] = errors.pop('extra_vars')
if errors:
raise serializers.ValidationError(errors)
        # Model `.save` needs the container dict, not the pseudo fields
if mock_obj.char_prompts:
attrs['char_prompts'] = mock_obj.char_prompts
return attrs
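    # Worked sketch of the $encrypted$ handling above (survey variable name is
    # hypothetical): a submission {"extra_data": {"secret": "hunter2"}} is stored
    # encrypted with survey_passwords == {"secret": "$encrypted$"}; re-submitting the
    # literal "$encrypted$" keeps the previously saved database value, falls back to
    # the survey default (by dropping the key) when no saved value exists, and raises
    # a validation error when neither a saved value nor a default is available.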
class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
credential = DeprecatedCredentialField()
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
exclude_errors = ('required',) # required variables may be provided by WFJT or on launch
class Meta:
model = WorkflowJobTemplateNode
fields = ('*', 'credential', 'workflow_job_template', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
def get_related(self, obj):
res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)
res['success_nodes'] = self.reverse('api:workflow_job_template_node_success_nodes_list', kwargs={'pk': obj.pk})
res['failure_nodes'] = self.reverse('api:workflow_job_template_node_failure_nodes_list', kwargs={'pk': obj.pk})
res['always_nodes'] = self.reverse('api:workflow_job_template_node_always_nodes_list', kwargs={'pk': obj.pk})
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
try:
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail', kwargs={'pk': obj.workflow_job_template.pk})
except WorkflowJobTemplate.DoesNotExist:
pass
return res
def build_field(self, field_name, info, model_class, nested_depth):
# have to special-case the field so that DRF will not automagically make it
# read-only because it's a property on the model.
if field_name == 'credential':
return self.build_standard_field(field_name,
self.credential)
return super(WorkflowJobTemplateNodeSerializer, self).build_field(field_name, info, model_class, nested_depth)
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(WorkflowJobTemplateNodeSerializer, self).build_relational_field(field_name, relation_info)
# workflow_job_template is read-only unless creating a new node.
if self.instance and field_name == 'workflow_job_template':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def validate(self, attrs):
deprecated_fields = {}
if 'credential' in attrs: # TODO: remove when v2 API is deprecated
deprecated_fields['credential'] = attrs.pop('credential')
view = self.context.get('view')
attrs = super(WorkflowJobTemplateNodeSerializer, self).validate(attrs)
ujt_obj = None
if 'unified_job_template' in attrs:
ujt_obj = attrs['unified_job_template']
elif self.instance:
ujt_obj = self.instance.unified_job_template
if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated
cred = deprecated_fields['credential']
attrs['credential'] = cred
if cred is not None:
if not ujt_obj.ask_credential_on_launch:
raise serializers.ValidationError({"credential": _(
"Related template is not configured to accept credentials on launch.")})
cred = Credential.objects.get(pk=cred)
view = self.context.get('view', None)
if (not view) or (not view.request) or (view.request.user not in cred.use_role):
raise PermissionDenied()
return attrs
def create(self, validated_data): # TODO: remove when v2 API is deprecated
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(WorkflowJobTemplateNodeSerializer, self).create(validated_data)
if 'credential' in deprecated_fields:
if deprecated_fields['credential']:
obj.credentials.add(deprecated_fields['credential'])
return obj
def update(self, obj, validated_data): # TODO: remove when v2 API is deprecated
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(WorkflowJobTemplateNodeSerializer, self).update(obj, validated_data)
if 'credential' in deprecated_fields:
existing = obj.credentials.filter(credential_type__kind='ssh')
new_cred = deprecated_fields['credential']
if new_cred not in existing:
for cred in existing:
obj.credentials.remove(cred)
if new_cred:
obj.credentials.add(new_cred)
return obj
class WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):
credential = DeprecatedCredentialField()
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = WorkflowJobNode
fields = ('*', 'credential', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
'do_not_run',)
def get_related(self, obj):
res = super(WorkflowJobNodeSerializer, self).get_related(obj)
res['success_nodes'] = self.reverse('api:workflow_job_node_success_nodes_list', kwargs={'pk': obj.pk})
res['failure_nodes'] = self.reverse('api:workflow_job_node_failure_nodes_list', kwargs={'pk': obj.pk})
res['always_nodes'] = self.reverse('api:workflow_job_node_always_nodes_list', kwargs={'pk': obj.pk})
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.job:
res['job'] = obj.job.get_absolute_url(self.context.get('request'))
if obj.workflow_job:
res['workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job.pk})
return res
class WorkflowJobNodeListSerializer(WorkflowJobNodeSerializer):
pass
class WorkflowJobNodeDetailSerializer(WorkflowJobNodeSerializer):
pass
class WorkflowJobTemplateNodeDetailSerializer(WorkflowJobTemplateNodeSerializer):
'''
Influence the api browser sample data to not include workflow_job_template
when editing a WorkflowNode.
Note: I was not able to accomplish this through the use of extra_kwargs.
Maybe something to do with workflow_job_template being a relational field?
'''
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(WorkflowJobTemplateNodeDetailSerializer, self).build_relational_field(field_name, relation_info)
if self.instance and field_name == 'workflow_job_template':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
class JobListSerializer(JobSerializer, UnifiedJobListSerializer):
pass
class AdHocCommandListSerializer(AdHocCommandSerializer, UnifiedJobListSerializer):
pass
class SystemJobListSerializer(SystemJobSerializer, UnifiedJobListSerializer):
class Meta:
model = SystemJob
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class JobHostSummarySerializer(BaseSerializer):
class Meta:
model = JobHostSummary
fields = ('*', '-name', '-description', 'job', 'host', 'host_name', 'changed',
'dark', 'failures', 'ok', 'processed', 'skipped', 'failed',
'ignored', 'rescued')
def get_related(self, obj):
res = super(JobHostSummarySerializer, self).get_related(obj)
res.update(dict(
job=self.reverse('api:job_detail', kwargs={'pk': obj.job.pk})))
if obj.host is not None:
res.update(dict(
host=self.reverse('api:host_detail', kwargs={'pk': obj.host.pk})
))
return res
def get_summary_fields(self, obj):
d = super(JobHostSummarySerializer, self).get_summary_fields(obj)
try:
d['job']['job_template_id'] = obj.job.job_template.id
d['job']['job_template_name'] = obj.job.job_template.name
except (KeyError, AttributeError):
pass
return d
class JobEventSerializer(BaseSerializer):
event_display = serializers.CharField(source='get_event_display2', read_only=True)
event_level = serializers.IntegerField(read_only=True)
class Meta:
model = JobEvent
fields = ('*', '-name', '-description', 'job', 'event', 'counter',
'event_display', 'event_data', 'event_level', 'failed',
'changed', 'uuid', 'parent_uuid', 'host', 'host_name', 'parent',
'playbook', 'play', 'task', 'role', 'stdout', 'start_line', 'end_line',
'verbosity')
def get_related(self, obj):
res = super(JobEventSerializer, self).get_related(obj)
res.update(dict(
job = self.reverse('api:job_detail', kwargs={'pk': obj.job_id}),
))
if obj.parent_id:
res['parent'] = self.reverse('api:job_event_detail', kwargs={'pk': obj.parent_id})
res['children'] = self.reverse('api:job_event_children_list', kwargs={'pk': obj.pk})
if obj.host_id:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host_id})
if obj.hosts.exists():
res['hosts'] = self.reverse('api:job_event_hosts_list', kwargs={'pk': obj.pk})
return res
def get_summary_fields(self, obj):
d = super(JobEventSerializer, self).get_summary_fields(obj)
try:
d['job']['job_template_id'] = obj.job.job_template.id
d['job']['job_template_name'] = obj.job.job_template.name
except (KeyError, AttributeError):
pass
return d
def to_representation(self, obj):
ret = super(JobEventSerializer, self).to_representation(obj)
# Show full stdout for event detail view, truncate only for list view.
if hasattr(self.context.get('view', None), 'retrieve'):
return ret
# Show full stdout for playbook_on_* events.
if obj and obj.event.startswith('playbook_on'):
return ret
max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes:
ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026'
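# Count ANSI SGR "set" and reset sequences in the (possibly truncated) stdout so
# that any color codes left open by the truncation are closed with explicit resets.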
set_count = 0
reset_count = 0
for m in ANSI_SGR_PATTERN.finditer(ret['stdout']):
if m.string[m.start():m.end()] == u'\u001b[0m':
reset_count += 1
else:
set_count += 1
ret['stdout'] += u'\u001b[0m' * (set_count - reset_count)
return ret
class JobEventWebSocketSerializer(JobEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = JobEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'job_events'
class ProjectUpdateEventSerializer(JobEventSerializer):
stdout = serializers.SerializerMethodField()
event_data = serializers.SerializerMethodField()
class Meta:
model = ProjectUpdateEvent
fields = ('*', '-name', '-description', '-job', '-job_id',
'-parent_uuid', '-parent', '-host', 'project_update')
def get_related(self, obj):
res = super(JobEventSerializer, self).get_related(obj)
res['project_update'] = self.reverse(
'api:project_update_detail', kwargs={'pk': obj.project_update_id}
)
return res
def get_stdout(self, obj):
return UriCleaner.remove_sensitive(obj.stdout)
def get_event_data(self, obj):
try:
return json.loads(
UriCleaner.remove_sensitive(
json.dumps(obj.event_data)
)
)
except Exception:
logger.exception("Failed to sanitize event_data")
return {}
class ProjectUpdateEventWebSocketSerializer(ProjectUpdateEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = ProjectUpdateEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'project_update_events'
class AdHocCommandEventSerializer(BaseSerializer):
event_display = serializers.CharField(source='get_event_display', read_only=True)
class Meta:
model = AdHocCommandEvent
fields = ('*', '-name', '-description', 'ad_hoc_command', 'event',
'counter', 'event_display', 'event_data', 'failed',
'changed', 'uuid', 'host', 'host_name', 'stdout',
'start_line', 'end_line', 'verbosity')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res.update(dict(
ad_hoc_command = self.reverse('api:ad_hoc_command_detail', kwargs={'pk': obj.ad_hoc_command_id}),
))
if obj.host:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host.pk})
return res
def to_representation(self, obj):
ret = super(AdHocCommandEventSerializer, self).to_representation(obj)
# Show full stdout for event detail view, truncate only for list view.
if hasattr(self.context.get('view', None), 'retrieve'):
return ret
max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes:
ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026'
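# As in JobEventSerializer: count SGR set/reset sequences so colors cut off by the
# truncation are closed with explicit resets.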
set_count = 0
reset_count = 0
for m in ANSI_SGR_PATTERN.finditer(ret['stdout']):
if m.string[m.start():m.end()] == u'\u001b[0m':
reset_count += 1
else:
set_count += 1
ret['stdout'] += u'\u001b[0m' * (set_count - reset_count)
return ret
class AdHocCommandEventWebSocketSerializer(AdHocCommandEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = AdHocCommandEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'ad_hoc_command_events'
class InventoryUpdateEventSerializer(AdHocCommandEventSerializer):
class Meta:
model = InventoryUpdateEvent
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
'-host_name', 'inventory_update')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res['inventory_update'] = self.reverse(
'api:inventory_update_detail', kwargs={'pk': obj.inventory_update_id}
)
return res
class InventoryUpdateEventWebSocketSerializer(InventoryUpdateEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = InventoryUpdateEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'inventory_update_events'
class SystemJobEventSerializer(AdHocCommandEventSerializer):
class Meta:
model = SystemJobEvent
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
'-host_name', 'system_job')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res['system_job'] = self.reverse(
'api:system_job_detail', kwargs={'pk': obj.system_job_id}
)
return res
class SystemJobEventWebSocketSerializer(SystemJobEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = SystemJobEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'system_job_events'
class JobLaunchSerializer(BaseSerializer):
# Representational fields
passwords_needed_to_start = serializers.ReadOnlyField()
can_start_without_user_input = serializers.BooleanField(read_only=True)
variables_needed_to_start = serializers.ReadOnlyField()
credential_needed_to_start = serializers.SerializerMethodField()
inventory_needed_to_start = serializers.SerializerMethodField()
survey_enabled = serializers.SerializerMethodField()
job_template_data = serializers.SerializerMethodField()
defaults = serializers.SerializerMethodField()
# Accepted on launch fields
extra_vars = serializers.JSONField(required=False, write_only=True)
inventory = serializers.PrimaryKeyRelatedField(
queryset=Inventory.objects.all(),
required=False, write_only=True
)
credentials = serializers.PrimaryKeyRelatedField(
many=True, queryset=Credential.objects.all(),
required=False, write_only=True
)
credential_passwords = VerbatimField(required=False, write_only=True)
diff_mode = serializers.BooleanField(required=False, write_only=True)
job_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
job_type = serializers.ChoiceField(required=False, choices=NEW_JOB_TYPE_CHOICES, write_only=True)
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
class Meta:
model = JobTemplate
fields = ('can_start_without_user_input', 'passwords_needed_to_start',
'extra_vars', 'inventory', 'limit', 'job_tags', 'skip_tags', 'job_type', 'verbosity', 'diff_mode',
'credentials', 'credential_passwords', 'ask_variables_on_launch', 'ask_tags_on_launch',
'ask_diff_mode_on_launch', 'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_limit_on_launch',
'ask_verbosity_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch',
'survey_enabled', 'variables_needed_to_start', 'credential_needed_to_start',
'inventory_needed_to_start', 'job_template_data', 'defaults', 'verbosity')
read_only_fields = (
'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_credential_on_launch',)
def get_credential_needed_to_start(self, obj):
return False
def get_inventory_needed_to_start(self, obj):
return not (obj and obj.inventory)
def get_survey_enabled(self, obj):
if obj:
return obj.survey_enabled and 'spec' in obj.survey_spec
return False
def get_defaults(self, obj):
defaults_dict = {}
for field_name in JobTemplate.get_ask_mapping().keys():
if field_name == 'inventory':
defaults_dict[field_name] = dict(
name=getattrd(obj, '%s.name' % field_name, None),
id=getattrd(obj, '%s.pk' % field_name, None))
elif field_name == 'credentials':
for cred in obj.credentials.all():
cred_dict = dict(
id=cred.id,
name=cred.name,
credential_type=cred.credential_type.pk,
passwords_needed=cred.passwords_needed
)
if cred.credential_type.managed_by_tower and 'vault_id' in cred.credential_type.defined_fields:
cred_dict['vault_id'] = cred.get_input('vault_id', default=None)
defaults_dict.setdefault(field_name, []).append(cred_dict)
else:
defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict
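# Illustrative shape of the dict built above (field names follow get_ask_mapping(),
# the values here are made up):
#   {'inventory': {'name': 'Demo Inventory', 'id': 1},
#    'credentials': [{'id': 1, 'name': 'Demo Credential', 'credential_type': 1,
#                     'passwords_needed': []}],
#    'limit': '', 'job_tags': '', ...}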
def get_job_template_data(self, obj):
return dict(name=obj.name, id=obj.id, description=obj.description)
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def validate(self, attrs):
template = self.context.get('template')
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(
_exclude_errors=['prompts'], # make several error types non-blocking
**attrs)
self._ignored_fields = rejected
if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Job Template is being deleted.")
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:
errors['inventory'] = _("The provided inventory is being deleted.")
# Prohibit providing multiple credentials of the same CredentialType.kind
# or multiples of same vault id
distinct_cred_kinds = []
for cred in accepted.get('credentials', []):
if cred.unique_hash() in distinct_cred_kinds:
errors.setdefault('credentials', []).append(_(
'Cannot assign multiple {} credentials.'
).format(cred.unique_hash(display=True)))
if cred.credential_type.kind not in ('ssh', 'vault', 'cloud', 'net'):
errors.setdefault('credentials', []).append(_(
'Cannot assign a Credential of kind `{}`'
).format(cred.credential_type.kind))
distinct_cred_kinds.append(cred.unique_hash())
# Prohibit removing credentials from the JT list (unsupported for now)
template_credentials = template.credentials.all()
if 'credentials' in attrs:
removed_creds = set(template_credentials) - set(attrs['credentials'])
provided_mapping = Credential.unique_dict(attrs['credentials'])
for cred in removed_creds:
if cred.unique_hash() in provided_mapping.keys():
continue # User replaced credential with new of same type
errors.setdefault('credentials', []).append(_(
'Removing {} credential at launch time without replacement is not supported. '
'Provided list lacked credential(s): {}.'
).format(cred.unique_hash(display=True), ', '.join([str(c) for c in removed_creds])))
# verify that credentials (either provided or existing) don't
# require launch-time passwords that have not been provided
if 'credentials' in accepted:
launch_credentials = accepted['credentials']
else:
launch_credentials = template_credentials
passwords = attrs.get('credential_passwords', {}) # get from original attrs
passwords_lacking = []
for cred in launch_credentials:
for p in cred.passwords_needed:
if p not in passwords:
passwords_lacking.append(p)
else:
accepted.setdefault('credential_passwords', {})
accepted['credential_passwords'][p] = passwords[p]
if len(passwords_lacking):
errors['passwords_needed_to_start'] = passwords_lacking
if errors:
raise serializers.ValidationError(errors)
if 'extra_vars' in accepted:
extra_vars_save = accepted['extra_vars']
else:
extra_vars_save = None
# Validate job against JobTemplate clean_ methods
accepted = super(JobLaunchSerializer, self).validate(accepted)
# Preserve extra_vars as dictionary internally
if extra_vars_save:
accepted['extra_vars'] = extra_vars_save
return accepted
class WorkflowJobLaunchSerializer(BaseSerializer):
can_start_without_user_input = serializers.BooleanField(read_only=True)
defaults = serializers.SerializerMethodField()
variables_needed_to_start = serializers.ReadOnlyField()
survey_enabled = serializers.SerializerMethodField()
extra_vars = VerbatimField(required=False, write_only=True)
inventory = serializers.PrimaryKeyRelatedField(
queryset=Inventory.objects.all(),
required=False, write_only=True
)
workflow_job_template_data = serializers.SerializerMethodField()
class Meta:
model = WorkflowJobTemplate
fields = ('ask_inventory_on_launch', 'can_start_without_user_input', 'defaults', 'extra_vars',
'inventory', 'survey_enabled', 'variables_needed_to_start',
'node_templates_missing', 'node_prompts_rejected',
'workflow_job_template_data', 'survey_enabled', 'ask_variables_on_launch')
read_only_fields = ('ask_inventory_on_launch', 'ask_variables_on_launch')
def get_survey_enabled(self, obj):
if obj:
return obj.survey_enabled and 'spec' in obj.survey_spec
return False
def get_defaults(self, obj):
defaults_dict = {}
for field_name in WorkflowJobTemplate.get_ask_mapping().keys():
if field_name == 'inventory':
defaults_dict[field_name] = dict(
name=getattrd(obj, '%s.name' % field_name, None),
id=getattrd(obj, '%s.pk' % field_name, None))
else:
defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict
def get_workflow_job_template_data(self, obj):
return dict(name=obj.name, id=obj.id, description=obj.description)
def validate(self, attrs):
template = self.instance
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs)
self._ignored_fields = rejected
if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Workflow is being deleted.")
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:
errors['inventory'] = _("The provided inventory is being deleted.")
if errors:
raise serializers.ValidationError(errors)
WFJT_extra_vars = template.extra_vars
WFJT_inventory = template.inventory
super(WorkflowJobLaunchSerializer, self).validate(attrs)
template.extra_vars = WFJT_extra_vars
template.inventory = WFJT_inventory
return accepted
class NotificationTemplateSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete', 'copy']
capabilities_prefetch = [{'copy': 'organization.admin'}]
class Meta:
model = NotificationTemplate
fields = ('*', 'organization', 'notification_type', 'notification_configuration')
type_map = {"string": (str,),
"int": (int,),
"bool": (bool,),
"list": (list,),
"password": (str,),
"object": (dict, OrderedDict)}
def to_representation(self, obj):
ret = super(NotificationTemplateSerializer, self).to_representation(obj)
if 'notification_configuration' in ret:
ret['notification_configuration'] = obj.display_notification_configuration()
return ret
def get_related(self, obj):
res = super(NotificationTemplateSerializer, self).get_related(obj)
res.update(dict(
test = self.reverse('api:notification_template_test', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:notification_template_notification_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:notification_template_copy', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def _recent_notifications(self, obj):
return [{'id': x.id, 'status': x.status, 'created': x.created} for x in obj.notifications.all().order_by('-created')[:5]]
def get_summary_fields(self, obj):
d = super(NotificationTemplateSerializer, self).get_summary_fields(obj)
d['recent_notifications'] = self._recent_notifications(obj)
return d
def validate(self, attrs):
from awx.api.views import NotificationTemplateDetail
notification_type = None
if 'notification_type' in attrs:
notification_type = attrs['notification_type']
elif self.instance:
notification_type = self.instance.notification_type
else:
notification_type = None
if not notification_type:
raise serializers.ValidationError(_('Missing required fields for Notification Configuration: notification_type'))
notification_class = NotificationTemplate.CLASS_FOR_NOTIFICATION_TYPE[notification_type]
missing_fields = []
incorrect_type_fields = []
error_list = []
if 'notification_configuration' not in attrs:
return attrs
if self.context['view'].kwargs and isinstance(self.context['view'], NotificationTemplateDetail):
object_actual = self.context['view'].get_object()
else:
object_actual = None
for field, params in notification_class.init_parameters.items():
if field not in attrs['notification_configuration']:
if 'default' in params:
attrs['notification_configuration'][field] = params['default']
else:
missing_fields.append(field)
continue
field_val = attrs['notification_configuration'][field]
field_type = params['type']
expected_types = self.type_map[field_type]
if not type(field_val) in expected_types:
incorrect_type_fields.append((field, field_type))
continue
if field_type == "list" and len(field_val) < 1:
error_list.append(_("No values specified for field '{}'").format(field))
continue
if field_type == "password" and field_val == "$encrypted$" and object_actual is not None:
attrs['notification_configuration'][field] = object_actual.notification_configuration[field]
if missing_fields:
error_list.append(_("Missing required fields for Notification Configuration: {}.").format(missing_fields))
if incorrect_type_fields:
for type_field_error in incorrect_type_fields:
error_list.append(_("Configuration field '{}' incorrect type, expected {}.").format(type_field_error[0],
type_field_error[1]))
if error_list:
raise serializers.ValidationError(error_list)
return super(NotificationTemplateSerializer, self).validate(attrs)
class NotificationSerializer(BaseSerializer):
class Meta:
model = Notification
fields = ('*', '-name', '-description', 'notification_template', 'error', 'status', 'notifications_sent',
'notification_type', 'recipients', 'subject')
def get_related(self, obj):
res = super(NotificationSerializer, self).get_related(obj)
res.update(dict(
notification_template = self.reverse('api:notification_template_detail', kwargs={'pk': obj.notification_template.pk}),
))
return res
class LabelSerializer(BaseSerializer):
class Meta:
model = Label
fields = ('*', '-description', 'organization')
def get_related(self, obj):
res = super(LabelSerializer, self).get_related(obj)
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
class SchedulePreviewSerializer(BaseSerializer):
class Meta:
model = Schedule
fields = ('rrule',)
# We reject rrules if:
# - DTSTART is not included
# - INTERVAL is not included
# - SECONDLY is used
# - TZID is used
# - BYDAY prefixed with a number (MO is good but not 20MO)
# - BYYEARDAY
# - BYWEEKNO
# - Multiple DTSTART or RRULE elements
# - Can't contain both COUNT and UNTIL
# - COUNT > 999
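# Example of an rrule value this validator accepts (illustrative only):
#   DTSTART:20300115T120000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5
# and one it rejects, since DTSTART is missing and SECONDLY is used:
#   RRULE:FREQ=SECONDLY;INTERVAL=10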
def validate_rrule(self, value):
rrule_value = value
multi_by_month_day = r".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
multi_by_month = r".*?BYMONTH[\:\=][0-9]+,[0-9]+"
by_day_with_numeric_prefix = r".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
match_count = re.match(r".*?(COUNT\=[0-9]+)", rrule_value)
match_multiple_dtstart = re.findall(r".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
match_native_dtstart = re.findall(r".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
match_multiple_rrule = re.findall(r".*?(RRULE\:)", rrule_value)
if not len(match_multiple_dtstart):
raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
if len(match_native_dtstart):
raise serializers.ValidationError(_('DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ.'))
if len(match_multiple_dtstart) > 1:
raise serializers.ValidationError(_('Multiple DTSTART is not supported.'))
if not len(match_multiple_rrule):
raise serializers.ValidationError(_('RRULE required in rrule.'))
if len(match_multiple_rrule) > 1:
raise serializers.ValidationError(_('Multiple RRULE is not supported.'))
if 'interval' not in rrule_value.lower():
raise serializers.ValidationError(_('INTERVAL required in rrule.'))
if 'secondly' in rrule_value.lower():
raise serializers.ValidationError(_('SECONDLY is not supported.'))
if re.match(multi_by_month_day, rrule_value):
raise serializers.ValidationError(_('Multiple BYMONTHDAYs not supported.'))
if re.match(multi_by_month, rrule_value):
raise serializers.ValidationError(_('Multiple BYMONTHs not supported.'))
if re.match(by_day_with_numeric_prefix, rrule_value):
raise serializers.ValidationError(_("BYDAY with numeric prefix not supported."))
if 'byyearday' in rrule_value.lower():
raise serializers.ValidationError(_("BYYEARDAY not supported."))
if 'byweekno' in rrule_value.lower():
raise serializers.ValidationError(_("BYWEEKNO not supported."))
if 'COUNT' in rrule_value and 'UNTIL' in rrule_value:
raise serializers.ValidationError(_("RRULE may not contain both COUNT and UNTIL"))
if match_count:
count_val = match_count.groups()[0].strip().split("=")
if int(count_val[1]) > 999:
raise serializers.ValidationError(_("COUNT > 999 is unsupported."))
try:
Schedule.rrulestr(rrule_value)
except Exception as e:
raise serializers.ValidationError(_("rrule parsing failed validation: {}").format(e))
return value
class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSerializer):
show_capabilities = ['edit', 'delete']
timezone = serializers.SerializerMethodField()
until = serializers.SerializerMethodField()
class Meta:
model = Schedule
fields = ('*', 'unified_job_template', 'enabled', 'dtstart', 'dtend', 'rrule', 'next_run', 'timezone',
'until')
def get_timezone(self, obj):
return obj.timezone
def get_until(self, obj):
return obj.until
def get_related(self, obj):
res = super(ScheduleSerializer, self).get_related(obj)
res.update(dict(
unified_jobs = self.reverse('api:schedule_unified_jobs_list', kwargs={'pk': obj.pk}),
))
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
try:
if obj.unified_job_template.project:
res['project'] = obj.unified_job_template.project.get_absolute_url(self.context.get('request'))
except ObjectDoesNotExist:
pass
if obj.inventory:
res['inventory'] = obj.inventory.get_absolute_url(self.context.get('request'))
elif obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):
res['inventory'] = obj.unified_job_template.inventory.get_absolute_url(self.context.get('request'))
return res
def get_summary_fields(self, obj):
summary_fields = super(ScheduleSerializer, self).get_summary_fields(obj)
if 'inventory' in summary_fields:
return summary_fields
inventory = None
if obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):
inventory = obj.unified_job_template.inventory
else:
return summary_fields
summary_fields['inventory'] = dict()
for field in SUMMARIZABLE_FK_FIELDS['inventory']:
summary_fields['inventory'][field] = getattr(inventory, field, None)
return summary_fields
def validate_unified_job_template(self, value):
if type(value) == InventorySource and value.source not in SCHEDULEABLE_PROVIDERS:
raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
elif type(value) == Project and value.scm_type == '':
raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))
elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:
raise serializers.ValidationError(_(
'Inventory sources with `update_on_project_update` cannot be scheduled. '
'Schedule its source project `{}` instead.'.format(value.source_project.name)))
return value
class InstanceSerializer(BaseSerializer):
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.IntegerField(
help_text=_('Count of jobs in the running or waiting state that '
'are targeted for this instance'),
read_only=True
)
jobs_total = serializers.IntegerField(
help_text=_('Count of all jobs that target this instance'),
read_only=True
)
class Meta:
model = Instance
read_only_fields = ('uuid', 'hostname', 'version')
fields = ("id", "type", "url", "related", "uuid", "hostname", "created", "modified", 'capacity_adjustment',
"version", "capacity", "consumed_capacity", "percent_capacity_remaining", "jobs_running", "jobs_total",
"cpu", "memory", "cpu_capacity", "mem_capacity", "enabled", "managed_by_policy")
def get_related(self, obj):
res = super(InstanceSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
return res
def get_consumed_capacity(self, obj):
return obj.consumed_capacity
def get_percent_capacity_remaining(self, obj):
if not obj.capacity or obj.consumed_capacity >= obj.capacity:
return 0.0
else:
return float("{0:.2f}".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100))
class InstanceGroupSerializer(BaseSerializer):
committed_capacity = serializers.SerializerMethodField()
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.IntegerField(
help_text=_('Count of jobs in the running or waiting state that '
'are targeted for this instance group'),
read_only=True
)
jobs_total = serializers.IntegerField(
help_text=_('Count of all jobs that target this instance group'),
read_only=True
)
instances = serializers.SerializerMethodField()
is_controller = serializers.BooleanField(
help_text=_('Indicates whether instance group controls any other group'),
read_only=True
)
is_isolated = serializers.BooleanField(
help_text=_('Indicates whether instances in this group are isolated. '
'Isolated groups have a designated controller group.'),
read_only=True
)
# NOTE: help_text is duplicated from field definitions, no obvious way of
# both defining field details here and also getting the field's help_text
policy_instance_percentage = serializers.IntegerField(
default=0, min_value=0, max_value=100, required=False, initial=0,
label=_('Policy Instance Percentage'),
help_text=_("Minimum percentage of all instances that will be automatically assigned to "
"this group when new instances come online.")
)
policy_instance_minimum = serializers.IntegerField(
default=0, min_value=0, required=False, initial=0,
label=_('Policy Instance Minimum'),
help_text=_("Static minimum number of Instances that will be automatically assign to "
"this group when new instances come online.")
)
policy_instance_list = serializers.ListField(
child=serializers.CharField(), required=False,
label=_('Policy Instance List'),
help_text=_("List of exact-match Instances that will be assigned to this group")
)
class Meta:
model = InstanceGroup
fields = ("id", "type", "url", "related", "name", "created", "modified",
"capacity", "committed_capacity", "consumed_capacity",
"percent_capacity_remaining", "jobs_running", "jobs_total",
"instances", "controller", "is_controller", "is_isolated",
"policy_instance_percentage", "policy_instance_minimum", "policy_instance_list")
def get_related(self, obj):
res = super(InstanceGroupSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_group_unified_jobs_list', kwargs={'pk': obj.pk})
res['instances'] = self.reverse('api:instance_group_instance_list', kwargs={'pk': obj.pk})
if obj.controller_id:
res['controller'] = self.reverse('api:instance_group_detail', kwargs={'pk': obj.controller_id})
return res
def validate_policy_instance_list(self, value):
for instance_name in value:
if value.count(instance_name) > 1:
raise serializers.ValidationError(_('Duplicate entry {}.').format(instance_name))
if not Instance.objects.filter(hostname=instance_name).exists():
raise serializers.ValidationError(_('{} is not a valid hostname of an existing instance.').format(instance_name))
if Instance.objects.get(hostname=instance_name).is_isolated():
raise serializers.ValidationError(_('Isolated instances may not be added or removed from instance groups via the API.'))
if self.instance and self.instance.controller_id is not None:
raise serializers.ValidationError(_('Isolated instance group membership may not be managed via the API.'))
return value
def validate_name(self, value):
if self.instance and self.instance.name == 'tower' and value != 'tower':
raise serializers.ValidationError(_('tower instance group name may not be changed.'))
return value
def get_capacity_dict(self):
# Store capacity values (globally computed) in the context
if 'capacity_map' not in self.context:
ig_qs = None
jobs_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
if self.parent: # Is ListView:
ig_qs = self.parent.instance
self.context['capacity_map'] = InstanceGroup.objects.capacity_values(
qs=ig_qs, tasks=jobs_qs, breakdown=True)
return self.context['capacity_map']
def get_consumed_capacity(self, obj):
return self.get_capacity_dict()[obj.name]['running_capacity']
def get_committed_capacity(self, obj):
return self.get_capacity_dict()[obj.name]['committed_capacity']
def get_percent_capacity_remaining(self, obj):
if not obj.capacity:
return 0.0
consumed = self.get_consumed_capacity(obj)
if consumed >= obj.capacity:
return 0.0
else:
return float("{0:.2f}".format(
((float(obj.capacity) - float(consumed)) / (float(obj.capacity))) * 100)
)
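# e.g. a group with capacity=200 and 50 units of consumed capacity reports 75.0.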
def get_instances(self, obj):
return obj.instances.count()
class ActivityStreamSerializer(BaseSerializer):
changes = serializers.SerializerMethodField()
object_association = serializers.SerializerMethodField(
help_text=_("When present, shows the field name of the role or relationship that changed."))
object_type = serializers.SerializerMethodField(
help_text=_("When present, shows the model on which the role or relationship was defined."))
@cached_property
def _local_summarizable_fk_fields(self):
summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS)
# Special requests
summary_dict['group'] = summary_dict['group'] + ('inventory_id',)
for key in summary_dict.keys():
if 'id' not in summary_dict[key]:
summary_dict[key] = summary_dict[key] + ('id',)
field_list = list(summary_dict.items())
# Needed related fields that are not in the default summary fields
field_list += [
('workflow_job_template_node', ('id', 'unified_job_template_id')),
('label', ('id', 'name', 'organization_id')),
('notification', ('id', 'status', 'notification_type', 'notification_template_id')),
('o_auth2_access_token', ('id', 'user_id', 'description', 'application_id', 'scope')),
('o_auth2_application', ('id', 'name', 'description')),
('credential_type', ('id', 'name', 'description', 'kind', 'managed_by_tower')),
('ad_hoc_command', ('id', 'name', 'status', 'limit'))
]
return field_list
class Meta:
model = ActivityStream
fields = ('*', '-name', '-description', '-created', '-modified', 'timestamp', 'operation',
'changes', 'object1', 'object2', 'object_association', 'action_node', 'object_type')
def get_fields(self):
ret = super(ActivityStreamSerializer, self).get_fields()
for key, field in list(ret.items()):
if key == 'changes':
field.help_text = _('A summary of the new and changed values when an object is created, updated, or deleted')
if key == 'object1':
field.help_text = _('For create, update, and delete events this is the object type that was affected. '
'For associate and disassociate events this is the object type associated or disassociated with object2.')
if key == 'object2':
field.help_text = _('Unpopulated for create, update, and delete events. For associate and disassociate '
'events this is the object type that object1 is being associated with.')
if key == 'operation':
field.help_text = _('The action taken with respect to the given object(s).')
return ret
def get_changes(self, obj):
if obj is None:
return {}
try:
return json.loads(obj.changes)
except Exception:
logger.warn("Error deserializing activity stream json changes")
return {}
def get_object_association(self, obj):
if not obj.object_relationship_type:
return ""
elif obj.object_relationship_type.endswith('_role'):
# roles: these values look like
# "awx.main.models.inventory.Inventory.admin_role"
# due to historical reasons the UI expects just "role" here
return "role"
# default case: these values look like
# "awx.main.models.organization.Organization_notification_templates_success"
# so instead of splitting on period we have to take after the first underscore
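# e.g. "...Organization_notification_templates_success" -> "notification_templates_success"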
try:
return obj.object_relationship_type.split(".")[-1].split("_", 1)[1]
except Exception:
logger.debug('Failed to parse activity stream relationship type {}'.format(obj.object_relationship_type))
return ""
def get_object_type(self, obj):
if not obj.object_relationship_type:
return ""
elif obj.object_relationship_type.endswith('_role'):
return camelcase_to_underscore(obj.object_relationship_type.rsplit('.', 2)[-2])
# default case: these values look like
# "awx.main.models.organization.Organization_notification_templates_success"
# so we have to take after the last period but before the first underscore.
try:
cls = obj.object_relationship_type.rsplit('.', 1)[-1]
return camelcase_to_underscore(cls.split('_', 1)[0])
except Exception:
logger.debug('Failed to parse activity stream relationship type {}'.format(obj.object_relationship_type))
return ""
def get_related(self, obj):
rel = {}
if obj.actor is not None:
rel['actor'] = self.reverse('api:user_detail', kwargs={'pk': obj.actor.pk})
for fk, __ in self._local_summarizable_fk_fields:
if not hasattr(obj, fk):
continue
m2m_list = self._get_rel(obj, fk)
if m2m_list:
rel[fk] = []
id_list = []
for thisItem in m2m_list:
if getattr(thisItem, 'id', None) in id_list:
continue
id_list.append(getattr(thisItem, 'id', None))
if hasattr(thisItem, 'get_absolute_url'):
rel_url = thisItem.get_absolute_url(self.context.get('request'))
else:
view_name = fk + '_detail'
rel_url = self.reverse('api:' + view_name, kwargs={'pk': thisItem.id})
rel[fk].append(rel_url)
if fk == 'schedule':
rel['unified_job_template'] = thisItem.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.setting and obj.setting.get('category', None):
rel['setting'] = self.reverse(
'api:setting_singleton_detail',
kwargs={'category_slug': obj.setting['category']}
)
return rel
def _get_rel(self, obj, fk):
related_model = ActivityStream._meta.get_field(fk).related_model
related_manager = getattr(obj, fk)
if issubclass(related_model, PolymorphicModel) and hasattr(obj, '_prefetched_objects_cache'):
# HACK: manually fill PolymorphicModel caches to prevent running query multiple times
# unnecessary if django-polymorphic issue #68 is solved
if related_manager.prefetch_cache_name not in obj._prefetched_objects_cache:
obj._prefetched_objects_cache[related_manager.prefetch_cache_name] = list(related_manager.all())
return related_manager.all()
def get_summary_fields(self, obj):
summary_fields = OrderedDict()
for fk, related_fields in self._local_summarizable_fk_fields:
try:
if not hasattr(obj, fk):
continue
m2m_list = self._get_rel(obj, fk)
if m2m_list:
summary_fields[fk] = []
for thisItem in m2m_list:
if fk == 'job':
summary_fields['job_template'] = []
job_template_item = {}
job_template_fields = SUMMARIZABLE_FK_FIELDS['job_template']
job_template = getattr(thisItem, 'job_template', None)
if job_template is not None:
for field in job_template_fields:
fval = getattr(job_template, field, None)
if fval is not None:
job_template_item[field] = fval
summary_fields['job_template'].append(job_template_item)
if fk == 'workflow_job_template_node':
summary_fields['workflow_job_template'] = []
workflow_job_template_item = {}
workflow_job_template_fields = SUMMARIZABLE_FK_FIELDS['workflow_job_template']
workflow_job_template = getattr(thisItem, 'workflow_job_template', None)
if workflow_job_template is not None:
for field in workflow_job_template_fields:
fval = getattr(workflow_job_template, field, None)
if fval is not None:
workflow_job_template_item[field] = fval
summary_fields['workflow_job_template'].append(workflow_job_template_item)
if fk == 'schedule':
unified_job_template = getattr(thisItem, 'unified_job_template', None)
if unified_job_template is not None:
summary_fields[get_type_for_model(unified_job_template)] = {'id': unified_job_template.id,
'name': unified_job_template.name}
thisItemDict = {}
for field in related_fields:
fval = getattr(thisItem, field, None)
if fval is not None:
thisItemDict[field] = fval
summary_fields[fk].append(thisItemDict)
except ObjectDoesNotExist:
pass
if obj.actor is not None:
summary_fields['actor'] = dict(id = obj.actor.id,
username = obj.actor.username,
first_name = obj.actor.first_name,
last_name = obj.actor.last_name)
elif obj.deleted_actor:
summary_fields['actor'] = obj.deleted_actor.copy()
summary_fields['actor']['id'] = None
if obj.setting:
summary_fields['setting'] = [obj.setting]
return summary_fields
|
py | 1a2eab502e8edeb022d083bfdb0d6b42b5363cba | # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import math
import tqdm
import numpy as np
from multiprocessing.pool import ThreadPool
import paddle.fluid as fluid
import paddlex.utils.logging as logging
import paddlex
import copy
import os.path as osp
from paddlex.cv.transforms import arrange_transforms
from collections import OrderedDict
from .faster_rcnn import FasterRCNN
from .utils.detection_eval import eval_results, bbox2out, mask2out
class MaskRCNN(FasterRCNN):
"""构建MaskRCNN,并实现其训练、评估、预测和模型导出。
Args:
num_classes (int): 包含了背景类的类别数。默认为81。
backbone (str): MaskRCNN的backbone网络,取值范围为['ResNet18', 'ResNet50',
'ResNet50_vd', 'ResNet101', 'ResNet101_vd', 'HRNet_W18']。默认为'ResNet50'。
with_fpn (bool): 是否使用FPN结构。默认为True。
aspect_ratios (list): 生成anchor高宽比的可选值。默认为[0.5, 1.0, 2.0]。
anchor_sizes (list): 生成anchor大小的可选值。默认为[32, 64, 128, 256, 512]。
input_channel (int): 输入图像的通道数量。默认为3。
"""
def __init__(self,
num_classes=81,
backbone='ResNet50',
with_fpn=True,
aspect_ratios=[0.5, 1.0, 2.0],
anchor_sizes=[32, 64, 128, 256, 512],
input_channel=3):
self.init_params = locals()
backbones = [
'ResNet18', 'ResNet50', 'ResNet50_vd', 'ResNet101', 'ResNet101_vd',
'HRNet_W18'
]
assert backbone in backbones, "backbone should be one of {}".format(
backbones)
super(FasterRCNN, self).__init__('detector')
self.backbone = backbone
self.num_classes = num_classes
self.with_fpn = with_fpn
self.anchor_sizes = anchor_sizes
self.labels = None
if with_fpn:
self.mask_head_resolution = 28
else:
self.mask_head_resolution = 14
self.fixed_input_shape = None
self.input_channel = input_channel
self.with_dcn = False
def build_net(self, mode='train'):
train_pre_nms_top_n = 2000 if self.with_fpn else 12000
test_pre_nms_top_n = 1000 if self.with_fpn else 6000
num_convs = 4 if self.with_fpn else 0
model = paddlex.cv.nets.detection.MaskRCNN(
backbone=self._get_backbone(self.backbone),
num_classes=self.num_classes,
mode=mode,
with_fpn=self.with_fpn,
train_pre_nms_top_n=train_pre_nms_top_n,
test_pre_nms_top_n=test_pre_nms_top_n,
num_convs=num_convs,
mask_head_resolution=self.mask_head_resolution,
fixed_input_shape=self.fixed_input_shape,
input_channel=self.input_channel)
inputs = model.generate_inputs()
if mode == 'train':
model_out = model.build_net(inputs)
loss = model_out['loss']
self.optimizer.minimize(loss)
outputs = OrderedDict(
[('loss', model_out['loss']),
('loss_cls', model_out['loss_cls']),
('loss_bbox', model_out['loss_bbox']),
('loss_mask', model_out['loss_mask']),
('loss_rpn_cls', model_out['loss_rpn_cls']), (
'loss_rpn_bbox', model_out['loss_rpn_bbox'])])
else:
outputs = model.build_net(inputs)
return inputs, outputs
def default_optimizer(self, learning_rate, warmup_steps, warmup_start_lr,
lr_decay_epochs, lr_decay_gamma,
num_steps_each_epoch):
if warmup_steps > lr_decay_epochs[0] * num_steps_each_epoch:
logging.error(
"In function train(), parameters should satisfy: warmup_steps <= lr_decay_epochs[0]*num_samples_in_train_dataset",
exit=False)
logging.error(
"See this doc for more information: https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/appendix/parameters.md#notice",
exit=False)
logging.error(
"warmup_steps should less than {} or lr_decay_epochs[0] greater than {}, please modify 'lr_decay_epochs' or 'warmup_steps' in train function".
format(lr_decay_epochs[0] * num_steps_each_epoch, warmup_steps
// num_steps_each_epoch))
boundaries = [b * num_steps_each_epoch for b in lr_decay_epochs]
values = [(lr_decay_gamma**i) * learning_rate
for i in range(len(lr_decay_epochs) + 1)]
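# With the defaults (learning_rate=1/800, lr_decay_epochs=[8, 11], lr_decay_gamma=0.1)
# this yields values of [1/800, 1/8000, 1/80000], switched at the two epoch boundaries.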
lr_decay = fluid.layers.piecewise_decay(
boundaries=boundaries, values=values)
lr_warmup = fluid.layers.linear_lr_warmup(
learning_rate=lr_decay,
warmup_steps=warmup_steps,
start_lr=warmup_start_lr,
end_lr=learning_rate)
optimizer = fluid.optimizer.Momentum(
learning_rate=lr_warmup,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-04))
return optimizer
def train(self,
num_epochs,
train_dataset,
train_batch_size=1,
eval_dataset=None,
save_interval_epochs=1,
log_interval_steps=2,
save_dir='output',
pretrain_weights='IMAGENET',
optimizer=None,
learning_rate=1.0 / 800,
warmup_steps=500,
warmup_start_lr=1.0 / 2400,
lr_decay_epochs=[8, 11],
lr_decay_gamma=0.1,
metric=None,
use_vdl=False,
early_stop=False,
early_stop_patience=5,
resume_checkpoint=None):
"""训练。
Args:
num_epochs (int): 训练迭代轮数。
train_dataset (paddlex.datasets): 训练数据读取器。
train_batch_size (int): 训练或验证数据batch大小。目前检测仅支持单卡评估,训练数据batch大小与
显卡数量之商为验证数据batch大小。默认值为1。
eval_dataset (paddlex.datasets): 验证数据读取器。
save_interval_epochs (int): 模型保存间隔(单位:迭代轮数)。默认为1。
log_interval_steps (int): 训练日志输出间隔(单位:迭代次数)。默认为20。
save_dir (str): 模型保存路径。默认值为'output'。
pretrain_weights (str): 若指定为路径时,则加载路径下预训练模型;若为字符串'IMAGENET',
则自动下载在ImageNet图片数据上预训练的模型权重;若为字符串'COCO',
则自动下载在COCO数据集上预训练的模型权重;若为None,则不使用预训练模型。默认为None。
optimizer (paddle.fluid.optimizer): 优化器。当该参数为None时,使用默认优化器:
fluid.layers.piecewise_decay衰减策略,fluid.optimizer.Momentum优化方法。
learning_rate (float): 默认优化器的学习率。默认为1.0/800。
warmup_steps (int): 默认优化器进行warmup过程的步数。默认为500。
warmup_start_lr (int): 默认优化器warmup的起始学习率。默认为1.0/2400。
lr_decay_epochs (list): 默认优化器的学习率衰减轮数。默认为[8, 11]。
lr_decay_gamma (float): 默认优化器的学习率衰减率。默认为0.1。
metric (bool): 训练过程中评估的方式,取值范围为['COCO', 'VOC']。
use_vdl (bool): 是否使用VisualDL进行可视化。默认值为False。
early_stop (bool): 是否使用提前终止训练策略。默认值为False。
early_stop_patience (int): 当使用提前终止训练策略时,如果验证集精度在`early_stop_patience`个epoch内
连续下降或持平,则终止训练。默认值为5。
resume_checkpoint (str): 恢复训练时指定上次训练保存的模型路径。若为None,则不会恢复训练。默认值为None。
Raises:
ValueError: 评估类型不在指定列表中。
ValueError: 模型从inference model进行加载。
"""
if metric is None:
if isinstance(train_dataset, paddlex.datasets.CocoDetection) or \
isinstance(train_dataset, paddlex.datasets.EasyDataDet):
metric = 'COCO'
else:
raise Exception(
"train_dataset should be datasets.COCODetection or datasets.EasyDataDet."
)
assert metric in ['COCO', 'VOC'], "Metric only support 'VOC' or 'COCO'"
self.metric = metric
if not self.trainable:
raise Exception("Model is not trainable from load_model method.")
self.labels = copy.deepcopy(train_dataset.labels)
self.labels.insert(0, 'background')
# Build the training network
if optimizer is None:
# Build the default optimization strategy
num_steps_each_epoch = train_dataset.num_samples // train_batch_size
optimizer = self.default_optimizer(
learning_rate=learning_rate,
warmup_steps=warmup_steps,
warmup_start_lr=warmup_start_lr,
lr_decay_epochs=lr_decay_epochs,
lr_decay_gamma=lr_decay_gamma,
num_steps_each_epoch=num_steps_each_epoch)
self.optimizer = optimizer
# Build the training, validation and test networks
self.build_program()
fuse_bn = True
if self.with_fpn and self.backbone in [
'ResNet18', 'ResNet50', 'HRNet_W18'
]:
fuse_bn = False
self.net_initialize(
startup_prog=fluid.default_startup_program(),
pretrain_weights=pretrain_weights,
fuse_bn=fuse_bn,
save_dir=save_dir,
resume_checkpoint=resume_checkpoint)
# Run the training loop
self.train_loop(
num_epochs=num_epochs,
train_dataset=train_dataset,
train_batch_size=train_batch_size,
eval_dataset=eval_dataset,
save_interval_epochs=save_interval_epochs,
log_interval_steps=log_interval_steps,
save_dir=save_dir,
use_vdl=use_vdl,
early_stop=early_stop,
early_stop_patience=early_stop_patience)
def evaluate(self,
eval_dataset,
batch_size=1,
epoch_id=None,
metric=None,
return_details=False):
"""评估。
Args:
eval_dataset (paddlex.datasets): 验证数据读取器。
batch_size (int): 验证数据批大小。默认为1。当前只支持设置为1。
epoch_id (int): 当前评估模型所在的训练轮数。
metric (bool): 训练过程中评估的方式,取值范围为['COCO', 'VOC']。默认为None,
根据用户传入的Dataset自动选择,如为VOCDetection,则metric为'VOC';
如为COCODetection,则metric为'COCO'。
return_details (bool): 是否返回详细信息。默认值为False。
Returns:
tuple (metrics, eval_details) /dict (metrics): 当return_details为True时,返回(metrics, eval_details),
当return_details为False时,返回metrics。metrics为dict,包含关键字:'bbox_mmap'和'segm_mmap'
或者’bbox_map‘和'segm_map',分别表示预测框和分割区域平均准确率平均值在
各个IoU阈值下的结果取平均值的结果(mmAP)、平均准确率平均值(mAP)。eval_details为dict,
包含bbox、mask和gt三个关键字。其中关键字bbox的键值是一个列表,列表中每个元素代表一个预测结果,
一个预测结果是一个由图像id,预测框类别id, 预测框坐标,预测框得分组成的列表。
关键字mask的键值是一个列表,列表中每个元素代表各预测框内物体的分割结果,分割结果由图像id、
预测框类别id、表示预测框内各像素点是否属于物体的二值图、预测框得分。
而关键字gt的键值是真实标注框的相关信息。
"""
input_channel = getattr(self, 'input_channel', 3)
arrange_transforms(
model_type=self.model_type,
class_name=self.__class__.__name__,
transforms=eval_dataset.transforms,
mode='eval',
input_channel=input_channel)
if metric is None:
if hasattr(self, 'metric') and self.metric is not None:
metric = self.metric
else:
if isinstance(eval_dataset, paddlex.datasets.CocoDetection):
metric = 'COCO'
else:
raise Exception(
"eval_dataset should be datasets.COCODetection.")
assert metric in ['COCO', 'VOC'], "Metric only support 'VOC' or 'COCO'"
if batch_size > 1:
batch_size = 1
logging.warning(
"Mask RCNN supports batch_size=1 only during evaluating, so batch_size is forced to be set to 1."
)
data_generator = eval_dataset.generator(
batch_size=batch_size, drop_last=False)
total_steps = math.ceil(eval_dataset.num_samples * 1.0 / batch_size)
results = list()
logging.info(
"Start to evaluating(total_samples={}, total_steps={})...".format(
eval_dataset.num_samples, total_steps))
for step, data in tqdm.tqdm(
enumerate(data_generator()), total=total_steps):
images = np.array([d[0] for d in data]).astype('float32')
im_infos = np.array([d[1] for d in data]).astype('float32')
im_shapes = np.array([d[3] for d in data]).astype('float32')
feed_data = {
'image': images,
'im_info': im_infos,
'im_shape': im_shapes,
}
with fluid.scope_guard(self.scope):
outputs = self.exe.run(
self.test_prog,
feed=[feed_data],
fetch_list=list(self.test_outputs.values()),
return_numpy=False)
res = {
'bbox': (np.array(outputs[0]),
outputs[0].recursive_sequence_lengths()),
'mask': (np.array(outputs[1]),
outputs[1].recursive_sequence_lengths())
}
res_im_id = [d[2] for d in data]
res['im_info'] = (im_infos, [])
res['im_shape'] = (im_shapes, [])
res['im_id'] = (np.array(res_im_id), [])
results.append(res)
logging.debug("[EVAL] Epoch={}, Step={}/{}".format(epoch_id, step +
1, total_steps))
ap_stats, eval_details = eval_results(
results,
'COCO',
eval_dataset.coco_gt,
with_background=True,
resolution=self.mask_head_resolution)
if metric == 'VOC':
if isinstance(ap_stats[0], np.ndarray) and isinstance(ap_stats[1],
np.ndarray):
metrics = OrderedDict(
zip(['bbox_map', 'segm_map'],
[ap_stats[0][1], ap_stats[1][1]]))
else:
metrics = OrderedDict(
zip(['bbox_map', 'segm_map'], [0.0, 0.0]))
elif metric == 'COCO':
if isinstance(ap_stats[0], np.ndarray) and isinstance(ap_stats[1],
np.ndarray):
metrics = OrderedDict(
zip(['bbox_mmap', 'segm_mmap'],
[ap_stats[0][0], ap_stats[1][0]]))
else:
metrics = OrderedDict(
zip(['bbox_mmap', 'segm_mmap'], [0.0, 0.0]))
if return_details:
return metrics, eval_details
return metrics
@staticmethod
def _postprocess(res, batch_size, num_classes, mask_head_resolution,
labels):
clsid2catid = dict({i: i for i in range(num_classes)})
xywh_results = bbox2out([res], clsid2catid)
segm_results = mask2out([res], clsid2catid, mask_head_resolution)
preds = [[] for i in range(batch_size)]
import pycocotools.mask as mask_util
for index, xywh_res in enumerate(xywh_results):
image_id = xywh_res['image_id']
del xywh_res['image_id']
xywh_res['mask'] = mask_util.decode(segm_results[index][
'segmentation'])
xywh_res['category'] = labels[xywh_res['category_id']]
preds[image_id].append(xywh_res)
return preds
def predict(self, img_file, transforms=None):
"""预测。
Args:
img_file(str|np.ndarray): 预测图像路径,或者是解码后的排列格式为(H, W, C)且类型为float32且为BGR格式的数组。
transforms (paddlex.det.transforms): 数据预处理操作。
Returns:
lict: 预测结果列表,每个预测结果由预测框类别标签、预测框类别名称、
预测框坐标(坐标格式为[xmin, ymin, w, h])、
原图大小的预测二值图(1表示预测框类别,0表示背景类)、
预测框得分组成。
"""
if transforms is None and not hasattr(self, 'test_transforms'):
raise Exception("transforms need to be defined, now is None.")
if isinstance(img_file, (str, np.ndarray)):
images = [img_file]
else:
raise Exception("img_file must be str/np.ndarray")
if transforms is None:
transforms = self.test_transforms
input_channel = getattr(self, 'input_channel', 3)
im, im_resize_info, im_shape = FasterRCNN._preprocess(
images,
transforms,
self.model_type,
self.__class__.__name__,
input_channel=input_channel)
with fluid.scope_guard(self.scope):
result = self.exe.run(self.test_prog,
feed={
'image': im,
'im_info': im_resize_info,
'im_shape': im_shape
},
fetch_list=list(self.test_outputs.values()),
return_numpy=False,
use_program_cache=True)
res = {
k: (np.array(v), v.recursive_sequence_lengths())
for k, v in zip(list(self.test_outputs.keys()), result)
}
res['im_id'] = (np.array(
[[i] for i in range(len(images))]).astype('int32'), [])
res['im_shape'] = (np.array(im_shape), [])
preds = MaskRCNN._postprocess(res,
len(images), self.num_classes,
self.mask_head_resolution, self.labels)
return preds[0]
def batch_predict(self, img_file_list, transforms=None):
"""预测。
Args:
img_file_list(list|tuple): 对列表(或元组)中的图像同时进行预测,列表中的元素可以是图像路径
也可以是解码后的排列格式为(H,W,C)且类型为float32且为BGR格式的数组。
transforms (paddlex.det.transforms): 数据预处理操作。
Returns:
dict: 每个元素都为列表,表示各图像的预测结果。在各图像的预测结果列表中,每个预测结果由预测框类别标签、预测框类别名称、
预测框坐标(坐标格式为[xmin, ymin, w, h])、
原图大小的预测二值图(1表示预测框类别,0表示背景类)、
预测框得分组成。
"""
if transforms is None and not hasattr(self, 'test_transforms'):
raise Exception("transforms need to be defined, now is None.")
if not isinstance(img_file_list, (list, tuple)):
raise Exception("im_file must be list/tuple")
if transforms is None:
transforms = self.test_transforms
input_channel = getattr(self, 'input_channel', 3)
im, im_resize_info, im_shape = FasterRCNN._preprocess(
img_file_list,
transforms,
self.model_type,
self.__class__.__name__,
self.thread_pool,
input_channel=input_channel)
with fluid.scope_guard(self.scope):
result = self.exe.run(self.test_prog,
feed={
'image': im,
'im_info': im_resize_info,
'im_shape': im_shape
},
fetch_list=list(self.test_outputs.values()),
return_numpy=False,
use_program_cache=True)
res = {
k: (np.array(v), v.recursive_sequence_lengths())
for k, v in zip(list(self.test_outputs.keys()), result)
}
res['im_id'] = (np.array(
[[i] for i in range(len(img_file_list))]).astype('int32'), [])
res['im_shape'] = (np.array(im_shape), [])
preds = MaskRCNN._postprocess(res,
len(img_file_list), self.num_classes,
self.mask_head_resolution, self.labels)
return preds
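    # A minimal, hypothetical usage sketch for predict()/batch_predict() -- the model path and
    # image file below are assumptions for illustration:
    #   model = paddlex.load_model('output/mask_rcnn_r50_fpn/best_model')
    #   for pred in model.predict('test.jpg'):
    #       print(pred['category'], pred['score'], pred['bbox'])  # pred['mask'] holds the binary mask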
|
py | 1a2eacc2f3916b6075bdbfc43d13fe6c53275fcf | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the BIP66 changeover logic."""
from test_framework.test_framework import eBitcashTestFramework
from test_framework.util import *
class BIP66Test(eBitcashTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
self.setup_clean_chain = False
self.extra_args = [[], ["-blockversion=2"], ["-blockversion=3"]]
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some old-version blocks
self.nodes[1].generate(100)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 100):
raise AssertionError("Failed to mine 100 version=2 blocks")
# Mine 750 new-version blocks
for i in range(15):
self.nodes[2].generate(50)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 850):
raise AssertionError("Failed to mine 750 version=3 blocks")
# TODO: check that new DERSIG rules are not enforced
# Mine 1 new-version block
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 851):
raise AssertionError("Failed to mine a version=3 blocks")
# TODO: check that new DERSIG rules are enforced
# Mine 198 new-version blocks
for i in range(2):
self.nodes[2].generate(99)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1049):
raise AssertionError("Failed to mine 198 version=3 blocks")
# Mine 1 old-version block
self.nodes[1].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1050):
raise AssertionError("Failed to mine a version=2 block after 949 version=3 blocks")
        # Mine 1 new-version block
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1051):
raise AssertionError("Failed to mine a version=3 block")
        # Mine 1 old-version block. This should fail
assert_raises_jsonrpc(-1, "CreateNewBlock: TestBlockValidity failed: bad-version(0x00000002)", self.nodes[1].generate, 1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1051):
raise AssertionError("Accepted a version=2 block after 950 version=3 blocks")
        # Mine 1 new-version block
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1052):
raise AssertionError("Failed to mine a version=3 block")
if __name__ == '__main__':
BIP66Test().main()
|
py | 1a2eace8ec481648fbd359e34435c38d92ba172b | import sqlite3
import json
import math
from sqlite3.dbapi2 import Error
from flask import Flask, request, Response, render_template
app = Flask(__name__)
def open_db():
db = sqlite3.connect('./transactions.db')
db.row_factory = sqlite3.Row
return db
@app.route('/', methods=['GET'])
def transactions():
return render_template('transactions.html')
@app.route('/categories', methods=['GET'])
def categories():
return render_template('categories.html')
@app.route('/api/transactions', methods=['GET'])
def get_transactions():
with open_db() as db:
results = db.execute('SELECT * FROM transactions WHERE date >= "2021-05-01" ORDER BY date ASC')
return Response(json.dumps([dict(idx) for idx in results.fetchall()]), mimetype='application/json')
@app.route('/api/transactions/<int:id>', methods=['PUT', 'PATCH'])
def update_transaction(id):
transaction = request.get_json(force=True)
with open_db() as db:
db.execute('UPDATE transactions SET category_id = ? WHERE id = ?', (transaction['category_id'], id))
db.commit()
return {'success': True}
@app.route('/api/categories', methods=['GET'])
def get_categories():
with open_db() as db:
results = db.execute('SELECT * FROM categories')
return Response(json.dumps([dict(idx) for idx in results.fetchall()]), mimetype='application/json')
@app.route('/api/categories', methods=['POST'])
def create_category():
category = request.get_json(force=True)
with open_db() as db:
db.execute('INSERT INTO categories (name) VALUES (?)', (category.get('name'),))
db.commit()
return {'success': True}
@app.route('/api/breakdown', methods=['GET'])
def get_breakdown():
group_by_first = request.args.get('group_by', 'month').lower()
with open_db() as db:
results = db.execute('''
SELECT
j.*
FROM (
SELECT
t.id,
t.date,
SUBSTR(t.date, 0, 8) as month,
t.amount,
REPLACE(t.description, ' ', ' ') as description,
t.category_id,
c.name as category_name,
t.source
FROM transactions t
INNER JOIN categories c on t.category_id = c.id
WHERE c.name NOT IN ('Income', 'Payments', 'Savings') AND t.date >= '2021-05'
) j
ORDER BY j.month ASC, j.category_name ASC
''')
# return Response(json.dumps([dict(idx) for idx in results.fetchall()], indent=2), mimetype='application/json')
transactions = [dict(idx) for idx in results.fetchall()]
if group_by_first == 'month':
first_group = 'month'
second_group = 'category_name'
elif group_by_first == 'category':
first_group = 'category_name'
second_group = 'month'
else:
            return Response(json.dumps({'error': 'Invalid group_by value'}), status=400, mimetype='application/json')
aggregated_transactions = {}
for item in transactions:
item['description'] = item['description'].replace(' ', ' ', 10).replace('\t', ' ')
top_group_value = item.get(first_group)
second_group_value = item.get(second_group)
if top_group_value in aggregated_transactions.keys():
if second_group_value in aggregated_transactions[top_group_value].keys():
sub_group = aggregated_transactions[top_group_value][second_group_value]
sub_group['transactions'].append(item)
sub_group['summary']['amount'] += item['amount']
sub_group['summary']['total_transactions'] += 1
sub_group['summary']['min'] = min(sub_group['summary']['min'], item['amount'])
sub_group['summary']['max'] = max(sub_group['summary']['max'], item['amount'])
sub_group['summary']['avg'] = round(sub_group['summary']['amount'] / sub_group['summary']['total_transactions'], 2)
else:
aggregated_transactions[top_group_value][second_group_value] = {
'summary': {
'amount': item['amount'],
'total_transactions': 1,
'min': item['amount'],
'max': item['amount'],
'avg': item['amount']
},
'transactions': [item]
}
else:
aggregated_transactions[top_group_value] = {}
aggregated_transactions[top_group_value][second_group_value] = {
'summary': {
'amount': item['amount'],
'total_transactions': 1,
'min': item['amount'],
'max': item['amount'],
'avg': item['amount']
},
'transactions': [item]
}
return Response(json.dumps(aggregated_transactions, indent=2), mimetype='application/json')
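# A hypothetical way to exercise the breakdown endpoint locally; the module filename and port are
# assumptions, while the route and response keys come from get_breakdown() above:
#   FLASK_APP=app.py flask run
#   curl 'http://localhost:5000/api/breakdown?group_by=category'
# The response nests {category_name: {month: {"summary": {...}, "transactions": [...]}}} when
# grouped by category, and swaps the first two levels when grouped by month.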
|
bzl | 1a2eae09591d2dbe03c23e4a65058c61e49d5754 | load("@bazel_gazelle//:deps.bzl", "go_repository")
def go_repositories():
go_repository(
name = "co_honnef_go_tools",
importpath = "honnef.co/go/tools",
sum = "h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=",
version = "v0.0.0-20190523083050-ea95bdfd59fc",
)
go_repository(
name = "com_github_burntsushi_toml",
importpath = "github.com/BurntSushi/toml",
sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
version = "v0.3.1",
)
go_repository(
name = "com_github_census_instrumentation_opencensus_proto",
importpath = "github.com/census-instrumentation/opencensus-proto",
sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=",
version = "v0.2.1",
)
go_repository(
name = "com_github_client9_misspell",
importpath = "github.com/client9/misspell",
sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=",
version = "v0.3.4",
)
go_repository(
name = "com_github_cncf_udpa_go",
importpath = "github.com/cncf/udpa/go",
sum = "h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU=",
version = "v0.0.0-20191209042840-269d4d468f6f",
)
go_repository(
name = "com_github_davecgh_go_spew",
importpath = "github.com/davecgh/go-spew",
sum = "h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=",
version = "v1.1.0",
)
go_repository(
name = "com_github_envoyproxy_go_control_plane",
importpath = "github.com/envoyproxy/go-control-plane",
sum = "h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E=",
version = "v0.9.4",
)
go_repository(
name = "com_github_envoyproxy_protoc_gen_validate",
importpath = "github.com/envoyproxy/protoc-gen-validate",
sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=",
version = "v0.1.0",
)
go_repository(
name = "com_github_golang_glog",
importpath = "github.com/golang/glog",
sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=",
version = "v0.0.0-20160126235308-23def4e6c14b",
)
go_repository(
name = "com_github_golang_mock",
importpath = "github.com/golang/mock",
sum = "h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=",
version = "v1.1.1",
)
go_repository(
name = "com_github_golang_protobuf",
importpath = "github.com/golang/protobuf",
sum = "h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=",
version = "v1.3.5",
)
go_repository(
name = "com_github_google_go_cmp",
importpath = "github.com/google/go-cmp",
sum = "h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=",
version = "v0.2.0",
)
go_repository(
name = "com_github_labstack_echo",
importpath = "github.com/labstack/echo",
sum = "h1:pGRcYk231ExFAyoAjAfD85kQzRJCRI8bbnE7CX5OEgg=",
version = "v3.3.10+incompatible",
)
go_repository(
name = "com_github_labstack_gommon",
importpath = "github.com/labstack/gommon",
sum = "h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=",
version = "v0.3.0",
)
go_repository(
name = "com_github_mattn_go_colorable",
importpath = "github.com/mattn/go-colorable",
sum = "h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=",
version = "v0.1.2",
)
go_repository(
name = "com_github_mattn_go_isatty",
importpath = "github.com/mattn/go-isatty",
sum = "h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=",
version = "v0.0.9",
)
go_repository(
name = "com_github_pmezard_go_difflib",
importpath = "github.com/pmezard/go-difflib",
sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=",
version = "v1.0.0",
)
go_repository(
name = "com_github_prometheus_client_model",
importpath = "github.com/prometheus/client_model",
sum = "h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=",
version = "v0.0.0-20190812154241-14fe0d1b01d4",
)
go_repository(
name = "com_github_stretchr_objx",
importpath = "github.com/stretchr/objx",
sum = "h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=",
version = "v0.1.0",
)
go_repository(
name = "com_github_stretchr_testify",
importpath = "github.com/stretchr/testify",
sum = "h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=",
version = "v1.5.1",
)
go_repository(
name = "com_github_valyala_bytebufferpool",
importpath = "github.com/valyala/bytebufferpool",
sum = "h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=",
version = "v1.0.0",
)
go_repository(
name = "com_github_valyala_fasttemplate",
importpath = "github.com/valyala/fasttemplate",
sum = "h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=",
version = "v1.0.1",
)
go_repository(
name = "com_google_cloud_go",
importpath = "cloud.google.com/go",
sum = "h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=",
version = "v0.26.0",
)
go_repository(
name = "in_gopkg_check_v1",
importpath = "gopkg.in/check.v1",
sum = "h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=",
version = "v0.0.0-20161208181325-20d25e280405",
)
go_repository(
name = "in_gopkg_yaml_v2",
importpath = "gopkg.in/yaml.v2",
sum = "h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=",
version = "v2.2.2",
)
go_repository(
name = "org_golang_google_appengine",
importpath = "google.golang.org/appengine",
sum = "h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=",
version = "v1.4.0",
)
go_repository(
name = "org_golang_google_genproto",
importpath = "google.golang.org/genproto",
sum = "h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=",
version = "v0.0.0-20190819201941-24fa4b261c55",
)
go_repository(
name = "org_golang_google_grpc",
importpath = "google.golang.org/grpc",
sum = "h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4=",
version = "v1.28.0",
)
go_repository(
name = "org_golang_x_crypto",
importpath = "golang.org/x/crypto",
sum = "h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=",
version = "v0.0.0-20190308221718-c2843e01d9a2",
)
go_repository(
name = "org_golang_x_exp",
importpath = "golang.org/x/exp",
sum = "h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=",
version = "v0.0.0-20190121172915-509febef88a4",
)
go_repository(
name = "org_golang_x_lint",
importpath = "golang.org/x/lint",
sum = "h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=",
version = "v0.0.0-20190313153728-d0100b6bd8b3",
)
go_repository(
name = "org_golang_x_net",
importpath = "golang.org/x/net",
sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=",
version = "v0.0.0-20190311183353-d8887717615a",
)
go_repository(
name = "org_golang_x_oauth2",
importpath = "golang.org/x/oauth2",
sum = "h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=",
version = "v0.0.0-20180821212333-d2e6202438be",
)
go_repository(
name = "org_golang_x_sync",
importpath = "golang.org/x/sync",
sum = "h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=",
version = "v0.0.0-20190423024810-112230192c58",
)
go_repository(
name = "org_golang_x_sys",
importpath = "golang.org/x/sys",
sum = "h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ=",
version = "v0.0.0-20190813064441-fde4db37ae7a",
)
go_repository(
name = "org_golang_x_text",
importpath = "golang.org/x/text",
sum = "h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=",
version = "v0.3.0",
)
go_repository(
name = "org_golang_x_tools",
importpath = "golang.org/x/tools",
sum = "h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=",
version = "v0.0.0-20190524140312-2c0ae7006135",
)
|
py | 1a2eae233953993f9be256fe74b5570407e68d01 | """A collection of tasks."""
import logging
from ..const import AddonState
from ..coresys import CoreSysAttributes
from ..exceptions import (
AddonsError,
AudioError,
CliError,
CoreDNSError,
HomeAssistantError,
MulticastError,
ObserverError,
)
from ..host.const import HostFeature
from ..jobs.decorator import Job, JobCondition
_LOGGER: logging.Logger = logging.getLogger(__name__)
HASS_WATCHDOG_API = "HASS_WATCHDOG_API"
RUN_UPDATE_SUPERVISOR = 29100
RUN_UPDATE_ADDONS = 57600
RUN_UPDATE_CLI = 28100
RUN_UPDATE_DNS = 30100
RUN_UPDATE_AUDIO = 30200
RUN_UPDATE_MULTICAST = 30300
RUN_UPDATE_OBSERVER = 30400
RUN_RELOAD_ADDONS = 10800
RUN_RELOAD_BACKUPS = 72000
RUN_RELOAD_HOST = 7600
RUN_RELOAD_UPDATER = 7200
RUN_RELOAD_INGRESS = 930
RUN_WATCHDOG_HOMEASSISTANT_DOCKER = 15
RUN_WATCHDOG_HOMEASSISTANT_API = 120
RUN_WATCHDOG_DNS_DOCKER = 30
RUN_WATCHDOG_AUDIO_DOCKER = 60
RUN_WATCHDOG_CLI_DOCKER = 60
RUN_WATCHDOG_OBSERVER_DOCKER = 60
RUN_WATCHDOG_MULTICAST_DOCKER = 60
RUN_WATCHDOG_ADDON_DOCKER = 30
RUN_WATCHDOG_ADDON_APPLICATON = 120
RUN_WATCHDOG_OBSERVER_APPLICATION = 180
RUN_REFRESH_ADDON = 15
RUN_CHECK_CONNECTIVITY = 30
class Tasks(CoreSysAttributes):
"""Handle Tasks inside Supervisor."""
def __init__(self, coresys):
"""Initialize Tasks."""
self.coresys = coresys
self._cache = {}
async def load(self):
"""Add Tasks to scheduler."""
# Update
self.sys_scheduler.register_task(self._update_addons, RUN_UPDATE_ADDONS)
self.sys_scheduler.register_task(self._update_supervisor, RUN_UPDATE_SUPERVISOR)
self.sys_scheduler.register_task(self._update_cli, RUN_UPDATE_CLI)
self.sys_scheduler.register_task(self._update_dns, RUN_UPDATE_DNS)
self.sys_scheduler.register_task(self._update_audio, RUN_UPDATE_AUDIO)
self.sys_scheduler.register_task(self._update_multicast, RUN_UPDATE_MULTICAST)
self.sys_scheduler.register_task(self._update_observer, RUN_UPDATE_OBSERVER)
# Reload
self.sys_scheduler.register_task(self.sys_store.reload, RUN_RELOAD_ADDONS)
self.sys_scheduler.register_task(self.sys_updater.reload, RUN_RELOAD_UPDATER)
self.sys_scheduler.register_task(self.sys_backups.reload, RUN_RELOAD_BACKUPS)
self.sys_scheduler.register_task(self.sys_host.reload, RUN_RELOAD_HOST)
self.sys_scheduler.register_task(self.sys_ingress.reload, RUN_RELOAD_INGRESS)
# Watchdog
self.sys_scheduler.register_task(
self._watchdog_homeassistant_docker, RUN_WATCHDOG_HOMEASSISTANT_DOCKER
)
self.sys_scheduler.register_task(
self._watchdog_homeassistant_api, RUN_WATCHDOG_HOMEASSISTANT_API
)
self.sys_scheduler.register_task(
self._watchdog_dns_docker, RUN_WATCHDOG_DNS_DOCKER
)
self.sys_scheduler.register_task(
self._watchdog_audio_docker, RUN_WATCHDOG_AUDIO_DOCKER
)
self.sys_scheduler.register_task(
self._watchdog_cli_docker, RUN_WATCHDOG_CLI_DOCKER
)
self.sys_scheduler.register_task(
self._watchdog_observer_docker, RUN_WATCHDOG_OBSERVER_DOCKER
)
self.sys_scheduler.register_task(
self._watchdog_observer_application, RUN_WATCHDOG_OBSERVER_APPLICATION
)
self.sys_scheduler.register_task(
self._watchdog_multicast_docker, RUN_WATCHDOG_MULTICAST_DOCKER
)
self.sys_scheduler.register_task(
self._watchdog_addon_docker, RUN_WATCHDOG_ADDON_DOCKER
)
self.sys_scheduler.register_task(
self._watchdog_addon_application, RUN_WATCHDOG_ADDON_APPLICATON
)
# Refresh
self.sys_scheduler.register_task(self._refresh_addon, RUN_REFRESH_ADDON)
# Connectivity
self.sys_scheduler.register_task(
self._check_connectivity, RUN_CHECK_CONNECTIVITY
)
_LOGGER.info("All core tasks are scheduled")
@Job(
conditions=[
JobCondition.HEALTHY,
JobCondition.FREE_SPACE,
JobCondition.INTERNET_HOST,
JobCondition.RUNNING,
]
)
async def _update_addons(self):
"""Check if an update is available for an Add-on and update it."""
for addon in self.sys_addons.all:
if not addon.is_installed or not addon.auto_update:
continue
# Evaluate available updates
if not addon.need_update:
continue
if not addon.test_update_schema():
_LOGGER.warning(
"Add-on %s will be ignored, schema tests failed", addon.slug
)
continue
# Run Add-on update sequential
# avoid issue on slow IO
_LOGGER.info("Add-on auto update process %s", addon.slug)
try:
await addon.update(backup=True)
except AddonsError:
_LOGGER.error("Can't auto update Add-on %s", addon.slug)
@Job(
conditions=[
JobCondition.FREE_SPACE,
JobCondition.INTERNET_HOST,
JobCondition.RUNNING,
]
)
async def _update_supervisor(self):
"""Check and run update of Supervisor Supervisor."""
if not self.sys_supervisor.need_update:
return
_LOGGER.info(
"Found new Supervisor version %s, updating",
self.sys_supervisor.latest_version,
)
await self.sys_supervisor.update()
async def _watchdog_homeassistant_docker(self):
"""Check running state of Docker and start if they is close."""
if not self.sys_homeassistant.watchdog:
# Watchdog is not enabled for Home Assistant
return
if self.sys_homeassistant.error_state:
# Home Assistant is in an error state, this is handled by the rollback feature
return
if not await self.sys_homeassistant.core.is_failed():
# The home assistant container is not in a failed state
return
if self.sys_homeassistant.core.in_progress:
# Home Assistant has a task in progress
return
if await self.sys_homeassistant.core.is_running():
# Home Assistant is running
return
_LOGGER.warning("Watchdog found a problem with Home Assistant Docker!")
try:
await self.sys_homeassistant.core.start()
except HomeAssistantError as err:
_LOGGER.error("Home Assistant watchdog reanimation failed!")
self.sys_capture_exception(err)
else:
return
_LOGGER.info("Rebuilding the Home Assistant Container")
await self.sys_homeassistant.core.rebuild()
async def _watchdog_homeassistant_api(self):
"""Create scheduler task for monitoring running state of API.
Try 2 times to call API before we restart Home-Assistant. Maybe we had
a delay in our system.
"""
if not self.sys_homeassistant.watchdog:
# Watchdog is not enabled for Home Assistant
return
if self.sys_homeassistant.error_state:
# Home Assistant is in an error state, this is handled by the rollback feature
return
if not await self.sys_homeassistant.core.is_running():
# The home assistant container is not running
return
if self.sys_homeassistant.core.in_progress:
# Home Assistant has a task in progress
return
if await self.sys_homeassistant.api.check_api_state():
# Home Assistant is running properly
return
# Init cache data
retry_scan = self._cache.get(HASS_WATCHDOG_API, 0)
        # Looks like we ran into a problem
retry_scan += 1
if retry_scan == 1:
self._cache[HASS_WATCHDOG_API] = retry_scan
_LOGGER.warning("Watchdog miss API response from Home Assistant")
return
_LOGGER.error("Watchdog found a problem with Home Assistant API!")
try:
await self.sys_homeassistant.core.restart()
except HomeAssistantError as err:
_LOGGER.error("Home Assistant watchdog reanimation failed!")
self.sys_capture_exception(err)
finally:
self._cache[HASS_WATCHDOG_API] = 0
@Job(conditions=JobCondition.RUNNING)
async def _update_cli(self):
"""Check and run update of cli."""
if not self.sys_plugins.cli.need_update:
return
_LOGGER.info(
"Found new cli version %s, updating", self.sys_plugins.cli.latest_version
)
await self.sys_plugins.cli.update()
@Job(conditions=JobCondition.RUNNING)
async def _update_dns(self):
"""Check and run update of CoreDNS plugin."""
if not self.sys_plugins.dns.need_update:
return
_LOGGER.info(
"Found new CoreDNS plugin version %s, updating",
self.sys_plugins.dns.latest_version,
)
await self.sys_plugins.dns.update()
@Job(conditions=JobCondition.RUNNING)
async def _update_audio(self):
"""Check and run update of PulseAudio plugin."""
if not self.sys_plugins.audio.need_update:
return
_LOGGER.info(
"Found new PulseAudio plugin version %s, updating",
self.sys_plugins.audio.latest_version,
)
await self.sys_plugins.audio.update()
@Job(conditions=JobCondition.RUNNING)
async def _update_observer(self):
"""Check and run update of Observer plugin."""
if not self.sys_plugins.observer.need_update:
return
_LOGGER.info(
"Found new Observer plugin version %s, updating",
self.sys_plugins.observer.latest_version,
)
await self.sys_plugins.observer.update()
@Job(conditions=JobCondition.RUNNING)
async def _update_multicast(self):
"""Check and run update of multicast."""
if not self.sys_plugins.multicast.need_update:
return
_LOGGER.info(
"Found new Multicast version %s, updating",
self.sys_plugins.multicast.latest_version,
)
await self.sys_plugins.multicast.update()
async def _watchdog_dns_docker(self):
"""Check running state of Docker and start if they is close."""
# if CoreDNS is active
if await self.sys_plugins.dns.is_running() or self.sys_plugins.dns.in_progress:
return
_LOGGER.warning("Watchdog found a problem with CoreDNS plugin!")
# Detect loop
await self.sys_plugins.dns.loop_detection()
try:
await self.sys_plugins.dns.start()
except CoreDNSError:
_LOGGER.error("CoreDNS watchdog reanimation failed!")
async def _watchdog_audio_docker(self):
"""Check running state of Docker and start if they is close."""
# if PulseAudio plugin is active
if (
await self.sys_plugins.audio.is_running()
or self.sys_plugins.audio.in_progress
):
return
_LOGGER.warning("Watchdog found a problem with PulseAudio plugin!")
try:
await self.sys_plugins.audio.start()
except AudioError:
_LOGGER.error("PulseAudio watchdog reanimation failed!")
async def _watchdog_cli_docker(self):
"""Check running state of Docker and start if they is close."""
# if cli plugin is active
if await self.sys_plugins.cli.is_running() or self.sys_plugins.cli.in_progress:
return
_LOGGER.warning("Watchdog found a problem with cli plugin!")
try:
await self.sys_plugins.cli.start()
except CliError:
_LOGGER.error("CLI watchdog reanimation failed!")
async def _watchdog_observer_docker(self):
"""Check running state of Docker and start if they is close."""
# if observer plugin is active
if (
await self.sys_plugins.observer.is_running()
or self.sys_plugins.observer.in_progress
):
return
_LOGGER.warning("Watchdog/Docker found a problem with observer plugin!")
try:
await self.sys_plugins.observer.start()
except ObserverError:
_LOGGER.error("Observer watchdog reanimation failed!")
async def _watchdog_observer_application(self):
"""Check running state of application and rebuild if they is not response."""
# if observer plugin is active
if (
self.sys_plugins.observer.in_progress
or await self.sys_plugins.observer.check_system_runtime()
):
return
_LOGGER.warning("Watchdog/Application found a problem with observer plugin!")
try:
await self.sys_plugins.observer.rebuild()
except ObserverError:
_LOGGER.error("Observer watchdog reanimation failed!")
async def _watchdog_multicast_docker(self):
"""Check running state of Docker and start if they is close."""
# if multicast plugin is active
if (
await self.sys_plugins.multicast.is_running()
or self.sys_plugins.multicast.in_progress
):
return
_LOGGER.warning("Watchdog found a problem with Multicast plugin!")
try:
await self.sys_plugins.multicast.start()
except MulticastError:
_LOGGER.error("Multicast watchdog reanimation failed!")
async def _watchdog_addon_docker(self):
"""Check running state of Docker and start if they is close."""
for addon in self.sys_addons.installed:
            # skip add-ons that the watchdog does not need to look after or that are already running
if not addon.watchdog or await addon.is_running():
continue
            # skip add-ons with running actions or that are not in the started state
if addon.in_progress or addon.state != AddonState.STARTED:
continue
_LOGGER.warning("Watchdog found a problem with %s!", addon.slug)
try:
await addon.start()
except AddonsError as err:
_LOGGER.error("%s watchdog reanimation failed with %s", addon.slug, err)
self.sys_capture_exception(err)
async def _watchdog_addon_application(self):
"""Check running state of the application and start if they is hangs."""
for addon in self.sys_addons.installed:
            # skip add-ons that the watchdog does not need to look after or that are not started
if not addon.watchdog or addon.state != AddonState.STARTED:
continue
# Init cache data
retry_scan = self._cache.get(addon.slug, 0)
            # skip add-ons with running actions or whose application still responds
if addon.in_progress or await addon.watchdog_application():
continue
            # Looks like we ran into a problem
retry_scan += 1
if retry_scan == 1:
self._cache[addon.slug] = retry_scan
_LOGGER.warning(
"Watchdog missing application response from %s", addon.slug
)
return
_LOGGER.warning("Watchdog found a problem with %s application!", addon.slug)
try:
await addon.restart()
except AddonsError as err:
_LOGGER.error("%s watchdog reanimation failed with %s", addon.slug, err)
self.sys_capture_exception(err)
finally:
self._cache[addon.slug] = 0
async def _refresh_addon(self) -> None:
"""Refresh addon state."""
for addon in self.sys_addons.installed:
            # skip add-ons that the watchdog already looks after or that are not started
if addon.watchdog or addon.state != AddonState.STARTED:
continue
            # skip add-ons with running actions or whose container is still running
if addon.in_progress or await addon.is_running():
continue
# Adjust state
addon.state = AddonState.STOPPED
async def _check_connectivity(self) -> None:
"""Check system connectivity."""
value = self._cache.get("connectivity", 0)
# Need only full check if not connected or each 10min
if value >= 600:
pass
elif (
self.sys_supervisor.connectivity
and self.sys_host.network.connectivity is None
) or (
self.sys_supervisor.connectivity
and self.sys_host.network.connectivity is not None
and self.sys_host.network.connectivity
):
self._cache["connectivity"] = value + RUN_CHECK_CONNECTIVITY
return
# Check connectivity
try:
await self.sys_supervisor.check_connectivity()
if HostFeature.NETWORK in self.sys_host.features:
await self.sys_host.network.check_connectivity()
finally:
self._cache["connectivity"] = 0
|
py | 1a2eae319cfbe17043e034104537c118c88238da | """Patch to fix MNIST download issue as described here:
- https://github.com/pytorch/ignite/issues/1737
- https://github.com/pytorch/vision/issues/3500
"""
import os
import subprocess as sp
import torch
from torchvision.datasets.mnist import MNIST, read_image_file, read_label_file
from torchvision.datasets.utils import extract_archive
def patched_download(self):
"""wget patched download method.
"""
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
# download files
for url, md5 in self.resources:
filename = url.rpartition("/")[2]
download_root = os.path.expanduser(self.raw_folder)
extract_root = None
remove_finished = False
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
# Use wget to download archives
sp.run(["wget", url, "-P", download_root])
archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
extract_archive(archive, extract_root, remove_finished)
# process and save as torch files
print("Processing...")
training_set = (
read_image_file(os.path.join(self.raw_folder, "train-images-idx3-ubyte")),
read_label_file(os.path.join(self.raw_folder, "train-labels-idx1-ubyte")),
)
test_set = (
read_image_file(os.path.join(self.raw_folder, "t10k-images-idx3-ubyte")),
read_label_file(os.path.join(self.raw_folder, "t10k-labels-idx1-ubyte")),
)
with open(os.path.join(self.processed_folder, self.training_file), "wb") as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), "wb") as f:
torch.save(test_set, f)
print("Done!")
def main():
# Patch download method
MNIST.download = patched_download
# Download MNIST
MNIST(".", download=True)
if __name__ == "__main__":
main()
|
py | 1a2eae3d0fd3157590329c9965e9d4739f1f61d9 | from datetime import datetime
from os.path import dirname, join
import pytest # noqa
from city_scrapers_core.constants import COMMITTEE, PASSED
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.cuya_emergency_services_advisory import (
CuyaEmergencyServicesAdvisorySpider,
)
test_response = file_response(
join(dirname(__file__), "files", "cuya_emergency_services_advisory.html"),
url="http://bc.cuyahogacounty.us/en-US/CC-EmergencySrvcsAdvsryBrd.aspx",
)
test_detail_response = file_response(
join(dirname(__file__), "files", "cuya_emergency_services_advisory_detail.html"),
url="http://bc.cuyahogacounty.us/en-US/091119-CCESAB-Comms-meeting.aspx",
)
spider = CuyaEmergencyServicesAdvisorySpider()
freezer = freeze_time("2019-09-25")
freezer.start()
parsed_items = [item for item in spider.parse(test_response)]
parsed_item = [item for item in spider._parse_detail(test_detail_response)][0]
freezer.stop()
def test_count():
assert len(parsed_items) == 30
def test_title():
assert parsed_item["title"] == "CCESAB Communications Committee"
def test_description():
assert parsed_item["description"] == ""
def test_start():
assert parsed_item["start"] == datetime(2019, 9, 11, 10, 15)
def test_end():
assert parsed_item["end"] == datetime(2019, 9, 11, 11, 30)
def test_time_notes():
assert parsed_item["time_notes"] == ""
def test_id():
assert (
parsed_item["id"]
== "cuya_emergency_services_advisory/201909111015/x/ccesab_communications_committee" # noqa
)
def test_status():
assert parsed_item["status"] == PASSED
def test_location():
assert parsed_item["location"] == {
"name": "The Cassidy Theatre",
"address": "6200 Pearl Road Parma Heights, OH 44130",
}
def test_source():
assert parsed_item["source"] == test_detail_response.url
def test_links():
assert parsed_item["links"] == [
{
"href": "http://bc.cuyahogacounty.us/ViewFile.aspx?file=7DSCAKoM0rqkeTzD%2f6%2f4cw%3d%3d", # noqa
"title": "Agenda",
}
]
def test_classification():
assert parsed_item["classification"] == COMMITTEE
def test_all_day():
assert parsed_item["all_day"] is False
|
py | 1a2eaf7dce5f6991f2e16b384403c2ea3c1ffc91 | # -*- coding: utf-8 -*-
# :Project: metapensiero.pj -- compatibility
# :Created: lun 30 mar 2020, 01:48:33
# :Author: Alberto Berti <[email protected]>
# :License: GNU General Public License version 3 or later
# :Copyright: © 2020 Alberto Berti
#
import ast
import sys
is_py36 = sys.version_info >= (3, 6)
if is_py36:
assign_types = (ast.Assign, ast.AnnAssign)
else:
assign_types = (ast.Assign,)
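# Assumed downstream usage sketch: callers can treat annotated and plain assignments uniformly,
# e.g. `isinstance(ast.parse("x = 1").body[0], assign_types)` is True on every supported version.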
|
py | 1a2eaf9174fbdebd534e3827025ee1181b781017 | import math
from os.path import join as pjoin
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(DoubleConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2)
)
def forward(self, x):
return self.conv(x)
class SingleConv_no_pool(nn.Module):
def __init__(self, in_channels, out_channels):
super(SingleConv_no_pool, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
)
def forward(self, x):
        if x.size()[1] == 1:
x = x.repeat(1,3,1,1)
return self.conv(x)
class SingleConv_with_pool(nn.Module):
def __init__(self, in_channels, out_channels):
super(SingleConv_with_pool, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2)
)
def forward(self, x):
return self.conv(x)
class UNET_encoder(nn.Module):
def __init__(self):
super().__init__()
width = 32
self.width = width
self.root = nn.Sequential(OrderedDict([
('unit1', SingleConv_no_pool(3, width))
]))
self.body = nn.Sequential(OrderedDict([
('block1', nn.Sequential(OrderedDict(
[('unit2', SingleConv_with_pool(width, width*2))]
))),
('block2', nn.Sequential(OrderedDict(
[('unit3', DoubleConv(width*2, width*4))]
))),
('block3', nn.Sequential(OrderedDict(
[('unit4', DoubleConv(width*4, width*8))]
))),
('block4', nn.Sequential(OrderedDict(
[('unit5', DoubleConv(width*8, width*16))]
))),
]))
def forward(self, x):
features = []
x = self.root(x)
b, c, in_size, _ = x.size()
features.append(x)
for i in range(len(self.body)-1):
x = self.body[i](x)
features.append(x)
x = self.body[-1](x)
return x, features[::-1]
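# A small, assumed smoke test for UNET_encoder (shapes follow from the width=32 configuration above):
#   model = UNET_encoder()
#   x = torch.randn(1, 3, 224, 224)
#   out, feats = model(x)
#   # out: (1, 512, 14, 14); feats: [(1, 256, 28, 28), (1, 128, 56, 56), (1, 64, 112, 112), (1, 32, 224, 224)]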
class UNET_encoder_FETS(nn.Module):
def __init__(self):
super().__init__()
width = 32
self.width = width
self.root = nn.Sequential(OrderedDict([
('unit1', SingleConv_no_pool(4, width))
]))
self.body = nn.Sequential(OrderedDict([
('block1', nn.Sequential(OrderedDict(
[('unit2', SingleConv_with_pool(width, width*2))]
))),
('block2', nn.Sequential(OrderedDict(
[('unit3', DoubleConv(width*2, width*4))]
))),
('block3', nn.Sequential(OrderedDict(
[('unit4', DoubleConv(width*4, width*8))]
))),
('block4', nn.Sequential(OrderedDict(
[('unit5', DoubleConv(width*8, width*16))]
))),
]))
def forward(self, x):
features = []
x = self.root(x)
b, c, in_size, _ = x.size()
features.append(x)
for i in range(len(self.body)-1):
x = self.body[i](x)
features.append(x)
x = self.body[-1](x)
return x, features[::-1] |
py | 1a2eb12cd9ff66b3d062b24e3cc0580f5782c12d | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
from _bpy import types as bpy_types
import _bpy
StructRNA = bpy_types.bpy_struct
StructMetaPropGroup = bpy_types.bpy_struct_meta_idprop
# StructRNA = bpy_types.Struct
bpy_types.BlendDataLibraries.load = _bpy._library_load
bpy_types.BlendDataLibraries.write = _bpy._library_write
bpy_types.BlendData.user_map = _bpy._rna_id_collection_user_map
bpy_types.BlendData.batch_remove = _bpy._rna_id_collection_batch_remove
class Context(StructRNA):
__slots__ = ()
def copy(self):
from types import BuiltinMethodType
new_context = {}
generic_attrs = (
*StructRNA.__dict__.keys(),
"bl_rna", "rna_type", "copy",
)
for attr in dir(self):
if not (attr.startswith("_") or attr in generic_attrs):
value = getattr(self, attr)
if type(value) != BuiltinMethodType:
new_context[attr] = value
return new_context
class Library(bpy_types.ID):
__slots__ = ()
@property
def users_id(self):
"""ID data blocks which use this library"""
import bpy
# See: readblenentry.c, IDTYPE_FLAGS_ISLINKABLE,
# we could make this an attribute in rna.
attr_links = (
"actions", "armatures", "brushes", "cameras",
"curves", "grease_pencils", "collections", "images",
"lights", "lattices", "materials", "metaballs",
"meshes", "node_groups", "objects", "scenes",
"sounds", "speakers", "textures", "texts",
"fonts", "worlds",
)
return tuple(id_block
for attr in attr_links
for id_block in getattr(bpy.data, attr)
if id_block.library == self)
class Texture(bpy_types.ID):
__slots__ = ()
@property
def users_material(self):
"""Materials that use this texture"""
import bpy
return tuple(mat for mat in bpy.data.materials
if self in [slot.texture
for slot in mat.texture_slots
if slot]
)
@property
def users_object_modifier(self):
"""Object modifiers that use this texture"""
import bpy
return tuple(
obj for obj in bpy.data.objects if
self in [
mod.texture
for mod in obj.modifiers
if mod.type == 'DISPLACE']
)
class Collection(bpy_types.ID):
__slots__ = ()
@property
def users_dupli_group(self):
"""The collection instance objects this collection is used in"""
import bpy
return tuple(obj for obj in bpy.data.objects
if self == obj.instance_collection)
class Object(bpy_types.ID):
__slots__ = ()
@property
def children(self):
"""All the children of this object. Warning: takes O(len(bpy.data.objects)) time."""
import bpy
return tuple(child for child in bpy.data.objects
if child.parent == self)
@property
def users_collection(self):
"""The collections this object is in. Warning: takes O(len(bpy.data.collections) + len(bpy.data.scenes)) time."""
import bpy
return (
tuple(
collection for collection in bpy.data.collections
if self in collection.objects[:]
) + tuple(
scene.collection for scene in bpy.data.scenes
if self in scene.collection.objects[:]
)
)
@property
def users_scene(self):
"""The scenes this object is in. Warning: takes O(len(bpy.data.scenes) * len(bpy.data.objects)) time."""
import bpy
return tuple(scene for scene in bpy.data.scenes
if self in scene.objects[:])
class WindowManager(bpy_types.ID):
__slots__ = ()
def popup_menu(self, draw_func, title="", icon='NONE'):
import bpy
popup = self.popmenu_begin__internal(title, icon=icon)
try:
draw_func(popup, bpy.context)
finally:
self.popmenu_end__internal(popup)
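    # A minimal, assumed usage sketch -- the draw callback and labels are illustrative only:
    #   def draw(self, context):
    #       self.layout.label(text="Hello from a popup")
    #   bpy.context.window_manager.popup_menu(draw, title="Info", icon='INFO')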
def popover(
self, draw_func, *,
ui_units_x=0,
keymap=None,
from_active_button=False,
):
import bpy
popup = self.popover_begin__internal(
ui_units_x=ui_units_x,
from_active_button=from_active_button,
)
try:
draw_func(popup, bpy.context)
finally:
self.popover_end__internal(popup, keymap=keymap)
def popup_menu_pie(self, event, draw_func, title="", icon='NONE'):
import bpy
pie = self.piemenu_begin__internal(title, icon=icon, event=event)
if pie:
try:
draw_func(pie, bpy.context)
finally:
self.piemenu_end__internal(pie)
class WorkSpace(bpy_types.ID):
__slots__ = ()
def status_text_set(self, text):
"""
Set the status text or None to clear,
When text is a function, this will be called with the (header, context) arguments.
"""
from bl_ui.space_statusbar import STATUSBAR_HT_header
draw_fn = getattr(STATUSBAR_HT_header, "_draw_orig", None)
if draw_fn is None:
draw_fn = STATUSBAR_HT_header._draw_orig = STATUSBAR_HT_header.draw
if not (text is None or isinstance(text, str)):
draw_fn = text
text = None
self.status_text_set_internal(text)
STATUSBAR_HT_header.draw = draw_fn
class _GenericBone:
"""
functions for bones, common between Armature/Pose/Edit bones.
internal subclassing use only.
"""
__slots__ = ()
def translate(self, vec):
"""Utility function to add *vec* to the head and tail of this bone"""
self.head += vec
self.tail += vec
def parent_index(self, parent_test):
"""
The same as 'bone in other_bone.parent_recursive'
but saved generating a list.
"""
# use the name so different types can be tested.
name = parent_test.name
parent = self.parent
i = 1
while parent:
if parent.name == name:
return i
parent = parent.parent
i += 1
return 0
@property
def x_axis(self):
""" Vector pointing down the x-axis of the bone.
"""
from mathutils import Vector
return self.matrix.to_3x3() @ Vector((1.0, 0.0, 0.0))
@property
def y_axis(self):
""" Vector pointing down the y-axis of the bone.
"""
from mathutils import Vector
return self.matrix.to_3x3() @ Vector((0.0, 1.0, 0.0))
@property
def z_axis(self):
""" Vector pointing down the z-axis of the bone.
"""
from mathutils import Vector
return self.matrix.to_3x3() @ Vector((0.0, 0.0, 1.0))
@property
def basename(self):
"""The name of this bone before any '.' character"""
# return self.name.rsplit(".", 1)[0]
return self.name.split(".")[0]
@property
def parent_recursive(self):
"""A list of parents, starting with the immediate parent"""
parent_list = []
parent = self.parent
while parent:
if parent:
parent_list.append(parent)
parent = parent.parent
return parent_list
@property
def center(self):
"""The midpoint between the head and the tail."""
return (self.head + self.tail) * 0.5
@property
def vector(self):
"""
The direction this bone is pointing.
Utility function for (tail - head)
"""
return (self.tail - self.head)
@property
def children(self):
"""A list of all the bones children. Warning: takes O(len(bones)) time."""
return [child for child in self._other_bones if child.parent == self]
@property
def children_recursive(self):
"""A list of all children from this bone. Warning: takes O(len(bones)**2) time."""
bones_children = []
for bone in self._other_bones:
index = bone.parent_index(self)
if index:
bones_children.append((index, bone))
# sort by distance to parent
bones_children.sort(key=lambda bone_pair: bone_pair[0])
return [bone for index, bone in bones_children]
@property
def children_recursive_basename(self):
"""
Returns a chain of children with the same base name as this bone.
Only direct chains are supported, forks caused by multiple children
with matching base names will terminate the function
and not be returned. Warning: takes O(len(bones)**2) time.
"""
basename = self.basename
chain = []
child = self
while True:
children = child.children
children_basename = []
for child in children:
if basename == child.basename:
children_basename.append(child)
if len(children_basename) == 1:
child = children_basename[0]
chain.append(child)
else:
if children_basename:
print("multiple basenames found, "
"this is probably not what you want!",
self.name, children_basename)
break
return chain
@property
def _other_bones(self):
id_data = self.id_data
id_data_type = type(id_data)
if id_data_type == bpy_types.Object:
bones = id_data.pose.bones
elif id_data_type == bpy_types.Armature:
bones = id_data.edit_bones
if not bones: # not in edit mode
bones = id_data.bones
return bones
class PoseBone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
__slots__ = ()
@property
def children(self):
obj = self.id_data
pbones = obj.pose.bones
self_bone = self.bone
return tuple(pbones[bone.name] for bone in obj.data.bones
if bone.parent == self_bone)
class Bone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
__slots__ = ()
class EditBone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
__slots__ = ()
def align_orientation(self, other):
"""
        Align this bone to another by moving its tail and setting its roll;
        the length of the other bone is not used.
"""
vec = other.vector.normalized() * self.length
self.tail = self.head + vec
self.roll = other.roll
def transform(self, matrix, scale=True, roll=True):
"""
        Transform the bone's head, tail, roll and envelope
(when the matrix has a scale component).
:arg matrix: 3x3 or 4x4 transformation matrix.
:type matrix: :class:`mathutils.Matrix`
:arg scale: Scale the bone envelope by the matrix.
:type scale: bool
:arg roll:
Correct the roll to point in the same relative
direction to the head and tail.
:type roll: bool
"""
from mathutils import Vector
z_vec = self.matrix.to_3x3() @ Vector((0.0, 0.0, 1.0))
self.tail = matrix @ self.tail
self.head = matrix @ self.head
if scale:
scalar = matrix.median_scale
self.head_radius *= scalar
self.tail_radius *= scalar
if roll:
self.align_roll(matrix @ z_vec)
def ord_ind(i1, i2):
if i1 < i2:
return i1, i2
return i2, i1
class Mesh(bpy_types.ID):
__slots__ = ()
def from_pydata(self, vertices, edges, faces):
"""
Make a mesh from a list of vertices/edges/faces
Until we have a nicer way to make geometry, use this.
:arg vertices:
float triplets each representing (X, Y, Z)
eg: [(0.0, 1.0, 0.5), ...].
:type vertices: iterable object
:arg edges:
int pairs, each pair contains two indices to the
*vertices* argument. eg: [(1, 2), ...]
:type edges: iterable object
:arg faces:
iterator of faces, each faces contains three or more indices to
the *vertices* argument. eg: [(5, 6, 8, 9), (1, 2, 3), ...]
:type faces: iterable object
.. warning::
Invalid mesh data
*(out of range indices, edges with matching indices,
2 sided faces... etc)* are **not** prevented.
If the data used for mesh creation isn't known to be valid,
run :class:`Mesh.validate` after this function.
"""
from itertools import chain, islice, accumulate
face_lengths = tuple(map(len, faces))
self.vertices.add(len(vertices))
self.edges.add(len(edges))
self.loops.add(sum(face_lengths))
self.polygons.add(len(faces))
self.vertices.foreach_set("co", tuple(chain.from_iterable(vertices)))
self.edges.foreach_set("vertices", tuple(chain.from_iterable(edges)))
vertex_indices = tuple(chain.from_iterable(faces))
loop_starts = tuple(islice(chain([0], accumulate(face_lengths)), len(faces)))
self.polygons.foreach_set("loop_total", face_lengths)
self.polygons.foreach_set("loop_start", loop_starts)
self.polygons.foreach_set("vertices", vertex_indices)
# if no edges - calculate them
if faces and (not edges):
self.update(calc_edges=True)
elif edges:
self.update(calc_edges_loose=True)
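    # A small, assumed usage sketch for from_pydata() -- names and coordinates are illustrative:
    #   mesh = bpy.data.meshes.new("Triangle")
    #   mesh.from_pydata([(0, 0, 0), (1, 0, 0), (1, 1, 0)], [], [(0, 1, 2)])
    #   mesh.validate()
    #   obj = bpy.data.objects.new("Triangle", mesh)
    #   bpy.context.collection.objects.link(obj)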
@property
def edge_keys(self):
return [ed.key for ed in self.edges]
class MeshEdge(StructRNA):
__slots__ = ()
@property
def key(self):
return ord_ind(*tuple(self.vertices))
class MeshLoopTriangle(StructRNA):
__slots__ = ()
@property
def center(self):
"""The midpoint of the face."""
face_verts = self.vertices[:]
mesh_verts = self.id_data.vertices
return (
mesh_verts[face_verts[0]].co +
mesh_verts[face_verts[1]].co +
mesh_verts[face_verts[2]].co
) / 3.0
@property
def edge_keys(self):
verts = self.vertices[:]
return (
ord_ind(verts[0], verts[1]),
ord_ind(verts[1], verts[2]),
ord_ind(verts[2], verts[0]),
)
class MeshPolygon(StructRNA):
__slots__ = ()
@property
def edge_keys(self):
verts = self.vertices[:]
vlen = len(self.vertices)
return [ord_ind(verts[i], verts[(i + 1) % vlen]) for i in range(vlen)]
@property
def loop_indices(self):
start = self.loop_start
end = start + self.loop_total
return range(start, end)
class Text(bpy_types.ID):
__slots__ = ()
def as_string(self):
"""Return the text as a string."""
return "\n".join(line.body for line in self.lines)
def from_string(self, string):
"""Replace text with this string."""
self.clear()
self.write(string)
def as_module(self):
from os.path import splitext
from types import ModuleType
mod = ModuleType(splitext(self.name)[0])
# TODO: We could use Text.compiled (C struct member)
# if this is called often it will be much faster.
exec(self.as_string(), mod.__dict__)
return mod
class Sound(bpy_types.ID):
__slots__ = ()
@property
def factory(self):
"""The aud.Factory object of the sound."""
import aud
return aud._sound_from_pointer(self.as_pointer())
class RNAMeta(type):
# TODO(campbell): move to C-API
@property
def is_registered(cls):
return "bl_rna" in cls.__dict__
class RNAMetaPropGroup(StructMetaPropGroup, RNAMeta):
pass
# Same as 'Operator'
# only without 'as_keywords'
class Gizmo(StructRNA):
__slots__ = ()
def __getattribute__(self, attr):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return getattr(properties, attr)
return super().__getattribute__(attr)
def __setattr__(self, attr, value):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return setattr(properties, attr, value)
return super().__setattr__(attr, value)
def __delattr__(self, attr):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return delattr(properties, attr)
return super().__delattr__(attr)
from _bpy import (
_rna_gizmo_target_set_handler as target_set_handler,
_rna_gizmo_target_get_value as target_get_value,
_rna_gizmo_target_set_value as target_set_value,
_rna_gizmo_target_get_range as target_get_range,
)
# Convenience wrappers around private `_gpu` module.
def draw_custom_shape(self, shape, *, matrix=None, select_id=None):
"""
Draw a shape created form :class:`bpy.types.Gizmo.draw_custom_shape`.
:arg shape: The cached shape to draw.
:type shape: Undefined.
:arg matrix: 4x4 matrix, when not given
:class:`bpy.types.Gizmo.matrix_world` is used.
:type matrix: :class:`mathutils.Matrix`
:arg select_id: The selection id.
Only use when drawing within :class:`bpy.types.Gizmo.draw_select`.
        :type select_id: int
"""
import gpu
if matrix is None:
matrix = self.matrix_world
batch, shader = shape
shader.bind()
if select_id is not None:
gpu.select.load_id(select_id)
else:
if self.is_highlight:
color = (*self.color_highlight, self.alpha_highlight)
else:
color = (*self.color, self.alpha)
shader.uniform_float("color", color)
with gpu.matrix.push_pop():
gpu.matrix.multiply_matrix(matrix)
batch.draw()
@staticmethod
def new_custom_shape(type, verts):
"""
Create a new shape that can be passed to :class:`bpy.types.Gizmo.draw_custom_shape`.
:arg type: The type of shape to create in (POINTS, LINES, TRIS, LINE_STRIP).
:type type: string
:arg verts: Coordinates.
        :type verts: sequence of 2D or 3D coordinates.
:return: The newly created shape.
:rtype: Undefined (it may change).
"""
import gpu
from gpu.types import (
GPUBatch,
GPUVertBuf,
GPUVertFormat,
)
dims = len(verts[0])
if dims not in {2, 3}:
raise ValueError("Expected 2D or 3D vertex")
fmt = GPUVertFormat()
pos_id = fmt.attr_add(id="pos", comp_type='F32', len=dims, fetch_mode='FLOAT')
vbo = GPUVertBuf(len=len(verts), format=fmt)
vbo.attr_fill(id=pos_id, data=verts)
batch = GPUBatch(type=type, buf=vbo)
shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR' if dims == 3 else '2D_UNIFORM_COLOR')
batch.program_set(shader)
return (batch, shader)
# Dummy class to keep the reference in `bpy_types_dict` and avoid
# errors like: "TypeError: expected GizmoGroup subclass of class ..."
class GizmoGroup(StructRNA):
__slots__ = ()
# Only defined so operators members can be used by accessing self.order
# with doc generation 'self.properties.bl_rna.properties' can fail
class Operator(StructRNA, metaclass=RNAMeta):
__slots__ = ()
def __getattribute__(self, attr):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return getattr(properties, attr)
return super().__getattribute__(attr)
def __setattr__(self, attr, value):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return setattr(properties, attr, value)
return super().__setattr__(attr, value)
def __delattr__(self, attr):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return delattr(properties, attr)
return super().__delattr__(attr)
def as_keywords(self, ignore=()):
"""Return a copy of the properties as a dictionary"""
ignore = ignore + ("rna_type",)
return {attr: getattr(self, attr)
for attr in self.properties.rna_type.properties.keys()
if attr not in ignore}
class Macro(StructRNA):
# bpy_types is imported before ops is defined
# so we have to do a local import on each run
__slots__ = ()
@classmethod
def define(self, opname):
from _bpy import ops
return ops.macro_define(self, opname)
class PropertyGroup(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
class RenderEngine(StructRNA, metaclass=RNAMeta):
__slots__ = ()
class KeyingSetInfo(StructRNA, metaclass=RNAMeta):
__slots__ = ()
class AddonPreferences(StructRNA, metaclass=RNAMeta):
__slots__ = ()
class _GenericUI:
__slots__ = ()
@classmethod
def _dyn_ui_initialize(cls):
draw_funcs = getattr(cls.draw, "_draw_funcs", None)
if draw_funcs is None:
def draw_ls(self, context):
# ensure menus always get default context
operator_context_default = self.layout.operator_context
# Support filtering out by owner
workspace = context.workspace
if workspace.use_filter_by_owner:
owner_names = {owner_id.name for owner_id in workspace.owner_ids}
else:
owner_names = None
for func in draw_ls._draw_funcs:
# Begin 'owner_id' filter.
if owner_names is not None:
owner_id = getattr(func, "_owner", None)
if owner_id is not None:
if func._owner not in owner_names:
continue
# End 'owner_id' filter.
# so bad menu functions don't stop
# the entire menu from drawing
try:
func(self, context)
except:
import traceback
traceback.print_exc()
self.layout.operator_context = operator_context_default
draw_funcs = draw_ls._draw_funcs = [cls.draw]
cls.draw = draw_ls
return draw_funcs
@staticmethod
def _dyn_owner_apply(draw_func):
from _bpy import _bl_owner_id_get
owner_id = _bl_owner_id_get()
if owner_id is not None:
draw_func._owner = owner_id
@classmethod
def is_extended(cls):
return bool(getattr(cls.draw, "_draw_funcs", None))
@classmethod
def append(cls, draw_func):
"""
Append a draw function to this menu,
takes the same arguments as the menus draw function
"""
draw_funcs = cls._dyn_ui_initialize()
cls._dyn_owner_apply(draw_func)
draw_funcs.append(draw_func)
@classmethod
def prepend(cls, draw_func):
"""
Prepend a draw function to this menu, takes the same arguments as
the menus draw function
"""
draw_funcs = cls._dyn_ui_initialize()
cls._dyn_owner_apply(draw_func)
draw_funcs.insert(0, draw_func)
@classmethod
def remove(cls, draw_func):
"""Remove a draw function that has been added to this menu"""
draw_funcs = cls._dyn_ui_initialize()
try:
draw_funcs.remove(draw_func)
except ValueError:
pass
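    # Illustrative sketch (not part of the original module): how an add-on would
    # typically extend a menu through this mix-in. The menu class name and the
    # operator id below are assumptions for the example, not guaranteed names.
    #     def draw_extra(self, context):
    #         self.layout.operator("wm.save_mainfile", text="Save Now")
    #     bpy.types.TOPBAR_MT_file.append(draw_extra)   # add at the end of the menu
    #     bpy.types.TOPBAR_MT_file.remove(draw_extra)   # undo the change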
class Panel(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
class UIList(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
class Header(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
class Menu(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
def path_menu(self, searchpaths, operator, *,
props_default=None, prop_filepath="filepath",
filter_ext=None, filter_path=None, display_name=None,
add_operator=None):
"""
Populate a menu from a list of paths.
:arg searchpaths: Paths to scan.
:type searchpaths: sequence of strings.
:arg operator: The operator id to use with each file.
:type operator: string
:arg prop_filepath: Optional operator filepath property (defaults to "filepath").
:type prop_filepath: string
:arg props_default: Properties to assign to each operator.
:type props_default: dict
:arg filter_ext: Optional callback that takes the file extensions.
Returning false excludes the file from the list.
        :type filter_ext: Callable that takes a string and returns a bool.
        :arg filter_path: Optional callback that takes the file name.
           Returning false excludes the file from the list.
        :type filter_path: Callable that takes a string and returns a bool.
        :arg display_name: Optional callback that takes the full path, returns the name to display.
        :type display_name: Callable that takes a string and returns a string.
        :arg add_operator: Optional operator id used to draw add/remove buttons for the entries.
        :type add_operator: string
        """
        import os
        import bpy.utils
        layout = self.layout
if not searchpaths:
layout.label(text="* Missing Paths *")
# collect paths
files = []
for directory in searchpaths:
files.extend([
(f, os.path.join(directory, f))
for f in os.listdir(directory)
if (not f.startswith("."))
if ((filter_ext is None) or
(filter_ext(os.path.splitext(f)[1])))
if ((filter_path is None) or
(filter_path(f)))
])
files.sort()
col = layout.column(align=True)
for f, filepath in files:
# Intentionally pass the full path to 'display_name' callback,
            # since the callback may want to use part of the directory in the name.
row = col.row(align=True)
name = display_name(filepath) if display_name else bpy.path.display_name(f)
props = row.operator(
operator,
text=name,
translate=False,
)
if props_default is not None:
for attr, value in props_default.items():
setattr(props, attr, value)
setattr(props, prop_filepath, filepath)
if operator == "script.execute_preset":
props.menu_idname = self.bl_idname
if add_operator:
props = row.operator(add_operator, text="", icon='REMOVE')
props.name = name
props.remove_name = True
if add_operator:
wm = bpy.data.window_managers[0]
layout.separator()
row = layout.row()
sub = row.row()
sub.emboss = 'NORMAL'
sub.prop(wm, "preset_name", text="")
props = row.operator(add_operator, text="", icon='ADD')
props.name = wm.preset_name
def draw_preset(self, _context):
"""
Define these on the subclass:
- preset_operator (string)
- preset_subdir (string)
Optionally:
- preset_add_operator (string)
- preset_extensions (set of strings)
- preset_operator_defaults (dict of keyword args)
"""
import bpy
ext_valid = getattr(self, "preset_extensions", {".py", ".xml"})
props_default = getattr(self, "preset_operator_defaults", None)
add_operator = getattr(self, "preset_add_operator", None)
self.path_menu(
bpy.utils.preset_paths(self.preset_subdir),
self.preset_operator,
props_default=props_default,
filter_ext=lambda ext: ext.lower() in ext_valid,
add_operator=add_operator,
)
@classmethod
def draw_collapsible(cls, context, layout):
# helper function for (optionally) collapsed header menus
# only usable within headers
if context.area.show_menus:
# Align menus to space them closely.
layout.row(align=True).menu_contents(cls.__name__)
else:
layout.menu(cls.__name__, icon='COLLAPSEMENU')
class NodeTree(bpy_types.ID, metaclass=RNAMetaPropGroup):
__slots__ = ()
class Node(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
@classmethod
def poll(cls, _ntree):
return True
class NodeInternal(Node):
__slots__ = ()
class NodeSocket(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
@property
def links(self):
"""List of node links from or to this socket. Warning: takes O(len(nodetree.links)) time."""
return tuple(
link for link in self.id_data.links
if (link.from_socket == self or
link.to_socket == self))
class NodeSocketInterface(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
# These are intermediate subclasses, need a bpy type too
class CompositorNode(NodeInternal):
__slots__ = ()
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == 'CompositorNodeTree'
def update(self):
self.tag_need_exec()
class ShaderNode(NodeInternal):
__slots__ = ()
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == 'ShaderNodeTree'
class TextureNode(NodeInternal):
__slots__ = ()
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == 'TextureNodeTree'
|
py | 1a2eb231f732213c6256302bf03a1bd5272aacd8 | import rospy
from yaw_controller import YawController
from pid import PID
from lowpass import LowPassFilter
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, wheel_base, steer_ratio, max_lat_accel, max_steer_angle, accel_limit, decel_limit, vehicle_mass, fuel_capacity, brake_deadband,wheel_radius):
self.yaw_controller = YawController(wheel_base, steer_ratio, 0.1, max_lat_accel, max_steer_angle)
kp = 0.3
ki = 0.1
kd = 0.
mn = 0.
mx = 0.2
self.throttle_controller = PID(kp,ki,kd,mn,mx)
tau = 0.5 #1/(2pi*tau) cutoff frequency
ts = 0.02 #sample_time
self.vel_lpf = LowPassFilter(tau, ts)
self.vehicle_mass = vehicle_mass
self.fuel_capacity = fuel_capacity
self.brake_deadband = brake_deadband
self.decel_limit = decel_limit
self.accel_limit = accel_limit
self.wheel_radius = wheel_radius
self.last_time = rospy.get_time()
def control(self, current_vel, dbw_enabled, linear_vel,angular_vel):
# TODO: Change the arg, kwarg list to suit your needs
# Return throttle, brake, steer
if not dbw_enabled:
self.throttle_controller.reset()
return 0.0, 0., 0.
current_vel = self.vel_lpf.filt(current_vel)
# rospy.logwarn("Angular vel : {0}".format(angular_vel))
# rospy.logwarn("target vel : {0}".format(linear_vel))
# rospy.logwarn("current vel : {0}".format(current_vel))
steering = self.yaw_controller.get_steering(linear_vel, angular_vel, current_vel)
vel_error = linear_vel - current_vel
self.last_vel = current_vel
current_time = rospy.get_time()
sample_time = current_time - self.last_time
self.last_time = current_time
throttle = self.throttle_controller.step(vel_error, sample_time)
brake = 0
if linear_vel == 0. and current_vel < 1.0:
throttle = 0
brake = 400 #N*m to hold the car when we stop at light
elif throttle < 0.1 and vel_error < 0:
throttle = 0
decel = max(vel_error, self.decel_limit)
brake = abs(decel)*self.vehicle_mass*self.wheel_radius
return throttle, brake, steering
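# Illustrative sketch (not part of the original file): how a drive-by-wire node
# might call this Controller at 50 Hz. The surrounding loop and variable names
# are assumptions for the example, not the project's actual dbw_node code.
#     controller = Controller(wheel_base, steer_ratio, max_lat_accel, max_steer_angle,
#                             accel_limit, decel_limit, vehicle_mass, fuel_capacity,
#                             brake_deadband, wheel_radius)
#     rate = rospy.Rate(50)
#     while not rospy.is_shutdown():
#         throttle, brake, steering = controller.control(current_vel, dbw_enabled,
#                                                         linear_vel, angular_vel)
#         rate.sleep()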
|
py | 1a2eb275e3a88ee2fe89218b22290c923339ca59 | # Rewrite the distance function from the chapter titled Fruitful functions
# so that it takes two Points as parameters instead of four numbers.
class Point:
""" Point class represents and manipulates x,y coords. """
def __init__(self, x=0, y=0):
""" Create a new point at the origin """
self.x = x
self.y = y
def __str__(self):
return "({0}, {1})".format(self.x, self.y)
def distance_from_origin(self):
""" Compute my distance from the origin """
return ((self.x ** 2) + (self.y ** 2)) ** 0.5
def halfway(self, target):
""" Return the halfway point between myself and the target """
mx = (self.x + target.x) / 2
my = (self.y + target.y) / 2
return Point(mx, my)
# Add a method reflect_x to Point which returns a new Point,
# one which is the reflection of the point about the x-axis. For example, Point(3, 5).reflect_x() is (3, -5)
def reflect_x(self):
refx = (self.x)
refy = -(self.y)
return Point(refx, refy)
# Rewrite the distance function from the chapter titled Fruitful functions
# so that it takes two Points as parameters instead of four numbers.
def distance(p1, p2):
dx = p2.x - p1.x
dy = p2.y - p1.y
dsquared = dx*dx + dy*dy
result = dsquared**0.5
return result
# Add a method slope_from_origin which returns the slope of the line joining the origin to the point. For example,
# Point(4, 10).slope_from_origin()
# 2.5
def slope_from_origin(self):
return self.y/self.x
# What cases will cause this method to fail? -> division by zero
# The equation of a straight line is “y = ax + b”, (or perhaps “y = mx + c”).
# The coefficients a and b completely describe the line.
# Write a method in the Point class so that if a point instance is given another point,
# it will compute the equation of the straight line joining the two points.
# It must return the two coefficients as a tuple of two values. For example,
# print(Point(4, 11).get_line_to(Point(6, 15)))
# (2, 3)
def get_line_to(self, target):
mx = target.x-self.x
my = target.y-self.y
slope = my/mx
b = self.y - (slope * self.x)
return (slope, b)
print(Point(4, 11).get_line_to(Point(6, 15)))
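# Short usage demo (added for illustration) of the Point methods defined above.
p = Point(3, 4)
q = Point(6, 8)
print(p)                          # (3, 4)
print(p.distance_from_origin())   # 5.0
print(p.reflect_x())              # (3, -4)
print(p.halfway(q))               # (4.5, 6.0)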
|
py | 1a2eb2a46d6034a72443905fc5ceafa74652e189 | from proteus import Context
from proteus import Comm
comm = Comm.get()
ctx = Context.get()
# simulation flags for error analysis
#
# simFlagsList is initialized in proteus.iproteus
#
simFlagsList[0]['errorQuantities']=['u']
simFlagsList[0]['errorTypes']= ['numericalSolution'] #compute error in soln and glob. mass bal
simFlagsList[0]['errorNorms']= ['L2','H1'] #compute L2 norm in space or H0 or ...
simFlagsList[0]['errorTimes']= ['Last'] #'All', 'Last'
simFlagsList[0]['echo']=True
#
start
quit
|
py | 1a2eb3e02d38f25d69dea4a31c9eb8d46c026644 | """
Django settings for bbs project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6gmpi&j6a5#sbn^d$v41)5xx8j@1yq5bi3_-p3%pu-!e=0m$r!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bbs.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bbs.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
py | 1a2eb3e939bb20fab5e85474d5b1d38f42136d12 | from keras_applications import get_submodules_from_kwargs
def Conv2dBn(
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
activation_dtype=None,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_batchnorm=False,
**kwargs
):
"""Extension of Conv2D layer with batchnorm"""
conv_name, act_name, bn_name = None, None, None
block_name = kwargs.pop('name', None)
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if block_name is not None:
conv_name = block_name + '_conv'
if block_name is not None and activation is not None:
act_str = activation.__name__ if callable(activation) else str(activation)
act_name = block_name + '_' + act_str
if block_name is not None and use_batchnorm:
bn_name = block_name + '_bn'
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
def wrapper(input_tensor):
x = layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=None,
use_bias=not (use_batchnorm),
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
name=conv_name,
)(input_tensor)
if use_batchnorm:
x = layers.BatchNormalization(axis=bn_axis, name=bn_name)(x)
if activation:
if activation_dtype is None:
x = layers.Activation(activation, name=act_name)(x)
else:
x = layers.Activation(activation, name=act_name, dtype=activation_dtype)(x)
return x
return wrapper
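# Illustrative usage sketch (not part of the original module): a Conv2D + BN + ReLU
# block built with plain keras. Passing the keras submodules through **kwargs is an
# assumption here; callers may wire them differently.
#     import keras
#     inputs = keras.layers.Input(shape=(224, 224, 3))
#     x = Conv2dBn(64, (3, 3), padding='same', activation='relu', use_batchnorm=True,
#                  name='block1', backend=keras.backend, layers=keras.layers,
#                  models=keras.models, utils=keras.utils)(inputs)
#     model = keras.models.Model(inputs, x)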
|
py | 1a2eb4a9fef6c85cf3a67200f11d7b95a09f8f7c | #!/usr/bin/env python3
import os
import random
import unittest
from math import exp, pi
import gpytorch
import torch
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood, FixedNoiseGaussianLikelihood
from gpytorch.means import ConstantMean
from gpytorch.priors import SmoothedBoxPrior
from gpytorch.test.utils import least_used_cuda_device
from torch import optim
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_inputs, train_targets, likelihood):
super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1, 1))
self.rbf_covar_module = RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-3), exp(3), sigma=0.1))
self.covar_module = ScaleKernel(self.rbf_covar_module)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultivariateNormal(mean_x, covar_x)
class TestWhiteNoiseGPRegression(unittest.TestCase):
def setUp(self):
if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
self.rng_state = torch.get_rng_state()
torch.manual_seed(1)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1)
random.seed(1)
def tearDown(self):
if hasattr(self, "rng_state"):
torch.set_rng_state(self.rng_state)
def _get_data(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
# Simple training data: let's try to learn a sine function
train_x = torch.linspace(0, 1, 11, device=device)
train_y = torch.sin(train_x * (2 * pi))
test_x = torch.linspace(0, 1, 51, device=device)
test_y = torch.sin(test_x * (2 * pi))
return train_x, test_x, train_y, test_y
def test_posterior_latent_gp_and_likelihood_without_optimization(self, cuda=False):
train_x, test_x, train_y, test_y = self._get_data(cuda=cuda)
with gpytorch.settings.debug(False):
# We're manually going to set the hyperparameters to be ridiculous
likelihood = FixedNoiseGaussianLikelihood(torch.ones(11) * 1e-8)
gp_model = ExactGPModel(train_x, train_y, likelihood)
# Update lengthscale prior to accommodate extreme parameters
gp_model.rbf_covar_module.initialize(lengthscale=exp(-6))
gp_model.mean_module.initialize(constant=0)
if cuda:
gp_model.cuda()
likelihood.cuda()
# Compute posterior distribution
gp_model.eval()
likelihood.eval()
# Let's see how our model does, conditioned with weird hyperparams
# The posterior should fit all the data
function_predictions = likelihood(gp_model(train_x))
self.assertLess(torch.norm(function_predictions.mean - train_y), 1e-3)
self.assertLess(torch.norm(function_predictions.variance), 5e-3)
# It shouldn't fit much else though
test_function_predictions = gp_model(torch.tensor([1.1]).type_as(test_x))
self.assertLess(torch.norm(test_function_predictions.mean - 0), 1e-4)
self.assertLess(torch.norm(test_function_predictions.variance - gp_model.covar_module.outputscale), 1e-4)
def test_posterior_latent_gp_and_likelihood_without_optimization_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
self.test_posterior_latent_gp_and_likelihood_without_optimization(cuda=True)
def test_posterior_latent_gp_and_likelihood_with_optimization(self, cuda=False):
train_x, test_x, train_y, test_y = self._get_data(cuda=cuda)
# We're manually going to set the hyperparameters to something they shouldn't be
likelihood = FixedNoiseGaussianLikelihood(torch.ones(11) * 0.001)
gp_model = ExactGPModel(train_x, train_y, likelihood)
mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.rbf_covar_module.initialize(lengthscale=exp(1))
gp_model.mean_module.initialize(constant=0)
if cuda:
gp_model.cuda()
likelihood.cuda()
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.1)
optimizer.n_iter = 0
with gpytorch.settings.debug(False):
for _ in range(75):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
for param in gp_model.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
for param in likelihood.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
test_function_predictions = likelihood(gp_model(test_x))
mean_abs_error = torch.mean(torch.abs(test_y - test_function_predictions.mean))
self.assertLess(mean_abs_error.squeeze().item(), 0.05)
def test_posterior_latent_gp_and_likelihood_with_optimization_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
self.test_posterior_latent_gp_and_likelihood_with_optimization(cuda=True)
def test_posterior_latent_gp_and_likelihood_fast_pred_var(self, cuda=False):
train_x, test_x, train_y, test_y = self._get_data(cuda=cuda)
with gpytorch.settings.fast_pred_var(), gpytorch.settings.debug(False):
# We're manually going to set the hyperparameters to something they shouldn't be
likelihood = GaussianLikelihood(noise_prior=SmoothedBoxPrior(exp(-3), exp(3), sigma=0.1))
gp_model = ExactGPModel(train_x, train_y, likelihood)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.rbf_covar_module.initialize(lengthscale=exp(1))
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(noise=exp(1))
if cuda:
gp_model.cuda()
likelihood.cuda()
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.1)
optimizer.n_iter = 0
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
for param in gp_model.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
for param in likelihood.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
# Set the cache
test_function_predictions = likelihood(gp_model(train_x))
# Now bump up the likelihood to something huge
# This will make it easy to calculate the variance
likelihood.raw_noise.data.fill_(3)
test_function_predictions = likelihood(gp_model(train_x))
noise = likelihood.noise
var_diff = (test_function_predictions.variance - noise).abs()
self.assertLess(torch.max(var_diff / noise), 0.05)
def test_posterior_latent_gp_and_likelihood_fast_pred_var_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
self.test_posterior_latent_gp_and_likelihood_fast_pred_var(cuda=True)
if __name__ == "__main__":
unittest.main()
|
py | 1a2eb4f6cc3c7505146763fafeed5e28062e1e07 | """
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_query_set)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv" : "path.to.csv.serializer",
"txt" : "path.to.txt.serializer",
}
"""
from django.conf import settings
from django.utils import importlib
# Built-in serializers
BUILTIN_SERIALIZERS = {
"xml" : "django.core.serializers.xml_serializer",
"python" : "django.core.serializers.python",
"json" : "django.core.serializers.json",
}
# Check for PyYaml and register the serializer if it's available.
try:
import yaml
BUILTIN_SERIALIZERS["yaml"] = "django.core.serializers.pyyaml"
except ImportError:
pass
_serializers = {}
def register_serializer(format, serializer_module, serializers=None):
""""Register a new serializer.
``serializer_module`` should be the fully qualified module name
for the serializer.
If ``serializers`` is provided, the registration will be added
to the provided dictionary.
If ``serializers`` is not provided, the registration will be made
directly into the global register of serializers. Adding serializers
directly is not a thread-safe operation.
"""
module = importlib.import_module(serializer_module)
if serializers is None:
_serializers[format] = module
else:
serializers[format] = module
def unregister_serializer(format):
"Unregister a given serializer. This is not a thread-safe operation."
del _serializers[format]
def get_serializer(format):
if not _serializers:
_load_serializers()
return _serializers[format].Serializer
def get_serializer_formats():
if not _serializers:
_load_serializers()
return _serializers.keys()
def get_public_serializer_formats():
if not _serializers:
_load_serializers()
return [k for k, v in _serializers.iteritems() if not v.Serializer.internal_use_only]
def get_deserializer(format):
if not _serializers:
_load_serializers()
return _serializers[format].Deserializer
def serialize(format, queryset, **options):
"""
Serialize a queryset (or any iterator that returns database objects) using
a certain serializer.
"""
s = get_serializer(format)()
s.serialize(queryset, **options)
return s.getvalue()
def deserialize(format, stream_or_string):
"""
Deserialize a stream or a string. Returns an iterator that yields ``(obj,
    m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
list_of_related_objects}``.
"""
d = get_deserializer(format)
return d(stream_or_string)
def _load_serializers():
"""
Register built-in and settings-defined serializers. This is done lazily so
that user code has a chance to (e.g.) set up custom settings without
needing to be careful of import order.
"""
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
|
py | 1a2eb501f42562afc6ecf94a3a3f06b412f2f738 | __copyright__ = "Copyright 2017, Georgia Institute of Technology"
__license__ = "MIT"
__version_info__ = ('0', '0', '1')
__version__ = '.'.join(__version_info__)
__maintainer__ = "Marat Dukhan"
__email__ = "[email protected]"
import logging
logger = logging.getLogger("confu")
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
class ConsoleFormatter(logging.Formatter):
def __init__(self):
super(ConsoleFormatter, self).__init__("%(message)s")
def format(self, record):
message = super(ConsoleFormatter, self).format(record)
if record.levelname in ["DEBUG", "INFO"]:
return message[0].upper() + message[1:]
else:
return {
"WARNING": "Warning", "ERROR": "Error", "CRITICAL": "Fatal error"
}[record.levelname] + ": " + message[0].lower() + message[1:]
console_formatter = ConsoleFormatter()
console_handler.setFormatter(console_formatter)
logger.addHandler(console_handler)
from confu.builds import Build
from confu.platform import Platform
def standard_parser(description="Confu configuration script"):
import argparse
from os import linesep
from confu.platform import host, possible_targets
parser = argparse.ArgumentParser(description=description,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--target", dest="target", metavar="PLATFORM", type=Platform,
default=host.name,
help="platform where the code will run. Potential options:" + linesep +
" " + host.name + " (default)" + linesep +
linesep.join(" " + target for target in possible_targets[1:]))
parser.add_argument("--toolchain", dest="toolchain", metavar="TOOLCHAIN",
choices=["auto", "gnu", "clang"], default="auto",
help="toolchain to use for compilation. Potential options:" + linesep +
linesep.join(" " + name for name in ["auto (default)", "gnu", "clang"]))
return parser
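# Illustrative sketch (assumed usage, not part of the original module): a configure.py
# script would typically parse the standard options and inspect the chosen target.
#     parser = standard_parser("Example configuration script")
#     options = parser.parse_args()
#     logger.info("configuring for target {0} with toolchain {1}"
#                 .format(options.target.name, options.toolchain))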
|
py | 1a2eb7178da2387741821181e4ea26b67dd1ed12 | import pytest
from commitizen import BaseCommitizen, defaults, factory
from commitizen.config import BaseConfig
from commitizen.exceptions import NoCommitizenFoundException
def test_factory():
config = BaseConfig()
config.settings.update({"name": defaults.name})
r = factory.commiter_factory(config)
assert isinstance(r, BaseCommitizen)
def test_factory_fails():
config = BaseConfig()
config.settings.update({"name": "Nothing"})
with pytest.raises(NoCommitizenFoundException) as excinfo:
factory.commiter_factory(config)
assert "The committer has not been found in the system." in str(excinfo)
|
py | 1a2eb90337162598e808c77019efb7954485e0d3 | import datetime
import zipfile
from os import chdir, getpid, getcwd, listdir, walk, path
def listdir_cwd():
"""
Lazy listdir for cwd.
:return:
"""
return listdir(getcwd())
def f(x, fn):
"""
Le Zip
:param x:
:param fn:
:return:
"""
with zipfile.ZipFile.open(fn, 'a', zipfile.ZIP_LZMA, True) as file:
file.write(x)
file.close()
print(f'{getpid()}: {x}')
return x
def utc_now_to_file_str():
"""
Format UTC now to _YYYYmmdd_HHMMSS
:return:
"""
return datetime.datetime.strftime(datetime.datetime.utcnow(), '_%Y%m%d_%H%M%S')
def create_filename(prefix, extension):
"""
Create filename to be used.
:param prefix:
:param extension:
:return:
"""
return f'{prefix}{utc_now_to_file_str()}.{extension}'
def log_folders_destination_targets(dest, targets, filename):
"""
Log Folders, destination, and targets.
:param dest:
:param targets:
:param filename:
:return:
"""
print(f'\n Destination Folder: {dest}'
f'\n Targets: {str(len(targets))}'
f'\n File: {filename}')
def get_all_file_locs(targets):
"""
Gets all file locations.
:return:
"""
file_list = []
for target in targets:
chdir(target)
for root, dirs, files in walk(target):
for file in files:
file_list.append(path.join(root, file))
return file_list
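if __name__ == '__main__':
    # Minimal usage sketch (added for illustration): archive every file under the
    # current directory into a timestamped zip. The 'backup' prefix and the target
    # list are assumptions for the example.
    archive_name = create_filename('backup', 'zip')
    targets = [getcwd()]
    log_folders_destination_targets(getcwd(), targets, archive_name)
    for file_path in get_all_file_locs(targets):
        f(file_path, archive_name)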
|
py | 1a2eb91dd532d9114ec4f0292e619b09f489bbb7 | import collections
import enum
from itertools import starmap, product
import six
from ibis.compat import suppress
import ibis.util as util
import ibis.common as com
import ibis.expr.types as ir
import ibis.expr.schema as sch
import ibis.expr.datatypes as dt
try:
from cytoolz import curry, compose, identity
except ImportError:
from toolz import curry, compose, identity
def highest_precedence_dtype(exprs):
"""Return the highest precedence type from the passed expressions
Also verifies that there are valid implicit casts between any of the types
and the selected highest precedence type.
This is a thin wrapper around datatypes highest precedence check.
Parameters
----------
exprs : Iterable[ir.ValueExpr]
A sequence of Expressions
Returns
-------
dtype: DataType
The highest precedence datatype
"""
if not exprs:
raise ValueError('Must pass at least one expression')
return dt.highest_precedence(expr.type() for expr in exprs)
def castable(source, target):
"""Return whether source ir type is implicitly castable to target
Based on the underlying datatypes and the value in case of Literals
"""
op = source.op()
value = getattr(op, 'value', None)
return dt.castable(source.type(), target.type(), value=value)
def comparable(left, right):
return castable(left, right) or castable(right, left)
def cast(source, target):
"""Currently Literal to *Scalar implicit casts are allowed"""
import ibis.expr.operations as ops # TODO: don't use ops here
if not castable(source, target):
raise com.IbisTypeError('Source is not castable to target type!')
# currently it prevents column -> scalar implicit castings
# however the datatypes are matching
op = source.op()
if not isinstance(op, ops.Literal):
raise com.IbisTypeError('Only able to implicitly cast literals!')
out_type = target.type().scalar_type()
return out_type(op)
# ---------------------------------------------------------------------
# Input type validators / coercion functions
class validator(curry):
def __repr__(self):
return '{}({}{})'.format(
self.func.__name__,
repr(self.args)[1:-1],
', '.join('{}={!r}'.format(k, v) for k, v in self.keywords.items())
)
noop = validator(identity)
@validator
def one_of(inners, arg):
"""At least one of the inner validators must pass"""
for inner in inners:
with suppress(com.IbisTypeError, ValueError):
return inner(arg)
rules_formatted = ', '.join(map(repr, inners))
raise com.IbisTypeError(
'Arg passes neither of the following rules: {}'.format(rules_formatted)
)
@validator
def all_of(inners, arg):
"""All of the inner valudators must pass.
The order of inner validators matters.
Parameters
----------
inners : List[validator]
        Functions are applied from right to left, so all_of([rule1, rule2], arg) is
        the same as rule1(rule2(arg)).
arg : Any
Value to be validated.
Returns
-------
arg : Any
        Value, possibly coerced by the inner validators to the appropriate types
"""
return compose(*inners)(arg)
@validator
def isin(values, arg):
if arg not in values:
raise ValueError(
'Value with type {} is not in {!r}'.format(type(arg), values)
)
if isinstance(values, dict): # TODO check for mapping instead
return values[arg]
else:
return arg
@validator
def member_of(obj, arg):
if isinstance(arg, enum.Enum):
enum.unique(obj) # check that enum has unique values
arg = arg.name
if not hasattr(obj, arg):
raise com.IbisTypeError(
'Value with type {} is not a member of {}'.format(type(arg), obj)
)
return getattr(obj, arg)
@validator
def list_of(inner, arg, min_length=0):
if isinstance(arg, six.string_types) or not isinstance(
arg, (collections.Sequence, ir.ListExpr)
):
raise com.IbisTypeError('Argument must be a sequence')
if len(arg) < min_length:
raise com.IbisTypeError(
'Arg must have at least {} number of elements'.format(min_length)
)
return ir.sequence(list(map(inner, arg)))
@validator
def datatype(arg):
return dt.dtype(arg)
@validator
def instance_of(klass, arg):
"""Require that a value has a particular Python type."""
if not isinstance(arg, klass):
raise com.IbisTypeError(
'Given argument with type {} is not an instance of {}'.format(
type(arg), klass
)
)
return arg
@validator
def value(dtype, arg):
"""Validates that the given argument is a Value with a particular datatype
Parameters
----------
dtype : DataType subclass or DataType instance
arg : python literal or an ibis expression
If a python literal is given the validator tries to coerce it to an ibis
literal.
Returns
-------
arg : AnyValue
An ibis value expression with the specified datatype
"""
if not isinstance(arg, ir.Expr):
# coerce python literal to ibis literal
arg = ir.literal(arg)
if not isinstance(arg, ir.AnyValue):
raise com.IbisTypeError('Given argument with type {} is not a value '
'expression'.format(type(arg)))
# retrieve literal values for implicit cast check
value = getattr(arg.op(), 'value', None)
if isinstance(dtype, type) and isinstance(arg.type(), dtype):
# dtype class has been specified like dt.Interval or dt.Decimal
return arg
elif dt.castable(arg.type(), dt.dtype(dtype), value=value):
# dtype instance or string has been specified and arg's dtype is
# implicitly castable to it, like dt.int8 is castable to dt.int64
return arg
else:
raise com.IbisTypeError('Given argument with datatype {} is not '
'subtype of {} nor implicitly castable to '
'it'.format(arg.type(), dtype))
@validator
def scalar(inner, arg):
return instance_of(ir.ScalarExpr, inner(arg))
@validator
def column(inner, arg):
return instance_of(ir.ColumnExpr, inner(arg))
@validator
def array_of(inner, arg):
val = arg if isinstance(arg, ir.Expr) else ir.literal(arg)
argtype = val.type()
if not isinstance(argtype, dt.Array):
raise com.IbisTypeError(
'Argument must be an array, got expression {} which is of type '
'{}'.format(val, val.type()))
return value(dt.Array(inner(val[0]).type()), val)
any = value(dt.any)
double = value(dt.double)
string = value(dt.string)
boolean = value(dt.boolean)
integer = value(dt.int64)
decimal = value(dt.Decimal)
floating = value(dt.float64)
date = value(dt.date)
time = value(dt.time)
timestamp = value(dt.Timestamp)
category = value(dt.category)
temporal = one_of([timestamp, date, time])
strict_numeric = one_of([integer, floating, decimal])
soft_numeric = one_of([integer, floating, decimal, boolean])
numeric = soft_numeric
set_ = value(dt.Set)
array = value(dt.Array)
struct = value(dt.Struct)
mapping = value(dt.Map(dt.any, dt.any))
@validator
def interval(arg, units=None):
arg = value(dt.Interval, arg)
unit = arg.type().unit
if units is not None and unit not in units:
msg = 'Interval unit `{}` is not among the allowed ones {}'
raise com.IbisTypeError(msg.format(unit, units))
return arg
@validator
def client(arg):
from ibis.client import Client
return instance_of(Client, arg)
# ---------------------------------------------------------------------
# Ouput type promoter functions
def promoter(fn):
def wrapper(name_or_value, *args, **kwargs):
if isinstance(name_or_value, str):
return lambda self: fn(getattr(self, name_or_value),
*args, **kwargs)
else:
return fn(name_or_value, *args, **kwargs)
return wrapper
@promoter
def shape_like(arg, dtype=None):
if isinstance(arg, (tuple, list, ir.ListExpr)):
datatype = dtype or highest_precedence_dtype(arg)
columnar = util.any_of(arg, ir.AnyColumn)
else:
datatype = dtype or arg.type()
columnar = isinstance(arg, ir.AnyColumn)
dtype = dt.dtype(datatype)
if columnar:
return dtype.array_type()
else:
return dtype.scalar_type()
@promoter
def scalar_like(arg):
output_dtype = arg.type()
return output_dtype.scalar_type()
@promoter
def array_like(arg):
output_dtype = arg.type()
return output_dtype.array_type()
column_like = array_like
@promoter
def typeof(arg):
return arg._factory
@validator
def table(schema, arg):
"""A table argument.
Parameters
----------
schema : Union[sch.Schema, List[Tuple[str, dt.DataType]]
A validator for the table's columns. Only column subset validators are
currently supported. Accepts any arguments that `sch.schema` accepts.
See the example for usage.
arg : The validatable argument.
Examples
--------
The following op will accept an argument named ``'table'``. Note that the
``schema`` argument specifies rules for columns that are required to be in
the table: ``time``, ``group`` and ``value1``. These must match the types
specified in the column rules. Column ``value2`` is optional, but if
present it must be of the specified type. The table may have extra columns
not specified in the schema.
"""
assert isinstance(arg, ir.TableExpr)
if arg.schema() >= sch.schema(schema):
return arg
raise com.IbisTypeError(
'Argument is not a table with column subset of {}'.format(schema)
)
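# Illustrative sketch of the kind of op described in the ``table`` docstring above.
# The class below is hypothetical (added for illustration); real operations live in
# ibis.expr.operations and import these rules as a module.
#     class MyOp(ops.ValueOp):
#         table = table(schema=[
#             ('time', dt.timestamp),
#             ('group', dt.string),
#             ('value1', dt.double),
#         ])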
# TODO: might just use bounds instead of actual literal values
# that could simplify interval binop output_type methods
def _promote_numeric_binop(exprs, op):
bounds, dtypes = [], []
for arg in exprs:
dtypes.append(arg.type())
if hasattr(arg.op(), 'value'):
# arg.op() is a literal
bounds.append([arg.op().value])
else:
bounds.append(arg.type().bounds)
# In some cases, the bounding type might be int8, even though neither
# of the types are that small. We want to ensure the containing type is
# _at least_ as large as the smallest type in the expression.
values = starmap(op, product(*bounds))
dtypes += [dt.infer(value, allow_overflow=True) for value in values]
return dt.highest_precedence(dtypes)
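# Worked example (illustrative): adding two int8 literals with value 100 yields a
# bounds product of 200, which infers to int16 with allow_overflow=True, so
# numeric_like promotes the result to int16 instead of overflowing int8.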
@promoter
def numeric_like(args, op):
if util.all_of(args, ir.IntegerValue):
dtype = _promote_numeric_binop(args, op)
return shape_like(args, dtype=dtype)
else:
return shape_like(args)
# TODO: create varargs marker for impala udfs
|
py | 1a2eb9326f0aab349c6869d901d2e8f44591ddbc | """
Test Convex Breaking
"""
import pytest
import secrets
from convex_api.account import Account
from convex_api.api import API
from convex_api.exceptions import ConvexAPIError
from convex_api.utils import (
add_0x_prefix,
to_address
)
def test_convex_recursion(convex, test_account):
chain_length = 4
address_list = []
for index in range(0, chain_length):
contract = f"""
(def chain-{index}
(deploy
'(do
(def stored-data
^{{:private? true}}
nil
)
(def chain-address
^{{:private? true}}
nil
)
(defn get
^{{:callable? true}}
[]
(call chain-address (get))
)
(defn set
^{{:callable? true}}
[x]
( if chain-address (call chain-address(set x)) (def stored-data x))
)
(defn set-chain-address
^{{:callable? true}}
[x]
(def chain-address x)
)
)
)
)
"""
convex.topup_account(test_account)
result = convex.send(contract, test_account)
address_list.append(to_address(result['value']))
for index in range(0, chain_length):
next_index = index + 1
if next_index == chain_length:
next_index = 0
call_address = address_list[next_index]
result = convex.send(f'(call chain-{index} (set-chain-address #{call_address}))', test_account)
test_number = secrets.randbelow(1000)
if index == chain_length - 1:
with pytest.raises(ConvexAPIError, match='DEPTH'):
result = convex.send(f'(call chain-{index} (set {test_number}))', test_account)
else:
result = convex.send(f'(call chain-0 (set {test_number}))', test_account)
assert(result)
assert(result['value'] == test_number)
with pytest.raises(ConvexAPIError, match='DEPTH'):
convex.query('(call chain-0 (get))', test_account)
def test_schedule_transfer(convex, test_account, other_account):
    # you can send coins to an actor, if it exports the receive-coin function
contract = """
(def transfer-for-ever
(deploy
'(do
(defn tx-delay
^{:callable? true}
[to-address amount]
(transfer to-address amount)
(def call-address *address*)
(schedule (+ *timestamp* 1000) (call call-address (tx-delay to-address amount)))
)
(defn tx-now
^{:callable? true}
[to-address amount]
(transfer to-address amount)
)
(defn show-schedule
^{:callable? true}
[]
[(get *state* :schedule) *address*]
)
(defn receive-coin
^{:callable? true}
[sender amount data]
(accept amount)
)
)
)
)
"""
    # (call contract-address (tx-delay to-address amount))
convex.topup_account(test_account)
convex.topup_account(other_account, 8000000)
result = convex.send(contract, test_account)
contract_address = to_address(result['value'])
convex.transfer(contract_address, 800000, other_account)
convex.topup_account(test_account)
result = convex.send(f'(call #{contract_address} (tx-delay #{other_account.address} 1000))', test_account)
print(result)
result = convex.send(f'(call #{contract_address} (show-schedule))', test_account)
print(result)
|
py | 1a2eb97675cb4a0da568b8029692ffa97378cbb7 | # Autores:
# Darlan de Castro Silva Filho
# Marcos Henrique Fernandes Marcone
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.graph_objs as go
import plotly.express as px
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
# Styling settings and options for the libraries used
plt.style.use('classic')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# Function that builds the path to a data file
# Input: nome = string
# Output: path = string
def path(nome):
return './'+nome+'.csv'
# Imports the files (data sets) used
unidades = pd.read_csv(path('unidades'), sep=';')
docentes = pd.read_csv(path('docentes'), sep=';')
avaliacao = pd.read_csv(path('avaliacaoDocencia'), sep=';')
# Filters the faculty members who work in Natal and hold the category of Professor do Magistério Superior
unidadesFiltradas = unidades.loc[:, [
'id_unidade', 'municipio', 'unidade_responsavel']]
docentesComUnidadeAcademica = pd.merge(
docentes, unidadesFiltradas, left_on="id_unidade_lotacao", right_on="id_unidade").drop('id_unidade', axis=1)
docentesNatalUnidadeAcademica = docentesComUnidadeAcademica[
docentesComUnidadeAcademica['municipio'] == 'NATAL']
docentesNatalMSUnidadeAcademica = docentesNatalUnidadeAcademica[
docentesNatalUnidadeAcademica['categoria'] == 'PROFESSOR DO MAGISTERIO SUPERIOR']
# Filters out the unidade_dirigente values not accepted by the application
docentesNatalMSUnidadeAcademica['unidade_dirigente'] = np.where(docentesNatalMSUnidadeAcademica['unidade_responsavel'] == 'UNIVERSIDADE FEDERAL DO RIO GRANDE DO NORTE', (
docentesNatalMSUnidadeAcademica['lotacao']), (docentesNatalMSUnidadeAcademica['unidade_responsavel']))
unidadesNaoAceitas = ['PRÓ-REITORIA DE EXTENSÃO UNIVERSITÁRIA', 'MUSEU CÂMARA CASCUDO', 'UNIVERSIDADE FEDERAL DO RIO GRANDE DO NORTE', 'EDITORA UNIVERSITÁRIA', 'EMPRESA BRASILEIRA DE SERVICOS HOSPITALARES',
'REITORIA', 'INSTITUTO DE MEDICINA TROPICAL - IMT-RN', 'SECRETARIA DE EDUCAÇÃO A DISTÂNCIA', 'GABINETE DO REITOR', 'SUPERINTENDENCIA DE COMUNICACAO', 'PRÓ-REITORIA DE ADMINISTRAÇÃO (PROAD)']
docentesNatalMSUnidadeAcademica = docentesNatalMSUnidadeAcademica[~docentesNatalMSUnidadeAcademica['unidade_dirigente'].isin(
unidadesNaoAceitas)]
# Bar chart of the distribution of UFRN faculty members by academic unit
quantidadeDocentesUnidadeDirigente = docentesNatalMSUnidadeAcademica['unidade_dirigente'].value_counts(
)
barraDocentesUnidadeDirigente = go.Bar(x=quantidadeDocentesUnidadeDirigente.index,
y=quantidadeDocentesUnidadeDirigente.values, text=quantidadeDocentesUnidadeDirigente.values, textposition='auto')
layoutDocentesUnidadeDirigente = go.Layout(title='Gráfico de docentes por unidade responsável (UFRN 2021 - Unidades de Natal - Magistério Superior)', xaxis={
'title': 'Unidade responsável'}, yaxis={'title': 'Número de docentes'})
figuraDocentesUnidadeDirigente = go.Figure(
data=[barraDocentesUnidadeDirigente], layout=layoutDocentesUnidadeDirigente)
# Pie chart of the distribution of UFRN faculty members by sex
quantidadeDocentesSexo = docentesNatalMSUnidadeAcademica['sexo'].value_counts()
piechartSexo = go.Pie(labels=['Masculino', 'Feminino'], values=quantidadeDocentesSexo.values, text=quantidadeDocentesSexo.values, marker={
'colors': ['#665FD1', '#FFFF7E'], 'line': dict(color='#000000', width=2)})
layoutDocentesSexo = go.Layout(title='Gráfico de docentes por sexo (UFRN 2021 - Unidades de Natal - Magistério Superior)',
xaxis={'title': 'Docentes'}, yaxis={'title': 'Número de docentes'}, barmode='stack')
figuraDocentesSexo = go.Figure(data=piechartSexo, layout=layoutDocentesSexo)
# Pie chart of the distribution of UFRN faculty members by academic degree
quantidadeDocentesFormacao = docentesNatalMSUnidadeAcademica['formacao'].value_counts(
)
piechartFormacao = go.Pie(labels=quantidadeDocentesFormacao.index, values=quantidadeDocentesFormacao.values, text=quantidadeDocentesFormacao.values, marker={
'colors': ['#665FD1', '#FFFF7E', '#F5054F', '#3F012C'], 'line': dict(color='#000000', width=2)})
layoutDocentesFormacao = go.Layout(title='Gráfico de docentes por formação (UFRN 2021 - Unidades de Natal - Magistério Superior)',
xaxis={'title': 'Formação'}, yaxis={'title': 'Número de docentes'})
figuraDocentesFormacao = go.Figure(
data=[piechartFormacao], layout=layoutDocentesFormacao)
# Pie chart of the distribution of UFRN faculty members by functional class
quantidadeDocentesClasseFuncional = docentesNatalMSUnidadeAcademica['classe_funcional'].value_counts(
).sort_index()
piechartClasseFuncional = go.Pie(labels=quantidadeDocentesClasseFuncional.index, values=quantidadeDocentesClasseFuncional.values,
text=quantidadeDocentesClasseFuncional.values, marker={'colors': px.colors.qualitative.Dark24, 'line': dict(color='#000000', width=2)})
barraDocentesClasseFuncional = go.Bar(x=quantidadeDocentesClasseFuncional.index, y=quantidadeDocentesClasseFuncional.values,
text=quantidadeDocentesClasseFuncional.values, textposition='auto', marker={'color': '#5D21D0'})
layoutDocentesClasseFuncional = go.Layout(title='Gráfico de docentes por classe funcional (UFRN 2021 - Unidades de Natal - Magistério Superior)', xaxis={
'title': 'Classe funcional'}, yaxis={'title': 'Número de docentes'}, height=450)
figuraDocentesClasseFuncional = go.Figure(
data=[piechartClasseFuncional], layout=layoutDocentesClasseFuncional)
# Builds a chart highlighting the functional-class data of the faculty grouped by unidade_dirigente
filtroClasseFuncional = ['unidade_dirigente', 'classe_funcional']
docentesClasseGroupBy = docentesNatalMSUnidadeAcademica.groupby(
filtroClasseFuncional).count().reset_index().loc[:, filtroClasseFuncional + ['nome']]
docentesClasseGroupBy['quantidade'] = docentesClasseGroupBy['nome']
del docentesClasseGroupBy['nome']
figClasseDetalhe = px.bar(docentesClasseGroupBy, x="unidade_dirigente", y="quantidade", color="classe_funcional",
text='quantidade', color_discrete_sequence=px.colors.qualitative.Bold, height=800)
# Builds a chart highlighting the sex data of the faculty grouped by unidade_dirigente
filtroSexo = ['unidade_dirigente', 'sexo']
docentesSexoGroupBy = docentesNatalMSUnidadeAcademica.groupby(
filtroSexo).count().reset_index().loc[:, filtroSexo + ['nome']]
docentesSexoGroupBy['quantidade'] = docentesSexoGroupBy['nome']
del docentesSexoGroupBy['nome']
figSexoDetalhe = px.bar(docentesSexoGroupBy, x="unidade_dirigente", y="quantidade",
color="sexo", text='quantidade', color_discrete_sequence=px.colors.qualitative.Bold)
# Builds a chart highlighting the academic-degree data of the faculty grouped by unidade_dirigente
filtroFormacao = ['unidade_dirigente', 'formacao']
docentesFormacaoGroupBy = docentesNatalMSUnidadeAcademica.groupby(
filtroFormacao).count().reset_index().loc[:, filtroFormacao + ['nome']]
docentesFormacaoGroupBy['quantidade'] = docentesFormacaoGroupBy['nome']
del docentesFormacaoGroupBy['nome']
figFormacaoDetalhe = px.bar(docentesFormacaoGroupBy, x="unidade_dirigente",
y="quantidade", color="formacao", text='quantidade', range_y=[0, 400])
# Builds a dictionary with the data indexed by unidade_dirigente
unidadesDirigentes = docentesNatalMSUnidadeAcademica['unidade_dirigente'].unique(
)
unidadesDirigentes
dfUnidadesDirigentes = {}
for unidadeDirigente in unidadesDirigentes:
df = docentesNatalMSUnidadeAcademica[docentesNatalMSUnidadeAcademica['unidade_dirigente'] == unidadeDirigente]
dfUnidadesDirigentes[unidadeDirigente] = df
# Function used to filter a DataFrame, grouping the data by one property and filtering it by two others
# Inputs: df = DataFrame, title = string, x = string, y = string, cor = ['rgb(a,b,c)','rgb(d,e,f)'...]
# Output: figAdmissao = bar chart
def filtrarDFPorUnidadeDirigente(df, title, x, y, cor=px.colors.qualitative.Bold):
dfFinal = df[title]
filtro = [x, y]
docentesFiltroGroupBy = dfFinal.groupby(
filtro).count().reset_index().loc[:, filtro + ['nome']]
docentesFiltroGroupBy['quantidade'] = docentesFiltroGroupBy['nome']
del docentesFiltroGroupBy['nome']
figAdmissao = px.bar(docentesFiltroGroupBy, x=x, y="quantidade", color=y,
text='quantidade', color_discrete_sequence=cor, title=title)
return figAdmissao
# Builds and formats a general DataFrame with every professor and the attributes needed to generate the charts and the table of averages
avaliacaoDocentesFiltro = avaliacao[avaliacao['nome_docente'].isin(
docentesNatalMSUnidadeAcademica['nome'])]
avaliacaoDocentesFiltro['total_postura'] = avaliacaoDocentesFiltro['postura_profissional_media'] * \
avaliacaoDocentesFiltro['qtd_discentes']
avaliacaoDocentesFiltro['total_atuacao'] = avaliacaoDocentesFiltro['atuacao_profissional_media'] * \
avaliacaoDocentesFiltro['qtd_discentes']
docentesMedias = avaliacaoDocentesFiltro.loc[:, [
'nome_docente', 'qtd_discentes', 'total_postura', 'total_atuacao']]
docentesMediasGroupBy = docentesMedias.groupby(['nome_docente']).sum()
docentesMediasGroupBy['media_postura'] = docentesMediasGroupBy['total_postura'] / \
docentesMediasGroupBy['qtd_discentes']
docentesMediasGroupBy['media_atuacao'] = docentesMediasGroupBy['total_atuacao'] / \
docentesMediasGroupBy['qtd_discentes']
docentesMediasGroupBy['media_alunos'] = avaliacaoDocentesFiltro.groupby(
['nome_docente']).mean().loc[:, 'autoavaliacao_aluno_media']
docentesMediasNatalMSUnidadeAcademica = pd.merge(
docentesNatalMSUnidadeAcademica, docentesMediasGroupBy, left_on="nome", right_on="nome_docente").round(3)
# Drops the fields not needed to build the grades table and assigns the remaining fields to a new DataFrame
docenteParaTabelaNotas = docentesMediasNatalMSUnidadeAcademica.loc[:, [
'nome', 'media_postura', 'media_atuacao', 'media_alunos', 'unidade_dirigente', 'lotacao', 'qtd_discentes']]
# Filters and formats a DataFrame grouping the media_postura, media_atuacao and media_alunos data by unidade_dirigente
docentesMediaUnidadeDirigente = docentesMediasNatalMSUnidadeAcademica.groupby(
'unidade_dirigente').mean().loc[:, ['media_postura', 'media_atuacao', 'media_alunos']]
docentesMediaUnidadeDirigente['unidade_dirigente'] = docentesMediaUnidadeDirigente.index
# Filters and formats a DataFrame holding the media_postura, media_atuacao and media_alunos values to be shown in the line chart of the evolution over time
docentesMediasAno = avaliacaoDocentesFiltro.loc[:, [
'nome_docente', 'qtd_discentes', 'total_postura', 'total_atuacao', 'ano']]
docentesMediasAnoGroupBy = docentesMediasAno.groupby(['ano']).sum()
docentesMediasAnoGroupBy['media_postura'] = docentesMediasAnoGroupBy['total_postura'] / \
docentesMediasAnoGroupBy['qtd_discentes']
docentesMediasAnoGroupBy['media_atuacao'] = docentesMediasAnoGroupBy['total_atuacao'] / \
docentesMediasAnoGroupBy['qtd_discentes']
docentesMediasAnoGroupBy['media_alunos'] = avaliacaoDocentesFiltro.groupby(
['ano']).mean().loc[:, 'autoavaliacao_aluno_media']
docentesMediasAnoGroupBy['ano'] = docentesMediasAnoGroupBy.index
# Builds the line chart of the evolution over time of media_postura, media_atuacao and media_alunos
figuraMediasAnoGroupBy = go.Figure()
figuraMediasAnoGroupBy.add_trace(go.Scatter(x=docentesMediasAnoGroupBy['ano'], y=docentesMediasAnoGroupBy['media_postura'],
mode='lines',
name='media_postura'))
figuraMediasAnoGroupBy.add_trace(go.Scatter(x=docentesMediasAnoGroupBy['ano'], y=docentesMediasAnoGroupBy['media_atuacao'],
mode='lines',
name='media_atuacao'))
figuraMediasAnoGroupBy.add_trace(go.Scatter(x=docentesMediasAnoGroupBy['ano'], y=docentesMediasAnoGroupBy['media_alunos'],
mode='lines',
name='media_alunos'))
figuraMediasAnoGroupBy.update_layout(
title='Evolução da avaliação dos discentes e docentes do magistério superior da UFRN nos anos de 2013 à 2019')
# Defines the unidade_dirigente options that will be shown in 'dropdown-1'
indicadoresDropdown1 = [
'GERAL'] + list(docentesNatalMSUnidadeAcademica['unidade_dirigente'].unique())
# Styles for the divs of the initial charts
estilosDivGraficosIniciais = {'width': '95%',
'display': 'inline-block', 'padding': '0 20'}
# Creates the app variable and chooses the application stylesheets
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Defines the layout presented on the web page
app.layout = html.Div([
html.H1(children='Análise dos dados dos docentes do magistério superior da UFRN das unidades de Natal no ano de 2021'),
html.Div([
dcc.Markdown('''
Trabalho referente à disciplina DCA-0131, Ciência de Dados, ministrada pelo professor Luiz Affonso Hederson Guedes de Oliveira.
Plataforma desenvolvida pelos discentes Darlan de Castro e Marcos Henrique, alunos do curso de Engenharia Computação da UFRN.
A aplicação web desenvolvida consiste em uma análise exploratória dos dados sobre os docentes do magistério superior das unidades de Natal da Universidade Federal do Rio Grande do Norte (UFRN) no ano de 2021.
Os dados utilizados aqui podem ser acessados pelo seguinte site: [http://dados.ufrn.br](http://dados.ufrn.br)
As principais tecnologias usadas para o desenvolvimento da plataforma foram:
* Linguagem Python;
* Pacotes Pandas, Plotly e Dash;
* Heroku (deploy da aplicação).
''')
]),
html.H2(
children='Divisão dos docentes do magistério superior da UFRN no ano de 2021'),
html.Div([
html.Div([
dcc.Markdown('''
Nesta seção da aplicação pode-se acompanhar a divisão dos docentes através de difentes categorias, como sexo, formação e classe funcional, assim como ver como eles estão distribuídos por cada unidade responsável na UFRN.
Na primeira caixa de seleção pode-se escolher qual unidade responsável deseja-se analisar. Assim, são atualizados os três primeiros gráficos com informações das divisões dos decentes referentes a cada lotação que compõe aquela unidade responsável.
Se a opção for escolhida for "GERAL", então pode-se mostar gráficos gerais sobre toda as unidades de Natal da UFRN, ou gráficos detalhados mostrando a divisão por unidades responsáveis.
'''),
dcc.Dropdown(
id='dropdown-1',
options=[{'label': i, 'value': i}
for i in indicadoresDropdown1],
value='GERAL'
),
dcc.RadioItems(
id='radioitems-1',
options=[{'label': i, 'value': i}
for i in ['GERAL', 'DETALHADA']],
value='GERAL',
labelStyle={'display': 'inline-block'}
)
],
style={'width': '80%', 'display': 'inline-block'}),
html.Div([
dcc.Graph(
id='grafico-sexo')
], style=estilosDivGraficosIniciais),
html.Div([
dcc.Graph(
id='grafico-formacao')
], style=estilosDivGraficosIniciais),
html.Div([
dcc.Graph(
id='grafico-classe')
], style=estilosDivGraficosIniciais),
html.Div([
dcc.Graph(
id='grafico-sobra',
figure=figuraDocentesUnidadeDirigente)
], style=estilosDivGraficosIniciais, id='div-grafico-sobra'),
]),
html.H2(children='Estatísticas das avaliações dos docentes do magistério superior da UFRN (campus Natal) nos anos de 2013 à 2019'),
dcc.Markdown('''
Nesta seção da aplicação pode-se acompanhar dados sobre as avaliações dos docentes da UFRN e da autoavalição dos alunos feita a cada fim de semestre. Os dados disponibilizados constam do período de 2013 à 2019.
Ao todo são três dados importantes a serem considerados a média de postura dos docentes, a média de atuação dos docentes e autoavaliação dos alunos.
No primeiro gráfico pode-se acompanhar a média desses três quesitos por cada unidade responsável.
'''),
html.Div([
dcc.Graph(
id='grafico-nota-1')
], style={'width': '95%', 'display': 'inline-block', 'padding': '0 20'}),
html.Div([
dcc.Slider(
id='slider-grafico-nota-1',
min=1,
max=3,
value=1,
marks={str(i): str(i) for i in [1, 2, 3]},
step=None)],
style={'width': '80%', 'padding': '0px 15px 15px 15px'}),
dcc.Markdown('''
* Opção 1 - Média de atuação dos docentes;
* Opção 2 - Média de postura dos docentes;
* Opção 3 - Média da autoavaliação dos discentes.
'''),
dcc.Markdown('''
No segundo gráfico há dados sobre a evolução das médias de postura e atuação dos docentes e autoavaliação dos discentes ao longo dos anos.
'''),
html.Div([
dcc.Graph(
id='grafico-nota-2',
figure=figuraMediasAnoGroupBy)
], style={'width': '95%', 'display': 'inline-block', 'padding': '0 20'}),
dcc.Markdown('''
No terceito gráfico pode-se ver um histograma com a frequência das médias de postura e atuação dos docentes dividida por sexo.
'''),
html.Div([
dcc.Graph(
id='grafico-histograma')
], style={'width': '95%', 'display': 'inline-block', 'padding': '0 20'}),
html.Div([
dcc.Slider(
id='slider-grafico-histograma',
min=1,
max=2,
value=1,
marks={str(i): str(i) for i in [1, 2]},
step=None)],
style={'width': '80%', 'padding': '0px 15px 15px 15px'}),
dcc.Markdown('''
            * Option 1 - Faculty mean performance (atuação) score;
            * Option 2 - Faculty mean posture score.
'''),
dcc.Markdown('''
            In this part you can select a responsible unit (first selection box) and then one of its departments (lotação, second selection box) to check each professor's mean performance and posture scores, the self-evaluation of the students in that professor's classes, and the number of students taught, for each UFRN department.
'''),
html.Div([
dcc.Dropdown(
id='dropdown-2',
options=[{'label': i, 'value': i}
for i in docenteParaTabelaNotas['unidade_dirigente'].unique()],
value=docenteParaTabelaNotas['unidade_dirigente'].iloc[0]
)],
style={'width': '80%', 'display': 'inline-block'}),
html.Div([
dcc.Dropdown(
id='dropdown-3',
)],
style={'width': '80%', 'display': 'inline-block'}),
html.Div([
dash_table.DataTable(
id='table-nota',
columns=[{"name": i, "id": i} for i in [
'nome', 'media_postura', 'media_atuacao', 'media_alunos', 'qtd_discentes']],
style_cell={'textAlign': 'left'},
)
], style={'width': '95%', 'display': 'inline-block', 'padding': '0 20'}),
])
# Callback that toggles the visibility (style) of the div holding the bar chart (faculty count x unidade_dirigente)
# Inputs: 'value' - 'dropdown-1', 'value' - 'radioitems-1'
# Output: 'style' - 'div-grafico-sobra'
@app.callback(
dash.dependencies.Output('div-grafico-sobra', 'style'),
[dash.dependencies.Input('dropdown-1', 'value'),
dash.dependencies.Input('radioitems-1', 'value')])
def visibility_graficoSobra(dropValue, radioValue):
    # Work on a copy so the shared style dict used by the other graph divs is not mutated.
    estilo = dict(estilosDivGraficosIniciais)
    estilo['display'] = 'inline-block' if (radioValue == 'GERAL' and dropValue == 'GERAL') else 'none'
    return estilo
# Callback that updates the 'figure' of the chart by sex.
# Inputs: 'value' - 'dropdown-1', 'value' - 'radioitems-1'
# Output: 'figure' - 'grafico-sexo'
@app.callback(
dash.dependencies.Output('grafico-sexo', 'figure'),
[dash.dependencies.Input('dropdown-1', 'value'),
dash.dependencies.Input('radioitems-1', 'value')])
def att_sexo(dropValue, radioValue):
if(radioValue == 'GERAL' and dropValue == 'GERAL'):
return figuraDocentesSexo
elif(radioValue == 'DETALHADA' and dropValue == 'GERAL'):
return figSexoDetalhe
return filtrarDFPorUnidadeDirigente(dfUnidadesDirigentes, dropValue, 'lotacao', 'sexo')
# Callback that updates the 'figure' of the chart by academic degree (formação)
# Inputs: 'value' - 'dropdown-1', 'value' - 'radioitems-1'
# Output: 'figure' - 'grafico-formacao'
@app.callback(
dash.dependencies.Output('grafico-formacao', 'figure'),
[dash.dependencies.Input('dropdown-1', 'value'),
dash.dependencies.Input('radioitems-1', 'value')])
def att_formacao(dropValue, radioValue):
if(radioValue == 'GERAL' and dropValue == 'GERAL'):
return figuraDocentesFormacao
elif(radioValue == 'DETALHADA' and dropValue == 'GERAL'):
return figFormacaoDetalhe
return filtrarDFPorUnidadeDirigente(dfUnidadesDirigentes, dropValue, 'lotacao', 'formacao')
# Callback that updates the 'figure' of the chart by functional class
# Inputs: 'value' - 'dropdown-1', 'value' - 'radioitems-1'
# Output: 'figure' - 'grafico-classe'
@app.callback(
dash.dependencies.Output('grafico-classe', 'figure'),
[dash.dependencies.Input('dropdown-1', 'value'),
dash.dependencies.Input('radioitems-1', 'value')])
def att_classe(dropValue, radioValue):
if(radioValue == 'GERAL' and dropValue == 'GERAL'):
return figuraDocentesClasseFuncional
elif(radioValue == 'DETALHADA' and dropValue == 'GERAL'):
return figClasseDetalhe
return filtrarDFPorUnidadeDirigente(dfUnidadesDirigentes, dropValue, 'lotacao', 'classe_funcional')
# Callback that updates the 'figure' of the evaluation-score chart
# Input: 'value' - 'slider-grafico-nota-1'
# Output: 'figure' - 'grafico-nota-1'
@app.callback(
dash.dependencies.Output('grafico-nota-1', 'figure'),
[dash.dependencies.Input('slider-grafico-nota-1', 'value')])
def att_nota1(sliderValue):
var = 'media_atuacao'
if sliderValue == 2:
var = 'media_postura'
elif sliderValue == 3:
var = 'media_alunos'
return px.scatter(docentesMediaUnidadeDirigente, x="unidade_dirigente", y=var,
size=var, hover_name="unidade_dirigente", color="unidade_dirigente")
# Callback that updates the 'figure' of the histogram
# Input: 'value' - 'slider-grafico-histograma'
# Output: 'figure' - 'grafico-histograma'
@app.callback(
dash.dependencies.Output('grafico-histograma', 'figure'),
[dash.dependencies.Input('slider-grafico-histograma', 'value')])
def att_histograma(sliderValue):
var = 'media_atuacao'
if sliderValue == 2:
var = 'media_postura'
    return px.histogram(docentesMediasNatalMSUnidadeAcademica, x=var, color="sexo", title='Histogram of the evaluations of UFRN higher-education faculty, 2013 to 2019')
# Callback that updates the 'options' of the table's department (lotação) dropdown
# Input: 'value' - 'dropdown-2'
# Output: 'options' - 'dropdown-3'
@app.callback(
dash.dependencies.Output('dropdown-3', 'options'),
[dash.dependencies.Input('dropdown-2', 'value')])
def att_dropdown3Options(dropValue):
    # Drop the column on the filtered copy instead of using `del`, which would mutate a slice of the shared DataFrame.
    df = docenteParaTabelaNotas[docenteParaTabelaNotas['unidade_dirigente'] == dropValue].drop(columns=['unidade_dirigente'])
return [{'label': 'GERAL', 'value': 'GERAL'}] + [{'label': i, 'value': i} for i in df['lotacao'].unique()]
# Callback that resets the 'value' of the table's department (lotação) dropdown
# Input: 'value' - 'dropdown-2'
# Output: 'value' - 'dropdown-3'
@app.callback(
dash.dependencies.Output('dropdown-3', 'value'),
[dash.dependencies.Input('dropdown-2', 'value')])
def att_dropdown3Value(dropValue):
return 'GERAL'
# Callback that updates the 'data' of the table showing professors' scores by unidade_dirigente and lotacao
# Inputs: 'value' - 'dropdown-2', 'value' - 'dropdown-3'
# Output: 'data' - 'table-nota'
@app.callback(
dash.dependencies.Output('table-nota', 'data'),
[dash.dependencies.Input('dropdown-2', 'value'),
dash.dependencies.Input('dropdown-3', 'value')])
def att_table(dropValue2, dropValue3):
    # Use .drop() on the filtered copies instead of `del`, so the shared DataFrame is never mutated.
    df = docenteParaTabelaNotas[docenteParaTabelaNotas['unidade_dirigente'] == dropValue2].drop(columns=['unidade_dirigente'])
    if dropValue3 == 'GERAL':
        return df.drop(columns=['lotacao']).to_dict("records")
    df = docenteParaTabelaNotas[docenteParaTabelaNotas['lotacao'] == dropValue3].drop(columns=['lotacao'])
    return df.to_dict("records")
# Expose the application's underlying Flask server as `server` (used for deployment, e.g. on Heroku)
server = app.server
if __name__ == '__main__':
app.run_server(debug=True)
|
py | 1a2eba011c074c0754bab4c140f94ad660cb4a7b | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
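# The monomers, parameters and rules below appear to encode an extrinsic apoptosis
# cascade: ligand/receptor binding activates caspase-8 (C8A), which truncates Bid and,
# through Bax activation and pore formation, releases Smac and cytochrome c; the
# apoptosome (Apop) and caspase-3 (C3A) then cleave PARP, with XIAP acting as the main
# inhibitor. All rate constants in this export are set to the placeholder value 1.0.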
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 122250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
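# A minimal simulation sketch, kept as a comment; it assumes pysb's ScipyOdeSimulator
# backend and numpy are available in the environment:
#
#     import numpy as np
#     from pysb.simulator import ScipyOdeSimulator
#     tspan = np.linspace(0, 20000, 201)
#     result = ScipyOdeSimulator(model, tspan=tspan).run()
#     print(result.observables['ParpC_obs'][-1])  # cleaved PARP at the final time point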
|
py | 1a2ebb1ee6774e2758c167ef110d7e7763063c69 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def main():
"""Lists as piles
"""
# Create a stack
my_stack = [1, 2, 3, 4]
print("my_stack", my_stack)
# Push values on the stack
my_stack.append(5)
my_stack.append(6)
my_stack.append(7)
print("my_stack", my_stack)
# Pop values from the stack
print("Poped value", my_stack.pop())
print("my_stack", my_stack)
print("Poped value", my_stack.pop())
print("my_stack", my_stack)
print("Poped value", my_stack.pop())
print("my_stack", my_stack)
print("Poped value", my_stack.pop())
print("my_stack", my_stack)
if __name__ == '__main__':
main()
|
py | 1a2ebbf26fdd2ee9721467b2ee844ef820c2732a | # -*- coding: utf-8 -*-
"""
werkzeug.testapp
~~~~~~~~~~~~~~~~
Provide a small test application that can be used to test a WSGI server
and check it for WSGI compliance.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import werkzeug
from textwrap import wrap
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.utils import escape
import base64
logo = Response(base64.b64decode(
'''R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
='''), mimetype='image/png')
TEMPLATE = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<title>WSGI Information</title>
<style type="text/css">
@import url(http://fonts.googleapis.com/css?family=Ubuntu);
body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif; background-color: white; color: #000;
font-size: 15px; text-align: center; }
#logo { float: right; padding: 0 0 10px 10px; }
div.box { text-align: left; width: 45em; margin: auto; padding: 50px 0;
background-color: white; }
h1, h2 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
'Geneva', 'Verdana', sans-serif; font-weight: normal; }
h1 { margin: 0 0 30px 0; }
h2 { font-size: 1.4em; margin: 1em 0 0.5em 0; }
table { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
table th { background-color: #AFC1C4; color: white; font-size: 0.72em;
font-weight: normal; width: 18em; vertical-align: top;
padding: 0.5em 0 0.1em 0.5em; }
table td { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
code { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
monospace; font-size: 0.7em; }
ul li { line-height: 1.5em; }
ul.path { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
list-style: none; background: #E8EFF0; }
ul.path li { line-height: 1.6em; }
li.virtual { color: #999; text-decoration: underline; }
li.exp { background: white; }
</style>
<div class="box">
<img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
<h1>WSGI Information</h1>
<p>
This page displays all available information about the WSGI server and
the underlying Python interpreter.
<h2 id="python-interpreter">Python Interpreter</h2>
<table>
<tr>
<th>Python Version
<td>%(python_version)s
<tr>
<th>Platform
<td>%(platform)s [%(os)s]
<tr>
<th>API Version
<td>%(api_version)s
<tr>
<th>Byteorder
<td>%(byteorder)s
<tr>
<th>Werkzeug Version
<td>%(werkzeug_version)s
</table>
<h2 id="wsgi-environment">WSGI Environment</h2>
<table>%(wsgi_env)s</table>
<h2 id="installed-eggs">Installed Eggs</h2>
<p>
The following python packages were installed on the system as
Python eggs:
<ul>%(python_eggs)s</ul>
<h2 id="sys-path">System Path</h2>
<p>
The following paths are the current contents of the load path. The
following entries are looked up for Python packages. Note that not
all items in this path are folders. Gray and underlined items are
entries pointing to invalid resources or used by custom import hooks
such as the zip importer.
<p>
Items with a bright background were expanded for display from a relative
path. If you encounter such paths in the output you might want to check
your setup as relative paths are usually problematic in multithreaded
environments.
<ul class="path">%(sys_path)s</ul>
</div>
'''
def iter_sys_path():
if os.name == 'posix':
def strip(x):
prefix = os.path.expanduser('~')
if x.startswith(prefix):
x = '~' + x[len(prefix):]
return x
else:
strip = lambda x: x
cwd = os.path.abspath(os.getcwd())
for item in sys.path:
path = os.path.join(cwd, item or os.path.curdir)
yield strip(os.path.normpath(path)), \
not os.path.isdir(path), path != item
def render_testapp(req):
try:
import pkg_resources
except ImportError:
eggs = ()
else:
eggs = sorted(pkg_resources.working_set,
key=lambda x: x.project_name.lower())
python_eggs = []
for egg in eggs:
try:
version = egg.version
except (ValueError, AttributeError):
version = 'unknown'
python_eggs.append('<li>%s <small>[%s]</small>' % (
escape(egg.project_name),
escape(version)
))
wsgi_env = []
sorted_environ = sorted(req.environ.items(),
key=lambda x: repr(x[0]).lower())
for key, value in sorted_environ:
wsgi_env.append('<tr><th>%s<td><code>%s</code>' % (
escape(str(key)),
' '.join(wrap(escape(repr(value))))
))
sys_path = []
for item, virtual, expanded in iter_sys_path():
class_ = []
if virtual:
class_.append('virtual')
if expanded:
class_.append('exp')
sys_path.append('<li%s>%s' % (
class_ and ' class="%s"' % ' '.join(class_) or '',
escape(item)
))
return (TEMPLATE % {
'python_version': '<br>'.join(escape(sys.version).splitlines()),
'platform': escape(sys.platform),
'os': escape(os.name),
'api_version': sys.api_version,
'byteorder': sys.byteorder,
'werkzeug_version': werkzeug.__version__,
'python_eggs': '\n'.join(python_eggs),
'wsgi_env': '\n'.join(wsgi_env),
'sys_path': '\n'.join(sys_path)
}).encode('utf-8')
def test_app(environ, start_response):
"""Simple test application that dumps the environment. You can use
it to check if Werkzeug is working properly:
.. sourcecode:: pycon
>>> from werkzeug.serving import run_simple
>>> from werkzeug.testapp import test_app
>>> run_simple('localhost', 3000, test_app)
* Running on http://localhost:3000/
The application displays important information from the WSGI environment,
the Python interpreter and the installed libraries.
"""
req = Request(environ, populate_request=False)
if req.args.get('resource') == 'logo':
response = logo
else:
response = Response(render_testapp(req), mimetype='text/html')
return response(environ, start_response)
if __name__ == '__main__':
from werkzeug.serving import run_simple
run_simple('localhost', 5000, test_app, use_reloader=True)
|
py | 1a2ebd20ec2a87b1412881182523065a77696950 | # -*- coding: utf-8 -*-
import pandas as pd
from .ecg_eventrelated import ecg_eventrelated
from .ecg_intervalrelated import ecg_intervalrelated
def ecg_analyze(data, sampling_rate=1000, method="auto"):
"""Performs ECG analysis on either epochs (event-related
analysis) or on longer periods of data such as resting-state data.
Parameters
----------
data : dict, DataFrame
A dictionary of epochs, containing one DataFrame per epoch,
usually obtained via `epochs_create()`, or a DataFrame
containing all epochs, usually obtained via `epochs_to_df()`.
Can also take a DataFrame of processed signals from
a longer period of data, typically generated by `ecg_process()`
or `bio_process()`. Can also take a dict containing sets of
separate periods of data.
sampling_rate : int
The sampling frequency of the signal (in Hz, i.e., samples/second).
Defaults to 1000Hz.
method : str
Can be one of 'event-related' for event-related analysis on epochs,
or 'interval-related' for analysis on longer periods of data. Defaults
to 'auto' where the right method will be chosen based on the
mean duration of the data ('event-related' for duration under 10s).
Returns
-------
DataFrame
A dataframe containing the analyzed ECG features. If
event-related analysis is conducted, each epoch is indicated
by the `Label` column. See `ecg_eventrelated()` and
`ecg_intervalrelated()` docstrings for details.
See Also
--------
bio_process, ecg_process, epochs_create, ecg_eventrelated, ecg_intervalrelated
Examples
----------
>>> import neurokit2 as nk
>>>
>>> # Example 1: Download the data for event-related analysis
>>> data = nk.data("bio_eventrelated_100hz")
>>>
>>> # Process the data for event-related analysis
>>> df, info = nk.bio_process(ecg=data["ECG"], sampling_rate=100)
>>> events = nk.events_find(data["Photosensor"],
threshold_keep='below',
event_conditions=["Negative",
"Neutral",
"Neutral",
"Negative"])
>>> epochs = nk.epochs_create(df, events,
sampling_rate=100,
epochs_start=-0.1, epochs_end=1.9)
>>> nk.ecg_analyze(epochs, sampling_rate=100)
>>>
>>> # Example 2: Download the resting-state data
>>> data = nk.data("bio_resting_5min_100hz")
>>>
>>> # Process the data
>>> df, info = nk.ecg_process(data["ECG"], sampling_rate=100)
>>>
>>> # Analyze
>>> nk.ecg_analyze(df, sampling_rate=100)
"""
method = method.lower()
# Event-related analysis
if method in ["event-related", "event", "epoch"]:
# Sanity checks
if isinstance(data, dict):
for i in data:
colnames = data[i].columns.values
elif isinstance(data, pd.DataFrame):
colnames = data.columns.values
if len([i for i in colnames if "Label" in i]) == 0:
raise ValueError("NeuroKit error: ecg_analyze(): Wrong input"
"or method, we couldn't extract"
"extract epochs features.")
else:
features = ecg_eventrelated(data)
# Interval-related analysis
elif method in ["interval-related", "interval", "resting-state"]:
features = ecg_intervalrelated(data)
# Auto
elif method in ["auto"]:
if isinstance(data, dict):
for i in data:
duration = len(data[i]) / sampling_rate
if duration >= 10:
features = ecg_intervalrelated(data)
else:
features = ecg_eventrelated(data)
if isinstance(data, pd.DataFrame):
if 'Label' in data.columns:
epoch_len = data['Label'].value_counts()[0]
duration = epoch_len / sampling_rate
else:
duration = len(data) / sampling_rate
if duration >= 10:
features = ecg_intervalrelated(data)
else:
features = ecg_eventrelated(data)
return features
|
py | 1a2ebe466abdbcf87500cf2d6c85c05e718b5ac7 | # Q6
I = (dt_true['income'] < 0.5)
dt_true.loc[I, ['consumption']] = 0.5
dt_true['consumption'].mean() |
py | 1a2ebe723752bd9e0fe6cb791a4bad866178fe19 | # 47. Permutations II
class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
"""
Given a collection of numbers that might contain duplicates, return all possible unique permutations.
"""
        permutations = set()
        self.helper(nums, [], permutations)
        # Convert the set of tuples back into a list of lists to match the declared return type.
        return [list(p) for p in permutations]
def helper(self, array, currentPermutation, permutations):
if not len(array) and len(currentPermutation):
permutations.add(tuple(currentPermutation))
else:
for index in range(len(array)):
newArray = array[: index] + array[index + 1:]
newPermutation = currentPermutation + [array[index]]
self.helper(newArray, newPermutation, permutations)
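

# A hypothetical usage sketch (LeetCode normally supplies the harness):
#     Solution().permuteUnique([1, 1, 2])
#     # -> [[1, 1, 2], [1, 2, 1], [2, 1, 1]], in some order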
|
py | 1a2ec0438436e70f3f58ef493bca2cccbd7f42d3 | import torch
import numpy as np
def train_perm_orth(train_loader, model, optimizer, scheduler, criterion, regularizer=None, rho=1E-4, delta=0.5,
nu=1E-2, eps=1E-3, tau=1E-2, lagrange_pen=1E-2, perm_flag=True, t_step=40):
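    """
    Curvilinear-search training for (near-)permutation weight matrices.

    This appears to follow the Wen & Yin style of optimization with orthogonality
    constraints: gradients are turned into a skew-symmetric matrix F, parameters are
    moved along the Cayley-transform curve Y(tau) returned by `compute_ytau`, tau is
    chosen by a non-monotone backtracking rule (constants `rho`, `delta`, `nu`, `c`,
    `q_opt`) and rescaled with a Barzilai-Borwein heuristic, while `integer_penalty`
    adds an augmented Lagrangian term pushing the orthogonal matrices toward
    nonnegative, i.e. permutation-like, entries.
    """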
if perm_flag:
tau_min = 1E-24
tau_max = 1E-1
c = None
lam_lm = []
for p in optimizer.param_groups[0]['params']:
lam_lm.append(torch.zeros_like(p))
k_iter = 0
ts = torch.empty(len(train_loader), device=model.device).uniform_(0.0, 1.0)
with torch.no_grad():
for p in optimizer.param_groups[0]['params']:
p.data = torch.rand_like(p.data)
p.data, _, _ = torch.svd(p.data)
input_cml = []
target_cml = []
t_cml = []
inner_iter = 0
loss = 0.0
loss_obj = 0.0
for iter, (input, target) in enumerate(train_loader):
t = ts[iter]
input = input.to(model.device, non_blocking=False)
target = target.to(model.device, non_blocking=False)
output = model(input, perm_train=True, t=t)
input_all = input
target_all = target
new_loss = criterion(output, target_all)
loss_obj += new_loss
# This part is for the augmented Lagrangian method
int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)
loss += new_loss + int_pen
inner_iter += 1
input_cml.append(input.clone())
target_cml.append(target.clone())
t_cml.append(t.clone())
if inner_iter % t_step == 0:
optimizer.zero_grad()
loss.backward()
grad_norm = 0.0
violator = 0.0
for p in optimizer.param_groups[0]['params']:
param_norm = p.grad.data.norm(2)
grad_norm += param_norm.item() ** 2
violator += torch.sum((torch.matmul(p.data.t(), p.data) - torch.eye(p.data.shape[0],
device=p.device)) ** 2)
grad_norm = grad_norm ** (1. / 2)
if c is None:
c = loss.clone().item()
q_opt = 1
loss_inner = loss.clone()
print('Iteration: %03d, Loss %.2E, Objective %.2E, Negative Penalty: %.2E,'
'Grad Norm: %.2E, Ortho Violation: %.2E, tau: %.2E' %
(k_iter, loss_inner.item(), loss_obj.item(), int_pen.item(), grad_norm, violator.item(), tau))
# Compute F for defining Y function
F_list = []
with torch.no_grad():
for p in optimizer.param_groups[0]['params']:
f = torch.matmul(p.grad.data, p.t().data) - torch.matmul(p.data, p.grad.t().data)
F_list.append(f)
# Store old parameters
params_old = [None] * len(optimizer.param_groups[0]['params'])
for idx, param in enumerate(optimizer.param_groups[0]['params']):
params_old[idx] = param.clone()
grads_old = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]
# Compute the values of Y(tau) and Y'(tau), store them into the model
Y_t, Y_ft_prime = compute_ytau(tau, F_list, optimizer.param_groups[0]['params'])
for p, y_t in zip(optimizer.param_groups[0]['params'], Y_t):
p.data = y_t.clone()
loss_inner = 0.0
for t_2, input_2, target_2 in zip(t_cml, input_cml, target_cml):
output = model(input_2, perm_train=True, t=t_2)
loss_inner += criterion(output, target_2)
int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)
loss_inner += int_pen
optimizer.zero_grad()
loss_inner.backward()
grads_new = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]
with torch.no_grad():
dF_dt = 0.0
for g_new, y_ft_p in zip(grads_new, Y_ft_prime):
df = g_new * (y_ft_p / torch.norm(y_ft_p.data))
df = torch.sum(df)
dF_dt += df.item()
threshold_flag = True
k_inner = 0
while threshold_flag:
with torch.no_grad():
threshold = c + rho * tau * dF_dt
if loss_inner.item() >= threshold:
# Compute Y for smaller value of tau
with torch.no_grad():
tau *= delta
Y_t, Y_ft_prime = compute_ytau(tau, F_list, optimizer.param_groups[0]['params'])
for p, y_t in zip(optimizer.param_groups[0]['params'], Y_t):
p.data = y_t.clone()
loss_old = loss_inner.clone()
loss_inner = 0.0
for t_2, input_2, target_2 in zip(t_cml, input_cml, target_cml):
output = model(input_2, perm_train=True, t=t_2)
loss_inner += criterion(output, target_2)
int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)
loss_inner += int_pen
optimizer.zero_grad()
loss_inner.backward()
grads_new = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]
k_inner += 1
if (loss_inner.item() - loss_old.item()) / (1 + loss_old.item()) < 1E-5:
threshold_flag = False
else:
threshold_flag = False
with torch.no_grad():
c = (nu * q_opt * c + loss_inner.item())
q_opt = nu * q_opt + 1
c = c / q_opt
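                    # Barzilai-Borwein step-size estimate from the parameter and gradient
                    # differences (s_bb, y_bb); tau is then clipped to [tau_min, tau_max].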
bb_num = 0.0
bb_denom = 0.0
yy_sum = 0.0
for p_old, g_old, p_new, g_new in zip(params_old, grads_old, optimizer.param_groups[0]['params'],
grads_new):
s_bb = p_new - p_old
y_bb = g_new - g_old
bb_num += torch.sum(s_bb ** 2)
bb_denom += torch.sum(s_bb * y_bb)
yy_sum += torch.sum(y_bb ** 2)
tau_bb = bb_num / torch.abs(bb_denom)
tau_bb = tau_bb.item()
tau_bb2 = torch.abs(bb_denom) / yy_sum
tau_bb2 = tau_bb2.item()
tau_bb = np.minimum(tau_bb, tau_bb2)
tau = np.minimum(tau_bb, tau_max)
tau = np.maximum(tau, tau_min)
lam_lm, lagrange_pen = integer_penalty_update(optimizer.param_groups[0]['params'], lam_lm,
lagrange_pen)
loss_inner = 0.0
for t_2, input_2, target_2 in zip(t_cml, input_cml, target_cml):
output = model(input_2, perm_train=True, t=t_2)
loss_obj = criterion(output, target_2)
int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)
loss_inner += loss_obj + int_pen
optimizer.zero_grad()
loss_inner.backward()
grads_new = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]
grad_norm = 0.0
for g_new in grads_new:
gn = g_new.norm(2)
grad_norm += gn.item() ** 2
grad_norm = grad_norm ** (1. / 2)
k_iter += 1
input_cml = []
target_cml = []
t_cml = []
loss = 0.0
loss_obj = 0.0
model.train()
loss_sum = 0.0
correct = 0.0
change_P = np.nan
params_before = [None] * len(optimizer.param_groups[0]['params'])
if nu is not None:
for idx, param in enumerate(optimizer.param_groups[0]['params']):
params_before[idx] = param.clone().detach()
optimizer.step()
lr = scheduler.get_lr()[0]
with torch.no_grad():
for param, param_o in zip(optimizer.param_groups[0]['params'], params_old):
param.data = 1 / (1 + lr / nu) * (param + lr / nu * param_o)
output = model(input_all, perm_train=True)
loss = criterion(output, target_all)
if regularizer is not None:
loss += regularizer(model)
loss_sum += loss.item() * input.size(0)
pred = output.data.argmax(1, keepdim=True)
correct += pred.eq(target_all.data.view_as(pred)).sum().item()
return {
'loss': loss_sum / len(train_loader.dataset),
'accuracy': correct * 100.0 / len(train_loader.dataset),
'change_perm': change_P
}
def hard_int_penalty(p_list, pen=1E1):
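    """Quadratic penalty applied to the negative entries of each parameter tensor."""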
pen_loss = 0.0
for p in p_list:
p_mask = p.data * (p.data <= 0)
pen_loss += pen * torch.sum(p_mask ** 2)
return pen_loss
def integer_penalty(p_list, lam_list, mu):
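    """
    Augmented-Lagrangian penalty for the nonnegativity constraint p >= 0.

    Entries with p - lam/mu <= 0 are charged the quadratic term -lam*p + (mu/2)*p**2;
    elsewhere the penalty saturates at -lam**2/(2*mu), the usual clipped form of the
    augmented Lagrangian for inequality constraints.
    """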
pen_loss = 0.0
for p, lam in zip(p_list, lam_list):
mask = (p - lam / mu) <= 0
mask_alt = (p - lam / mu) > 0
p_l = torch.sum((- lam * p + 0.5 * mu * (p ** 2)) * mask)
p_l += torch.sum((-1/(2 * mu) * lam ** 2) * mask_alt)
pen_loss += p_l
return pen_loss
def integer_penalty_update(p_list, lam_list, mu):
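    """Multiplier update lam <- max(0, lam - mu*p), followed by a mild increase of mu."""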
new_lam_list = []
with torch.no_grad():
for p, lam in zip(p_list, lam_list):
upd = lam - mu * p
new_lam_list.append(upd * (upd > 0))
new_mu = mu * 1.01
return new_lam_list, new_mu
def compute_ytau(tau, f_list, p_list):
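    """
    Cayley-transform retraction used by the curvilinear search.

    For each parameter P with skew-symmetric F this returns
    Y(tau) = (I + tau/2*F)^{-1} (I - tau/2*F) P and its derivative
    Y'(tau) = -(I + tau/2*F)^{-1} F (P + Y(tau)) / 2, which keeps Y(tau) orthogonal
    whenever P is orthogonal.
    """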
y_tau = []
y_tau_prime = []
for p, f in zip(p_list, f_list):
eye = torch.eye(f.shape[0], device=f.device)
qmat_inv = torch.inverse(eye + tau / 2 * f)
y_ft = torch.matmul(qmat_inv, eye - tau / 2 * f)
y_ft = torch.matmul(y_ft, p)
y_ft_prime = - torch.matmul(qmat_inv, f)
y_ft_prime = torch.matmul(y_ft_prime, (p + y_ft) / 2)
y_tau.append(y_ft.clone())
y_tau_prime.append(y_ft_prime.clone())
return y_tau, y_tau_prime |
py | 1a2ec086e778aa964a840fc70cf9aae1d766560e | """The test for the History Statistics sensor platform."""
# pylint: disable=protected-access
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.const import STATE_UNKNOWN
from homeassistant.setup import setup_component
from homeassistant.components.sensor.history_stats import HistoryStatsSensor
import homeassistant.core as ha
from homeassistant.helpers.template import Template
import homeassistant.util.dt as dt_util
from tests.common import init_recorder_component, get_test_home_assistant
class TestHistoryStatsSensor(unittest.TestCase):
"""Test the History Statistics sensor."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup(self):
"""Test the history statistics sensor setup."""
self.init_recorder()
config = {
'history': {
},
'sensor': {
'platform': 'history_stats',
'entity_id': 'binary_sensor.test_id',
'state': 'on',
'start': '{{ now().replace(hour=0)'
'.replace(minute=0).replace(second=0) }}',
'duration': '02:00',
'name': 'Test',
}
}
self.assertTrue(setup_component(self.hass, 'sensor', config))
state = self.hass.states.get('sensor.test')
self.assertEqual(state.state, STATE_UNKNOWN)
def test_period_parsing(self):
"""Test the conversion from templates to period."""
today = Template('{{ now().replace(hour=0).replace(minute=0)'
'.replace(second=0) }}', self.hass)
duration = timedelta(hours=2, minutes=1)
sensor1 = HistoryStatsSensor(
self.hass, 'test', 'on', today, None, duration, 'time', 'test')
sensor2 = HistoryStatsSensor(
self.hass, 'test', 'on', None, today, duration, 'time', 'test')
sensor1.update_period()
sensor1_start, sensor1_end = sensor1._period
sensor2.update_period()
sensor2_start, sensor2_end = sensor2._period
# Start = 00:00:00
self.assertEqual(sensor1_start.hour, 0)
self.assertEqual(sensor1_start.minute, 0)
self.assertEqual(sensor1_start.second, 0)
# End = 02:01:00
self.assertEqual(sensor1_end.hour, 2)
self.assertEqual(sensor1_end.minute, 1)
self.assertEqual(sensor1_end.second, 0)
# Start = 21:59:00
self.assertEqual(sensor2_start.hour, 21)
self.assertEqual(sensor2_start.minute, 59)
self.assertEqual(sensor2_start.second, 0)
# End = 00:00:00
self.assertEqual(sensor2_end.hour, 0)
self.assertEqual(sensor2_end.minute, 0)
self.assertEqual(sensor2_end.second, 0)
def test_measure(self):
"""Test the history statistics sensor measure."""
t0 = dt_util.utcnow() - timedelta(minutes=40)
t1 = t0 + timedelta(minutes=20)
t2 = dt_util.utcnow() - timedelta(minutes=10)
# Start t0 t1 t2 End
# |--20min--|--20min--|--10min--|--10min--|
# |---off---|---on----|---off---|---on----|
fake_states = {
'binary_sensor.test_id': [
ha.State('binary_sensor.test_id', 'on', last_changed=t0),
ha.State('binary_sensor.test_id', 'off', last_changed=t1),
ha.State('binary_sensor.test_id', 'on', last_changed=t2),
]
}
start = Template('{{ as_timestamp(now()) - 3600 }}', self.hass)
end = Template('{{ now() }}', self.hass)
sensor1 = HistoryStatsSensor(
self.hass, 'binary_sensor.test_id', 'on', start, end, None,
'time', 'Test')
sensor2 = HistoryStatsSensor(
self.hass, 'unknown.id', 'on', start, end, None, 'time', 'Test')
sensor3 = HistoryStatsSensor(
self.hass, 'binary_sensor.test_id', 'on', start, end, None,
'count', 'test')
sensor4 = HistoryStatsSensor(
self.hass, 'binary_sensor.test_id', 'on', start, end, None,
'ratio', 'test')
self.assertEqual(sensor1._type, 'time')
self.assertEqual(sensor3._type, 'count')
self.assertEqual(sensor4._type, 'ratio')
with patch('homeassistant.components.history.'
'state_changes_during_period', return_value=fake_states):
with patch('homeassistant.components.history.get_state',
return_value=None):
sensor1.update()
sensor2.update()
sensor3.update()
sensor4.update()
self.assertEqual(sensor1.state, 0.5)
self.assertEqual(sensor2.state, None)
self.assertEqual(sensor3.state, 2)
self.assertEqual(sensor4.state, 50)
def test_wrong_date(self):
"""Test when start or end value is not a timestamp or a date."""
good = Template('{{ now() }}', self.hass)
bad = Template('{{ TEST }}', self.hass)
sensor1 = HistoryStatsSensor(
self.hass, 'test', 'on', good, bad, None, 'time', 'Test')
sensor2 = HistoryStatsSensor(
self.hass, 'test', 'on', bad, good, None, 'time', 'Test')
before_update1 = sensor1._period
before_update2 = sensor2._period
sensor1.update_period()
sensor2.update_period()
self.assertEqual(before_update1, sensor1._period)
self.assertEqual(before_update2, sensor2._period)
def test_wrong_duration(self):
"""Test when duration value is not a timedelta."""
self.init_recorder()
config = {
'history': {
},
'sensor': {
'platform': 'history_stats',
'entity_id': 'binary_sensor.test_id',
'name': 'Test',
'state': 'on',
'start': '{{ now() }}',
'duration': 'TEST',
}
}
setup_component(self.hass, 'sensor', config)
self.assertEqual(self.hass.states.get('sensor.test'), None)
self.assertRaises(TypeError,
setup_component(self.hass, 'sensor', config))
def test_bad_template(self):
"""Test Exception when the template cannot be parsed."""
bad = Template('{{ x - 12 }}', self.hass) # x is undefined
duration = '01:00'
sensor1 = HistoryStatsSensor(
self.hass, 'test', 'on', bad, None, duration, 'time', 'Test')
sensor2 = HistoryStatsSensor(
self.hass, 'test', 'on', None, bad, duration, 'time', 'Test')
before_update1 = sensor1._period
before_update2 = sensor2._period
sensor1.update_period()
sensor2.update_period()
self.assertEqual(before_update1, sensor1._period)
self.assertEqual(before_update2, sensor2._period)
def test_not_enough_arguments(self):
"""Test config when not enough arguments provided."""
self.init_recorder()
config = {
'history': {
},
'sensor': {
'platform': 'history_stats',
'entity_id': 'binary_sensor.test_id',
'name': 'Test',
'state': 'on',
'start': '{{ now() }}',
}
}
setup_component(self.hass, 'sensor', config)
self.assertEqual(self.hass.states.get('sensor.test'), None)
self.assertRaises(TypeError,
setup_component(self.hass, 'sensor', config))
def test_too_many_arguments(self):
"""Test config when too many arguments provided."""
self.init_recorder()
config = {
'history': {
},
'sensor': {
'platform': 'history_stats',
'entity_id': 'binary_sensor.test_id',
'name': 'Test',
'state': 'on',
'start': '{{ as_timestamp(now()) - 3600 }}',
'end': '{{ now() }}',
'duration': '01:00',
}
}
setup_component(self.hass, 'sensor', config)
self.assertEqual(self.hass.states.get('sensor.test'), None)
self.assertRaises(TypeError,
setup_component(self.hass, 'sensor', config))
def init_recorder(self):
"""Initialize the recorder."""
init_recorder_component(self.hass)
self.hass.start()
|
py | 1a2ec0ab7acb5ec52077bad2e5cfd184845ed972 | """
This module implements some special functions that commonly appear in
combinatorial contexts (e.g. in power series); in particular,
sequences of rational numbers such as Bernoulli and Fibonacci numbers.
Factorials, binomial coefficients and related functions are located in
the separate 'factorials' module.
"""
from __future__ import print_function, division
from sympy.core import S, Symbol, Rational, Integer, Add, Dummy
from sympy.core.compatibility import as_int, SYMPY_INTS, range
from sympy.core.cache import cacheit
from sympy.core.function import Function, expand_mul
from sympy.core.numbers import E, pi
from sympy.core.relational import LessThan, StrictGreaterThan
from sympy.functions.combinatorial.factorials import binomial, factorial
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.trigonometric import sin, cos, cot
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.utilities.memoization import recurrence_memo
from mpmath import bernfrac, workprec
from mpmath.libmp import ifib as _ifib
def _product(a, b):
p = 1
for k in range(a, b + 1):
p *= k
return p
# Dummy symbol used for computing polynomial sequences
_sym = Symbol('x')
_symbols = Function('x')
#----------------------------------------------------------------------------#
# #
# Fibonacci numbers #
# #
#----------------------------------------------------------------------------#
class fibonacci(Function):
r"""
Fibonacci numbers / Fibonacci polynomials
The Fibonacci numbers are the integer sequence defined by the
initial terms F_0 = 0, F_1 = 1 and the two-term recurrence
relation F_n = F_{n-1} + F_{n-2}. This definition
extended to arbitrary real and complex arguments using
the formula
.. math :: F_z = \frac{\phi^z - \cos(\pi z) \phi^{-z}}{\sqrt 5}
The Fibonacci polynomials are defined by F_1(x) = 1,
F_2(x) = x, and F_n(x) = x*F_{n-1}(x) + F_{n-2}(x) for n > 2.
For all positive integers n, F_n(1) = F_n.
* fibonacci(n) gives the nth Fibonacci number, F_n
* fibonacci(n, x) gives the nth Fibonacci polynomial in x, F_n(x)
Examples
========
>>> from sympy import fibonacci, Symbol
>>> [fibonacci(x) for x in range(11)]
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
>>> fibonacci(5, Symbol('t'))
t**4 + 3*t**2 + 1
References
==========
.. [1] http://en.wikipedia.org/wiki/Fibonacci_number
.. [2] http://mathworld.wolfram.com/FibonacciNumber.html
See Also
========
bell, bernoulli, catalan, euler, harmonic, lucas
"""
@staticmethod
def _fib(n):
return _ifib(n)
@staticmethod
@recurrence_memo([None, S.One, _sym])
def _fibpoly(n, prev):
return (prev[-2] + _sym*prev[-1]).expand()
@classmethod
def eval(cls, n, sym=None):
if n is S.Infinity:
return S.Infinity
if n.is_Integer:
n = int(n)
if n < 0:
return S.NegativeOne**(n + 1) * fibonacci(-n)
if sym is None:
return Integer(cls._fib(n))
else:
if n < 1:
raise ValueError("Fibonacci polynomials are defined "
"only for positive integer indices.")
return cls._fibpoly(n).subs(_sym, sym)
def _eval_rewrite_as_sqrt(self, n):
return 2**(-n)*sqrt(5)*((1 + sqrt(5))**n - (-sqrt(5) + 1)**n) / 5
def _eval_rewrite_as_GoldenRatio(self,n):
return (S.GoldenRatio**n - 1/(-S.GoldenRatio)**n)/(2*S.GoldenRatio-1)
class lucas(Function):
"""
Lucas numbers
Lucas numbers satisfy a recurrence relation similar to that of
the Fibonacci sequence, in which each term is the sum of the
preceding two. They are generated by choosing the initial
values L_0 = 2 and L_1 = 1.
* lucas(n) gives the nth Lucas number
Examples
========
>>> from sympy import lucas
>>> [lucas(x) for x in range(11)]
[2, 1, 3, 4, 7, 11, 18, 29, 47, 76, 123]
References
==========
.. [1] http://en.wikipedia.org/wiki/Lucas_number
.. [2] http://mathworld.wolfram.com/LucasNumber.html
See Also
========
bell, bernoulli, catalan, euler, fibonacci, harmonic
"""
@classmethod
def eval(cls, n):
if n is S.Infinity:
return S.Infinity
if n.is_Integer:
return fibonacci(n + 1) + fibonacci(n - 1)
def _eval_rewrite_as_sqrt(self, n):
return 2**(-n)*((1 + sqrt(5))**n + (-sqrt(5) + 1)**n)
#----------------------------------------------------------------------------#
# #
# Bernoulli numbers #
# #
#----------------------------------------------------------------------------#
class bernoulli(Function):
r"""
Bernoulli numbers / Bernoulli polynomials
The Bernoulli numbers are a sequence of rational numbers
defined by B_0 = 1 and the recursive relation (n > 0)::
n
___
\ / n + 1 \
0 = ) | | * B .
/___ \ k / k
k = 0
They are also commonly defined by their exponential generating
function, which is x/(exp(x) - 1). For odd indices > 1, the
Bernoulli numbers are zero.
The Bernoulli polynomials satisfy the analogous formula::
n
___
\ / n \ n-k
B (x) = ) | | * B * x .
n /___ \ k / k
k = 0
Bernoulli numbers and Bernoulli polynomials are related as
B_n(0) = B_n.
We compute Bernoulli numbers using Ramanujan's formula::
/ n + 3 \
B = (A(n) - S(n)) / | |
n \ n /
where A(n) = (n+3)/3 when n = 0 or 2 (mod 6), A(n) = -(n+3)/6
when n = 4 (mod 6), and::
[n/6]
___
\ / n + 3 \
S(n) = ) | | * B
/___ \ n - 6*k / n-6*k
k = 1
This formula is similar to the sum given in the definition, but
cuts 2/3 of the terms. For Bernoulli polynomials, we use the
formula in the definition.
* bernoulli(n) gives the nth Bernoulli number, B_n
* bernoulli(n, x) gives the nth Bernoulli polynomial in x, B_n(x)
Examples
========
>>> from sympy import bernoulli
>>> [bernoulli(n) for n in range(11)]
[1, -1/2, 1/6, 0, -1/30, 0, 1/42, 0, -1/30, 0, 5/66]
>>> bernoulli(1000001)
0
References
==========
.. [1] http://en.wikipedia.org/wiki/Bernoulli_number
.. [2] http://en.wikipedia.org/wiki/Bernoulli_polynomial
.. [3] http://mathworld.wolfram.com/BernoulliNumber.html
.. [4] http://mathworld.wolfram.com/BernoulliPolynomial.html
See Also
========
bell, catalan, euler, fibonacci, harmonic, lucas
"""
# Calculates B_n for positive even n
@staticmethod
def _calc_bernoulli(n):
s = 0
a = int(binomial(n + 3, n - 6))
for j in range(1, n//6 + 1):
s += a * bernoulli(n - 6*j)
# Avoid computing each binomial coefficient from scratch
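            # binomial(n + 3, n - 6*(j + 1)) equals binomial(n + 3, n - 6*j)
            # times product(n - 6*j - 5 .. n - 6*j) / product(6*j + 4 .. 6*j + 9)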
a *= _product(n - 6 - 6*j + 1, n - 6*j)
a //= _product(6*j + 4, 6*j + 9)
if n % 6 == 4:
s = -Rational(n + 3, 6) - s
else:
s = Rational(n + 3, 3) - s
return s / binomial(n + 3, n)
# We implement a specialized memoization scheme to handle each
# case modulo 6 separately
_cache = {0: S.One, 2: Rational(1, 6), 4: Rational(-1, 30)}
_highest = {0: 0, 2: 2, 4: 4}
@classmethod
def eval(cls, n, sym=None):
if n.is_Number:
if n.is_Integer and n.is_nonnegative:
if n is S.Zero:
return S.One
elif n is S.One:
if sym is None:
return -S.Half
else:
return sym - S.Half
# Bernoulli numbers
elif sym is None:
if n.is_odd:
return S.Zero
n = int(n)
# Use mpmath for enormous Bernoulli numbers
if n > 500:
p, q = bernfrac(n)
return Rational(int(p), int(q))
case = n % 6
highest_cached = cls._highest[case]
if n <= highest_cached:
return cls._cache[n]
# To avoid excessive recursion when, say, bernoulli(1000) is
# requested, calculate and cache the entire sequence ... B_988,
# B_994, B_1000 in increasing order
for i in range(highest_cached + 6, n + 6, 6):
b = cls._calc_bernoulli(i)
cls._cache[i] = b
cls._highest[case] = i
return b
# Bernoulli polynomials
else:
n, result = int(n), []
for k in range(n + 1):
result.append(binomial(n, k)*cls(k)*sym**(n - k))
return Add(*result)
else:
raise ValueError("Bernoulli numbers are defined only"
" for nonnegative integer indices.")
if sym is None:
if n.is_odd and (n - 1).is_positive:
return S.Zero
#----------------------------------------------------------------------------#
# #
# Bell numbers #
# #
#----------------------------------------------------------------------------#
class bell(Function):
r"""
Bell numbers / Bell polynomials
The Bell numbers satisfy `B_0 = 1` and
.. math:: B_n = \sum_{k=0}^{n-1} \binom{n-1}{k} B_k.
They are also given by:
.. math:: B_n = \frac{1}{e} \sum_{k=0}^{\infty} \frac{k^n}{k!}.
The Bell polynomials are given by `B_0(x) = 1` and
    .. math:: B_n(x) = x \sum_{k=1}^{n} \binom{n-1}{k-1} B_{k-1}(x).
    The second kind of Bell polynomials (sometimes called "partial" Bell
polynomials or incomplete Bell polynomials) are defined as
.. math:: B_{n,k}(x_1, x_2,\dotsc x_{n-k+1}) =
            \sum_{j_1+j_2+j_3+\dotsb=k \atop j_1+2j_2+3j_3+\dotsb=n}
\frac{n!}{j_1!j_2!\dotsb j_{n-k+1}!}
\left(\frac{x_1}{1!} \right)^{j_1}
\left(\frac{x_2}{2!} \right)^{j_2} \dotsb
\left(\frac{x_{n-k+1}}{(n-k+1)!} \right) ^{j_{n-k+1}}.
* bell(n) gives the `n^{th}` Bell number, `B_n`.
* bell(n, x) gives the `n^{th}` Bell polynomial, `B_n(x)`.
* bell(n, k, (x1, x2, ...)) gives Bell polynomials of the second kind,
`B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})`.
Notes
=====
Not to be confused with Bernoulli numbers and Bernoulli polynomials,
which use the same notation.
Examples
========
>>> from sympy import bell, Symbol, symbols
>>> [bell(n) for n in range(11)]
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975]
>>> bell(30)
846749014511809332450147
>>> bell(4, Symbol('t'))
t**4 + 6*t**3 + 7*t**2 + t
>>> bell(6, 2, symbols('x:6')[1:])
6*x1*x5 + 15*x2*x4 + 10*x3**2
References
==========
.. [1] http://en.wikipedia.org/wiki/Bell_number
.. [2] http://mathworld.wolfram.com/BellNumber.html
.. [3] http://mathworld.wolfram.com/BellPolynomial.html
See Also
========
bernoulli, catalan, euler, fibonacci, harmonic, lucas
"""
@staticmethod
@recurrence_memo([1, 1])
def _bell(n, prev):
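        # B_n = sum(binomial(n - 1, k)*B_k for k in range(n)); ``a`` is kept
        # equal to binomial(n - 1, k) by the incremental update below.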
s = 1
a = 1
for k in range(1, n):
a = a * (n - k) // k
s += a * prev[k]
return s
@staticmethod
@recurrence_memo([S.One, _sym])
def _bell_poly(n, prev):
s = 1
a = 1
for k in range(2, n + 1):
a = a * (n - k + 1) // (k - 1)
s += a * prev[k - 1]
return expand_mul(_sym * s)
@staticmethod
def _bell_incomplete_poly(n, k, symbols):
r"""
The second kind of Bell polynomials (incomplete Bell polynomials).
Calculated by recurrence formula:
.. math:: B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1}) =
\sum_{m=1}^{n-k+1}
            x_m \binom{n-1}{m-1} B_{n-m,k-1}(x_1, x_2, \dotsc, x_{n-m-k})
where
B_{0,0} = 1;
B_{n,0} = 0; for n>=1
B_{0,k} = 0; for k>=1
"""
if (n == 0) and (k == 0):
return S.One
elif (n == 0) or (k == 0):
return S.Zero
s = S.Zero
a = S.One
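        # ``a`` is kept equal to binomial(n - 1, m - 1) by the update at the
        # end of each pass through the loop.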
for m in range(1, n - k + 2):
s += a * bell._bell_incomplete_poly(
n - m, k - 1, symbols) * symbols[m - 1]
a = a * (n - m) / m
return expand_mul(s)
@classmethod
def eval(cls, n, k_sym=None, symbols=None):
if n.is_Integer and n.is_nonnegative:
if k_sym is None:
return Integer(cls._bell(int(n)))
elif symbols is None:
return cls._bell_poly(int(n)).subs(_sym, k_sym)
else:
r = cls._bell_incomplete_poly(int(n), int(k_sym), symbols)
return r
def _eval_rewrite_as_Sum(self, n, k_sym=None, symbols=None):
from sympy import Sum
if (k_sym is not None) or (symbols is not None):
return self
# Dobinski's formula
if not n.is_nonnegative:
return self
k = Dummy('k', integer=True, nonnegative=True)
return 1 / E * Sum(k**n / factorial(k), (k, 0, S.Infinity))
#----------------------------------------------------------------------------#
# #
# Harmonic numbers #
# #
#----------------------------------------------------------------------------#
class harmonic(Function):
r"""
Harmonic numbers
The nth harmonic number is given by `\operatorname{H}_{n} =
1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n}`.
More generally:
.. math:: \operatorname{H}_{n,m} = \sum_{k=1}^{n} \frac{1}{k^m}
As `n \rightarrow \infty`, `\operatorname{H}_{n,m} \rightarrow \zeta(m)`,
the Riemann zeta function.
* ``harmonic(n)`` gives the nth harmonic number, `\operatorname{H}_n`
* ``harmonic(n, m)`` gives the nth generalized harmonic number
of order `m`, `\operatorname{H}_{n,m}`, where
``harmonic(n) == harmonic(n, 1)``
Examples
========
>>> from sympy import harmonic, oo
>>> [harmonic(n) for n in range(6)]
[0, 1, 3/2, 11/6, 25/12, 137/60]
>>> [harmonic(n, 2) for n in range(6)]
[0, 1, 5/4, 49/36, 205/144, 5269/3600]
>>> harmonic(oo, 2)
pi**2/6
>>> from sympy import Symbol, Sum
>>> n = Symbol("n")
>>> harmonic(n).rewrite(Sum)
Sum(1/_k, (_k, 1, n))
We can evaluate harmonic numbers for all integral and positive
rational arguments:
>>> from sympy import S, expand_func, simplify
>>> harmonic(8)
761/280
>>> harmonic(11)
83711/27720
>>> H = harmonic(1/S(3))
>>> H
harmonic(1/3)
>>> He = expand_func(H)
>>> He
-log(6) - sqrt(3)*pi/6 + 2*Sum(log(sin(_k*pi/3))*cos(2*_k*pi/3), (_k, 1, 1))
+ 3*Sum(1/(3*_k + 1), (_k, 0, 0))
>>> He.doit()
-log(6) - sqrt(3)*pi/6 - log(sqrt(3)/2) + 3
>>> H = harmonic(25/S(7))
>>> He = simplify(expand_func(H).doit())
>>> He
log(sin(pi/7)**(-2*cos(pi/7))*sin(2*pi/7)**(2*cos(16*pi/7))*cos(pi/14)**(-2*sin(pi/14))/14)
+ pi*tan(pi/14)/2 + 30247/9900
>>> He.n(40)
1.983697455232980674869851942390639915940
>>> harmonic(25/S(7)).n(40)
1.983697455232980674869851942390639915940
We can rewrite harmonic numbers in terms of polygamma functions:
>>> from sympy import digamma, polygamma
>>> m = Symbol("m")
>>> harmonic(n).rewrite(digamma)
polygamma(0, n + 1) + EulerGamma
>>> harmonic(n).rewrite(polygamma)
polygamma(0, n + 1) + EulerGamma
>>> harmonic(n,3).rewrite(polygamma)
polygamma(2, n + 1)/2 - polygamma(2, 1)/2
>>> harmonic(n,m).rewrite(polygamma)
(-1)**m*(polygamma(m - 1, 1) - polygamma(m - 1, n + 1))/factorial(m - 1)
Integer offsets in the argument can be pulled out:
>>> from sympy import expand_func
>>> expand_func(harmonic(n+4))
harmonic(n) + 1/(n + 4) + 1/(n + 3) + 1/(n + 2) + 1/(n + 1)
>>> expand_func(harmonic(n-4))
harmonic(n) - 1/(n - 1) - 1/(n - 2) - 1/(n - 3) - 1/n
Some limits can be computed as well:
>>> from sympy import limit, oo
>>> limit(harmonic(n), n, oo)
oo
>>> limit(harmonic(n, 2), n, oo)
pi**2/6
>>> limit(harmonic(n, 3), n, oo)
-polygamma(2, 1)/2
However we can not compute the general relation yet:
>>> limit(harmonic(n, m), n, oo)
harmonic(oo, m)
which equals ``zeta(m)`` for ``m > 1``.
References
==========
.. [1] http://en.wikipedia.org/wiki/Harmonic_number
.. [2] http://functions.wolfram.com/GammaBetaErf/HarmonicNumber/
.. [3] http://functions.wolfram.com/GammaBetaErf/HarmonicNumber2/
See Also
========
bell, bernoulli, catalan, euler, fibonacci, lucas
"""
# Generate one memoized Harmonic number-generating function for each
# order and store it in a dictionary
_functions = {}
@classmethod
def eval(cls, n, m=None):
from sympy import zeta
if m is S.One:
return cls(n)
if m is None:
m = S.One
if m.is_zero:
return n
if n is S.Infinity and m.is_Number:
# TODO: Fix for symbolic values of m
if m.is_negative:
return S.NaN
elif LessThan(m, S.One):
return S.Infinity
elif StrictGreaterThan(m, S.One):
return zeta(m)
else:
return cls
if n.is_Integer and n.is_nonnegative and m.is_Integer:
if n == 0:
return S.Zero
            if m not in cls._functions:
@recurrence_memo([0])
def f(n, prev):
return prev[-1] + S.One / n**m
cls._functions[m] = f
return cls._functions[m](int(n))
def _eval_rewrite_as_polygamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return S.NegativeOne**m/factorial(m - 1) * (polygamma(m - 1, 1) - polygamma(m - 1, n + 1))
def _eval_rewrite_as_digamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return self.rewrite(polygamma)
def _eval_rewrite_as_trigamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return self.rewrite(polygamma)
def _eval_rewrite_as_Sum(self, n, m=None):
from sympy import Sum
k = Dummy("k", integer=True)
if m is None:
m = S.One
return Sum(k**(-m), (k, 1, n))
def _eval_expand_func(self, **hints):
from sympy import Sum
n = self.args[0]
m = self.args[1] if len(self.args) == 2 else 1
if m == S.One:
if n.is_Add:
off = n.args[0]
nnew = n - off
if off.is_Integer and off.is_positive:
result = [S.One/(nnew + i) for i in range(off, 0, -1)] + [harmonic(nnew)]
return Add(*result)
elif off.is_Integer and off.is_negative:
result = [-S.One/(nnew + i) for i in range(0, off, -1)] + [harmonic(nnew)]
return Add(*result)
if n.is_Rational:
# Expansions for harmonic numbers at general rational arguments (u + p/q)
# Split n as u + p/q with p < q
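                # The closed form below follows from Gauss's digamma theorem,
                # using H_x = polygamma(0, x + 1) + EulerGamma.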
p, q = n.as_numer_denom()
u = p // q
p = p - u * q
if u.is_nonnegative and p.is_positive and q.is_positive and p < q:
k = Dummy("k")
t1 = q * Sum(1 / (q * k + p), (k, 0, u))
t2 = 2 * Sum(cos((2 * pi * p * k) / S(q)) *
log(sin((pi * k) / S(q))),
(k, 1, floor((q - 1) / S(2))))
t3 = (pi / 2) * cot((pi * p) / q) + log(2 * q)
return t1 + t2 - t3
return self
def _eval_rewrite_as_tractable(self, n, m=1):
from sympy import polygamma
return self.rewrite(polygamma).rewrite("tractable", deep=True)
def _eval_evalf(self, prec):
from sympy import polygamma
if all(i.is_number for i in self.args):
return self.rewrite(polygamma)._eval_evalf(prec)
#----------------------------------------------------------------------------#
# #
# Euler numbers #
# #
#----------------------------------------------------------------------------#
class euler(Function):
r"""
Euler numbers
The euler numbers are given by::
2*n+1 k
___ ___ j 2*n+1
\ \ / k \ (-1) * (k-2*j)
E = I ) ) | | --------------------
2n /___ /___ \ j / k k
k = 1 j = 0 2 * I * k
E = 0
2n+1
* euler(n) gives the n-th Euler number, E_n
Examples
========
>>> from sympy import Symbol
>>> from sympy.functions import euler
>>> [euler(n) for n in range(10)]
[1, 0, -1, 0, 5, 0, -61, 0, 1385, 0]
>>> n = Symbol("n")
>>> euler(n+2*n)
euler(3*n)
References
==========
.. [1] http://en.wikipedia.org/wiki/Euler_numbers
.. [2] http://mathworld.wolfram.com/EulerNumber.html
.. [3] http://en.wikipedia.org/wiki/Alternating_permutation
.. [4] http://mathworld.wolfram.com/AlternatingPermutation.html
See Also
========
bell, bernoulli, catalan, fibonacci, harmonic, lucas
"""
@classmethod
def eval(cls, m):
if m.is_odd:
return S.Zero
if m.is_Integer and m.is_nonnegative:
from mpmath import mp
m = m._to_mpmath(mp.prec)
res = mp.eulernum(m, exact=True)
return Integer(res)
def _eval_rewrite_as_Sum(self, arg):
from sympy import Sum
if arg.is_even:
k = Dummy("k", integer=True)
j = Dummy("j", integer=True)
n = self.args[0] / 2
Em = (S.ImaginaryUnit * Sum(Sum(binomial(k, j) * ((-1)**j * (k - 2*j)**(2*n + 1)) /
(2**k*S.ImaginaryUnit**k * k), (j, 0, k)), (k, 1, 2*n + 1)))
return Em
def _eval_evalf(self, prec):
m = self.args[0]
if m.is_Integer and m.is_nonnegative:
from mpmath import mp
from sympy import Expr
m = m._to_mpmath(prec)
with workprec(prec):
res = mp.eulernum(m)
return Expr._from_mpmath(res, prec)
#----------------------------------------------------------------------------#
# #
# Catalan numbers #
# #
#----------------------------------------------------------------------------#
class catalan(Function):
r"""
Catalan numbers
The n-th catalan number is given by::
1 / 2*n \
C = ----- | |
n n + 1 \ n /
* catalan(n) gives the n-th Catalan number, C_n
Examples
========
>>> from sympy import (Symbol, binomial, gamma, hyper, polygamma,
... catalan, diff, combsimp, Rational, I)
>>> [ catalan(i) for i in range(1,10) ]
[1, 2, 5, 14, 42, 132, 429, 1430, 4862]
>>> n = Symbol("n", integer=True)
>>> catalan(n)
catalan(n)
Catalan numbers can be transformed into several other, identical
expressions involving other mathematical functions
>>> catalan(n).rewrite(binomial)
binomial(2*n, n)/(n + 1)
>>> catalan(n).rewrite(gamma)
4**n*gamma(n + 1/2)/(sqrt(pi)*gamma(n + 2))
>>> catalan(n).rewrite(hyper)
hyper((-n + 1, -n), (2,), 1)
For some non-integer values of n we can get closed form
expressions by rewriting in terms of gamma functions:
>>> catalan(Rational(1,2)).rewrite(gamma)
8/(3*pi)
We can differentiate the Catalan numbers C(n) interpreted as a
    continuous real function in n:
>>> diff(catalan(n), n)
(polygamma(0, n + 1/2) - polygamma(0, n + 2) + log(4))*catalan(n)
As a more advanced example consider the following ratio
between consecutive numbers:
>>> combsimp((catalan(n + 1)/catalan(n)).rewrite(binomial))
2*(2*n + 1)/(n + 2)
The Catalan numbers can be generalized to complex numbers:
>>> catalan(I).rewrite(gamma)
4**I*gamma(1/2 + I)/(sqrt(pi)*gamma(2 + I))
and evaluated with arbitrary precision:
>>> catalan(I).evalf(20)
0.39764993382373624267 - 0.020884341620842555705*I
References
==========
.. [1] http://en.wikipedia.org/wiki/Catalan_number
.. [2] http://mathworld.wolfram.com/CatalanNumber.html
.. [3] http://functions.wolfram.com/GammaBetaErf/CatalanNumber/
.. [4] http://geometer.org/mathcircles/catalan.pdf
See Also
========
bell, bernoulli, euler, fibonacci, harmonic, lucas
sympy.functions.combinatorial.factorials.binomial
"""
@classmethod
def eval(cls, n):
from sympy import gamma
if (n.is_Integer and n.is_nonnegative) or \
(n.is_noninteger and n.is_negative):
return 4**n*gamma(n + S.Half)/(gamma(S.Half)*gamma(n + 2))
if (n.is_integer and n.is_negative):
if (n + 1).is_negative:
return S.Zero
if (n + 1).is_zero:
return -S.Half
def fdiff(self, argindex=1):
from sympy import polygamma, log
n = self.args[0]
return catalan(n)*(polygamma(0, n + Rational(1, 2)) - polygamma(0, n + 2) + log(4))
def _eval_rewrite_as_binomial(self, n):
return binomial(2*n, n)/(n + 1)
def _eval_rewrite_as_factorial(self, n):
return factorial(2*n) / (factorial(n+1) * factorial(n))
def _eval_rewrite_as_gamma(self, n):
from sympy import gamma
# The gamma function allows to generalize Catalan numbers to complex n
return 4**n*gamma(n + S.Half)/(gamma(S.Half)*gamma(n + 2))
def _eval_rewrite_as_hyper(self, n):
from sympy import hyper
return hyper([1 - n, -n], [2], 1)
def _eval_rewrite_as_Product(self, n):
from sympy import Product
if not (n.is_integer and n.is_nonnegative):
return self
k = Dummy('k', integer=True, positive=True)
return Product((n + k) / k, (k, 2, n))
def _eval_evalf(self, prec):
from sympy import gamma
if self.args[0].is_number:
return self.rewrite(gamma)._eval_evalf(prec)
#----------------------------------------------------------------------------#
# #
# Genocchi numbers #
# #
#----------------------------------------------------------------------------#
class genocchi(Function):
r"""
Genocchi numbers
The Genocchi numbers are a sequence of integers G_n that satisfy the
relation::
oo
____
\ `
2*t \ n
------ = \ G_n*t
t / ------
e + 1 / n!
/___,
n = 1
Examples
========
>>> from sympy import Symbol
>>> from sympy.functions import genocchi
>>> [genocchi(n) for n in range(1, 9)]
[1, -1, 0, 1, 0, -3, 0, 17]
>>> n = Symbol('n', integer=True, positive=True)
>>> genocchi(2 * n + 1)
0
References
==========
.. [1] https://en.wikipedia.org/wiki/Genocchi_number
.. [2] http://mathworld.wolfram.com/GenocchiNumber.html
See Also
========
bell, bernoulli, catalan, euler, fibonacci, harmonic, lucas
"""
@classmethod
def eval(cls, n):
if n.is_Number:
if (not n.is_Integer) or n.is_nonpositive:
raise ValueError("Genocchi numbers are defined only for " +
"positive integers")
return 2 * (1 - S(2) ** n) * bernoulli(n)
if n.is_odd and (n - 1).is_positive:
return S.Zero
if (n - 1).is_zero:
return S.One
def _eval_rewrite_as_bernoulli(self, n):
if n.is_integer and n.is_nonnegative:
return (1 - S(2) ** n) * bernoulli(n) * 2
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_positive:
return True
def _eval_is_negative(self):
n = self.args[0]
if n.is_integer and n.is_positive:
if n.is_odd:
return False
return (n / 2).is_odd
def _eval_is_positive(self):
n = self.args[0]
if n.is_integer and n.is_positive:
if n.is_odd:
return fuzzy_not((n - 1).is_positive)
return (n / 2).is_even
def _eval_is_even(self):
n = self.args[0]
if n.is_integer and n.is_positive:
if n.is_even:
return False
return (n - 1).is_positive
def _eval_is_odd(self):
n = self.args[0]
if n.is_integer and n.is_positive:
if n.is_even:
return True
return fuzzy_not((n - 1).is_positive)
def _eval_is_prime(self):
n = self.args[0]
# only G_6 = -3 and G_8 = 17 are prime,
# but SymPy does not consider negatives as prime
# so only n=8 is tested
return (n - 8).is_zero
#######################################################################
###
### Functions for enumerating partitions, permutations and combinations
###
#######################################################################
class _MultisetHistogram(tuple):
pass
_N = -1
_ITEMS = -2
_M = slice(None, _ITEMS)
def _multiset_histogram(n):
"""Return tuple used in permutation and combination counting. Input
is a dictionary giving items with counts as values or a sequence of
items (which need not be sorted).
The data is stored in a class deriving from tuple so it is easily
recognized and so it can be converted easily to a list.
"""
if type(n) is dict: # item: count
if not all(isinstance(v, int) and v >= 0 for v in n.values()):
raise ValueError
tot = sum(n.values())
items = sum(1 for k in n if n[k] > 0)
return _MultisetHistogram([n[k] for k in n if n[k] > 0] + [items, tot])
else:
n = list(n)
s = set(n)
if len(s) == len(n):
n = [1]*len(n)
n.extend([len(n), len(n)])
return _MultisetHistogram(n)
m = dict(zip(s, range(len(s))))
d = dict(zip(range(len(s)), [0]*len(s)))
for i in n:
d[m[i]] += 1
return _multiset_histogram(d)
def nP(n, k=None, replacement=False):
"""Return the number of permutations of ``n`` items taken ``k`` at a time.
Possible values for ``n``::
integer - set of length ``n``
sequence - converted to a multiset internally
multiset - {element: multiplicity}
If ``k`` is None then the total of all permutations of length 0
through the number of items represented by ``n`` will be returned.
If ``replacement`` is True then a given item can appear more than once
in the ``k`` items. (For example, for 'ab' permutations of 2 would
include 'aa', 'ab', 'ba' and 'bb'.) The multiplicity of elements in
``n`` is ignored when ``replacement`` is True but the total number
of elements is considered since no element can appear more times than
the number of elements in ``n``.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nP
>>> from sympy.utilities.iterables import multiset_permutations, multiset
>>> nP(3, 2)
6
>>> nP('abc', 2) == nP(multiset('abc'), 2) == 6
True
>>> nP('aab', 2)
3
>>> nP([1, 2, 2], 2)
3
>>> [nP(3, i) for i in range(4)]
[1, 3, 6, 6]
>>> nP(3) == sum(_)
True
When ``replacement`` is True, each item can have multiplicity
equal to the length represented by ``n``:
>>> nP('aabc', replacement=True)
121
>>> [len(list(multiset_permutations('aaaabbbbcccc', i))) for i in range(5)]
[1, 3, 9, 27, 81]
>>> sum(_)
121
References
==========
.. [1] http://en.wikipedia.org/wiki/Permutation
See Also
========
sympy.utilities.iterables.multiset_permutations
"""
try:
n = as_int(n)
except ValueError:
return Integer(_nP(_multiset_histogram(n), k, replacement))
return Integer(_nP(n, k, replacement))
@cacheit
def _nP(n, k=None, replacement=False):
from sympy.functions.combinatorial.factorials import factorial
from sympy.core.mul import prod
if k == 0:
return 1
if isinstance(n, SYMPY_INTS): # n different items
# assert n >= 0
if k is None:
return sum(_nP(n, i, replacement) for i in range(n + 1))
elif replacement:
return n**k
elif k > n:
return 0
elif k == n:
return factorial(k)
elif k == 1:
return n
else:
# assert k >= 0
return _product(n - k + 1, n)
elif isinstance(n, _MultisetHistogram):
if k is None:
return sum(_nP(n, i, replacement) for i in range(n[_N] + 1))
elif replacement:
return n[_ITEMS]**k
elif k == n[_N]:
return factorial(k)/prod([factorial(i) for i in n[_M] if i > 1])
elif k > n[_N]:
return 0
elif k == 1:
return n[_ITEMS]
else:
# assert k >= 0
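            # Recurse: let each distinct remaining item occupy the first
            # position, then count permutations of the other k - 1 positions.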
tot = 0
n = list(n)
for i in range(len(n[_M])):
if not n[i]:
continue
n[_N] -= 1
if n[i] == 1:
n[i] = 0
n[_ITEMS] -= 1
tot += _nP(_MultisetHistogram(n), k - 1)
n[_ITEMS] += 1
n[i] = 1
else:
n[i] -= 1
tot += _nP(_MultisetHistogram(n), k - 1)
n[i] += 1
n[_N] += 1
return tot
@cacheit
def _AOP_product(n):
"""for n = (m1, m2, .., mk) return the coefficients of the polynomial,
prod(sum(x**i for i in range(nj + 1)) for nj in n); i.e. the coefficients
    of the product of AOPs (all-one polynomials) of order given in n. The
resulting coefficient corresponding to x**r is the number of r-length
combinations of sum(n) elements with multiplicities given in n.
The coefficients are given as a default dictionary (so if a query is made
for a key that is not present, 0 will be returned).
Examples
========
>>> from sympy.functions.combinatorial.numbers import _AOP_product
>>> from sympy.abc import x
>>> n = (2, 2, 3) # e.g. aabbccc
>>> prod = ((x**2 + x + 1)*(x**2 + x + 1)*(x**3 + x**2 + x + 1)).expand()
>>> c = _AOP_product(n); dict(c)
{0: 1, 1: 3, 2: 6, 3: 8, 4: 8, 5: 6, 6: 3, 7: 1}
>>> [c[i] for i in range(8)] == [prod.coeff(x, i) for i in range(8)]
True
The generating poly used here is the same as that listed in
http://tinyurl.com/cep849r, but in a refactored form.
"""
from collections import defaultdict
n = list(n)
ord = sum(n)
need = (ord + 2)//2
rv = [1]*(n.pop() + 1)
rv.extend([0]*(need - len(rv)))
rv = rv[:need]
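    # The coefficients of a product of all-one polynomials are palindromic,
    # so only the first ``need`` of them are computed here; the rest are
    # recovered by mirroring after the loop below.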
while n:
ni = n.pop()
N = ni + 1
was = rv[:]
for i in range(1, min(N, len(rv))):
rv[i] += rv[i - 1]
for i in range(N, need):
rv[i] += rv[i - 1] - was[i - N]
rev = list(reversed(rv))
if ord % 2:
rv = rv + rev
else:
rv[-1:] = rev
d = defaultdict(int)
for i in range(len(rv)):
d[i] = rv[i]
return d
def nC(n, k=None, replacement=False):
"""Return the number of combinations of ``n`` items taken ``k`` at a time.
Possible values for ``n``::
integer - set of length ``n``
sequence - converted to a multiset internally
multiset - {element: multiplicity}
If ``k`` is None then the total of all combinations of length 0
through the number of items represented in ``n`` will be returned.
If ``replacement`` is True then a given item can appear more than once
in the ``k`` items. (For example, for 'ab' sets of 2 would include 'aa',
'ab', and 'bb'.) The multiplicity of elements in ``n`` is ignored when
``replacement`` is True but the total number of elements is considered
since no element can appear more times than the number of elements in
``n``.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nC
>>> from sympy.utilities.iterables import multiset_combinations
>>> nC(3, 2)
3
>>> nC('abc', 2)
3
>>> nC('aab', 2)
2
When ``replacement`` is True, each item can have multiplicity
equal to the length represented by ``n``:
>>> nC('aabc', replacement=True)
35
>>> [len(list(multiset_combinations('aaaabbbbcccc', i))) for i in range(5)]
[1, 3, 6, 10, 15]
>>> sum(_)
35
If there are ``k`` items with multiplicities ``m_1, m_2, ..., m_k``
    then the total of all combinations of length 0 through ``k`` is the
product, ``(m_1 + 1)*(m_2 + 1)*...*(m_k + 1)``. When the multiplicity
of each item is 1 (i.e., k unique items) then there are 2**k
combinations. For example, if there are 4 unique items, the total number
of combinations is 16:
>>> sum(nC(4, i) for i in range(5))
16
References
==========
.. [1] http://en.wikipedia.org/wiki/Combination
.. [2] http://tinyurl.com/cep849r
See Also
========
sympy.utilities.iterables.multiset_combinations
"""
from sympy.functions.combinatorial.factorials import binomial
from sympy.core.mul import prod
if isinstance(n, SYMPY_INTS):
if k is None:
if not replacement:
return 2**n
return sum(nC(n, i, replacement) for i in range(n + 1))
if k < 0:
raise ValueError("k cannot be negative")
if replacement:
return binomial(n + k - 1, k)
return binomial(n, k)
if isinstance(n, _MultisetHistogram):
N = n[_N]
if k is None:
if not replacement:
return prod(m + 1 for m in n[_M])
return sum(nC(n, i, replacement) for i in range(N + 1))
elif replacement:
return nC(n[_ITEMS], k, replacement)
# assert k >= 0
elif k in (1, N - 1):
return n[_ITEMS]
elif k in (0, N):
return 1
return _AOP_product(tuple(n[_M]))[k]
else:
return nC(_multiset_histogram(n), k, replacement)
@cacheit
def _stirling1(n, k):
if n == k == 0:
return S.One
if 0 in (n, k):
return S.Zero
n1 = n - 1
# some special values
if n == k:
return S.One
elif k == 1:
return factorial(n1)
elif k == n1:
return binomial(n, 2)
elif k == n - 2:
return (3*n - 1)*binomial(n, 3)/4
elif k == n - 3:
return binomial(n, 2)*binomial(n, 4)
# general recurrence
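    # unsigned first kind: s(n, k) = (n - 1)*s(n - 1, k) + s(n - 1, k - 1)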
return n1*_stirling1(n1, k) + _stirling1(n1, k - 1)
@cacheit
def _stirling2(n, k):
if n == k == 0:
return S.One
if 0 in (n, k):
return S.Zero
n1 = n - 1
# some special values
if k == n1:
return binomial(n, 2)
elif k == 2:
return 2**n1 - 1
# general recurrence
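    # second kind: S(n, k) = k*S(n - 1, k) + S(n - 1, k - 1)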
return k*_stirling2(n1, k) + _stirling2(n1, k - 1)
def stirling(n, k, d=None, kind=2, signed=False):
"""Return Stirling number S(n, k) of the first or second (default) kind.
The sum of all Stirling numbers of the second kind for k = 1
through n is bell(n). The recurrence relationship for these numbers
is::
{0} {n} {0} {n + 1} {n} { n }
{ } = 1; { } = { } = 0; { } = j*{ } + { }
{0} {0} {k} { k } {k} {k - 1}
where ``j`` is::
``n`` for Stirling numbers of the first kind
``-n`` for signed Stirling numbers of the first kind
``k`` for Stirling numbers of the second kind
The first kind of Stirling number counts the number of permutations of
``n`` distinct items that have ``k`` cycles; the second kind counts the
ways in which ``n`` distinct items can be partitioned into ``k`` parts.
If ``d`` is given, the "reduced Stirling number of the second kind" is
returned: ``S^{d}(n, k) = S(n - d + 1, k - d + 1)`` with ``n >= k >= d``.
(This counts the ways to partition ``n`` consecutive integers into
``k`` groups with no pairwise difference less than ``d``. See example
below.)
To obtain the signed Stirling numbers of the first kind, use keyword
``signed=True``. Using this keyword automatically sets ``kind`` to 1.
Examples
========
>>> from sympy.functions.combinatorial.numbers import stirling, bell
>>> from sympy.combinatorics import Permutation
>>> from sympy.utilities.iterables import multiset_partitions, permutations
First kind (unsigned by default):
>>> [stirling(6, i, kind=1) for i in range(7)]
[0, 120, 274, 225, 85, 15, 1]
>>> perms = list(permutations(range(4)))
>>> [sum(Permutation(p).cycles == i for p in perms) for i in range(5)]
[0, 6, 11, 6, 1]
>>> [stirling(4, i, kind=1) for i in range(5)]
[0, 6, 11, 6, 1]
First kind (signed):
>>> [stirling(4, i, signed=True) for i in range(5)]
[0, -6, 11, -6, 1]
Second kind:
>>> [stirling(10, i) for i in range(12)]
[0, 1, 511, 9330, 34105, 42525, 22827, 5880, 750, 45, 1, 0]
>>> sum(_) == bell(10)
True
>>> len(list(multiset_partitions(range(4), 2))) == stirling(4, 2)
True
Reduced second kind:
>>> from sympy import subsets, oo
>>> def delta(p):
... if len(p) == 1:
... return oo
... return min(abs(i[0] - i[1]) for i in subsets(p, 2))
>>> parts = multiset_partitions(range(5), 3)
>>> d = 2
>>> sum(1 for p in parts if all(delta(i) >= d for i in p))
7
>>> stirling(5, 3, 2)
7
References
==========
.. [1] http://en.wikipedia.org/wiki/Stirling_numbers_of_the_first_kind
.. [2] http://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
See Also
========
sympy.utilities.iterables.multiset_partitions
"""
# TODO: make this a class like bell()
n = as_int(n)
k = as_int(k)
if n < 0:
raise ValueError('n must be nonnegative')
if k > n:
return S.Zero
if d:
# assert k >= d
# kind is ignored -- only kind=2 is supported
return _stirling2(n - d + 1, k - d + 1)
elif signed:
# kind is ignored -- only kind=1 is supported
return (-1)**(n - k)*_stirling1(n, k)
if kind == 1:
return _stirling1(n, k)
elif kind == 2:
return _stirling2(n, k)
else:
        raise ValueError('kind must be 1 or 2, not %s' % kind)
@cacheit
def _nT(n, k):
"""Return the partitions of ``n`` items into ``k`` parts. This
is used by ``nT`` for the case when ``n`` is an integer."""
if k == 0:
return 1 if k == n else 0
return sum(_nT(n - k, j) for j in range(min(k, n - k) + 1))
def nT(n, k=None):
"""Return the number of ``k``-sized partitions of ``n`` items.
Possible values for ``n``::
integer - ``n`` identical items
sequence - converted to a multiset internally
multiset - {element: multiplicity}
Note: the convention for ``nT`` is different than that of ``nC`` and
``nP`` in that
here an integer indicates ``n`` *identical* items instead of a set of
length ``n``; this is in keeping with the ``partitions`` function which
treats its integer-``n`` input like a list of ``n`` 1s. One can use
``range(n)`` for ``n`` to indicate ``n`` distinct items.
If ``k`` is None then the total number of ways to partition the elements
represented in ``n`` will be returned.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nT
Partitions of the given multiset:
>>> [nT('aabbc', i) for i in range(1, 7)]
[1, 8, 11, 5, 1, 0]
>>> nT('aabbc') == sum(_)
True
>>> [nT("mississippi", i) for i in range(1, 12)]
[1, 74, 609, 1521, 1768, 1224, 579, 197, 50, 9, 1]
Partitions when all items are identical:
>>> [nT(5, i) for i in range(1, 6)]
[1, 2, 2, 1, 1]
>>> nT('1'*5) == sum(_)
True
When all items are different:
>>> [nT(range(5), i) for i in range(1, 6)]
[1, 15, 25, 10, 1]
>>> nT(range(5)) == sum(_)
True
References
==========
.. [1] http://undergraduate.csse.uwa.edu.au/units/CITS7209/partition.pdf
See Also
========
sympy.utilities.iterables.partitions
sympy.utilities.iterables.multiset_partitions
"""
from sympy.utilities.enumerative import MultisetPartitionTraverser
if isinstance(n, SYMPY_INTS):
# assert n >= 0
# all the same
if k is None:
return sum(_nT(n, k) for k in range(1, n + 1))
return _nT(n, k)
if not isinstance(n, _MultisetHistogram):
try:
# if n contains hashable items there is some
# quick handling that can be done
u = len(set(n))
if u == 1:
return nT(len(n), k)
elif u == len(n):
n = range(u)
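            # the raise funnels every remaining case into the histogram
            # conversion performed in the except clause below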
raise TypeError
except TypeError:
n = _multiset_histogram(n)
N = n[_N]
if k is None and N == 1:
return 1
if k in (1, N):
return 1
if k == 2 or N == 2 and k is None:
m, r = divmod(N, 2)
rv = sum(nC(n, i) for i in range(1, m + 1))
if not r:
rv -= nC(n, m)//2
if k is None:
rv += 1 # for k == 1
return rv
if N == n[_ITEMS]:
# all distinct
if k is None:
return bell(N)
return stirling(N, k)
m = MultisetPartitionTraverser()
if k is None:
return m.count_partitions(n[_M])
# MultisetPartitionTraverser does not have a range-limited count
# method, so need to enumerate and count
tot = 0
for discard in m.enum_range(n[_M], k-1, k):
tot += 1
return tot
|
py | 1a2ec17c06808474025044eecb4c169d51620cd8 | # -*- coding: utf-8 -*-
'''
Stage: "Sherlock's House"
'''
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))
sys.path.append('storybuilder')
from storybuilder.builder.world import World
# NOTE
# The central place where the story unfolds: the office.
# An old two-story house that Sherlock rents and lives in; a sign reading "B221" stands nearby.
# The property had a reputation for being haunted, so it was cheap; the cause was merely a magic stone that had formed on the site.
# :2F
# [Storage room][Empty room]
# [Bedroom 1][Bedroom 2]
# :1F
# [Kitchen][Bath]
# [Dining room][Toilet]
# [Laboratory]
# [Living room][Study]
# alias
HOME = "SherlockHouse"
LIVING = "SherlockHouseLiving"
KITCHEN = "SherlockHouseKitchen"
BATHROOM = "SherlockHouseBathroom"
DINING = "SherlockHouseDining"
LABO = "SherlockHouseLabo"
READING = "SherlockHouseReadingRoom"
BEDROOM = "SherlockHouseBedroom"
STORAGE = "SherlockHouseStorage"
## scenes
def about_sherlock(w: World):
return w.scene("$sherlockという男について",
w.plot_note("知っていたならなぜ忠告してくれなかったんだ、と$wilsonが$sherlockに文句をいい、なんとか家に入れてもらえる"),
w.plot_note("中に入るとそこら中に本や資料がちらばっていた"),
w.plot_note("$wilsonはそこで依頼をしようと思ったが、"),
w.plot_note("そこに何も記載のない手紙が投げ込まれた"),
)
def read_prince_letter(w: World):
return w.scene("皇太子の書簡の中身",
w.plot_note("手紙には独特の紙が使われていて、それが王室のものだと$sherlockは分かった"),
w.plot_note("中は皇太子からの手紙で、$sherlockに頼みごとが書かれていた"),
w.plot_note("皇太子は女遊びがひどくてその界隈では有名だが、今回ついに腰を落ち着けて結婚することになった"),
w.plot_note("相手は近隣の公国の王女で、政治的な意味合いも大きい"),
w.plot_note("その結婚に際して過去の女性関係をすべて綺麗にした"),
w.plot_note("ただある一人の女性にプレゼントしてしまった大切なナイフを返してもらいたいが、相手の女性が応じてくれない"),
w.plot_note("揉め事をおこしたくないので、穏便にすませたいから、$sherlockに彼女を説得して、ナイフを返してもらってくれないか、という依頼"),
w.plot_note("$sherlockはその依頼内容について、書かれていない部分の推測を述べる"),
w.plot_note("ナイフと書いているが、実際は王室に伝わる宝剣で、それが王の証の一つで、結婚の際には儀式内で使われる"),
w.plot_note("酒の勢いで大切な宝剣をあげてしまったのだろうと"),
w.plot_note("そんなものを取り戻す義理はないが、恩があるので仕方なく依頼を受けると言った"),
w.plot_note("$sherlockは$wilsonにその女性の家まで送ってほしいと頼んだ"),
)
def important_than_sword(w: World):
return w.scene("宝剣より大事なこと",
w.plot_note("$sherlockは宝剣よりも殺人事件についての調査をしたいと、$wilsonを家に置いて出ていってしまう"),
w.plot_note("$wilsonは$sherlockの家に戻り、そこで彼を待つことにする"),
w.plot_note("やってきた若い刑事は$sherlockがいないことに落胆しつつも、状況を教えてくれる"),
w.plot_note("発見された遺体は一月ほど前に行方不明になった女性だった"),
w.plot_note("$ailyとは何の関係もなく、そこの接点も見つけられないと嘆く"),
w.plot_note("殺害方法も不明で、凶器すら見つけられないと"),
w.plot_note("そこに役所の男から$ailyという女性が住民登録をしたという形跡は見つけられなかったと連絡がきた"),
)
def housemate_mary(w: World):
return w.scene("同居人$mary",
w.plot_note("同居するようになった$maryはやたらと$sherlockにまとわりつく"),
w.plot_note("$sherlockは大好きな読書もできず、困っていた"),
)
def mary_has_worry(w: World):
return w.scene("$maryの悩み",
w.plot_note("$maryは彼の迷惑になりたくなくて、$wilsonに相談する"),
w.plot_note("女手が不足しているから自分が役立つところをアピールしてみたら、と助言を受ける"),
w.plot_note("$maryは掃除や買い物を買って出る"),
)
def about_missings(w: World):
return w.scene("失踪者について",
w.plot_note("やっと外に出てくれてほっとした$sherlockは$wilsonに事件について相談する"),
w.plot_note("最近謎の失踪者が増えていた"),
w.plot_note("失踪事件として新聞や雑誌も特集を組んでいる"),
w.plot_note("$wilsonはその調査を$sherlockに依頼していたが、未だに何も情報がなかった"),
)
def strange_armor_knight(w: World):
return w.scene("奇妙な鎧騎士",
w.plot_note("そこに$maryが見知らぬ人を連れて戻ってくる"),
w.plot_note("道端で困っていたから拾ったけれど言葉がしゃべれないのだと$maryは説明した"),
)
def strange_work(w: World):
return w.scene("奇妙な仕事",
w.plot_note("その鎧騎士は$sherlockに$limeと名乗った(筆談で)"),
w.plot_note("彼女は今ある老夫婦の家に居候しているが、彼らの知人の質屋の護衛のアルバイトをしていた"),
w.plot_note("守衛仲間の$binsと交代しながら閉店時刻まで警備をしている"),
w.plot_note("その$binsから別のバイトを紹介され、今は途中にそちらもやっている"),
w.plot_note("その別のバイトが相談したいことだった"),
w.plot_note("最初に$binsからチラシを見せてもらったときには「赤い鎧の者だけがバイト資格がある」と書かれていた"),
w.plot_note("仕事内容はじっと座ってある本の写しを作る作業を三時間行うだけで、週給で結構な金額がもらえた"),
w.plot_note("実際に面接に行ってみると確かに赤い鎧を来た人間が集まっていたが、$limeみたいに見事に全身赤という者はいなかった"),
w.plot_note("主催者である赤鎧クラブは彼女を合格とし、その翌日から守衛を抜け出して三時間、そのアルバイトをしているらしい"),
w.plot_note("オーナー夫婦には申し訳なく感じているが、そのお金でプレゼントしたいと思っているのだと説明する"),
w.plot_note("その話をきいて$sherlockは彼女に今すぐそのアルバイトを辞めるようにとだけ言った"),
)
def reason_for_lime_work(w: World):
return w.scene("$sherlockの忠告",
w.plot_note("家に帰った$maryはどうしてあんな風に言ったのか$sherlockに問いただす"),
w.plot_note("$sherlockはそんなにうまい話は存在しないし、自分が知る限り「赤鎧クラブ」なんてものは存在しないと断言する"),
w.plot_note("$maryは実際に持ち帰ったチラシを見せながら、彼女を拾ってくれたオーナーさんや同僚の$binsの優しさを力説する"),
w.plot_note("しかし後日$sherlockの言っていたように問題が発生する"),
w.plot_note("その近所にあった改装中の銀行が強盗に襲われた"),
w.plot_note("警備員が気づいて連絡したが、表からも裏からも誰も入ってはおらず、謎の強盗と話題になっていた"),
w.plot_note("しかし現地を調べたところ、抜け穴が掘ってあり、大量のダイヤと金塊が盗まれたあとだった"),
w.plot_note("しかもその抜け穴は質屋に繋がっていたのだ"),
w.plot_note("その質屋のオーナー夫婦も逮捕され、$limeも容疑者の一人として逮捕された"),
)
def help_lime_please(w: World):
return w.scene("$limeを助けて",
w.plot_note("$maryが$limeを助けてやってほしいと$sherlockに言う"),
w.plot_note("$sherlockは自分の忠告を聞かなかったからだと言うが、それでも話だけは聞くと言う"),
w.plot_note("質屋につながっていた抜け穴の中で、重要参考人だった$ignesが遺体で発見された"),
w.plot_note("その容疑者として$limeが逮捕され、オーナー夫婦も事情聴取を受けている最中らしい"),
w.plot_note("強盗の件についても調査中で、全部彼女に押し付けられるかもしれないと言い出す"),
w.plot_note("$sherlockはその質屋に案内してもらう"),
)
def limes_talk_of_strange_case(w: World):
return w.scene("奇妙な事件についての$limeの話",
w.plot_note("$maryが$limeを拾い、再び家へと連れてくる"),
w.plot_note("$sherlockは銀行から盗まれたものがダイヤだけじゃないと睨むが、教えてもらえなかった"),
w.plot_note("家に戻ってくると$sherlockはそこに$limeがいることに頭を抱える"),
w.plot_note("$limeがしゃべれないのは呪いの鎧のせいだと言う"),
w.plot_note("その呪いをといてもらおうと、知人の神官を読んでいた"),
w.plot_note("呪いを解いたが$limeはしゃべれないままだった"),
w.plot_note("その$limeは筆談で自分が王室の人間であると告白する"),
)
def lime_was_royal_family(w: World):
return w.scene("$limeは王家の人間",
w.plot_note("$limeは自分が誘拐された訳ではなく、普通に家出をしたのだと告白する"),
w.plot_note("王室はそんな品の悪い発表をできないから失踪事件にして公表したのだと言った"),
w.plot_note("もともと妾の子で、周囲から浮いていて、王室にも自分の居場所がなく帰りたくないと泣く"),
)
def newcommer_lime(w: World):
return w.scene("新しい同居人$lime",
w.plot_note("$maryは$sherlockに$limeを一緒に住まわせてほしいとお願いする"),
w.plot_note("$sherlockは金銭的な問題さえ解決できればと提案する"),
w.plot_note("$wilsonは金のことなら大丈夫だと、なぜか大金を手にして言う"),
w.plot_note("$wilsonは$sherlockの秘蔵コレクションを売り払っていた"),
w.plot_note("こうして新しい住人$limeをここに加えることになった"),
)
def cooker_lime(w: World):
return w.scene("料理人$lime",
w.plot_note("$limeは料理担当になっていて、そのガチョウをもらってさばいてくれる"),
)
def marys_market_talk(w: World):
return w.scene("$maryの市場の話",
w.plot_note("$maryは市場で仕入れた面白い話を$sherlockに話す"),
w.plot_note("今市場ではガチョウからダイヤが出てくると話題になっていた"),
w.plot_note("$limeがやってきて、何か出たという"),
w.plot_note("ガチョウの中から出てきたのは血がついたナイフだった"),
)
def knife_in_the_goose(w: World):
return w.scene("ガチョウの中の凶器",
w.plot_note("$sherlockはそれがなにかの事件の凶器だと分かり、すぐに警察に連絡を取る"),
w.plot_note("$restradeがやってきて、それは現在彼が追っている事件の重要な証拠品だと言われた"),
)
def restrade_talk_about_goose_knife(w: World):
return w.scene("$restradeのガチョウの凶器事件の話",
w.plot_note("$restradeからその事件についての概要を聞く"),
w.plot_note("事件はある一軒家で起こった"),
w.plot_note("引退した学者が謎の死を遂げた"),
w.plot_note("刺殺だったのだが凶器が発見されず、犯人も特定されないまま現在に至る"),
w.plot_note("そのナイフを警部に渡して調べてもらう"),
w.plot_note("その間に興味をもった$sherlockは一人でその現場を調べに出ていってしまう"),
w.plot_note("後日、そのナイフからずっと失踪中の$jackの指紋が検出された"),
)
def backhome_mary_with_jack_wanted(w: World):
return w.scene("$jack容疑者の話を持って返ってきた$mary",
w.plot_note("戻ってきた$sherlockは$maryからそのことを聞き、"),
w.plot_note("$sherlockは現場を見てきたことを$maryたちに話す"),
)
def talk_about_goose_case(w: World):
return w.scene("ガチョウ凶器事件についての調査",
w.plot_note("現場は住宅街から少し離れた郊外の一軒家で、男は民間の研究所をやめたあとも個人的に何かを研究していた"),
w.plot_note("歴史学と民俗学に造形が深く、$sherlockもその所蔵していた資料に関心をしたくらい"),
w.plot_note("彼が書き残しているものの一つに古代の技法がいくつか紹介されていた"),
w.plot_note("刺された場所は彼の家だが、凶器は消えている"),
w.plot_note("ただし$jackとの関係性は全く見えず、彼女ならそんな手段を使わないと$sherlockは考えた"),
w.plot_note("$sherlockは誰かが$jackを表舞台に引っ張り出したい、その罠だと考える"),
)
def jacks_letter(w: World):
return w.scene("$jackからの手紙",
w.plot_note("と、差出人不明の手紙に$jackからのメッセージがあった"),
w.plot_note("助けてほしいと"),
)
def sherlocks_message_for_jack(w: World):
return w.scene("$sherlockのメッセージ",
w.plot_note("そこに$sherlockからの伝言を$ignesが持ってくる"),
w.plot_note("数日留守にすることと、$jackに会いに行ってくると書かれていた"),
)
def mysterious_case(w :World):
return w.scene("怪奇事件",
w.plot_note("$sherlockは怪奇事件の特集記事を読みながら「こんなものは実在しない」と言う"),
w.plot_note("そもそも奇妙な現象、霊的なもの、不思議なものは人間が理解することを放棄していると説明する"),
w.plot_note("小さい頃、学校内で七不思議というものがあったが、それを全て解明したらみんなから怒られたと"),
)
def legend_of_dark_dog(w: World):
return w.scene("魔獣伝説",
w.plot_note("そこに$wilsonがこんな話がある、と、ある孤島に伝わる魔獣伝説を話した"),
w.plot_note("そこはこの三年の間に六名もの犠牲者が出ているという"),
w.plot_note("最初は飼い犬や家畜が殺されているだけだった"),
w.plot_note("しかし最初に人の犠牲者が出た"),
w.plot_note("それはどう見ても人の手によるものではなく、何か獣による被害だった"),
w.plot_note("最初の事件から次の事件まではかなり時間が開いたが、直近はこの三ヶ月の間に二件も殺人事件が起こっている"),
w.plot_note("$sherlockはそれだけ続くなら必ず人の手が関わっていると断言する"),
)
def invitation_from_dark_island(w: World):
return w.scene("孤島からの招待状",
w.plot_note("そこに招待状が届く"),
w.plot_note("$wilsonはそれを開封し、噂をしていれば、とその伝説の孤島に暮らす城主からの招待状だと言った"),
)
def commision_of_murder_case(w: World):
return w.scene("殺人事件の解決依頼",
w.plot_note("$sherlockは新聞を読んでいた"),
w.plot_note("そこに殺人事件の調査依頼が持ち込まれる"),
w.plot_note("最初は$maryも驚いていたが今では慣れたもので、依頼人を案内して、飲み物を出しながら依頼内容を話すよう促す"),
w.plot_note("$maryは秘書気取りだった"),
w.plot_note("だが$sherlockは依頼人が出した名前に驚く"),
w.plot_note("それは$morianoの大学の後輩だったからだ"),
w.plot_note("犯罪学の研究をしている人間が殺された"),
w.plot_note("大学の研究室内での密室殺人。その手口が全く不明だが自殺ではないと警察は断定しているという"),
w.plot_note("さっそくその調査に向かう$sherlock"),
)
def moriano_is_here(w: World):
return w.scene("$moriano見参",
w.plot_note("$sherlockが家に戻ってくるとそこには老人の姿があった", "$morianoだ"),
w.plot_note("$morianoは「はじめまして」と挨拶をし、それから今$sherlockたちがどういう経路で戻ってきたかを言い当てる"),
w.plot_note("$morianoは$sherlockに自分に関するすべてのことから手を引くようにと警告する"),
w.plot_note("$sherlockは$morianoがここに来ることも推測して既に逮捕する準備を整えているとブラフを張るが、彼には通用しなかった"),
w.plot_note("警察は別のところで起こった事件に駆けつけている"),
w.plot_note("$morianoは言う。すべての人間は自分の意志ではなく、環境要因によって動かされると。つまり誰でもが犯罪者になりうると"),
w.plot_note("$morianoは$maryに問いかける。彼女は$sherlockを好きだろうと"),
w.plot_note("$morianoは$limeに本心では王室に帰りたいだろうと"),
w.plot_note("$wilsonについての言及はとくないが、ここの人間には言えない本音を隠しているだろうと"),
w.plot_note("$morianoは逃げないからいつでも自分の屋敷に来るがいいと言い残して、去っていく"),
)
def marys_strange(w: World):
return w.scene("奇妙な$mary",
w.plot_note("$morianoがきてから$maryの様子がおかしい"),
w.plot_note("$sherlockは$morianoを何とか見つけ出そうと躍起になっている"),
w.plot_note("$maryは$limeに相談することもできず、市場の$nowlisに愚痴る。自分だけが違う気がすると"),
)
def where_is_mary(w: World):
return w.scene("$maryはどこへ?",
w.plot_note("すぐに$sherlockたちは$morianoの邸宅に向かう"),
)
def alive_moriano(w: World):
return w.scene("$morianoは生きている",
w.plot_note("しかし後日、$morianoのメッセージが新聞に掲載される"),
w.plot_note("$maryは無事で丁重に監禁していると。場所は$sherlockなら推理できると書かれて、ヒントが残されていた"),
)
def morianos_whereabouts(w: World):
return w.scene("$morianoの居場所",
w.plot_note("$sherlockはヒントから$maryの居場所は$morianoと関係ない場所にいると推測する"),
w.plot_note("$mary救出隊として少年探偵団の協力を仰ぐ"),
w.plot_note("その間に$sherlockはその新聞記事からたどり、$morianoがどこからメッセージを出しているのかを調べる"),
)
def vanished_sherlock(w: World):
return w.scene("消えた$sherlock",
w.plot_note("$maryたちが戻ると、そこには$sherlockの姿がなかった"),
)
def sherlocks_information(w: World):
return w.scene("$sherlockに関する情報",
w.plot_note("$maryが目覚めるとそこに$sherlockの姿がいなかった"),
w.plot_note("戻ってきた$wilsonは$sherlockの手がかりを追ったが見失ったと言う"),
w.plot_note("$maryは$sherlockが戻ってくると信じて待っていたが、連絡も戻ってくることもなかった"),
)
def no_sherlock_life(w: World):
return w.scene("$sherlock不在の生活",
"同・寝室",
w.plot_note("一月が経ち、$maryたちは$sherlockのいない生活に馴染み始めていた"),
)
def serching_sherlock(w: World):
return w.scene("$sherlockの捜索隊",
w.plot_note("町では$morianoも$sherlockも消えたというのに犯罪は起こっていた"),
w.plot_note("$wilsonは手を尽くして$sherlockを探す"),
w.plot_note("$limeが王室のツテを使い、何とか情報を集めると言い出す"),
w.plot_note("少年探偵団も手を尽くした"),
w.plot_note("今まで世話になった人たちも$sherlockのことを探してくれた"),
w.plot_note("それでも情報すら見つからない"),
)
def arrived_his_message(w: World):
return w.scene("$sherlockからのメッセージ",
w.plot_note("雨の酷いある日、一通の郵便が届く"),
w.plot_note("届いた手紙には宛名がなかったが$sherlock特有の署名が入っていた"),
)
def sadness_report(w: World):
return w.scene("悲しいお知らせ",
w.plot_note("手紙の冒頭にはこう書かれていた"),
w.plot_note("この手紙が届いたならば自分はすでにこの世界にいないだろうと$sherlockは書いていた"),
w.plot_note("手紙は$morianoの隠れ家に向かう直前に書いて出したと書かれている"),
w.plot_note("$morianoは用意周到で、約束通り一人で待っていたりはしない"),
w.plot_note("$sherlockは陽動をして、手下たちを遠ざけて、可能ならば$morianoをおびき出す"),
w.plot_note("どうにか一対一で話せる場所を作る、と書いてある"),
w.plot_note("$morianoがどこまで語るか、告白するかわからないが、彼がやってきた悪事について書き残しておく"),
w.plot_note("$morianoのことは以前教えたが、大学を出たあとの彼については今回独自調査を行うまで不明な部分が多かった"),
w.plot_note("$morianoは$cultXと呼ばれる宗教団体との接触から犯罪者人生が始まる"),
w.plot_note("彼はその教義である人間の本性である「悪」を反映させようとしていた"),
w.plot_note("今までに解決した事件の裏側にはこの教団か、その教団の人間、関係者が細い糸で繋がっていた"),
w.plot_note("その大本である$morianoを何としても打ち取ると宣言されていた"),
w.plot_note("$maryたちは$sherlockがどうなったのか気になり、手紙を出した場所に向かおうとする"),
w.plot_note("だが$wilsonによりそれは止められる"),
w.plot_note("兄の$mikelがやってきて「$sherlockがなくなった」と告げた"),
)
## in Empty House
def believed_his_alive(w: World):
mary, lime, wil = w.get("mary"), w.get("lime"), w.get("wilson")
return w.scene("$sherlockの生存を信じて",
w.change_camera("mary"),
w.change_stage(KITCHEN),
w.change_time("morning"),
w.plot_note("$maryたちは$sherlockが生きていると思って捜索を続けていた"),
w.plot_note("しかし何の情報もなく、ただ時間だけが過ぎていく"),
w.plot_note("家を失い、$wilsonの住まいに居候していた$maryたち"),
w.plot_note("$wilsonは忙しそうに外に出ていることが増えた"),
w.plot_note("$maryは$sherlockの手紙にヒントはないかと考えるが、何も見つからない"),
mary.be("皿洗いをしている$S"),
mary.think("もう一月も$sherlockは失踪を続けている"),
mary.think("完全に死んだものと思われていたが、$Sたちは捜索を続けていた"),
mary.do("棚には$sherlockのコップが残っている"),
mary.think("それを目にして涙が滲む"),
mary.think("でも$sherlockが送ってきたメッセージにはわざわざ自分が死んだと思ってくれと書かれていた"),
mary.think("何故そんなことを書いたのか、$Sは気になっていた"),
mary.talk("あっ"),
mary.do("$wilsonの湯呑が割れてしまう"),
mary.talk("$wilsonのだし、いいか"),
)
def news_of_sherlock_alive(w: World):
mary, lime, wil = w.get("mary"), w.get("lime"), w.get("wilson")
return w.scene("$sherlock生存情報",
w.change_camera("mary"),
w.change_stage(LIVING),
w.change_time("noon"),
w.plot_note("だが$limeはそこに$sherlockが生きているという証拠を見つけた"),
w.plot_note("そこに$wilsonが戻ってくる"),
w.plot_note("$wilsonは「$sherlockに似た人間を見かけた」という情報を聞いたと話した"),
mary.come("買い物を終えて帰ってきた$S"),
lime.be("$Sは家の片付けをしていた"),
wil.come("そこに$Sが興奮した様子で戻ってくる"),
mary.talk("何かあったん?"),
wil.talk("聞いてくれ", "いた", "$sherlockが、いたんだ"),
mary.do("驚きで声が出ない$S"),
lime.do("掃除の手が止まる$S"),
wil.talk("$meもまだ聞いたばかりの話で、本当かどうかの確認すらできていないんだが、それでもこれまで何の情報もなかったところにこれは大きいよ"),
wil.talk("$EastEndの空き家に夜な夜な明かりが灯る家があるそうなんだ",
"どうやらそこに$sherlockによく似た人間が入っていくのを見たって、ホームレスの目撃情報があった"),
mary.talk("でもどうしてそれが$sherlockなん? 別人の可能性はないん?"),
wil.talk("それが以前$sherlockが世話をしたホームレスで、彼のことをよく覚えていたんだよ",
"遠目にもあの特徴的な寝癖頭とそこに被ったハンチング、チェック柄のコートは$sherlockに間違いないって"),
mary.think("その話に興奮する$S"),
mary.talk("場所は?"),
mary.do("荷物を置くと、$wilsonに詰め寄った"),
)
def consideration_of_sherlock(w: World):
return w.scene("容疑者$sherlockについての考察",
# NOTE: omit?
w.plot_note("一旦家に戻り、犯人にされてしまった$sherlockについて考える"),
w.plot_note("$wilsonは$sherlockが$moriano一味に騙されたというのだが"),
w.plot_note("もう一度あの空き家を訪れる"),
)
def help_from_sherlock(w: World):
lime = w.get("lime")
wil = w.get("wilson")
ignes = w.get("ignes")
return w.scene("$sherlockからの救援情報",
w.change_camera("lime"),
w.change_stage(LIVING),
lime.be("一人で$sherlockの家に戻っている$S"),
lime.do("消えた$maryを探してくるとでかけた$wilson"),
lime.do("$Sはポストに入っていた宛名のない封書を見つける"),
lime.do("そこには$maryが$morianoの手の者に捕まり、監禁されていると書かれていた"),
ignes.come("$Sがやってきて"),
ignes.talk("$mary嬢ちゃんは?"),
ignes.do("事情を聞く$S"),
ignes.talk("すぐ手配して、場所を突き止める", "$limeさんは警察に行って事情を説明してきて"),
)
def injured_wilson(w: World):
return w.scene("負傷した$wilson",
        # NOTE: omit / change of approach
w.plot_note("家に戻ると$wilsonがいて、ひどい怪我を負っていたが、無事に逃げ出したと言う"),
w.plot_note("$maryは自分たちを助けた男が$sherlockの生存を言っていたと伝える"),
w.plot_note("$wilsonはそのホームレスのことを教えてくれと頼む"),
w.plot_note("$maryたちにここで休むようにいい、$wilsonは$sherlockを探しに出ていった"),
w.plot_note("そこに$wilsonが指名手配されたと$restradeがやってくる"),
)
def burned_shal_home(w: World):
shal = w.get("sherlock")
wil, lime = w.get("wilson"), w.get("lime")
ignes, pat = w.get("ignes"), w.get("patson")
lisa = w.get("lisa")
return w.scene("家が燃えて",
w.change_camera("sherlock"),
w.change_stage(HOME),
shal.come("$Sたちは$wilsonの車で火事で全焼してしまった住居前にやってくる"),
wil.come(),
lime.come(),
shal.do("みんな呆然としてその光景を見ている"),
shal.do("消防士たちが$magicポンプで水をかけている。少しずつ火勢は衰え、もう消火が近い"),
shal.do("近所の人も出て、野次馬が集まっている"),
lisa.come("大家の$Sがやってきて、びっくりして呆然"),
lisa.talk("な、何なんですか、これは!"),
shal.talk("ああ、$ln_lisaさん、どうもご無沙汰しています"),
lisa.talk("ねえ$sherlockさん、これは一体どういうことなのかしら"),
shal.talk("火事みたいですね。おそらく放火でしょう。迷惑なことです"),
lisa.talk("燃えたのは誰の家なの?"),
shal.talk("$meが借りていたあなたの家です"),
lisa.talk("ええ、そうよね。そうだと思ったわ"),
lisa.do("見る間に表情が変わっていく夫人"),
lisa.talk("この弁償、誰が支払ってくれるのかしら"),
shal.talk("契約上は$meに過失があった場合は$meですが、放火の責任を取れと言われても困りますから、おそらくオーナー夫人の方になるかと"),
lisa.talk("何ですって!"),
shal.talk("用事があったのを思い出したので失礼"),
shal.do("$wilsonの車に乗り込む$S"),
wil.do("仕方ない、といった感じで車に乗り込む$S"),
)
|
py | 1a2ec33fff551516eb2ab280bb118cd7e8efbed2 | from .bedloader import ROI
|
py | 1a2ec3e6f6d257f7ceaa7fb9461283f2be172d25 | from __future__ import annotations
import ast
import functools
import sys
from typing import Iterable
from tokenize_rt import NON_CODING_TOKENS
from tokenize_rt import Offset
from tokenize_rt import Token
from pyupgrade._ast_helpers import ast_to_offset
from pyupgrade._ast_helpers import is_name_attr
from pyupgrade._data import register
from pyupgrade._data import State
from pyupgrade._data import TokenFunc
from pyupgrade._token_helpers import CLOSING
from pyupgrade._token_helpers import find_closing_bracket
from pyupgrade._token_helpers import find_token
from pyupgrade._token_helpers import OPENING
def _fix_optional(i: int, tokens: list[Token]) -> None:
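    # Rewrites ``Optional[X]`` as ``X | None`` (e.g. ``x: Optional[int]`` becomes
    # ``x: int | None``); multi-line subscripts are wrapped as ``None | (...)``.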
j = find_token(tokens, i, '[')
k = find_closing_bracket(tokens, j)
if tokens[j].line == tokens[k].line:
tokens[k] = Token('CODE', ' | None')
del tokens[i:j + 1]
else:
tokens[j] = tokens[j]._replace(src='(')
tokens[k] = tokens[k]._replace(src=')')
tokens[i:j] = [Token('CODE', 'None | ')]
def _fix_union(
i: int,
tokens: list[Token],
*,
arg_count: int,
) -> None:
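    # Rewrites ``Union[X, Y, ...]`` as ``X | Y | ...`` (e.g. ``Union[int, str]``
    # becomes ``int | str``): the subscript is walked while tracking bracket
    # depth, top-level commas become ``|`` and redundant parentheses are dropped.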
depth = 1
parens_done = []
open_parens = []
commas = []
coding_depth = None
j = find_token(tokens, i, '[')
k = j + 1
while depth:
# it's possible our first coding token is a close paren
# so make sure this is separate from the if chain below
if (
tokens[k].name not in NON_CODING_TOKENS and
tokens[k].src != '(' and
coding_depth is None
):
if tokens[k].src == ')': # the coding token was an empty tuple
coding_depth = depth - 1
else:
coding_depth = depth
if tokens[k].src in OPENING:
if tokens[k].src == '(':
open_parens.append((depth, k))
depth += 1
elif tokens[k].src in CLOSING:
if tokens[k].src == ')':
paren_depth, open_paren = open_parens.pop()
parens_done.append((paren_depth, (open_paren, k)))
depth -= 1
elif tokens[k].src == ',':
commas.append((depth, k))
k += 1
k -= 1
assert coding_depth is not None
assert not open_parens, open_parens
comma_depth = min((depth for depth, _ in commas), default=sys.maxsize)
min_depth = min(comma_depth, coding_depth)
to_delete = [
paren
for depth, positions in parens_done
if depth < min_depth
for paren in positions
]
if comma_depth <= coding_depth:
comma_positions = [k for depth, k in commas if depth == comma_depth]
if len(comma_positions) == arg_count:
to_delete.append(comma_positions.pop())
else:
comma_positions = []
to_delete.sort()
if tokens[j].line == tokens[k].line:
del tokens[k]
for comma in comma_positions:
tokens[comma] = Token('CODE', ' |')
for paren in reversed(to_delete):
del tokens[paren]
del tokens[i:j + 1]
else:
tokens[j] = tokens[j]._replace(src='(')
tokens[k] = tokens[k]._replace(src=')')
for comma in comma_positions:
tokens[comma] = Token('CODE', ' |')
for paren in reversed(to_delete):
del tokens[paren]
del tokens[i:j]
def _supported_version(state: State) -> bool:
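    # PEP 604 syntax is only emitted inside annotations, and only when the
    # target version is 3.10+ or when ``from __future__ import annotations``
    # is in effect and runtime typing support has not been requested.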
return (
state.in_annotation and (
state.settings.min_version >= (3, 10) or (
not state.settings.keep_runtime_typing and
'annotations' in state.from_imports['__future__']
)
)
)
def _any_arg_is_str(node_slice: ast.expr) -> bool:
return (
isinstance(node_slice, ast.Str) or (
isinstance(node_slice, ast.Tuple) and
any(isinstance(elt, ast.Str) for elt in node_slice.elts)
)
)
@register(ast.Subscript)
def visit_Subscript(
state: State,
node: ast.Subscript,
parent: ast.AST,
) -> Iterable[tuple[Offset, TokenFunc]]:
if not _supported_version(state):
return
# prevent rewriting forward annotations
if (
(sys.version_info >= (3, 9) and _any_arg_is_str(node.slice)) or
(
sys.version_info < (3, 9) and
isinstance(node.slice, ast.Index) and
_any_arg_is_str(node.slice.value)
)
):
return
if is_name_attr(
node.value,
state.from_imports,
('typing',),
('Optional',),
):
yield ast_to_offset(node), _fix_optional
elif is_name_attr(node.value, state.from_imports, ('typing',), ('Union',)):
if sys.version_info >= (3, 9): # pragma: >=3.9 cover
node_slice = node.slice
elif isinstance(node.slice, ast.Index): # pragma: <3.9 cover
node_slice: ast.AST = node.slice.value
else: # pragma: <3.9 cover
node_slice = node.slice # unexpected slice type
if isinstance(node_slice, ast.Slice): # not a valid annotation
return
if isinstance(node_slice, ast.Tuple):
if node_slice.elts:
arg_count = len(node_slice.elts)
else:
return # empty Union
else:
arg_count = 1
func = functools.partial(_fix_union, arg_count=arg_count)
yield ast_to_offset(node), func
|
py | 1a2ec412040c3a5ef81d550375c59efa89b2e989 | import argparse
import logging
import time
import sys
from twilio.rest import Client
import settings
import RPi.GPIO as GPIO
twilio = Client(settings.TWILIO_PUBLIC_KEY, settings.TWILIO_SECRET_KEY)
log = logging.getLogger(__name__)
class SaltLevelMonitor(object):
def __init__(self, force_report=False, unit=settings.METRIC, threshold=0,
tank_depth=settings.DEFAULT_TANK_DEPTH):
self.force_report = force_report
self.unit = unit if unit in settings.VALID_UNITS else settings.METRIC
self.notation = 'inches' if unit == settings.IMPERIAL else 'centimeters'
self.threshold = float(threshold)
self.tank_depth = float(tank_depth)
self.distance = None
self.remaining_salt = None
def check_salt_level(self):
self.distance = self.get_average_distance()
self._convert_units()
self.remaining_salt = self.tank_depth - self.distance
message = self._get_report_message()
log.info('Salt level is: {0:.2f} {1}'.format(self.remaining_salt, self.notation))
if self.remaining_salt < self.threshold or self.force_report:
log.info(message['body'])
self.report_salt_level(message)
def get_average_distance(self):
""" used to get an average read since the sensor isn't 100% accurate """
reads = [self.get_distance() for _ in range(settings.READS_PER_CHECK)]
return sum(reads) / settings.READS_PER_CHECK
@staticmethod
def get_distance():
""" returns distance in centimeters """
# set Trigger to HIGH
GPIO.output(settings.GPIO_TRIGGER, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(settings.GPIO_TRIGGER, False)
start_time = time.time()
stop_time = time.time()
# save StartTime
while GPIO.input(settings.GPIO_ECHO) == 0:
start_time = time.time()
# save time of arrival
while GPIO.input(settings.GPIO_ECHO) == 1:
stop_time = time.time()
# time difference between start and arrival
time_elapsed = stop_time - start_time
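        # the measured time covers the pulse's round trip to the target and
        # back, so it is halved below to obtain the one-way distance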
return (time_elapsed * settings.SPEED_OF_SOUND) / 2
def _convert_units(self):
"""
convert distance to inches if IMPERIAL or convert tank_depth and threshold to centimeters
"""
if self.unit == settings.IMPERIAL:
self.distance = self.distance / settings.CM_TO_INCHES
else:
self.tank_depth = self.tank_depth * settings.CM_TO_INCHES
self.threshold = self.threshold * settings.CM_TO_INCHES
def _get_report_message(self):
message = settings.MESSAGE_TEMPLATE.copy()
message['body'] = settings.SALT_LEVEL_ALERT_MESSAGE.format(
self.remaining_salt, self.notation)
if self.force_report:
message['body'] = '{} (forced report)'.format(message['body'])
return message
@staticmethod
def report_salt_level(message):
twilio.messages.create(**message)
def __enter__(self):
GPIO.setmode(GPIO.BCM)
# set GPIO direction (IN / OUT)
GPIO.setup(settings.GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(settings.GPIO_ECHO, GPIO.IN)
return self
def __exit__(self, *args):
GPIO.cleanup()
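# Example invocation (illustrative only; the script file name and the values
# shown are assumptions, not taken from this repository):
#   python salt_monitor.py --unit imperial --threshold 8 --tank-depth 40 --force-report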
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Salty Dog')
parser.add_argument('-u',
'--unit',
action='store',
dest='unit',
default='metric',
help='Unit of measure used in reporting')
parser.add_argument('-t',
'--threshold',
action='store',
dest='threshold',
help='Threshold for reporting in inches or cm (must match --unit)')
parser.add_argument('-d',
'--tank-depth',
action='store',
dest='tank_depth',
help='Total depth of your salt tank in inches or cm (must match --unit)')
parser.add_argument('-f',
'--force-report',
action='store_true',
dest='force_report',
default=False,
help='Force Salty Dog to send SMS regardless of salt level measured')
args = parser.parse_args(sys.argv[1:])
parsed_kwargs = {
'force_report': args.force_report,
'unit': args.unit,
'threshold': args.threshold,
'tank_depth': args.tank_depth,
}
with SaltLevelMonitor(**parsed_kwargs) as monitor:
monitor.check_salt_level()
|
py | 1a2ec4fda02beef4f3b97ff5f4100c3805bf6108 | from __future__ import absolute_import
from __future__ import unicode_literals
import time
import socket
import logging
from ._compat import bytes_types, string_types
from ._compat import struct_l
from .version import __version__
try:
import ssl
except ImportError:
ssl = None # pyflakes.ignore
try:
from .snappy_socket import SnappySocket
except ImportError:
SnappySocket = None # pyflakes.ignore
try:
import simplejson as json
except ImportError:
import json # pyflakes.ignore
import tornado.iostream
import tornado.ioloop
try:
from tornado.simple_httpclient import _default_ca_certs as default_ca_certs
except ImportError:
# Tornado < 4
from tornado.simple_httpclient import _DEFAULT_CA_CERTS
def default_ca_certs():
return _DEFAULT_CA_CERTS
from nsq import event, protocol
from .deflate_socket import DeflateSocket
logger = logging.getLogger(__name__)
# states
INIT = 'INIT'
DISCONNECTED = 'DISCONNECTED'
CONNECTING = 'CONNECTING'
CONNECTED = 'CONNECTED'
DEFAULT_USER_AGENT = 'pynsq/%s' % __version__
class AsyncConn(event.EventedMixin):
"""
Low level object representing a TCP connection to nsqd.
When a message on this connection is requeued and the requeue delay
has not been specified, it calculates the delay automatically by an
increasing multiple of ``requeue_delay``.
Generates the following events that can be listened to with
:meth:`nsq.AsyncConn.on`:
* ``connect``
* ``close``
* ``error``
* ``identify``
* ``identify_response``
* ``auth``
* ``auth_response``
* ``heartbeat``
* ``ready``
* ``message``
* ``response``
* ``backoff``
* ``resume``
:param host: the host to connect to
    :param port: the port to connect to
:param timeout: the timeout for read/write operations (in seconds)
:param heartbeat_interval: the amount of time (in seconds) to negotiate
with the connected producers to send heartbeats (requires nsqd 0.2.19+)
:param requeue_delay: the base multiple used when calculating requeue delay
(multiplied by # of attempts)
:param tls_v1: enable TLS v1 encryption (requires nsqd 0.2.22+)
:param tls_options: dictionary of options to pass to `ssl.wrap_socket()
<http://docs.python.org/2/library/ssl.html#ssl.wrap_socket>`_ as
``**kwargs``
:param snappy: enable Snappy stream compression (requires nsqd 0.2.23+)
:param deflate: enable deflate stream compression (requires nsqd 0.2.23+)
:param deflate_level: configure the deflate compression level for this
connection (requires nsqd 0.2.23+)
:param output_buffer_size: size of the buffer (in bytes) used by nsqd
for buffering writes to this connection
:param output_buffer_timeout: timeout (in ms) used by nsqd before
flushing buffered writes (set to 0 to disable). **Warning**:
configuring clients with an extremely low (``< 25ms``)
``output_buffer_timeout`` has a significant effect on ``nsqd``
CPU usage (particularly with ``> 50`` clients connected).
:param sample_rate: take only a sample of the messages being sent
to the client. Not setting this or setting it to 0 will ensure
you get all the messages destined for the client.
Sample rate can be greater than 0 or less than 100 and the client
will receive that percentage of the message traffic.
(requires nsqd 0.2.25+)
:param user_agent: a string identifying the agent for this client
in the spirit of HTTP (default: ``<client_library_name>/<version>``)
(requires nsqd 0.2.25+)
:param auth_secret: a byte string passed when using nsq auth
(requires nsqd 1.0+)
:param msg_timeout: the amount of time (in seconds) that nsqd will wait
before considering messages that have been delivered to this
consumer timed out (requires nsqd 0.2.28+)
:param hostname: a string identifying the host where this client runs
(default: ``<hostname>``)
"""
def __init__(
self,
host,
port,
timeout=1.0,
heartbeat_interval=30,
requeue_delay=90,
tls_v1=False,
tls_options=None,
snappy=False,
deflate=False,
deflate_level=6,
user_agent=DEFAULT_USER_AGENT,
output_buffer_size=16 * 1024,
output_buffer_timeout=250,
sample_rate=0,
io_loop=None,
auth_secret=None,
msg_timeout=None,
hostname=None):
assert isinstance(host, string_types)
assert isinstance(port, int)
assert isinstance(timeout, float)
assert isinstance(tls_options, (dict, None.__class__))
assert isinstance(deflate_level, int)
assert isinstance(heartbeat_interval, int) and heartbeat_interval >= 1
assert isinstance(requeue_delay, int) and requeue_delay >= 0
assert isinstance(output_buffer_size, int) and output_buffer_size >= 0
assert isinstance(output_buffer_timeout, int) and output_buffer_timeout >= 0
assert isinstance(sample_rate, int) and sample_rate >= 0 and sample_rate < 100
assert isinstance(auth_secret, bytes_types + (None.__class__,))
assert tls_v1 and ssl or not tls_v1, \
'tls_v1 requires Python 2.6+ or Python 2.5 w/ pip install ssl'
assert msg_timeout is None or (isinstance(msg_timeout, (float, int)) and msg_timeout > 0)
self.state = INIT
self.host = host
self.port = port
self.timeout = timeout
self.last_recv_timestamp = time.time()
self.last_msg_timestamp = time.time()
self.in_flight = 0
self.rdy = 0
self.rdy_timeout = None
# for backwards compatibility when interacting with older nsqd
# (pre 0.2.20), default this to their hard-coded max
self.max_rdy_count = 2500
self.tls_v1 = tls_v1
self.tls_options = tls_options
self.snappy = snappy
self.deflate = deflate
self.deflate_level = deflate_level
self.hostname = hostname
if self.hostname is None:
self.hostname = socket.gethostname()
self.short_hostname = self.hostname.split('.')[0]
self.heartbeat_interval = heartbeat_interval * 1000
self.msg_timeout = int(msg_timeout * 1000) if msg_timeout else None
self.requeue_delay = requeue_delay
self.io_loop = io_loop
if not self.io_loop:
self.io_loop = tornado.ioloop.IOLoop.instance()
self.output_buffer_size = output_buffer_size
self.output_buffer_timeout = output_buffer_timeout
self.sample_rate = sample_rate
self.user_agent = user_agent
self._authentication_required = False # tracking server auth state
self.auth_secret = auth_secret
self.socket = None
self.stream = None
self._features_to_enable = []
self.last_rdy = 0
self.rdy = 0
self.callback_queue = []
super(AsyncConn, self).__init__()
@property
def id(self):
return str(self)
def __str__(self):
return self.host + ':' + str(self.port)
def connected(self):
return self.state == CONNECTED
def connecting(self):
return self.state == CONNECTING
def closed(self):
return self.state in (INIT, DISCONNECTED)
def connect(self):
if not self.closed():
return
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.settimeout(self.timeout)
self.socket.setblocking(0)
self.stream = tornado.iostream.IOStream(self.socket, io_loop=self.io_loop)
self.stream.set_close_callback(self._socket_close)
self.stream.set_nodelay(True)
self.state = CONNECTING
self.on(event.CONNECT, self._on_connect)
self.on(event.DATA, self._on_data)
self.stream.connect((self.host, self.port), self._connect_callback)
def _connect_callback(self):
self.state = CONNECTED
self.stream.write(protocol.MAGIC_V2)
self._start_read()
self.trigger(event.CONNECT, conn=self)
def _read_bytes(self, size, callback):
try:
self.stream.read_bytes(size, callback)
except IOError:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.ConnectionClosedError('Stream is closed'),
)
def _start_read(self):
self._read_bytes(4, self._read_size)
def _socket_close(self):
self.state = DISCONNECTED
self.trigger(event.CLOSE, conn=self)
def close(self):
self.stream.close()
def _read_size(self, data):
try:
size = struct_l.unpack(data)[0]
except Exception:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.IntegrityError('failed to unpack size'),
)
return
self._read_bytes(size, self._read_body)
def _read_body(self, data):
try:
self.trigger(event.DATA, conn=self, data=data)
except Exception:
logger.exception('uncaught exception in data event')
self._start_read()
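    # Sketch of the framing the read loop above consumes (the byte layout is defined by
    # nsqd, not by this file): every frame is a 4-byte big-endian length prefix followed
    # by `length` bytes of payload, so _read_size unpacks the prefix via struct_l and
    # _read_body hands the payload to protocol.unpack_response(). For example,
    #   b'\x00\x00\x00\x06\x00\x00\x00\x00OK'
    # is a 6-byte payload whose first 4 bytes (frame type 0, a response) precede b'OK'.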
def send(self, data):
self.stream.write(data)
def upgrade_to_tls(self, options=None):
assert ssl, 'tls_v1 requires Python 2.6+ or Python 2.5 w/ pip install ssl'
# in order to upgrade to TLS we need to *replace* the IOStream...
#
# first remove the event handler for the currently open socket
# so that when we add the socket to the new SSLIOStream below,
# it can re-add the appropriate event handlers.
self.io_loop.remove_handler(self.socket.fileno())
opts = {
'cert_reqs': ssl.CERT_REQUIRED,
'ca_certs': default_ca_certs()
}
opts.update(options or {})
self.socket = ssl.wrap_socket(self.socket, ssl_version=ssl.PROTOCOL_TLSv1,
do_handshake_on_connect=False, **opts)
self.stream = tornado.iostream.SSLIOStream(self.socket, io_loop=self.io_loop)
self.stream.set_close_callback(self._socket_close)
# now that the IOStream has been swapped we can kickstart
# the SSL handshake
self.stream._do_ssl_handshake()
def upgrade_to_snappy(self):
assert SnappySocket, 'snappy requires the python-snappy package'
# in order to upgrade to Snappy we need to use whatever IOStream
# is currently in place (normal or SSL)...
#
# first read any compressed bytes the existing IOStream might have
# already buffered and use that to bootstrap the SnappySocket, then
# monkey patch the existing IOStream by replacing its socket
# with a wrapper that will automagically handle compression.
existing_data = self.stream._consume(self.stream._read_buffer_size)
self.socket = SnappySocket(self.socket)
self.socket.bootstrap(existing_data)
self.stream.socket = self.socket
def upgrade_to_deflate(self):
# in order to upgrade to DEFLATE we need to use whatever IOStream
# is currently in place (normal or SSL)...
#
# first read any compressed bytes the existing IOStream might have
        # already buffered and use that to bootstrap the DeflateSocket, then
# monkey patch the existing IOStream by replacing its socket
# with a wrapper that will automagically handle compression.
existing_data = self.stream._consume(self.stream._read_buffer_size)
self.socket = DeflateSocket(self.socket, self.deflate_level)
self.socket.bootstrap(existing_data)
self.stream.socket = self.socket
def send_rdy(self, value):
try:
self.send(protocol.ready(value))
except Exception as e:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.SendError('failed to send RDY %d' % value, e),
)
return False
self.last_rdy = value
self.rdy = value
return True
def _on_connect(self, **kwargs):
identify_data = {
'short_id': self.short_hostname, # TODO remove when deprecating pre 1.0 support
'long_id': self.hostname, # TODO remove when deprecating pre 1.0 support
'client_id': self.short_hostname,
'hostname': self.hostname,
'heartbeat_interval': self.heartbeat_interval,
'feature_negotiation': True,
'tls_v1': self.tls_v1,
'snappy': self.snappy,
'deflate': self.deflate,
'deflate_level': self.deflate_level,
'output_buffer_timeout': self.output_buffer_timeout,
'output_buffer_size': self.output_buffer_size,
'sample_rate': self.sample_rate,
'user_agent': self.user_agent
}
if self.msg_timeout:
identify_data['msg_timeout'] = self.msg_timeout
self.trigger(event.IDENTIFY, conn=self, data=identify_data)
self.on(event.RESPONSE, self._on_identify_response)
try:
self.send(protocol.identify(identify_data))
except Exception as e:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.SendError('failed to bootstrap connection', e),
)
def _on_identify_response(self, data, **kwargs):
self.off(event.RESPONSE, self._on_identify_response)
if data == b'OK':
            logger.warning('nsqd version does not support feature negotiation')
return self.trigger(event.READY, conn=self)
try:
data = json.loads(data.decode('utf-8'))
except ValueError:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.IntegrityError(
'failed to parse IDENTIFY response JSON from nsqd - %r' %
data
),
)
return
self.trigger(event.IDENTIFY_RESPONSE, conn=self, data=data)
if self.tls_v1 and data.get('tls_v1'):
self._features_to_enable.append('tls_v1')
if self.snappy and data.get('snappy'):
self._features_to_enable.append('snappy')
if self.deflate and data.get('deflate'):
self._features_to_enable.append('deflate')
if data.get('auth_required'):
self._authentication_required = True
if data.get('max_rdy_count'):
self.max_rdy_count = data.get('max_rdy_count')
else:
# for backwards compatibility when interacting with older nsqd
# (pre 0.2.20), default this to their hard-coded max
            logger.warning('setting max_rdy_count to default value of 2500')
self.max_rdy_count = 2500
self.on(event.RESPONSE, self._on_response_continue)
self._on_response_continue(conn=self, data=None)
def _on_response_continue(self, data, **kwargs):
if self._features_to_enable:
feature = self._features_to_enable.pop(0)
if feature == 'tls_v1':
self.upgrade_to_tls(self.tls_options)
elif feature == 'snappy':
self.upgrade_to_snappy()
elif feature == 'deflate':
self.upgrade_to_deflate()
            # the server will 'OK' after these connection upgrades triggering another response
return
self.off(event.RESPONSE, self._on_response_continue)
if self.auth_secret and self._authentication_required:
self.on(event.RESPONSE, self._on_auth_response)
self.trigger(event.AUTH, conn=self, data=self.auth_secret)
try:
self.send(protocol.auth(self.auth_secret))
except Exception as e:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.SendError('Error sending AUTH', e),
)
return
self.trigger(event.READY, conn=self)
def _on_auth_response(self, data, **kwargs):
try:
data = json.loads(data.decode('utf-8'))
except ValueError:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.IntegrityError(
'failed to parse AUTH response JSON from nsqd - %r' % data
),
)
return
self.off(event.RESPONSE, self._on_auth_response)
self.trigger(event.AUTH_RESPONSE, conn=self, data=data)
return self.trigger(event.READY, conn=self)
def _on_data(self, data, **kwargs):
self.last_recv_timestamp = time.time()
frame, data = protocol.unpack_response(data)
if frame == protocol.FRAME_TYPE_MESSAGE:
self.last_msg_timestamp = time.time()
self.in_flight += 1
message = protocol.decode_message(data)
message.on(event.FINISH, self._on_message_finish)
message.on(event.REQUEUE, self._on_message_requeue)
message.on(event.TOUCH, self._on_message_touch)
self.trigger(event.MESSAGE, conn=self, message=message)
elif frame == protocol.FRAME_TYPE_RESPONSE and data == b'_heartbeat_':
self.send(protocol.nop())
self.trigger(event.HEARTBEAT, conn=self)
elif frame == protocol.FRAME_TYPE_RESPONSE:
self.trigger(event.RESPONSE, conn=self, data=data)
elif frame == protocol.FRAME_TYPE_ERROR:
self.trigger(event.ERROR, conn=self, error=protocol.Error(data))
def _on_message_requeue(self, message, backoff=True, time_ms=-1, **kwargs):
if backoff:
self.trigger(event.BACKOFF, conn=self)
else:
self.trigger(event.CONTINUE, conn=self)
self.in_flight -= 1
try:
time_ms = self.requeue_delay * message.attempts * 1000 if time_ms < 0 else time_ms
self.send(protocol.requeue(message.id, time_ms))
except Exception as e:
self.close()
self.trigger(event.ERROR, conn=self, error=protocol.SendError(
'failed to send REQ %s @ %d' % (message.id, time_ms), e))
def _on_message_finish(self, message, **kwargs):
self.trigger(event.RESUME, conn=self)
self.in_flight -= 1
try:
self.send(protocol.finish(message.id))
except Exception as e:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.SendError('failed to send FIN %s' % message.id, e),
)
def _on_message_touch(self, message, **kwargs):
try:
self.send(protocol.touch(message.id))
except Exception as e:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.SendError('failed to send TOUCH %s' % message.id, e),
)
|
py | 1a2ec646960eff1682cce5b130079d8d55d99466 | """
An implementation of the basestring type for Python 3
Example use:
>>> s = b'abc'
>>> assert isinstance(s, basestring)
>>> from past.types import str as oldstr
>>> s2 = oldstr(b'abc')
>>> assert isinstance(s2, basestring)
"""
import sys
from past.utils import with_metaclass, PY2
if PY2:
str = unicode
ver = sys.version_info[:2]
class BaseBaseString(type):
def __instancecheck__(cls, instance):
return isinstance(instance, (bytes, str))
def __subclasshook__(cls, thing):
        # TODO: What should go here?
        # Note: the original `raise NotImplemented` would raise a TypeError at call time
        # (NotImplemented is not an exception); returning the sentinel is the idiomatic
        # way to defer the decision back to the normal subclass machinery.
        return NotImplemented
class basestring(with_metaclass(BaseBaseString)):
"""
A minimal backport of the Python 2 basestring type to Py3
"""
__all__ = ['basestring']
|
py | 1a2ec68c03005c038c6cf0a9646a77ef913b815a | import numpy as np
from openmdao.api import ExplicitComponent
from pycycle.constants import P_REF, R_UNIVERSAL_ENG, R_UNIVERSAL_SI, MIN_VALID_CONCENTRATION
class PropsCalcs(ExplicitComponent):
"""computes, S, H, Cp, Cv, gamma, given a converged equilibirum mixture"""
def initialize(self):
self.options.declare('thermo', desc='thermodynamic data object', recordable=False)
def setup(self):
thermo = self.options['thermo']
self.add_input('T', val=284., units="degK", desc="Temperature")
self.add_input('P', val=1., units='bar', desc="Pressure")
self.add_input('n', val=np.ones(thermo.num_prod),
desc="molar concentration of the mixtures, last element is the total molar concentration")
self.add_input('n_moles', val=1., desc="1/molar_mass for gaseous mixture")
ne1 = thermo.num_element + 1
self.add_input('result_T', val=np.ones(ne1),
desc="result of the linear solve for T", shape=ne1)
self.add_input('result_P', val=np.ones(ne1),
                       desc="result of the linear solve for P", shape=ne1)
self.add_output('h', val=1., units="cal/g", desc="enthalpy")
self.add_output('S', val=1., units="cal/(g*degK)", desc="entropy")
self.add_output('gamma', val=1.4, lower=1.0, upper=2.0, desc="ratio of specific heats")
self.add_output('Cp', val=1., units="cal/(g*degK)", desc="Specific heat at constant pressure")
self.add_output('Cv', val=1., units="cal/(g*degK)", desc="Specific heat at constant volume")
self.add_output('rho', val=0.0004, units="g/cm**3", desc="density")
self.add_output('R', val=1., units='(N*m)/(kg*degK)', desc='Specific gas constant')
# self.deriv_options['check_type'] = "cs"
# partial derivs setup
self.declare_partials('h', ['n', 'T'])
self.declare_partials('S', ['n', 'T', 'P'])
self.declare_partials('S', 'n_moles')
self.declare_partials('Cp', ['n', 'T', 'result_T'])
self.declare_partials('rho', ['T', 'P', 'n_moles'])
self.declare_partials('gamma', ['n', 'n_moles', 'T', 'result_T', 'result_P'])
self.declare_partials('Cv', ['n', 'n_moles', 'T', 'result_T', 'result_P'])
self.declare_partials('R', 'n_moles', val=R_UNIVERSAL_SI)
def compute(self, inputs, outputs):
thermo = self.options['thermo']
num_prod = thermo.num_prod
num_element = thermo.num_element
T = inputs['T']
P = inputs['P']
result_T = inputs['result_T']
nj = inputs['n'][:num_prod]
# nj[nj<0] = 1e-10 # ensure all concentrations stay non-zero
n_moles = inputs['n_moles']
self.dlnVqdlnP = dlnVqdlnP = -1 + inputs['result_P'][num_element]
self.dlnVqdlnT = dlnVqdlnT = 1 - result_T[num_element]
self.Cp0_T = Cp0_T = thermo.Cp0(T)
Cpf = np.sum(nj*Cp0_T)
self.H0_T = H0_T = thermo.H0(T)
self.S0_T = S0_T = thermo.S0(T)
self.nj_H0 = nj_H0 = nj*H0_T
# Cpe = 0
# for i in range(0, num_element):
# for j in range(0, num_prod):
# Cpe -= thermo.aij[i][j]*nj[j]*H0_T[j]*self.result_T[i]
# vectorization of this for loop for speed
Cpe = -np.sum(np.sum(thermo.aij*nj_H0, axis=1)*result_T[:num_element])
Cpe += np.sum(nj_H0*H0_T) # nj*H0_T**2
Cpe -= np.sum(nj_H0)*result_T[num_element]
outputs['h'] = np.sum(nj_H0)*R_UNIVERSAL_ENG*T
try:
val = (S0_T+np.log(n_moles/nj/(P/P_REF)))
except FloatingPointError:
P = 1e-5
val = (S0_T+np.log(n_moles/nj/(P/P_REF)))
outputs['S'] = R_UNIVERSAL_ENG * np.sum(nj*val)
outputs['Cp'] = Cp = (Cpe+Cpf)*R_UNIVERSAL_ENG
outputs['Cv'] = Cv = Cp + n_moles*R_UNIVERSAL_ENG*dlnVqdlnT**2/dlnVqdlnP
outputs['gamma'] = -1*Cp/Cv/dlnVqdlnP
outputs['rho'] = P/(n_moles*R_UNIVERSAL_SI*T)*100 # 1 Bar is 100 Kpa
outputs['R'] = R_UNIVERSAL_SI*n_moles #(m**3 * Pa)/(mol*degK)
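        # For reference, the outputs above follow the usual CEA-style mixture relations
        # (descriptive comment only):
        #   Cp    = R * (Cp_frozen + Cp_reaction)
        #   Cv    = Cp + n * R * (dlnV/dlnT)_P**2 / (dlnV/dlnP)_T
        #   gamma = -(Cp / Cv) / (dlnV/dlnP)_T
        #   rho   = P / (n * R * T) * 100      (bar -> kPa factor)
        # with n = n_moles and the dlnV terms taken from result_T / result_P.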
def compute_partials(self, inputs, J):
thermo = self.options['thermo']
num_prod = thermo.num_prod
num_element = thermo.num_element
T = inputs['T']
P = inputs['P']
nj = inputs['n']
n_moles = inputs['n_moles']
result_T = inputs['result_T']
result_T_last = result_T[num_element]
result_T_rest = result_T[:num_element]
dlnVqdlnP = -1 + inputs['result_P'][num_element]
dlnVqdlnT = 1 - result_T_last
Cp0_T = thermo.Cp0(T)
Cpf = np.sum(nj * Cp0_T)
H0_T = thermo.H0(T)
S0_T = thermo.S0(T)
nj_H0 = nj * H0_T
# Cpe = 0
# for i in range(0, num_element):
# for j in range(0, num_prod):
# Cpe -= thermo.aij[i][j]*nj[j]*H0_T[j]*self.result_T[i]
# vectorization of this for loop for speed
Cpe = -np.sum(np.sum(thermo.aij * nj_H0, axis=1) * result_T_rest)
Cpe += np.sum(nj_H0 * H0_T) # nj*H0_T**2
Cpe -= np.sum(nj_H0) * result_T_last
Cp = (Cpe + Cpf) * R_UNIVERSAL_ENG
Cv = Cp + n_moles * R_UNIVERSAL_ENG * dlnVqdlnT ** 2 / dlnVqdlnP
dH0_dT = thermo.H0_applyJ(T, 1.)
dS0_dT = thermo.S0_applyJ(T, 1.)
dCp0_dT = thermo.Cp0_applyJ(T, 1.)
sum_nj_R = n_moles*R_UNIVERSAL_SI
drho_dT = P/(sum_nj_R*T**2)*100
drho_dnmoles = -P/(n_moles**2*R_UNIVERSAL_SI*T)*100
dCpe_dT = 2*np.sum(nj*H0_T*dH0_dT)
# for i in range(num_element):
# self.dCpe_dT -= np.sum(aij[i]*nj*self.dH0_dT)*self.result_T[i]
dCpe_dT -= np.sum(np.sum(thermo.aij*nj*dH0_dT, axis=1)*result_T_rest)
dCpe_dT -= np.sum(nj*dH0_dT)*result_T_last
dCpf_dT = np.sum(nj*dCp0_dT)
J['h', 'T'] = R_UNIVERSAL_ENG*(np.sum(nj*dH0_dT)*T + np.sum(nj*H0_T))
J['h', 'n'] = R_UNIVERSAL_ENG*T*H0_T
J['S', 'n'] = R_UNIVERSAL_ENG*(S0_T + np.log(n_moles) - np.log(P/P_REF) - np.log(nj) - 1)
# zero out any derivs w.r.t trace species
_trace = np.where(nj <= MIN_VALID_CONCENTRATION+1e-20)
J['S', 'n'][0, _trace] = 0
J['S', 'T'] = R_UNIVERSAL_ENG*np.sum(nj*dS0_dT)
J['S', 'P'] = -R_UNIVERSAL_ENG*np.sum(nj/P)
J['S', 'n_moles'] = R_UNIVERSAL_ENG*np.sum(nj)/n_moles
J['rho', 'T'] = -P/(sum_nj_R*T**2)*100
J['rho', 'n_moles'] = -P/(n_moles**2*R_UNIVERSAL_SI*T)*100
J['rho', 'P'] = 1/(sum_nj_R*T)*100
dCp_dnj = R_UNIVERSAL_ENG*(Cp0_T + H0_T**2)
for j in range(num_prod):
for i in range(num_element):
dCp_dnj[j] -= R_UNIVERSAL_ENG*thermo.aij[i][j]*H0_T[j]*result_T[i]
dCp_dnj -= R_UNIVERSAL_ENG * H0_T * result_T_last
J['Cp', 'n'] = dCp_dnj
dCp_dresultT = np.zeros(num_element+1, dtype=inputs._data.dtype)
# for i in range(num_element):
# self.dCp_dresultT[i] = -R_UNIVERSAL_ENG*np.sum(aij[i]*nj_H0)
dCp_dresultT[:num_element] = -R_UNIVERSAL_ENG*np.sum(thermo.aij*nj_H0, axis=1)
dCp_dresultT[num_element] = - R_UNIVERSAL_ENG*np.sum(nj_H0)
J['Cp', 'result_T'] = dCp_dresultT
dCp_dT = (dCpe_dT + dCpf_dT)*R_UNIVERSAL_ENG
J['Cp', 'T'] = dCp_dT
J['Cv', 'n'] = dCp_dnj
dCv_dnmoles = R_UNIVERSAL_ENG*dlnVqdlnT**2/dlnVqdlnP
J['Cv', 'n_moles'] = dCv_dnmoles
J['Cv', 'T'] = dCp_dT
dCv_dresultP = np.zeros((1, num_element+1), dtype=inputs._data.dtype)
dCv_dresultP[0, -1] = -R_UNIVERSAL_ENG*n_moles*(dlnVqdlnT/dlnVqdlnP)**2
J['Cv', 'result_P'] = dCv_dresultP
dCv_dresultT = dCp_dresultT.copy()
dCv_dresultT[-1] -= n_moles*R_UNIVERSAL_ENG/dlnVqdlnP*(2*dlnVqdlnT)
dCv_dresultT_last = dCv_dresultT[-1]
J['Cv', 'result_T'] = dCv_dresultT
J['gamma', 'n'] = dCp_dnj*(Cp/Cv-1)/(dlnVqdlnP*Cv)
J['gamma', 'n_moles'] = Cp/dlnVqdlnP/Cv**2*dCv_dnmoles
J['gamma', 'T'] = dCp_dT/dlnVqdlnP/Cv*(Cp/Cv-1)
dgamma_dresultT = np.zeros((1, num_element+1), dtype=inputs._data.dtype)
dgamma_dresultT[0, :num_element] = 1/Cv/dlnVqdlnP*dCp_dresultT[:num_element]*(Cp/Cv-1)
dgamma_dresultT[0, -1] = (-dCp_dresultT[-1]/Cv+Cp/Cv**2*dCv_dresultT_last)/dlnVqdlnP
J['gamma', 'result_T'] = dgamma_dresultT
gamma_dresultP = np.zeros((1, num_element+1), dtype=inputs._data.dtype)
gamma_dresultP[0, num_element] = Cp/Cv/dlnVqdlnP*(dCv_dresultP[0, -1]/Cv + 1/dlnVqdlnP)
J['gamma', 'result_P'] = gamma_dresultP
if __name__ == "__main__":
from openmdao.api import Problem, Group, IndepVarComp
from pycycle.cea import species_data
thermo = species_data.Properties(species_data.co2_co_o2)
p = Problem()
model = p.model = Group()
indeps = model.add_subsystem('indeps', IndepVarComp(), promotes=['*'])
indeps.add_output('T', 2761.56784655, units='degK')
indeps.add_output('P', 1.034210, units='bar')
indeps.add_output('n', val=np.array([2.272e-02, 1.000e-10, 1.136e-02]))
indeps.add_output('n_moles', val=0.0340831628675)
indeps.add_output('result_T', val=np.array([-3.02990116, 1.95459777, -0.05024694]))
indeps.add_output('result_P', val=np.array([0.53047724, 0.48627081, -0.00437025]))
model.add_subsystem('calcs', PropsCalcs(thermo=thermo), promotes=['*'])
p.setup()
p.run_model()
print("outputs")
print('h', p['h'])
print('S', p['S'])
print('gamma', p['gamma'])
print('Cp', p['Cp'])
print('Cv', p['Cv'])
print('rho', p['rho'])
print()
print()
print('############################################')
p.model.run_linearize()
jac = p.model.get_subsystem('calcs').jacobian._subjacs
for pair in jac:
print(pair)
print(jac[pair])
        print()
|
py | 1a2ec6b2a0c25e3d2a554547acbe2827f6ab03ee | import numpy as np
from PIL import Image
from tqdm import tqdm
import torch
from torch import nn, optim
from torch.autograd import Variable, grad
from torchvision import utils
from model import Generator, Discriminator
from datetime import datetime
import random
import copy
import os
import config
import utils
import data
import evaluate
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
from torch.nn import functional as F
args = config.get_config()
writer = None
def batch_size(reso):
if args.gpu_count == 1:
save_memory = False
if not save_memory:
batch_table = {4:128, 8:128, 16:128, 32:64, 64:32, 128:16, 256:8, 512:4, 1024:1}
else:
batch_table = {4:128, 8:128, 16:128, 32:32, 64:16, 128:4, 256:2, 512:2, 1024:1}
elif args.gpu_count == 2:
batch_table = {4:256, 8:256, 16:256, 32:128, 64:64, 128:32, 256:16, 512:8, 1024:2}
elif args.gpu_count == 4:
batch_table = {4:512, 8:256, 16:128, 32:64, 64:32, 128:32, 256:32, 512:16, 1024:4}
elif args.gpu_count == 8:
batch_table = {4:512, 8:512, 16:512, 32:256, 64:256, 128:128, 256:64, 512:32, 1024:8}
else:
assert(False)
return batch_table[reso]
def batch_size_by_phase(phase):
return batch_size(4 * 2 ** phase)
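# Illustration of the two helpers above (single-GPU, save_memory=False table):
#   phase 0 -> reso   4 -> batch 128
#   phase 3 -> reso  32 -> batch  64
#   phase 6 -> reso 256 -> batch   8
# i.e. resolution grows as 4 * 2**phase while the per-step batch shrinks to fit memory.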
class Session:
def __init__(self):
# Note: 4 requirements for sampling from pre-existing models:
# 1) Ensure you save and load both multi-gpu versions (DataParallel) or both not.
# 2) Ensure you set the same phase value as the pre-existing model and that your local and global alpha=1.0 are set
# 3) Sample from the g_running, not from the latest generator
# 4) You may need to warm up the g_running by running evaluate.reconstruction_dryrun() first
self.alpha = -1
self.sample_i = min(args.start_iteration, 0)
self.phase = args.start_phase
self.generator = nn.DataParallel( Generator(args.nz+1, args.n_label).cuda() )
self.g_running = nn.DataParallel( Generator(args.nz+1, args.n_label).cuda() )
self.encoder = nn.DataParallel( Discriminator(nz = args.nz+1, n_label = args.n_label, binary_predictor = args.train_mode == config.MODE_GAN).cuda() )
print("Using ", torch.cuda.device_count(), " GPUs!")
self.reset_opt()
print('Session created.')
def reset_opt(self):
self.optimizerG = optim.Adam(self.generator.parameters(), args.lr, betas=(0.0, 0.99))
self.optimizerD = optim.Adam(self.encoder.parameters(), args.lr, betas=(0.0, 0.99)) # includes all the encoder parameters...
def save_all(self, path):
torch.save({'G_state_dict': self.generator.state_dict(),
'D_state_dict': self.encoder.state_dict(),
'G_running_state_dict': self.g_running.state_dict(),
'optimizerD': self.optimizerD.state_dict(),
'optimizerG': self.optimizerG.state_dict(),
'iteration': self.sample_i,
'phase': self.phase,
'alpha': self.alpha},
path)
def load(self, path):
checkpoint = torch.load(path)
self.sample_i = int(checkpoint['iteration'])
self.generator.load_state_dict(checkpoint['G_state_dict'])
self.g_running.load_state_dict(checkpoint['G_running_state_dict'])
self.encoder.load_state_dict(checkpoint['D_state_dict'])
if args.reset_optimizers <= 0:
self.optimizerD.load_state_dict(checkpoint['optimizerD'])
self.optimizerG.load_state_dict(checkpoint['optimizerG'])
print("Reloaded old optimizers")
else:
print("Despite loading the state, we reset the optimizers.")
self.alpha = checkpoint['alpha']
self.phase = int(checkpoint['phase'])
        if args.start_phase > 0:  # If the start phase has been manually set, try to actually use it (e.g. when you have trained 64x64 for extra rounds and then turn the model over to 128x128)
self.phase = min(args.start_phase, self.phase)
print("Use start phase: {}".format(self.phase))
if self.phase > args.max_phase:
print('Warning! Loaded model claimed phase {} but max_phase={}'.format(self.phase, args.max_phase))
self.phase = args.max_phase
def create(self):
if args.start_iteration <= 0:
args.start_iteration = 1
if args.no_progression:
self.sample_i = args.start_iteration = int( (args.max_phase + 0.5) * args.images_per_stage ) # Start after the fade-in stage of the last iteration
args.force_alpha = 1.0
print("Progressive growth disabled. Setting start step = {} and alpha = {}".format(args.start_iteration, args.force_alpha))
else:
reload_from = '{}/checkpoint/{}_state'.format(args.save_dir, str(args.start_iteration).zfill(6)) #e.g. '604000' #'600000' #latest'
print(reload_from)
if os.path.exists(reload_from):
self.load(reload_from)
print("Loaded {}".format(reload_from))
print("Iteration asked {} and got {}".format(args.start_iteration, self.sample_i))
if args.testonly:
self.generator = copy.deepcopy(self.g_running)
else:
assert(not args.testonly)
self.sample_i = args.start_iteration
print('Start from iteration {}'.format(self.sample_i))
self.g_running.train(False)
if args.force_alpha >= 0.0:
self.alpha = args.force_alpha
accumulate(self.g_running, self.generator, 0)
def setup():
utils.make_dirs()
if not args.testonly:
config.log_args(args)
if args.use_TB:
from dateutil import tz
from tensorboardX import SummaryWriter
dt = datetime.now(tz.gettz('Europe/Helsinki')).strftime(r"%y%m%d_%H%M")
global writer
writer = SummaryWriter("{}/{}/{}".format(args.summary_dir, args.save_dir, dt))
random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
torch.cuda.manual_seed_all(args.manual_seed)
def accumulate(model1, model2, decay=0.999):
par1 = dict(model1.named_parameters())
par2 = dict(model2.named_parameters())
for k in par1.keys():
par1[k].data.mul_(decay).add_(1 - decay, par2[k].data)
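# accumulate() maintains an exponential moving average of the generator weights:
# par1 <- decay * par1 + (1 - decay) * par2, so with decay=0.999 each step contributes
# 0.1% and g_running roughly averages the last ~1000 generator updates; the
# accumulate(self.g_running, self.generator, 0) call at startup simply copies the
# weights, since decay=0 discards the old values entirely.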
def get_grad_penalty(discriminator, real_image, fake_image, step, alpha):
""" Used in WGAN-GP version only. """
eps = torch.rand(batch_size_by_phase(step), 1, 1, 1).cuda()
if eps.size(0) != real_image.size(0) or eps.size(0) != fake_image.size(0):
# If end-of-batch situation, we restrict other vectors to matcht the number of training images available.
eps = eps[:real_image.size(0)]
fake_image = fake_image[:real_image.size(0)]
x_hat = eps * real_image.data + (1 - eps) * fake_image.data
x_hat = Variable(x_hat, requires_grad=True)
if args.train_mode == config.MODE_GAN: # Regular GAN mode
hat_predict, _ = discriminator(x_hat, step, alpha, args.use_ALQ)
grad_x_hat = grad(
outputs=hat_predict.sum(), inputs=x_hat, create_graph=True)[0]
else:
hat_z = discriminator(x_hat, step, alpha, args.use_ALQ)
# KL_fake: \Delta( e(g(Z)) , Z ) -> max_e
KL_maximizer = KLN01Loss(direction=args.KL, minimize=False)
KL_fake = KL_maximizer(hat_z) * args.fake_D_KL_scale
grad_x_hat = grad(
outputs=KL_fake.sum(), inputs=x_hat, create_graph=True)[0]
# Push the gradients of the interpolated samples towards 1
grad_penalty = ((grad_x_hat.view(grad_x_hat.size(0), -1)
.norm(2, dim=1) - 1)**2).mean()
grad_penalty = 10 * grad_penalty
return grad_penalty
def D_prediction_of_G_output(generator, encoder, step, alpha):
# To use labels, enable here and elsewhere:
#label = Variable(torch.ones(batch_size_by_phase(step), args.n_label)).cuda()
# label = Variable(
# torch.multinomial(
# torch.ones(args.n_label), args.batch_size, replacement=True)).cuda()
myz = Variable(torch.randn(batch_size_by_phase(step), args.nz)).cuda(non_blocking=(args.gpu_count>1))
myz = utils.normalize(myz)
myz, label = utils.split_labels_out_of_latent(myz)
fake_image = generator(myz, label, step, alpha)
fake_predict, _ = encoder(fake_image, step, alpha, args.use_ALQ)
loss = fake_predict.mean()
return loss, fake_image
class KLN01Loss(torch.nn.Module): #Adapted from https://github.com/DmitryUlyanov/AGE
def __init__(self, direction, minimize):
super(KLN01Loss, self).__init__()
self.minimize = minimize
assert direction in ['pq', 'qp'], 'direction?'
self.direction = direction
def forward(self, samples):
assert samples.nelement() == samples.size(1) * samples.size(0), '?'
samples = samples.view(samples.size(0), -1)
self.samples_var = utils.var(samples)
self.samples_mean = samples.mean(0)
samples_mean = self.samples_mean
samples_var = self.samples_var
if self.direction == 'pq':
t1 = (1 + samples_mean.pow(2)) / (2 * samples_var.pow(2))
t2 = samples_var.log()
KL = (t1 + t2 - 0.5).mean()
else:
# In the AGE implementation, there is samples_var^2 instead of samples_var^1
t1 = (samples_var + samples_mean.pow(2)) / 2
# In the AGE implementation, this did not have the 0.5 scaling factor:
t2 = -0.5*samples_var.log()
KL = (t1 + t2 - 0.5).mean()
if not self.minimize:
KL *= -1
return KL
def train(generator, encoder, g_running, train_data_loader, test_data_loader, session, total_steps, train_mode):
pbar = tqdm(initial=session.sample_i, total = total_steps)
benchmarking = False
match_x = args.match_x
generatedImagePool = None
refresh_dataset = True
refresh_imagePool = True
# After the Loading stage, we cycle through successive Fade-in and Stabilization stages
batch_count = 0
reset_optimizers_on_phase_start = False
# TODO Unhack this (only affects the episode count statistics anyway):
if args.data != 'celebaHQ':
epoch_len = len(train_data_loader(1,4).dataset)
else:
epoch_len = train_data_loader._len['data4x4']
if args.step_offset != 0:
if args.step_offset == -1:
args.step_offset = session.sample_i
print("Step offset is {}".format(args.step_offset))
session.phase += args.phase_offset
session.alpha = 0.0
while session.sample_i < total_steps:
####################### Phase Maintenance #######################
steps_in_previous_phases = max(session.phase * args.images_per_stage, args.step_offset)
sample_i_current_stage = session.sample_i - steps_in_previous_phases
# If we can move to the next phase
if sample_i_current_stage >= args.images_per_stage:
if session.phase < args.max_phase: # If any phases left
iteration_levels = int(sample_i_current_stage / args.images_per_stage)
session.phase += iteration_levels
sample_i_current_stage -= iteration_levels * args.images_per_stage
match_x = args.match_x # Reset to non-matching phase
print("iteration B alpha={} phase {} will be reduced to 1 and [max]".format(sample_i_current_stage, session.phase))
refresh_dataset = True
refresh_imagePool = True # Reset the pool to avoid images of 2 different resolutions in the pool
if reset_optimizers_on_phase_start:
utils.requires_grad(generator)
utils.requires_grad(encoder)
generator.zero_grad()
encoder.zero_grad()
session.reset_opt()
print("Optimizers have been reset.")
reso = 4 * 2 ** session.phase
# If we can switch from fade-training to stable-training
if sample_i_current_stage >= args.images_per_stage/2:
if session.alpha < 1.0:
refresh_dataset = True # refresh dataset generator since no longer have to fade
match_x = args.match_x * args.matching_phase_x
else:
match_x = args.match_x
session.alpha = min(1, sample_i_current_stage * 2.0 / args.images_per_stage) # For 100k, it was 0.00002 = 2.0 / args.images_per_stage
if refresh_dataset:
train_dataset = data.Utils.sample_data2(train_data_loader, batch_size(reso), reso, session)
refresh_dataset = False
print("Refreshed dataset. Alpha={} and iteration={}".format(session.alpha, sample_i_current_stage))
if refresh_imagePool:
imagePoolSize = 200 if reso < 256 else 100
generatedImagePool = utils.ImagePool(imagePoolSize) #Reset the pool to avoid images of 2 different resolutions in the pool
refresh_imagePool = False
print('Image pool created with size {} because reso is {}'.format(imagePoolSize, reso))
####################### Training init #######################
z = Variable( torch.FloatTensor(batch_size(reso), args.nz, 1, 1) ).cuda(non_blocking=(args.gpu_count>1))
KL_minimizer = KLN01Loss(direction=args.KL, minimize=True)
KL_maximizer = KLN01Loss(direction=args.KL, minimize=False)
stats = {}
one = torch.FloatTensor([1]).cuda(non_blocking=(args.gpu_count>1))
try:
real_image, _ = next(train_dataset)
except (OSError, StopIteration):
train_dataset = data.Utils.sample_data2(train_data_loader, batch_size(reso), reso, session)
real_image, _ = next(train_dataset)
####################### DISCRIMINATOR / ENCODER ###########################
utils.switch_grad_updates_to_first_of(encoder, generator)
encoder.zero_grad()
x = Variable(real_image).cuda(non_blocking=(args.gpu_count>1))
kls = ""
if train_mode == config.MODE_GAN:
# Discriminator for real samples
real_predict, _ = encoder(x, session.phase, session.alpha, args.use_ALQ)
real_predict = real_predict.mean() \
- 0.001 * (real_predict ** 2).mean()
real_predict.backward(-one) # Towards 1
# (1) Generator => D. Identical to (2) see below
fake_predict, fake_image = D_prediction_of_G_output(generator, encoder, session.phase, session.alpha)
fake_predict.backward(one)
# Grad penalty
grad_penalty = get_grad_penalty(encoder, x, fake_image, session.phase, session.alpha)
grad_penalty.backward()
elif train_mode == config.MODE_CYCLIC:
e_losses = []
# e(X)
real_z = encoder(x, session.phase, session.alpha, args.use_ALQ)
if args.use_real_x_KL:
# KL_real: - \Delta( e(X) , Z ) -> max_e
KL_real = KL_minimizer(real_z) * args.real_x_KL_scale
e_losses.append(KL_real)
stats['real_mean'] = KL_minimizer.samples_mean.data.mean()
stats['real_var'] = KL_minimizer.samples_var.data.mean()
stats['KL_real'] = KL_real.data.item()
kls = "{0:.3f}".format(stats['KL_real'])
# The final entries are the label. Normal case, just 1. Extract it/them, and make it [b x 1]:
real_z, label = utils.split_labels_out_of_latent(real_z)
recon_x = generator(real_z, label, session.phase, session.alpha)
if args.use_loss_x_reco:
# match_x: E_x||g(e(x)) - x|| -> min_e
err = utils.mismatch(recon_x, x, args.match_x_metric) * match_x
e_losses.append(err)
stats['x_reconstruction_error'] = err.item()
args.use_wpgan_grad_penalty = False
grad_penalty = 0.0
if args.use_loss_fake_D_KL:
# TODO: The following codeblock is essentially the same as the KL_minimizer part on G side. Unify
utils.populate_z(z, args.nz+args.n_label, args.noise, batch_size(reso))
z = torch.squeeze(z)
z, label = utils.split_labels_out_of_latent(z)
fake = generator(z, label, session.phase, session.alpha).detach()
if session.alpha >= 1.0:
fake = generatedImagePool.query(fake.data)
# e(g(Z))
egz = encoder(fake, session.phase, session.alpha, args.use_ALQ)
# KL_fake: \Delta( e(g(Z)) , Z ) -> max_e
KL_fake = KL_maximizer(egz) * args.fake_D_KL_scale
e_losses.append(KL_fake)
stats['fake_mean'] = KL_maximizer.samples_mean.data.mean()
stats['fake_var'] = KL_maximizer.samples_var.data.mean()
stats['KL_fake'] = -KL_fake.item()
kls = "{0}/{1:.3f}".format(kls, stats['KL_fake'])
if args.use_wpgan_grad_penalty:
grad_penalty = get_grad_penalty(encoder, x, fake, session.phase, session.alpha)
# Update e
if len(e_losses) > 0:
e_loss = sum(e_losses)
stats['E_loss'] = np.float32(e_loss.cpu().detach().numpy())
e_loss.backward()
if args.use_wpgan_grad_penalty:
grad_penalty.backward()
stats['Grad_penalty'] = grad_penalty.data
#book-keeping
disc_loss_val = e_loss.item()
session.optimizerD.step()
torch.cuda.empty_cache()
######################## GENERATOR / DECODER #############################
if (batch_count + 1) % args.n_critic == 0:
utils.switch_grad_updates_to_first_of(generator, encoder)
for _ in range(args.n_generator):
generator.zero_grad()
g_losses = []
if train_mode == config.MODE_GAN:
fake_predict, _ = D_prediction_of_G_output(generator, encoder, session.phase, session.alpha)
loss = -fake_predict
g_losses.append(loss)
elif train_mode == config.MODE_CYCLIC: #TODO We push the z variable around here like idiots
def KL_of_encoded_G_output(generator, z):
utils.populate_z(z, args.nz+args.n_label, args.noise, batch_size(reso))
z, label = utils.split_labels_out_of_latent(z)
fake = generator(z, label, session.phase, session.alpha)
egz = encoder(fake, session.phase, session.alpha, args.use_ALQ)
# KL_fake: \Delta( e(g(Z)) , Z ) -> min_g
return egz, label, KL_minimizer(egz) * args.fake_G_KL_scale, z
egz, label, kl, z = KL_of_encoded_G_output(generator, z)
if args.use_loss_KL_z:
g_losses.append(kl) # G minimizes this KL
stats['KL(Phi(G))'] = kl.item()
kls = "{0}/{1:.3f}".format(kls, stats['KL(Phi(G))'])
if args.use_loss_z_reco:
z = torch.cat((z, label), 1)
z_diff = utils.mismatch(egz, z, args.match_z_metric) * args.match_z # G tries to make the original z and encoded z match
g_losses.append(z_diff)
if len(g_losses) > 0:
loss = sum(g_losses)
stats['G_loss'] = np.float32(loss.cpu().detach().numpy())
loss.backward()
# Book-keeping only:
gen_loss_val = loss.item()
session.optimizerG.step()
torch.cuda.empty_cache()
if train_mode == config.MODE_CYCLIC:
if args.use_loss_z_reco:
stats['z_reconstruction_error'] = z_diff.item()
accumulate(g_running, generator)
del z, x, one, real_image, real_z, KL_real, label, recon_x, fake, egz, KL_fake, kl, z_diff
if train_mode == config.MODE_CYCLIC:
if args.use_TB:
for key,val in stats.items():
writer.add_scalar(key, val, session.sample_i)
elif batch_count % 100 == 0:
print(stats)
if args.use_TB:
writer.add_scalar('LOD', session.phase + session.alpha, session.sample_i)
######################## Statistics ########################
b = batch_size_by_phase(session.phase)
zr, xr = (stats['z_reconstruction_error'], stats['x_reconstruction_error']) if train_mode == config.MODE_CYCLIC else (0.0, 0.0)
e = (session.sample_i / float(epoch_len))
pbar.set_description(
('{0}; it: {1}; phase: {2}; b: {3:.1f}; Alpha: {4:.3f}; Reso: {5}; E: {6:.2f}; KL(real/fake/fakeG): {7}; z-reco: {8:.2f}; x-reco {9:.3f}; real_var {10:.4f}').format(batch_count+1, session.sample_i+1, session.phase, b, session.alpha, reso, e, kls, zr, xr, stats['real_var'])
)
#(f'{i + 1}; it: {iteration+1}; b: {b:.1f}; G: {gen_loss_val:.5f}; D: {disc_loss_val:.5f};'
# f' Grad: {grad_loss_val:.5f}; Alpha: {alpha:.3f}; Reso: {reso}; S-mean: {real_mean:.3f}; KL(real/fake/fakeG): {kls}; z-reco: {zr:.2f}'))
pbar.update(batch_size(reso))
session.sample_i += batch_size(reso) # if not benchmarking else 100
batch_count += 1
######################## Saving ########################
if batch_count % args.checkpoint_cycle == 0:
for postfix in {'latest', str(session.sample_i).zfill(6)}:
session.save_all('{}/{}_state'.format(args.checkpoint_dir, postfix))
print("Checkpointed to {}".format(session.sample_i))
######################## Tests ########################
try:
evaluate.tests_run(g_running, encoder, test_data_loader, session, writer,
reconstruction = (batch_count % 800 == 0),
interpolation = (batch_count % 800 == 0),
collated_sampling = (batch_count % 800 == 0),
individual_sampling = (batch_count % (args.images_per_stage/batch_size(reso)/4) == 0)
)
except (OSError, StopIteration):
print("Skipped periodic tests due to an exception.")
pbar.close()
def main():
setup()
session = Session()
session.create()
print('PyTorch {}'.format(torch.__version__))
if args.train_path:
train_data_loader = data.get_loader(args.data, args.train_path)
else:
train_data_loader = None
if args.test_path:
test_data_loader = data.get_loader(args.data, args.test_path)
elif args.aux_inpath:
test_data_loader = data.get_loader(args.data, args.aux_inpath)
else:
test_data_loader = None
# 4 modes: Train (with data/train), test (with data/test), aux-test (with custom aux_inpath), dump-training-set
if args.run_mode == config.RUN_TRAIN:
train(session.generator, session.encoder, session.g_running, train_data_loader, test_data_loader,
session = session,
total_steps = args.total_kimg * 1000,
train_mode = args.train_mode)
elif args.run_mode == config.RUN_TEST:
if args.reconstructions_N > 0 or args.interpolate_N > 0:
evaluate.Utils.reconstruction_dryrun(session.generator, session.encoder, test_data_loader, session=session)
evaluate.tests_run(session.generator, session.encoder, test_data_loader, session=session, writer=writer)
elif args.run_mode == config.RUN_DUMP:
session.phase = args.start_phase
data.dump_training_set(train_data_loader, args.dump_trainingset_N, args.dump_trainingset_dir, session)
if __name__ == '__main__':
main()
|
py | 1a2ec6d541179c18d0ecd204d4a93792951a8923 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import copy
import itertools
import json
import logging
import math
import os
import pkgutil
import socket
import traceback
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple
from urllib.parse import quote, unquote
import lazy_object_proxy
import markdown
import sqlalchemy as sqla
from flask import (
Markup, Response, escape, flash, jsonify, make_response, redirect, render_template, request,
session as flask_session, url_for,
)
from flask_appbuilder import BaseView, ModelView, expose, has_access, permission_name
from flask_appbuilder.actions import action
from flask_appbuilder.models.sqla.filters import BaseFilter
from flask_babel import lazy_gettext
from jinja2.utils import htmlsafe_json_dumps # type: ignore
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
from sqlalchemy import and_, desc, func, or_, union_all
from sqlalchemy.orm import joinedload
from wtforms import SelectField, validators
import airflow
from airflow import models, settings
from airflow._vendor import nvd3
from airflow.api.common.experimental.mark_tasks import (
set_dag_run_state_to_failed, set_dag_run_state_to_success,
)
from airflow.configuration import AIRFLOW_CONFIG, conf
from airflow.exceptions import AirflowException
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.base_job import BaseJob
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.models import Connection, DagModel, DagTag, Log, SlaMiss, TaskFail, XCom, errors
from airflow.models.dagcode import DagCode
from airflow.models.dagrun import DagRun, DagRunType
from airflow.settings import STORE_SERIALIZED_DAGS
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import RUNNING_DEPS, SCHEDULER_QUEUED_DEPS
from airflow.utils import timezone
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.utils.helpers import alchemy_to_dict, render_log_filename
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from airflow.www import utils as wwwutils
from airflow.www.app import appbuilder
from airflow.www.decorators import action_logging, gzipped, has_dag_access
from airflow.www.forms import (
ConnectionForm, DagRunForm, DateTimeForm, DateTimeWithNumRunsForm, DateTimeWithNumRunsWithDagRunsForm,
)
from airflow.www.widgets import AirflowModelListWidget
PAGE_SIZE = conf.getint('webserver', 'page_size')
FILTER_TAGS_COOKIE = 'tags_filter'
FILTER_STATUS_COOKIE = 'dag_status_filter'
if os.environ.get('SKIP_DAGS_PARSING') != 'True':
dagbag = models.DagBag(settings.DAGS_FOLDER, store_serialized_dags=STORE_SERIALIZED_DAGS)
else:
dagbag = models.DagBag(os.devnull, include_examples=False)
def get_date_time_num_runs_dag_runs_form_data(request, session, dag):
dttm = request.args.get('execution_date')
if dttm:
dttm = timezone.parse(dttm)
else:
dttm = dag.get_latest_execution_date(session=session) or timezone.utcnow()
base_date = request.args.get('base_date')
if base_date:
base_date = timezone.parse(base_date)
else:
        # The DateTimeField widget truncates milliseconds and would lose
        # the first dag run. Round to next second.
base_date = (dttm + timedelta(seconds=1)).replace(microsecond=0)
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
DR = models.DagRun
drs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date)
.order_by(desc(DR.execution_date))
.limit(num_runs)
.all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
# Happens if base_date was changed and the selected dag run is not in result
if not dr_state and drs:
dr = drs[0]
dttm = dr.execution_date
dr_state = dr.state
return {
'dttm': dttm,
'base_date': base_date,
'num_runs': num_runs,
'execution_date': dttm.isoformat(),
'dr_choices': dr_choices,
'dr_state': dr_state,
}
######################################################################################
# Error handlers
######################################################################################
def circles(error):
return render_template(
'airflow/circles.html', hostname=socket.getfqdn() if conf.getboolean(
'webserver',
'EXPOSE_HOSTNAME',
fallback=True) else 'redact'), 404
def show_traceback(error):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.getfqdn() if conf.getboolean(
'webserver',
'EXPOSE_HOSTNAME',
fallback=True) else 'redact',
nukular=ascii_.nukular,
info=traceback.format_exc() if conf.getboolean(
'webserver',
'EXPOSE_STACKTRACE',
fallback=True) else 'Error! Please contact server admin'), 500
######################################################################################
# BaseViews
######################################################################################
class AirflowBaseView(BaseView):
from airflow import macros
route_base = ''
# Make our macros available to our UI templates too.
extra_args = {
'macros': macros,
}
def render_template(self, *args, **kwargs):
return super().render_template(
*args,
# Cache this at most once per request, not for the lifetime of the view instance
scheduler_job=lazy_object_proxy.Proxy(SchedulerJob.most_recent_job),
**kwargs
)
class Airflow(AirflowBaseView):
@expose('/health')
def health(self):
"""
An endpoint helping check the health status of the Airflow instance,
including metadatabase and scheduler.
"""
payload = {
'metadatabase': {'status': 'unhealthy'}
}
latest_scheduler_heartbeat = None
scheduler_status = 'unhealthy'
payload['metadatabase'] = {'status': 'healthy'}
try:
scheduler_job = SchedulerJob.most_recent_job()
if scheduler_job:
latest_scheduler_heartbeat = scheduler_job.latest_heartbeat.isoformat()
if scheduler_job.is_alive():
scheduler_status = 'healthy'
except Exception:
payload['metadatabase']['status'] = 'unhealthy'
payload['scheduler'] = {'status': scheduler_status,
'latest_scheduler_heartbeat': latest_scheduler_heartbeat}
return wwwutils.json_response(payload)
@expose('/home')
@has_access
def index(self):
hide_paused_dags_by_default = conf.getboolean('webserver',
'hide_paused_dags_by_default')
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
def get_int_arg(value, default=0):
try:
return int(value)
except ValueError:
return default
arg_current_page = request.args.get('page', '0')
arg_search_query = request.args.get('search', None)
arg_tags_filter = request.args.getlist('tags', None)
arg_status_filter = request.args.get('status', None)
if request.args.get('reset_tags') is not None:
flask_session[FILTER_TAGS_COOKIE] = None
arg_tags_filter = None
else:
cookie_val = flask_session.get(FILTER_TAGS_COOKIE)
if arg_tags_filter:
flask_session[FILTER_TAGS_COOKIE] = ','.join(arg_tags_filter)
elif cookie_val:
arg_tags_filter = cookie_val.split(',')
if arg_status_filter is None:
cookie_val = flask_session.get(FILTER_STATUS_COOKIE)
if cookie_val:
arg_status_filter = cookie_val
else:
arg_status_filter = 'active' if hide_paused_dags_by_default else 'all'
flask_session[FILTER_STATUS_COOKIE] = arg_status_filter
else:
status = arg_status_filter.strip().lower()
flask_session[FILTER_STATUS_COOKIE] = status
arg_status_filter = status
dags_per_page = PAGE_SIZE
current_page = get_int_arg(arg_current_page, default=0)
start = current_page * dags_per_page
end = start + dags_per_page
# Get all the dag id the user could access
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
with create_session() as session:
# read orm_dags from the db
dags_query = session.query(DagModel).filter(
~DagModel.is_subdag, DagModel.is_active
)
if arg_search_query:
dags_query = dags_query.filter(
DagModel.dag_id.ilike('%' + arg_search_query + '%') |
DagModel.owners.ilike('%' + arg_search_query + '%')
)
if arg_tags_filter:
dags_query = dags_query.filter(DagModel.tags.any(DagTag.name.in_(arg_tags_filter)))
if 'all_dags' not in filter_dag_ids:
dags_query = dags_query.filter(DagModel.dag_id.in_(filter_dag_ids))
all_dags = dags_query
active_dags = dags_query.filter(~DagModel.is_paused)
paused_dags = dags_query.filter(DagModel.is_paused)
is_paused_count = dict(
all_dags.with_entities(DagModel.is_paused, func.count(DagModel.dag_id))
.group_by(DagModel.is_paused).all()
)
status_count_active = is_paused_count.get(False, 0)
status_count_paused = is_paused_count.get(True, 0)
all_dags_count = status_count_active + status_count_paused
if arg_status_filter == 'active':
current_dags = active_dags
num_of_all_dags = status_count_active
elif arg_status_filter == 'paused':
current_dags = paused_dags
num_of_all_dags = status_count_paused
else:
current_dags = all_dags
num_of_all_dags = all_dags_count
dags = current_dags.order_by(DagModel.dag_id).options(
joinedload(DagModel.tags)).offset(start).limit(dags_per_page).all()
dagtags = session.query(DagTag.name).distinct(DagTag.name).all()
tags = [
{"name": name, "selected": bool(arg_tags_filter and name in arg_tags_filter)}
for name, in dagtags
]
import_errors = session.query(errors.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"dag_import_error")
from airflow.plugins_manager import import_errors as plugin_import_errors
for filename, stacktrace in plugin_import_errors.items():
flash(
"Broken plugin: [{filename}] {stacktrace}".format(
stacktrace=stacktrace,
filename=filename),
"error")
num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
state_color_mapping = State.state_color.copy()
state_color_mapping["null"] = state_color_mapping.pop(None)
return self.render_template(
'airflow/dags.html',
dags=dags,
current_page=current_page,
search_query=arg_search_query if arg_search_query else '',
page_size=dags_per_page,
num_of_pages=num_of_pages,
num_dag_from=min(start + 1, num_of_all_dags),
num_dag_to=min(end, num_of_all_dags),
num_of_all_dags=num_of_all_dags,
paging=wwwutils.generate_pages(current_page,
num_of_pages,
search=escape(arg_search_query) if arg_search_query else None,
status=arg_status_filter if arg_status_filter else None),
num_runs=num_runs,
tags=tags,
state_color=state_color_mapping,
status_filter=arg_status_filter,
status_count_all=all_dags_count,
status_count_active=status_count_active,
status_count_paused=status_count_paused)
@expose('/dag_stats', methods=['POST'])
@has_access
@provide_session
def dag_stats(self, session=None):
dr = models.DagRun
allowed_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if 'all_dags' in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
dag_state_stats = session.query(dr.dag_id, dr.state, sqla.func.count(dr.state))\
.group_by(dr.dag_id, dr.state)
# Filter by post parameters
selected_dag_ids = {
unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id
}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response({})
payload = {}
dag_state_stats = dag_state_stats.filter(dr.dag_id.in_(filter_dag_ids))
data = {}
for dag_id, state, count in dag_state_stats:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
for dag_id in filter_dag_ids:
payload[dag_id] = []
for state in State.dag_states:
count = data.get(dag_id, {}).get(state, 0)
payload[dag_id].append({
'state': state,
'count': count
})
return wwwutils.json_response(payload)
@expose('/task_stats', methods=['POST'])
@has_access
@provide_session
def task_stats(self, session=None):
TI = models.TaskInstance
DagRun = models.DagRun
Dag = models.DagModel
allowed_dag_ids = set(appbuilder.sm.get_accessible_dag_ids())
if not allowed_dag_ids:
return wwwutils.json_response({})
if 'all_dags' in allowed_dag_ids:
allowed_dag_ids = {dag_id for dag_id, in session.query(models.DagModel.dag_id)}
# Filter by post parameters
selected_dag_ids = {
unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id
}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING, Dag.is_active)
)
if selected_dag_ids:
RunningDagRun = RunningDagRun.filter(DagRun.dag_id.in_(filter_dag_ids))
RunningDagRun = RunningDagRun.subquery('running_dag_run')
# Select all task_instances from active dag_runs.
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun,
and_(RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
)
if selected_dag_ids:
RunningTI = RunningTI.filter(TI.dag_id.in_(filter_dag_ids))
if conf.getboolean('webserver', 'SHOW_RECENT_STATS_FOR_COMPLETED_RUNS', fallback=True):
LastDagRun = (
session.query(
DagRun.dag_id,
sqla.func.max(DagRun.execution_date).label('execution_date')
)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING, Dag.is_active)
.group_by(DagRun.dag_id)
)
if selected_dag_ids:
LastDagRun = LastDagRun.filter(DagRun.dag_id.in_(filter_dag_ids))
LastDagRun = LastDagRun.subquery('last_dag_run')
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun,
and_(LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
)
if selected_dag_ids:
LastTI = LastTI.filter(TI.dag_id.in_(filter_dag_ids))
FinalTI = union_all(LastTI, RunningTI).alias('final_ti')
else:
FinalTI = RunningTI.subquery('final_ti')
qry = (
session.query(FinalTI.c.dag_id, FinalTI.c.state, sqla.func.count())
.group_by(FinalTI.c.dag_id, FinalTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag_id in filter_dag_ids:
payload[dag_id] = []
for state in State.task_states:
count = data.get(dag_id, {}).get(state, 0)
payload[dag_id].append({
'state': state,
'count': count
})
return wwwutils.json_response(payload)
@expose('/last_dagruns', methods=['POST'])
@has_access
@provide_session
def last_dagruns(self, session=None):
DagRun = models.DagRun
allowed_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if 'all_dags' in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
# Filter by post parameters
selected_dag_ids = {
unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id
}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response({})
query = session.query(
DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('last_run')
).group_by(DagRun.dag_id)
# Filter to only ask for accessible and selected dags
query = query.filter(DagRun.dag_id.in_(filter_dag_ids))
resp = {
r.dag_id.replace('.', '__dot__'): {
'dag_id': r.dag_id,
'last_run': r.last_run.isoformat(),
} for r in query
}
return wwwutils.json_response(resp)
@expose('/code')
@has_dag_access(can_dag_read=True)
@has_access
@provide_session
def code(self, session=None):
all_errors = ""
try:
dag_id = request.args.get('dag_id')
dag_orm = DagModel.get_dagmodel(dag_id, session=session)
code = DagCode.get_code_by_fileloc(dag_orm.fileloc)
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except Exception as e:
            all_errors += (
                "Exception encountered during " +
                "dag_id retrieval/dag code retrieval/code highlighting:\n\n{}\n".format(e)
            )
html_code = '<p>Failed to load file.</p><p>Details: {}</p>'.format(
escape(all_errors))
return self.render_template(
'airflow/dag_code.html', html_code=html_code, dag=dag_orm, title=dag_id,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'),
wrapped=conf.getboolean('webserver', 'default_wrap'))
@expose('/dag_details')
@has_dag_access(can_dag_read=True)
@has_access
@provide_session
def dag_details(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
root = request.args.get('root', '')
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
active_runs = models.DagRun.find(
dag_id=dag_id,
state=State.RUNNING,
external_trigger=False
)
return self.render_template(
'airflow/dag_details.html',
dag=dag, title=title, root=root, states=states, State=State, active_runs=active_runs)
@expose('/rendered')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
logging.info("Retrieving rendered templates.")
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.get_rendered_template_fields()
except AirflowException as e:
msg = "Error rendering template: " + escape(e)
if e.__cause__:
msg += Markup("<br/><br/>OriginalError: ") + escape(e.__cause__)
flash(msg, "error")
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.template_fields:
content = getattr(task, template_field)
if template_field in wwwutils.get_attr_renderer():
html_dict[template_field] = \
wwwutils.get_attr_renderer()[template_field](content)
else:
                html_dict[template_field] = (
                    "<pre><code>" + str(content) + "</code></pre>")
return self.render_template(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
root=root,
title=title)
@expose('/get_logs_with_metadata')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def get_logs_with_metadata(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
if request.args.get('try_number') is not None:
try_number = int(request.args.get('try_number'))
else:
try_number = None
metadata = request.args.get('metadata')
metadata = json.loads(metadata)
response_format = request.args.get('format', 'json')
# metadata may be null
if not metadata:
metadata = {}
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(
execution_date))
response = jsonify({'error': error_message})
response.status_code = 400
return response
logger = logging.getLogger('airflow.task')
task_log_reader = conf.get('logging', 'task_log_reader')
handler = next((handler for handler in logger.handlers
if handler.name == task_log_reader), None)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
def _get_logs_with_metadata(try_number, metadata):
if ti is None:
logs = ["*** Task instance did not exist in the DB\n"]
metadata['end_of_log'] = True
else:
logs, metadatas = handler.read(ti, try_number, metadata=metadata)
metadata = metadatas[0]
return logs, metadata
try:
if ti is not None:
dag = dagbag.get_dag(dag_id)
if dag:
ti.task = dag.get_task(ti.task_id)
if response_format == 'json':
logs, metadata = _get_logs_with_metadata(try_number, metadata)
message = logs[0] if try_number is not None else logs
return jsonify(message=message, metadata=metadata)
filename_template = conf.get('logging', 'LOG_FILENAME_TEMPLATE')
attachment_filename = render_log_filename(
ti=ti,
try_number="all" if try_number is None else try_number,
filename_template=filename_template)
metadata['download_logs'] = True
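            # non-JSON requests stream the raw logs back as an attachment; when no
            # try_number is given, every attempt is streamed one after the other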
def _generate_log_stream(try_number, metadata):
if try_number is None and ti is not None:
next_try = ti.next_try_number
try_numbers = list(range(1, next_try))
else:
try_numbers = [try_number]
for try_number in try_numbers:
metadata.pop('end_of_log', None)
metadata.pop('max_offset', None)
metadata.pop('offset', None)
while 'end_of_log' not in metadata or not metadata['end_of_log']:
logs, metadata = _get_logs_with_metadata(try_number, metadata)
yield "\n".join(logs) + "\n"
return Response(_generate_log_stream(try_number, metadata),
mimetype="text/plain",
headers={"Content-Disposition": "attachment; filename={}".format(
attachment_filename)})
except AttributeError as e:
error_message = ["Task log handler {} does not support read logs.\n{}\n"
.format(task_log_reader, str(e))]
metadata['end_of_log'] = True
return jsonify(message=error_message, error=True, metadata=metadata)
@expose('/log')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def log(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag_model = DagModel.get_dagmodel(dag_id)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
num_logs = 0
if ti is not None:
num_logs = ti.next_try_number - 1
if ti.state == State.UP_FOR_RESCHEDULE:
# Tasks in reschedule state decremented the try number
num_logs += 1
logs = [''] * num_logs
root = request.args.get('root', '')
return self.render_template(
'airflow/ti_log.html',
logs=logs, dag=dag_model, title="Log by attempts",
dag_id=dag_id, task_id=task_id,
execution_date=execution_date, form=form,
root=root, wrapped=conf.getboolean('webserver', 'default_wrap'))
@expose('/elasticsearch')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
def elasticsearch(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
try_number = request.args.get('try_number', 1)
elasticsearch_frontend = conf.get('elasticsearch', 'frontend')
log_id_template = conf.get('elasticsearch', 'log_id_template')
log_id = log_id_template.format(
dag_id=dag_id, task_id=task_id,
execution_date=execution_date, try_number=try_number)
url = 'https://' + elasticsearch_frontend.format(log_id=quote(log_id))
return redirect(url)
@expose('/task')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
def task(self):
TI = models.TaskInstance
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect(url_for('Airflow.index'))
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TI(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task): # noqa
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in wwwutils.get_attr_renderer(): # noqa
task_attrs.append((attr_name, str(attr)))
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in wwwutils.get_attr_renderer():
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = \
wwwutils.get_attr_renderer()[attr_name](source)
no_failed_deps_result = [(
"Unknown",
"All dependencies are met but the task instance is not running. In most "
"cases this just means that the task will probably be scheduled soon "
"unless:<br/>\n- The scheduler is down or under heavy load<br/>\n{}\n"
"<br/>\nIf this task instance does not start soon please contact your "
"Airflow administrator for assistance.".format(
"- This task instance already ran and had it's state changed manually "
"(e.g. cleared in the UI)<br/>" if ti.state == State.NONE else ""))]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_QUEUED_DEPS)
failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
ti.get_failed_dep_statuses(
dep_context=dep_context)]
title = "Task Instance Details"
return self.render_template(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
root=root,
dag=dag, title=title)
@expose('/xcom')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def xcom(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
dm_db = models.DagModel
ti_db = models.TaskInstance
dag = session.query(dm_db).filter(dm_db.dag_id == dag_id).first()
        ti = session.query(ti_db).filter(ti_db.dag_id == dag_id, ti_db.task_id == task_id).first()
if not ti:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect(url_for('Airflow.index'))
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render_template(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
root=root,
dag=dag, title=title)
@expose('/run', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def run(self):
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = request.form.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.form.get('execution_date')
execution_date = timezone.parse(execution_date)
ignore_all_deps = request.form.get('ignore_all_deps') == "true"
ignore_task_deps = request.form.get('ignore_task_deps') == "true"
ignore_ti_state = request.form.get('ignore_ti_state') == "true"
executor = ExecutorLoader.get_default_executor()
valid_celery_config = False
valid_kubernetes_config = False
try:
from airflow.executors.celery_executor import CeleryExecutor
valid_celery_config = isinstance(executor, CeleryExecutor)
except ImportError:
pass
try:
from airflow.executors.kubernetes_executor import KubernetesExecutor
valid_kubernetes_config = isinstance(executor, KubernetesExecutor)
except ImportError:
pass
if not valid_celery_config and not valid_kubernetes_config:
flash("Only works with the Celery or Kubernetes executors, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be run
dep_context = DepContext(
deps=RUNNING_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
flash("Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error")
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/delete', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def delete(self):
from airflow.api.common.experimental import delete_dag
from airflow.exceptions import DagNotFound, DagFileExists
dag_id = request.values.get('dag_id')
origin = request.values.get('origin') or url_for('Airflow.index')
try:
delete_dag.delete_dag(dag_id)
except DagNotFound:
flash("DAG with id {} not found. Cannot delete".format(dag_id), 'error')
return redirect(request.referrer)
except DagFileExists:
flash("Dag id {} is still in DagBag. "
"Remove the DAG file first.".format(dag_id),
'error')
return redirect(request.referrer)
flash("Deleting DAG with id {}. May take a couple minutes to fully"
" disappear.".format(dag_id))
# Upon success return to origin.
return redirect(origin)
@expose('/trigger', methods=['POST', 'GET'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
@provide_session
def trigger(self, session=None):
dag_id = request.values.get('dag_id')
origin = request.values.get('origin') or url_for('Airflow.index')
if request.method == 'GET':
return self.render_template(
'airflow/trigger.html',
dag_id=dag_id,
origin=origin,
conf=''
)
dag_orm = session.query(models.DagModel).filter(models.DagModel.dag_id == dag_id).first()
if not dag_orm:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = timezone.utcnow()
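        # manual runs get a "manual__<timestamp>" run_id; reject the trigger if a run
        # with that id already exists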
run_id = f"{DagRunType.MANUAL.value}__{execution_date.isoformat()}"
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
conf = request.values.get('conf')
if conf:
try:
run_conf = json.loads(conf)
except json.decoder.JSONDecodeError:
flash("Invalid JSON configuration", "error")
return self.render_template(
'airflow/trigger.html',
dag_id=dag_id,
origin=origin,
conf=conf
)
dag = dagbag.get_dag(dag_id)
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
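    # shared by /clear and /dagrun_clear: performs the clear when confirmed, otherwise
    # dry-runs it and renders a confirmation page listing the affected task instances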
def _clear_dag_tis(self, dag, start_date, end_date, origin,
recursive=False, confirmed=False, only_failed=False):
from airflow.exceptions import AirflowException
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
include_parentdag=recursive,
only_failed=only_failed,
)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
try:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
include_parentdag=recursive,
only_failed=only_failed,
dry_run=True,
)
except AirflowException as ex:
flash(str(ex), 'error')
return redirect(origin)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render_template(
'airflow/confirm.html',
message=("Here's the list of task instances you are about "
"to clear:"),
details=details)
return response
@expose('/clear', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def clear(self):
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = request.form.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.form.get('execution_date')
execution_date = timezone.parse(execution_date)
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('upstream') == "true"
downstream = request.form.get('downstream') == "true"
future = request.form.get('future') == "true"
past = request.form.get('past') == "true"
recursive = request.form.get('recursive') == "true"
only_failed = request.form.get('only_failed') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=recursive, confirmed=confirmed, only_failed=only_failed)
@expose('/dagrun_clear', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def dagrun_clear(self):
dag_id = request.form.get('dag_id')
origin = request.form.get('origin')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
dag = dagbag.get_dag(dag_id)
execution_date = timezone.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=True, confirmed=confirmed)
@expose('/blocked', methods=['POST'])
@has_access
@provide_session
def blocked(self, session=None):
allowed_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if 'all_dags' in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
# Filter by post parameters
selected_dag_ids = {
unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id
}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response([])
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.filter(DR.dag_id.in_(filter_dag_ids))
.group_by(DR.dag_id)
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
dag = dagbag.get_dag(dag_id)
if dag:
# TODO: Make max_active_runs a column so we can query for it directly
max_active_runs = dag.max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
def _mark_dagrun_state_as_failed(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = timezone.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_failed(dag, execution_date, commit=confirmed)
if confirmed:
flash('Marked failed on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render_template(
'airflow/confirm.html',
message="Here's the list of task instances you are about to mark as failed",
details=details)
return response
def _mark_dagrun_state_as_success(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = timezone.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_success(dag, execution_date,
commit=confirmed)
if confirmed:
flash('Marked success on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render_template(
'airflow/confirm.html',
message="Here's the list of task instances you are about to mark as success",
details=details)
return response
@expose('/dagrun_failed', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def dagrun_failed(self):
dag_id = request.form.get('dag_id')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == 'true'
origin = request.form.get('origin')
return self._mark_dagrun_state_as_failed(dag_id, execution_date,
confirmed, origin)
@expose('/dagrun_success', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def dagrun_success(self):
dag_id = request.form.get('dag_id')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == 'true'
origin = request.form.get('origin')
return self._mark_dagrun_state_as_success(dag_id, execution_date,
confirmed, origin)
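    # shared by /failed and /success: applies set_state when confirmed, otherwise shows
    # a confirmation page with the task instances that would be altered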
def _mark_task_instance_state(self, dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, state):
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
task.dag = dag
latest_execution_date = dag.get_latest_execution_date()
if not latest_execution_date:
flash(f"Cannot make {state}, seem that dag {dag_id} has never run", "error")
return redirect(origin)
execution_date = timezone.parse(execution_date)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(tasks=[task], execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=state,
commit=True)
flash("Marked {} on {} task instances".format(state, len(altered)))
return redirect(origin)
to_be_altered = set_state(tasks=[task], execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=state,
commit=False)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render_template(
"airflow/confirm.html",
message=("Here's the list of task instances you are about to mark as {}:".format(state)),
details=details)
return response
@expose('/failed', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def failed(self):
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = request.form.get('origin')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('failed_upstream') == "true"
downstream = request.form.get('failed_downstream') == "true"
future = request.form.get('failed_future') == "true"
past = request.form.get('failed_past') == "true"
return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, State.FAILED)
@expose('/success', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def success(self):
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = request.form.get('origin')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('success_upstream') == "true"
downstream = request.form.get('success_downstream') == "true"
future = request.form.get('success_future') == "true"
past = request.form.get('success_past') == "true"
return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, State.SUCCESS)
@expose('/tree')
@has_dag_access(can_dag_read=True)
@has_access
@gzipped
@action_logging
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if not dag:
flash('DAG "{0}" seems to be missing from DagBag.'.format(dag_id), "error")
return redirect(url_for('Airflow.index'))
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
if num_runs:
num_runs = int(num_runs)
else:
num_runs = conf.getint('webserver', 'default_dag_run_display_number')
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
with create_session() as session:
dag_runs = (
session.query(DagRun)
.filter(
DagRun.dag_id == dag.dag_id,
DagRun.execution_date <= base_date)
.order_by(DagRun.execution_date.desc())
.limit(num_runs)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs
}
dates = sorted(list(dag_runs.keys()))
max_date = max(dates) if dates else None
min_date = min(dates) if dates else None
tis = dag.get_task_instances(start_date=min_date, end_date=base_date)
task_instances: Dict[Tuple[str, datetime], models.TaskInstance] = {}
for ti in tis:
task_instances[(ti.task_id, ti.execution_date)] = ti
expanded = set()
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = 0
node_limit = 5000 / max(1, len(dag.leaves))
def encode_ti(ti: Optional[models.TaskInstance]) -> Optional[List]:
if not ti:
return None
# NOTE: order of entry is important here because client JS relies on it for
# tree node reconstruction. Remember to change JS code in tree.html
# whenever order is altered.
data = [
ti.state,
ti.try_number,
None, # start_ts
None, # duration
]
if ti.start_date:
# round to seconds to reduce payload size
data[2] = int(ti.start_date.timestamp())
if ti.duration is not None:
data[3] = int(ti.duration)
return data
def recurse_nodes(task, visited):
nonlocal node_count
node_count += 1
visited.add(task)
task_id = task.task_id
node = {
'name': task.task_id,
'instances': [
encode_ti(task_instances.get((task_id, d)))
for d in dates
],
'num_dep': len(task.downstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'ui_color': task.ui_color,
}
if task.downstream_list:
children = [
recurse_nodes(t, visited) for t in task.downstream_list
if node_count < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
if task.task_id not in expanded:
children_key = 'children'
expanded.add(task.task_id)
else:
children_key = "_children"
node[children_key] = children
if task.depends_on_past:
node['depends_on_past'] = task.depends_on_past
if task.start_date:
# round to seconds to reduce payload size
node['start_ts'] = int(task.start_date.timestamp())
if task.end_date:
# round to seconds to reduce payload size
node['end_ts'] = int(task.end_date.timestamp())
if task.extra_links:
node['extra_links'] = task.extra_links
return node
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates
],
}
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
external_logs = conf.get('elasticsearch', 'frontend')
# avoid spaces to reduce payload size
data = htmlsafe_json_dumps(data, separators=(',', ':'))
# escape slashes to avoid JSON parse error in JS
data = data.replace('\\', '\\\\')
return self.render_template(
'airflow/tree.html',
operators=sorted({op.task_type: op for op in dag.tasks}.values(), key=lambda x: x.task_type),
root=root,
form=form,
dag=dag,
data=data,
blur=blur, num_runs=num_runs,
show_external_logs=bool(external_logs))
@expose('/graph')
@has_dag_access(can_dag_read=True)
@has_access
@gzipped
@action_logging
@provide_session
def graph(self, session=None):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if not dag:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect(url_for('Airflow.index'))
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
'rx': 5,
'ry': 5,
}
})
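        # recursively collect edges from each root's downstream tasks, de-duplicating
        # them so shared paths are only drawn once in the graph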
def get_downstream(task):
for t in task.downstream_list:
edge = {
'source_id': task.task_id,
'target_id': t.task_id,
}
if edge not in edges:
edges.append(edge)
get_downstream(t)
for t in dag.roots:
get_downstream(t)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dt_nr_dr_data['arrange'] = arrange
dttm = dt_nr_dr_data['dttm']
class GraphForm(DateTimeWithNumRunsWithDagRunsForm):
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
'extra_links': t.extra_links,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
doc_md = markdown.markdown(dag.doc_md) \
if hasattr(dag, 'doc_md') and dag.doc_md else ''
external_logs = conf.get('elasticsearch', 'frontend')
return self.render_template(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=wwwutils.state_token(dt_nr_dr_data['dr_state']),
doc_md=doc_md,
arrange=arrange,
operators=sorted({op.task_type: op for op in dag.tasks}.values(), key=lambda x: x.task_type),
blur=blur,
root=root or '',
task_instances=task_instances,
tasks=tasks,
nodes=nodes,
edges=edges,
show_external_logs=bool(external_logs))
@expose('/duration')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def duration(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if dag is None:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect(url_for('Airflow.index'))
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
cum_chart = nvd3.lineChart(
name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
y = defaultdict(list)
x = defaultdict(list)
cum_y = defaultdict(list)
tis = dag.get_task_instances(start_date=min_date, end_date=base_date)
TF = TaskFail
ti_fails = (
session.query(TF)
.filter(TF.dag_id == dag.dag_id,
TF.execution_date >= min_date,
TF.execution_date <= base_date,
TF.task_id.in_([t.task_id for t in dag.tasks]))
.all() # noqa
)
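        # sum TaskFail durations per (dag_id, task_id, execution_date) so the cumulative
        # chart also accounts for time spent in failed attempts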
fails_totals = defaultdict(int)
for tf in ti_fails:
dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
if tf.duration:
fails_totals[dict_key] += tf.duration
for ti in tis:
if ti.duration:
dttm = wwwutils.epoch(ti.execution_date)
x[ti.task_id].append(dttm)
y[ti.task_id].append(float(ti.duration))
fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
fails_total = fails_totals[fails_dict_key]
cum_y[ti.task_id].append(float(ti.duration + fails_total))
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '-15'
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(cum_y_unit))
cum_chart.axislist['yAxis']['axisLabelDistance'] = '-15'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(cum_y[task.task_id],
cum_y_unit))
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
"$( document ).trigger('chartload')" +
cum_chart.htmlcontent[s_index:])
return self.render_template(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
cum_chart=cum_chart.htmlcontent
)
@expose('/tries')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def tries(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(start_date=min_date, end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
# y value should reflect completed tries to have a 0 baseline.
y.append(ti.prev_attempted_tries)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(start_date=min_date, end_date=base_date)
tries = sorted(list({ti.try_number for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if tries else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render_template(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
tab_title='Tries',
)
@expose('/landing_times')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def landing_times(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
y = {}
x = {}
for task in dag.tasks:
task_id = task.task_id
y[task_id] = []
x[task_id] = []
for ti in task.get_task_instances(start_date=min_date, end_date=base_date):
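                # landing time = how long after the period it covers the task finished:
                # measure end_date against the following schedule (the end of the interval)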
ts = ti.execution_date
if dag.schedule_interval and dag.following_schedule(ts):
ts = dag.following_schedule(ts)
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x[task_id].append(dttm)
y[task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Landing Time ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '-15'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
tis = dag.get_task_instances(start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render_template(
'airflow/chart.html',
dag=dag,
chart=chart.htmlcontent,
height=str(chart_height + 100) + "px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
tab_title='Landing times',
)
@expose('/paused', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def paused(self):
dag_id = request.args.get('dag_id')
        # the 'is_paused' query param is inverted: 'false' means the DAG should be paused
        is_paused = request.args.get('is_paused') == 'false'
models.DagModel.get_dagmodel(dag_id).set_is_paused(
is_paused=is_paused)
return "OK"
@expose('/refresh', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
@provide_session
def refresh(self, session=None):
DagModel = models.DagModel
dag_id = request.values.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = timezone.utcnow()
session.merge(orm_dag)
session.commit()
dag = dagbag.get_dag(dag_id)
# sync dag permission
appbuilder.sm.sync_perm_for_dag(dag_id, dag.access_control)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/gantt')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def gantt(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dttm = dt_nr_dr_data['dttm']
form = DateTimeWithNumRunsWithDagRunsForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
tis = [
ti for ti in dag.get_task_instances(dttm, dttm)
if ti.start_date and ti.state]
tis = sorted(tis, key=lambda ti: ti.start_date)
TF = TaskFail
ti_fails = list(itertools.chain(*[(
session
.query(TF)
.filter(TF.dag_id == ti.dag_id,
TF.task_id == ti.task_id,
TF.execution_date == ti.execution_date)
.all()
) for ti in tis]))
# determine bars to show in the gantt chart
gantt_bar_items = []
tasks = []
for ti in tis:
end_date = ti.end_date or timezone.utcnow()
# prev_attempted_tries will reflect the currently running try_number
# or the try_number of the last complete run
# https://issues.apache.org/jira/browse/AIRFLOW-2143
try_count = ti.prev_attempted_tries
gantt_bar_items.append((ti.task_id, ti.start_date, end_date, ti.state, try_count))
d = alchemy_to_dict(ti)
d['extraLinks'] = dag.get_task(ti.task_id).extra_links
tasks.append(d)
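        # add a separate gantt bar for every recorded TaskFail, numbering consecutive
        # failures of the same task so each attempt gets its own try number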
tf_count = 0
try_count = 1
prev_task_id = ""
for tf in ti_fails:
end_date = tf.end_date or timezone.utcnow()
start_date = tf.start_date or end_date
if tf_count != 0 and tf.task_id == prev_task_id:
try_count = try_count + 1
else:
try_count = 1
prev_task_id = tf.task_id
gantt_bar_items.append((tf.task_id, start_date, end_date, State.FAILED, try_count))
tf_count = tf_count + 1
task = dag.get_task(tf.task_id)
d = alchemy_to_dict(tf)
d['state'] = State.FAILED
d['operator'] = task.task_type
d['try_number'] = try_count
d['extraLinks'] = task.extra_links
tasks.append(d)
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'height': len(tis) * 25 + 25,
}
session.commit()
return self.render_template(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=data,
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/extra_links')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
def extra_links(self):
"""
        A RESTful endpoint that returns external links for a given Operator.
It queries the operator that sent the request for the links it wishes
to provide for a given external link name.
API: GET
Args: dag_id: The id of the dag containing the task in question
task_id: The id of the task in question
execution_date: The date of execution of the task
link_name: The name of the link reference to find the actual URL for
Returns:
200: {url: <url of link>, error: None} - returned when there was no problem
finding the URL
404: {url: None, error: <error message>} - returned when the operator does
not return a URL
"""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
link_name = request.args.get('link_name')
dttm = timezone.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
response = jsonify(
{'url': None,
'error': "can't find dag {dag} or task_id {task_id}".format(
dag=dag,
task_id=task_id
)}
)
response.status_code = 404
return response
task = dag.get_task(task_id)
try:
url = task.get_extra_links(dttm, link_name)
except ValueError as err:
response = jsonify({'url': None, 'error': str(err)})
response.status_code = 404
return response
if url:
response = jsonify({'error': None, 'url': url})
response.status_code = 200
return response
else:
response = jsonify(
{'url': None, 'error': 'No URL found for {dest}'.format(dest=link_name)})
response.status_code = 404
return response
@expose('/object/task_instances')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
def task_instances(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = timezone.parse(dttm)
else:
return "Error: Invalid execution_date"
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(dttm, dttm)}
return json.dumps(task_instances)
class VersionView(AirflowBaseView):
default_view = 'version'
@expose('/version')
@has_access
def version(self):
try:
airflow_version = airflow.__version__
except Exception as e:
airflow_version = None
logging.error(e)
# Get the Git repo and git hash
git_version = None
try:
git_version = str(pkgutil.get_data('airflow', 'git_version'), encoding="UTF-8")
except Exception as e:
logging.error(e)
# Render information
title = "Version Info"
return self.render_template(
'airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(AirflowBaseView):
default_view = 'conf'
@expose('/configuration')
@has_access
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = AIRFLOW_CONFIG
# Don't show config when expose_config variable is False in airflow config
if conf.getboolean("webserver", "expose_config"):
with open(AIRFLOW_CONFIG, 'r') as file:
config = file.read()
table = [(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()]
else:
config = (
"# Your Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
table = None
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render_template(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle,
table=table)
######################################################################################
# ModelViews
######################################################################################
class DagFilter(BaseFilter):
def apply(self, query, func): # noqa
if appbuilder.sm.has_all_dags_access():
return query
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
return query.filter(self.model.dag_id.in_(filter_dag_ids))
class AirflowModelView(ModelView):
list_widget = AirflowModelListWidget
page_size = PAGE_SIZE
CustomSQLAInterface = wwwutils.CustomSQLAInterface
class SlaMissModelView(AirflowModelView):
route_base = '/slamiss'
datamodel = AirflowModelView.CustomSQLAInterface(SlaMiss)
base_permissions = ['can_list']
list_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
add_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
edit_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
search_columns = ['dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'task_id': wwwutils.task_instance_link,
'execution_date': wwwutils.datetime_f('execution_date'),
'timestamp': wwwutils.datetime_f('timestamp'),
'dag_id': wwwutils.dag_link,
}
class XComModelView(AirflowModelView):
route_base = '/xcom'
datamodel = AirflowModelView.CustomSQLAInterface(XCom)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']
search_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']
list_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']
add_columns = ['key', 'value', 'execution_date', 'task_id', 'dag_id']
edit_columns = ['key', 'value', 'execution_date', 'task_id', 'dag_id']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'task_id': wwwutils.task_instance_link,
'execution_date': wwwutils.datetime_f('execution_date'),
'timestamp': wwwutils.datetime_f('timestamp'),
'dag_id': wwwutils.dag_link,
}
@action('muldelete', 'Delete', "Are you sure you want to delete selected records?",
single=False)
def action_muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
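    # rows added or edited through the UI get the same treatment as programmatic XComs:
    # a timezone-aware execution_date and a serialized value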
def pre_add(self, item):
item.execution_date = timezone.make_aware(item.execution_date)
item.value = XCom.serialize_value(item.value)
def pre_update(self, item):
item.execution_date = timezone.make_aware(item.execution_date)
item.value = XCom.serialize_value(item.value)
class ConnectionModelView(AirflowModelView):
route_base = '/connection'
datamodel = AirflowModelView.CustomSQLAInterface(Connection)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']
extra_fields = ['extra__jdbc__drv_path', 'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__keyfile_dict',
'extra__google_cloud_platform__scope',
'extra__google_cloud_platform__num_retries',
'extra__grpc__auth_type',
'extra__grpc__credential_pem_file',
'extra__grpc__scopes',
'extra__yandexcloud__service_account_json',
'extra__yandexcloud__service_account_json_path',
'extra__yandexcloud__oauth',
'extra__yandexcloud__public_ssh_key',
'extra__yandexcloud__folder_id',
'extra__kubernetes__in_cluster',
'extra__kubernetes__kube_config',
'extra__kubernetes__namespace']
list_columns = ['conn_id', 'conn_type', 'host', 'port', 'is_encrypted',
'is_extra_encrypted']
add_columns = edit_columns = ['conn_id', 'conn_type', 'host', 'schema',
'login', 'password', 'port', 'extra'] + extra_fields
add_form = edit_form = ConnectionForm
add_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
base_order = ('conn_id', 'asc')
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',
single=False)
@has_dag_access(can_dag_edit=True)
def action_muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def process_form(self, form, is_created):
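        # for connection types with provider-specific extra__ fields, pack those form
        # fields back into the JSON 'extra' column before saving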
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform', 'grpc', 'yandexcloud', 'kubernetes']:
extra = {
key: formdata[key]
for key in self.extra_fields if key in formdata}
form.extra.data = json.dumps(extra)
def prefill_form(self, form, pk):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception:
d = {}
if not hasattr(d, 'get'):
            logging.warning('extra field for {} is not a dictionary'.format(
                form.data.get('conn_id', '<unknown>')))
return
for field in self.extra_fields:
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class PoolModelView(AirflowModelView):
route_base = '/pool'
datamodel = AirflowModelView.CustomSQLAInterface(models.Pool)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']
list_columns = ['pool', 'slots', 'running_slots', 'queued_slots']
add_columns = ['pool', 'slots', 'description']
edit_columns = ['pool', 'slots', 'description']
base_order = ('pool', 'asc')
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',
single=False)
def action_muldelete(self, items):
if any(item.pool == models.Pool.DEFAULT_POOL_NAME for item in items):
flash("default_pool cannot be deleted", 'error')
self.update_redirect()
return redirect(self.get_redirect())
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def pool_link(attr):
pool_id = attr.get('pool')
if pool_id is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id)
return Markup("<a href='{url}'>{pool_id}</a>").format(url=url, pool_id=pool_id)
else:
return Markup('<span class="label label-danger">Invalid</span>')
def frunning_slots(attr):
pool_id = attr.get('pool')
running_slots = attr.get('running_slots')
if pool_id is not None and running_slots is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id, _flt_3_state='running')
return Markup("<a href='{url}'>{running_slots}</a>").format(url=url, running_slots=running_slots)
else:
return Markup('<span class="label label-danger">Invalid</span>')
def fqueued_slots(attr):
pool_id = attr.get('pool')
queued_slots = attr.get('queued_slots')
if pool_id is not None and queued_slots is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id, _flt_3_state='queued')
return Markup("<a href='{url}'>{queued_slots}</a>").format(url=url, queued_slots=queued_slots)
else:
return Markup('<span class="label label-danger">Invalid</span>')
formatters_columns = {
'pool': pool_link,
'running_slots': frunning_slots,
'queued_slots': fqueued_slots
}
validators_columns = {
'pool': [validators.DataRequired()],
'slots': [validators.NumberRange(min=-1)]
}
class VariableModelView(AirflowModelView):
route_base = '/variable'
list_template = 'airflow/variable_list.html'
edit_template = 'airflow/variable_edit.html'
datamodel = AirflowModelView.CustomSQLAInterface(models.Variable)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete', 'can_varimport']
list_columns = ['key', 'val', 'is_encrypted']
add_columns = ['key', 'val']
edit_columns = ['key', 'val']
search_columns = ['key', 'val']
base_order = ('key', 'asc')
def hidden_field_formatter(attr):
key = attr.get('key')
val = attr.get('val')
if wwwutils.should_hide_value_for_key(key):
return Markup('*' * 8)
if val:
return val
else:
return Markup('<span class="label label-danger">Invalid</span>')
formatters_columns = {
'val': hidden_field_formatter,
}
validators_columns = {
'key': [validators.DataRequired()]
}
def prefill_form(self, form, id):
if wwwutils.should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',
single=False)
def action_muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
@action('varexport', 'Export', '', single=False)
def action_varexport(self, items):
var_dict = {}
d = json.JSONDecoder()
for var in items:
try:
val = d.decode(var.val)
except Exception:
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
response.headers["Content-Type"] = "application/json; charset=utf-8"
return response
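    # Import variables from an uploaded JSON file; non-string values are
    # stored JSON-serialized.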
@expose('/varimport', methods=["POST"])
@has_access
@action_logging
def varimport(self):
try:
out = request.files['file'].read()
if isinstance(out, bytes):
d = json.loads(out.decode('utf-8'))
else:
d = json.loads(out)
except Exception:
self.update_redirect()
flash("Missing file or syntax error.", 'error')
return redirect(self.get_redirect())
else:
suc_count = fail_count = 0
for k, v in d.items():
try:
models.Variable.set(k, v, serialize_json=not isinstance(v, str))
except Exception as e:
logging.info('Variable import failed: {}'.format(repr(e)))
fail_count += 1
else:
suc_count += 1
flash("{} variable(s) successfully updated.".format(suc_count))
if fail_count:
flash("{} variable(s) failed to be updated.".format(fail_count), 'error')
self.update_redirect()
return redirect(self.get_redirect())
class JobModelView(AirflowModelView):
route_base = '/job'
datamodel = AirflowModelView.CustomSQLAInterface(BaseJob)
base_permissions = ['can_list']
list_columns = ['id', 'dag_id', 'state', 'job_type', 'start_date',
'end_date', 'latest_heartbeat',
'executor_class', 'hostname', 'unixname']
search_columns = ['id', 'dag_id', 'state', 'job_type', 'start_date',
'end_date', 'latest_heartbeat', 'executor_class',
'hostname', 'unixname']
base_order = ('start_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'start_date': wwwutils.datetime_f('start_date'),
'end_date': wwwutils.datetime_f('end_date'),
'hostname': wwwutils.nobr_f('hostname'),
'state': wwwutils.state_f,
'latest_heartbeat': wwwutils.datetime_f('latest_heartbeat'),
}
class DagRunModelView(AirflowModelView):
route_base = '/dagrun'
datamodel = AirflowModelView.CustomSQLAInterface(models.DagRun)
base_permissions = ['can_list', 'can_add']
add_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger', 'conf']
list_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger']
search_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
add_form = edit_form = DagRunForm
formatters_columns = {
'execution_date': wwwutils.datetime_f('execution_date'),
'state': wwwutils.state_f,
'start_date': wwwutils.datetime_f('start_date'),
'dag_id': wwwutils.dag_link,
'run_id': wwwutils.dag_run_link,
}
@action('muldelete', "Delete", "Are you sure you want to delete selected records?",
single=False)
@has_dag_access(can_dag_edit=True)
@provide_session
def action_muldelete(self, items, session=None):
self.datamodel.delete_all(items)
self.update_redirect()
dirty_ids = []
for item in items:
dirty_ids.append(item.dag_id)
return redirect(self.get_redirect())
@action('set_running', "Set state to 'running'", '', single=False)
@provide_session
def action_set_running(self, drs, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
for dr in session.query(DR).filter(
DR.id.in_([dagrun.id for dagrun in drs])).all():
dirty_ids.append(dr.dag_id)
count += 1
dr.start_date = timezone.utcnow()
dr.state = State.RUNNING
session.commit()
flash("{count} dag runs were set to running".format(count=count))
except Exception as ex:
flash(str(ex), 'error')
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
@action('set_failed', "Set state to 'failed'",
"All running task instances would also be marked as failed, are you sure?",
single=False)
@provide_session
def action_set_failed(self, drs, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
altered_tis = []
for dr in session.query(DR).filter(
DR.id.in_([dagrun.id for dagrun in drs])).all():
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += \
set_dag_run_state_to_failed(dagbag.get_dag(dr.dag_id),
dr.execution_date,
commit=True,
session=session)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to failed".format(count=count, altered_ti_count=altered_ti_count))
except Exception:
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
@action('set_success', "Set state to 'success'",
"All task instances would also be marked as success, are you sure?",
single=False)
@provide_session
def action_set_success(self, drs, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
altered_tis = []
for dr in session.query(DR).filter(
DR.id.in_([dagrun.id for dagrun in drs])).all():
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += \
set_dag_run_state_to_success(dagbag.get_dag(dr.dag_id),
dr.execution_date,
commit=True,
session=session)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to success".format(count=count, altered_ti_count=altered_ti_count))
except Exception:
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
class LogModelView(AirflowModelView):
route_base = '/log'
datamodel = AirflowModelView.CustomSQLAInterface(Log)
base_permissions = ['can_list']
list_columns = ['id', 'dttm', 'dag_id', 'task_id', 'event', 'execution_date',
'owner', 'extra']
search_columns = ['dag_id', 'task_id', 'event', 'execution_date', 'owner', 'extra']
base_order = ('dttm', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'dttm': wwwutils.datetime_f('dttm'),
'execution_date': wwwutils.datetime_f('execution_date'),
'dag_id': wwwutils.dag_link,
}
class TaskInstanceModelView(AirflowModelView):
route_base = '/taskinstance'
datamodel = AirflowModelView.CustomSQLAInterface(models.TaskInstance)
base_permissions = ['can_list']
page_size = PAGE_SIZE
list_columns = ['state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url']
order_columns = [item for item in list_columns if item not in ['try_number', 'log_url']]
search_columns = ['state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date']
base_order = ('job_id', 'asc')
base_filters = [['dag_id', DagFilter, lambda: []]]
def log_url_formatter(attr):
log_url = attr.get('log_url')
return Markup(
'<a href="{log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(log_url=log_url)
def duration_f(attr):
end_date = attr.get('end_date')
duration = attr.get('duration')
if end_date and duration:
return timedelta(seconds=duration)
formatters_columns = {
'log_url': log_url_formatter,
'task_id': wwwutils.task_instance_link,
'hostname': wwwutils.nobr_f('hostname'),
'state': wwwutils.state_f,
'execution_date': wwwutils.datetime_f('execution_date'),
'start_date': wwwutils.datetime_f('start_date'),
'end_date': wwwutils.datetime_f('end_date'),
'queued_dttm': wwwutils.datetime_f('queued_dttm'),
'dag_id': wwwutils.dag_link,
'duration': duration_f,
}
@provide_session
@action('clear', lazy_gettext('Clear'),
lazy_gettext('Are you sure you want to clear the state of the selected task'
' instance(s) and set their dagruns to the running state?'),
single=False)
def action_clear(self, tis, session=None):
try:
dag_to_tis = {}
for ti in tis:
dag = dagbag.get_dag(ti.dag_id)
tis = dag_to_tis.setdefault(dag, [])
tis.append(ti)
for dag, tis in dag_to_tis.items():
models.clear_task_instances(tis, session, dag=dag)
session.commit()
flash("{0} task instances have been cleared".format(len(tis)))
self.update_redirect()
return redirect(self.get_redirect())
except Exception:
flash('Failed to clear task instances', 'error')
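    # Helper shared by the set_* actions below: commits the requested state
    # for every selected task instance.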
@provide_session
def set_task_instance_state(self, tis, target_state, session=None):
try:
count = len(tis)
for ti in tis:
ti.set_state(target_state, session)
session.commit()
flash("{count} task instances were set to '{target_state}'".format(
count=count, target_state=target_state))
except Exception:
flash('Failed to set state', 'error')
@action('set_running', "Set state to 'running'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_running(self, tis):
self.set_task_instance_state(tis, State.RUNNING)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_failed', "Set state to 'failed'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_failed(self, tis):
self.set_task_instance_state(tis, State.FAILED)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_success', "Set state to 'success'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_success(self, tis):
self.set_task_instance_state(tis, State.SUCCESS)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_retry', "Set state to 'up_for_retry'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_retry(self, tis):
self.set_task_instance_state(tis, State.UP_FOR_RETRY)
self.update_redirect()
return redirect(self.get_redirect())
class DagModelView(AirflowModelView):
route_base = '/dagmodel'
datamodel = AirflowModelView.CustomSQLAInterface(models.DagModel)
base_permissions = ['can_list', 'can_show']
list_columns = ['dag_id', 'is_paused', 'last_scheduler_run',
'last_expired', 'scheduler_lock', 'fileloc', 'owners']
formatters_columns = {
'dag_id': wwwutils.dag_link
}
base_filters = [['dag_id', DagFilter, lambda: []]]
def get_query(self):
"""
Default filters for model
"""
return (
super().get_query()
.filter(or_(models.DagModel.is_active,
models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super().get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
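    # Autocomplete endpoint for the DAG search box: suggests matching dag_ids
    # and owners, respecting the current status filter and the DAGs the user
    # is allowed to see.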
@has_access
@permission_name("list")
@provide_session
@expose('/autocomplete')
def autocomplete(self, session=None):
query = unquote(request.args.get('query', ''))
if not query:
            return wwwutils.json_response([])
# Provide suggestions of dag_ids and owners
dag_ids_query = session.query(DagModel.dag_id.label('item')).filter(
~DagModel.is_subdag, DagModel.is_active,
DagModel.dag_id.ilike('%' + query + '%'))
owners_query = session.query(func.distinct(DagModel.owners).label('item')).filter(
~DagModel.is_subdag, DagModel.is_active,
DagModel.owners.ilike('%' + query + '%'))
# Hide DAGs if not showing status: "all"
status = flask_session.get(FILTER_STATUS_COOKIE)
if status == 'active':
dag_ids_query = dag_ids_query.filter(~DagModel.is_paused)
owners_query = owners_query.filter(~DagModel.is_paused)
elif status == 'paused':
dag_ids_query = dag_ids_query.filter(DagModel.is_paused)
owners_query = owners_query.filter(DagModel.is_paused)
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if 'all_dags' not in filter_dag_ids:
dag_ids_query = dag_ids_query.filter(DagModel.dag_id.in_(filter_dag_ids))
owners_query = owners_query.filter(DagModel.dag_id.in_(filter_dag_ids))
payload = [row[0] for row in dag_ids_query.union(owners_query).limit(10).all()]
return wwwutils.json_response(payload)
|
py | 1a2ec7c86582405243210f553e885066a4c90da0 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module defining classes related to inventory actions
"""
from pyherc.data import is_armour, is_weapon, is_boots
from pyherc.aspects import log_debug, log_info
from pyherc.events import new_unequip_event
from pyherc.rules.factory import SubActionFactory
class UnEquipFactory(SubActionFactory):
"""
Factory for creating unequip actions
.. versionadded:: 0.8
"""
@log_debug
def __init__(self):
"""
Constructor for this factory
"""
super().__init__()
self.sub_action = 'unequip'
@log_debug
def can_handle(self, parameters):
"""
Can this factory process these parameters
:param parameters: parameters to check
:returns: True if factory is capable of handling parameters
:rtype: Boolean
"""
return self.sub_action == parameters.sub_action
@log_info
def get_action(self, parameters):
"""
Create an unequip action
:param parameters: parameters used to control creation
:type parameters: InventoryParameters
"""
return UnEquipAction(parameters.character, parameters.item)
class UnEquipAction():
"""
    Action for unequipping an item
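
    A rough usage sketch (``character`` and ``item`` are assumed to be
    existing pyherc Character and Item instances)::

        action = UnEquipAction(character, item)
        if action.is_legal():
            action.execute()
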
.. versionadded:: 0.8
"""
@log_debug
def __init__(self, character, item):
"""
Default constructor
:param character: character wearing the item
:type character: Character
:param item: item to unequip
:type item: Item
"""
super().__init__()
self.character = character
self.item = item
@log_info
def execute(self):
"""
Executes this action
"""
if is_armour(self.item):
self.character.inventory.armour = None
self.character.raise_event(new_unequip_event(self.character,
self.item))
if is_weapon(self.item):
self.character.inventory.weapon = None
self.character.raise_event(new_unequip_event(self.character,
self.item))
if is_boots(self.item):
self.character.inventory.boots = None
self.character.raise_event(new_unequip_event(self.character,
self.item))
@log_debug
def is_legal(self):
"""
Check if the action is possible to perform
        :returns: True if the action is possible, False otherwise
:rtype: Boolean
"""
return True
|
py | 1a2ecae7b94c0b648c258f3ae55cf4ff0371822d | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pytorch_lightning import Callback, Trainer
from tests.helpers.boring_model import BoringModel
@pytest.mark.parametrize("single_cb", [False, True])
def test_train_step_no_return(tmpdir, single_cb: bool):
"""
    Tests that the on_*_batch_end hooks (both callback and model) receive the
    outputs returned by training_step, validation_step and test_step
"""
class CB(Callback):
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
assert "loss" in outputs
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
assert "x" in outputs
def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
assert "x" in outputs
class TestModel(BoringModel):
def on_train_batch_end(self, outputs, batch, batch_idx: int, dataloader_idx: int) -> None:
assert "loss" in outputs
def on_validation_batch_end(self, outputs, batch, batch_idx: int, dataloader_idx: int) -> None:
assert "x" in outputs
def on_test_batch_end(self, outputs, batch, batch_idx: int, dataloader_idx: int) -> None:
assert "x" in outputs
def training_epoch_end(self, outputs) -> None:
assert len(outputs) == self.trainer.num_training_batches
model = TestModel()
trainer = Trainer(
callbacks=CB() if single_cb else [CB()],
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=1,
log_every_n_steps=1,
weights_summary=None,
)
assert any(isinstance(c, CB) for c in trainer.callbacks)
trainer.fit(model)
def test_free_memory_on_eval_outputs(tmpdir):
class CB(Callback):
def on_epoch_end(self, trainer, pl_module):
assert len(trainer._evaluation_loop.outputs) == 0
model = BoringModel()
trainer = Trainer(
callbacks=CB(),
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
|
py | 1a2ecb255b4155ad265b3f3067fad5454f01a1b3 | # pylint: disable=W0611
# coding: utf-8
'''
Window
======
Core class for creating the default Kivy window. Kivy supports only one window
per application: please don't try to create more than one.
'''
__all__ = ('Keyboard', 'WindowBase', 'Window')
from os.path import join, exists
from os import getcwd
from kivy.core import core_select_lib
from kivy.clock import Clock
from kivy.config import Config
from kivy.logger import Logger
from kivy.base import EventLoop, stopTouchApp
from kivy.modules import Modules
from kivy.event import EventDispatcher
from kivy.properties import ListProperty, ObjectProperty, AliasProperty, \
NumericProperty, OptionProperty, StringProperty, BooleanProperty
from kivy.utils import platform, reify
from kivy.context import get_current_context
from kivy.uix.behaviors import FocusBehavior
from kivy.setupconfig import USE_SDL2
from kivy.graphics.transformation import Matrix
# late import
VKeyboard = None
android = None
class Keyboard(EventDispatcher):
'''Keyboard interface that is returned by
:meth:`WindowBase.request_keyboard`. When you request a keyboard,
you'll get an instance of this class. Whatever the keyboard input is
(system or virtual keyboard), you'll receive events through this
instance.
:Events:
`on_key_down`: keycode, text, modifiers
Fired when a new key is pressed down
`on_key_up`: keycode
Fired when a key is released (up)
Here is an example of how to request a Keyboard in accordance with the
current configuration:
.. include:: ../../examples/widgets/keyboardlistener.py
:literal:
'''
# Keycodes mapping, between str <-> int. These keycodes are
# currently taken from pygame.key. But when a new provider will be
# used, it must do the translation to these keycodes too.
keycodes = {
# specials keys
'backspace': 8, 'tab': 9, 'enter': 13, 'rshift': 303, 'shift': 304,
'alt': 308, 'rctrl': 306, 'lctrl': 305,
'super': 309, 'alt-gr': 307, 'compose': 311, 'pipe': 310,
'capslock': 301, 'escape': 27, 'spacebar': 32, 'pageup': 280,
'pagedown': 281, 'end': 279, 'home': 278, 'left': 276, 'up':
273, 'right': 275, 'down': 274, 'insert': 277, 'delete': 127,
'numlock': 300, 'print': 144, 'screenlock': 145, 'pause': 19,
# a-z keys
'a': 97, 'b': 98, 'c': 99, 'd': 100, 'e': 101, 'f': 102, 'g': 103,
'h': 104, 'i': 105, 'j': 106, 'k': 107, 'l': 108, 'm': 109, 'n': 110,
'o': 111, 'p': 112, 'q': 113, 'r': 114, 's': 115, 't': 116, 'u': 117,
'v': 118, 'w': 119, 'x': 120, 'y': 121, 'z': 122,
# 0-9 keys
'0': 48, '1': 49, '2': 50, '3': 51, '4': 52,
'5': 53, '6': 54, '7': 55, '8': 56, '9': 57,
# numpad
'numpad0': 256, 'numpad1': 257, 'numpad2': 258, 'numpad3': 259,
'numpad4': 260, 'numpad5': 261, 'numpad6': 262, 'numpad7': 263,
'numpad8': 264, 'numpad9': 265, 'numpaddecimal': 266,
'numpaddivide': 267, 'numpadmul': 268, 'numpadsubstract': 269,
'numpadadd': 270, 'numpadenter': 271,
# F1-15
'f1': 282, 'f2': 283, 'f3': 284, 'f4': 285, 'f5': 286, 'f6': 287,
'f7': 288, 'f8': 289, 'f9': 290, 'f10': 291, 'f11': 292, 'f12': 293,
'f13': 294, 'f14': 295, 'f15': 296,
# other keys
'(': 40, ')': 41,
'[': 91, ']': 93,
'{': 123, '}': 125,
':': 59, ';': 59,
'=': 61, '+': 43,
'-': 45, '_': 95,
'/': 47, '*': 42,
'?': 47,
'`': 96, '~': 126,
'´': 180, '¦': 166,
'\\': 92, '|': 124,
'"': 34, "'": 39,
',': 44, '.': 46,
'<': 60, '>': 62,
'@': 64, '!': 33,
'#': 35, '$': 36,
'%': 37, '^': 94,
'&': 38, '¬': 172,
'¨': 168, '…': 8230,
'ù': 249, 'à': 224,
'é': 233, 'è': 232,
}
__events__ = ('on_key_down', 'on_key_up', 'on_textinput')
def __init__(self, **kwargs):
super(Keyboard, self).__init__()
#: Window which the keyboard is attached too
self.window = kwargs.get('window', None)
#: Callback that will be called when the keyboard is released
self.callback = kwargs.get('callback', None)
#: Target that have requested the keyboard
self.target = kwargs.get('target', None)
#: VKeyboard widget, if allowed by the configuration
self.widget = kwargs.get('widget', None)
def on_key_down(self, keycode, text, modifiers):
pass
def on_key_up(self, keycode):
pass
def on_textinput(self, text):
pass
def release(self):
'''Call this method to release the current keyboard.
This will ensure that the keyboard is no longer attached to your
callback.'''
if self.window:
self.window.release_keyboard(self.target)
def _on_window_textinput(self, instance, text):
return self.dispatch('on_textinput', text)
def _on_window_key_down(self, instance, keycode, scancode, text,
modifiers):
keycode = (keycode, self.keycode_to_string(keycode))
if text == '\x04':
Window.trigger_keyboard_height()
return
return self.dispatch('on_key_down', keycode, text, modifiers)
def _on_window_key_up(self, instance, keycode, *largs):
keycode = (keycode, self.keycode_to_string(keycode))
return self.dispatch('on_key_up', keycode)
def _on_vkeyboard_key_down(self, instance, keycode, text, modifiers):
if keycode is None:
keycode = text.lower()
keycode = (self.string_to_keycode(keycode), keycode)
return self.dispatch('on_key_down', keycode, text, modifiers)
def _on_vkeyboard_key_up(self, instance, keycode, text, modifiers):
if keycode is None:
keycode = text
keycode = (self.string_to_keycode(keycode), keycode)
return self.dispatch('on_key_up', keycode)
def _on_vkeyboard_textinput(self, instance, text):
return self.dispatch('on_textinput', text)
def string_to_keycode(self, value):
'''Convert a string to a keycode number according to the
:attr:`Keyboard.keycodes`. If the value is not found in the
keycodes, it will return -1.
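
        For example (assuming ``kb`` is a :class:`Keyboard` instance returned
        by :meth:`WindowBase.request_keyboard`)::

            kb.string_to_keycode('a')    # 97
            kb.string_to_keycode('???')  # -1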
'''
return Keyboard.keycodes.get(value, -1)
def keycode_to_string(self, value):
'''Convert a keycode number to a string according to the
:attr:`Keyboard.keycodes`. If the value is not found in the
keycodes, it will return ''.
'''
keycodes = list(Keyboard.keycodes.values())
if value in keycodes:
return list(Keyboard.keycodes.keys())[keycodes.index(value)]
return ''
class WindowBase(EventDispatcher):
'''WindowBase is an abstract window widget for any window implementation.
:Parameters:
`borderless`: str, one of ('0', '1')
Set the window border state. Check the
:mod:`~kivy.config` documentation for a
more detailed explanation on the values.
`fullscreen`: str, one of ('0', '1', 'auto', 'fake')
Make the window fullscreen. Check the
:mod:`~kivy.config` documentation for a
more detailed explanation on the values.
`width`: int
Width of the window.
`height`: int
Height of the window.
:Events:
`on_motion`: etype, motionevent
Fired when a new :class:`~kivy.input.motionevent.MotionEvent` is
dispatched
`on_touch_down`:
Fired when a new touch event is initiated.
`on_touch_move`:
Fired when an existing touch event changes location.
`on_touch_up`:
Fired when an existing touch event is terminated.
`on_draw`:
Fired when the :class:`Window` is being drawn.
`on_flip`:
Fired when the :class:`Window` GL surface is being flipped.
`on_rotate`: rotation
Fired when the :class:`Window` is being rotated.
`on_close`:
Fired when the :class:`Window` is closed.
`on_request_close`:
Fired when the event loop wants to close the window, or if the
escape key is pressed and `exit_on_escape` is `True`. If a function
bound to this event returns `True`, the window will not be closed.
            If the event is triggered because of the keyboard escape key,
the keyword argument `source` is dispatched along with a value of
`keyboard` to the bound functions.
.. versionadded:: 1.9.0
`on_keyboard`: key, scancode, codepoint, modifier
Fired when the keyboard is used for input.
.. versionchanged:: 1.3.0
The *unicode* parameter has been deprecated in favor of
codepoint, and will be removed completely in future versions.
`on_key_down`: key, scancode, codepoint
Fired when a key pressed.
.. versionchanged:: 1.3.0
The *unicode* parameter has been deprecated in favor of
codepoint, and will be removed completely in future versions.
`on_key_up`: key, scancode, codepoint
Fired when a key is released.
.. versionchanged:: 1.3.0
                The *unicode* parameter has been deprecated in favor of
codepoint, and will be removed completely in future versions.
`on_dropfile`: str
Fired when a file is dropped on the application.
'''
__instance = None
__initialized = False
_fake_fullscreen = False
_density = 1
# private properties
_size = ListProperty([0, 0])
_modifiers = ListProperty([])
_rotation = NumericProperty(0)
_clearcolor = ObjectProperty([0, 0, 0, 1])
children = ListProperty([])
'''List of the children of this window.
:attr:`children` is a :class:`~kivy.properties.ListProperty` instance and
defaults to an empty list.
Use :meth:`add_widget` and :meth:`remove_widget` to manipulate the list of
children. Don't manipulate the list directly unless you know what you are
doing.
'''
parent = ObjectProperty(None, allownone=True)
'''Parent of this window.
:attr:`parent` is a :class:`~kivy.properties.ObjectProperty` instance and
defaults to None. When created, the parent is set to the window itself.
You must take care of it if you are doing a recursive check.
'''
icon = StringProperty()
def _get_modifiers(self):
return self._modifiers
modifiers = AliasProperty(_get_modifiers, None)
'''List of keyboard modifiers currently active.
'''
def _get_size(self):
r = self._rotation
w, h = self._size
if self._density != 1:
w, h = self._win._get_gl_size()
if self.softinput_mode == 'resize':
h -= self.keyboard_height
if r in (0, 180):
return w, h
return h, w
def _set_size(self, size):
if self._size != size:
r = self._rotation
if r in (0, 180):
self._size = size
else:
self._size = size[1], size[0]
self.dispatch('on_resize', *size)
return True
else:
return False
size = AliasProperty(_get_size, _set_size, bind=('_size', ))
'''Get the rotated size of the window. If :attr:`rotation` is set, then the
size will change to reflect the rotation.
'''
def _get_clearcolor(self):
return self._clearcolor
def _set_clearcolor(self, value):
if value is not None:
if type(value) not in (list, tuple):
raise Exception('Clearcolor must be a list or tuple')
if len(value) != 4:
raise Exception('Clearcolor must contain 4 values')
self._clearcolor = value
clearcolor = AliasProperty(_get_clearcolor, _set_clearcolor,
bind=('_clearcolor', ))
'''Color used to clear the window.
::
from kivy.core.window import Window
# red background color
Window.clearcolor = (1, 0, 0, 1)
# don't clear background at all
Window.clearcolor = None
.. versionchanged:: 1.7.2
The clearcolor default value is now: (0, 0, 0, 1).
'''
# make some property read-only
def _get_width(self):
_size = self._size
if self._density != 1:
_size = self._win._get_gl_size()
r = self._rotation
if r == 0 or r == 180:
return _size[0]
return _size[1]
width = AliasProperty(_get_width, None, bind=('_rotation', '_size'))
'''Rotated window width.
:attr:`width` is a read-only :class:`~kivy.properties.AliasProperty`.
'''
def _get_height(self):
'''Rotated window height'''
r = self._rotation
_size = self._size
if self._density != 1:
_size = self._win._get_gl_size()
kb = self.keyboard_height if self.softinput_mode == 'resize' else 0
if r == 0 or r == 180:
return _size[1] - kb
return _size[0] - kb
height = AliasProperty(_get_height, None, bind=('_rotation', '_size'))
'''Rotated window height.
:attr:`height` is a read-only :class:`~kivy.properties.AliasProperty`.
'''
def _get_center(self):
return self.width / 2., self.height / 2.
center = AliasProperty(_get_center, None, bind=('width', 'height'))
'''Center of the rotated window.
:attr:`center` is a :class:`~kivy.properties.AliasProperty`.
'''
def _get_rotation(self):
return self._rotation
def _set_rotation(self, x):
x = int(x % 360)
if x == self._rotation:
return
if x not in (0, 90, 180, 270):
raise ValueError('can rotate only 0, 90, 180, 270 degrees')
self._rotation = x
if self.initialized is False:
return
self.dispatch('on_resize', *self.size)
self.dispatch('on_rotate', x)
rotation = AliasProperty(_get_rotation, _set_rotation,
bind=('_rotation', ))
'''Get/set the window content rotation. Can be one of 0, 90, 180, 270
degrees.
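
    For example::

        from kivy.core.window import Window
        Window.rotation = 90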
'''
softinput_mode = OptionProperty('', options=('', 'pan', 'scale', 'resize'))
    '''This specifies the behavior of the window contents when the soft
    keyboard is displayed on mobile platforms. Can be one of '', 'pan',
    'scale' or 'resize'.
    When '', the main window is left as it is, allowing the user to use
    :attr:`keyboard_height` to manage the window contents the way they want.
    When 'pan', the main window pans, moving the bottom part of the window so
    that it is always above the keyboard.
    When 'resize', the window is resized and the contents scaled to fit the
    remaining space.
    .. versionadded:: 1.9.0
    :attr:`softinput_mode` is an :class:`OptionProperty` and defaults to ''.
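
    For example::

        from kivy.core.window import Window
        Window.softinput_mode = 'pan'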
'''
_keyboard_changed = BooleanProperty(False)
def _upd_kbd_height(self, *kargs):
self._keyboard_changed = not self._keyboard_changed
def _get_ios_kheight(self):
return 0
def _get_android_kheight(self):
global android
if not android:
import android
return android.get_keyboard_height()
def _get_kheight(self):
if platform == 'android':
return self._get_android_kheight()
if platform == 'ios':
return self._get_ios_kheight()
return 0
keyboard_height = AliasProperty(_get_kheight, None,
bind=('_keyboard_changed',))
    '''Returns the height of the soft keyboard/IME on mobile platforms.
    Will return 0 if not on a mobile platform or if the IME is not active.
    .. versionadded:: 1.9.0
    :attr:`keyboard_height` is a read-only :class:`AliasProperty` and
    defaults to 0.
'''
def _set_system_size(self, size):
self._size = size
def _get_system_size(self):
if self.softinput_mode == 'resize':
return self._size[0], self._size[1] - self.keyboard_height
return self._size
system_size = AliasProperty(
_get_system_size,
_set_system_size,
bind=('_size', ))
'''Real size of the window ignoring rotation.
'''
borderless = BooleanProperty(False)
'''When set to True, this property removes the window border/decoration.
.. versionadded:: 1.9.0
:attr:`borderless` is a :class:`BooleanProperty`, defaults to False.
'''
fullscreen = OptionProperty(False, options=(True, False, 'auto', 'fake'))
'''This property sets the fullscreen mode of the window. Available options
are: True, False, 'auto', 'fake'. Check the :mod:`~kivy.config`
documentation for a more detailed explanation on the values.
.. versionadded:: 1.2.0
.. note::
The 'fake' option has been deprecated, use the :attr:`borderless`
property instead.
'''
mouse_pos = ObjectProperty([0, 0])
'''2d position of the mouse within the window.
.. versionadded:: 1.2.0
'''
@property
def __self__(self):
return self
top = NumericProperty(None, allownone=True)
left = NumericProperty(None, allownone=True)
position = OptionProperty('auto', options=['auto', 'custom'])
render_context = ObjectProperty(None)
canvas = ObjectProperty(None)
title = StringProperty('Kivy')
__events__ = (
'on_draw', 'on_flip', 'on_rotate', 'on_resize', 'on_close',
'on_motion', 'on_touch_down', 'on_touch_move', 'on_touch_up',
'on_mouse_down', 'on_mouse_move', 'on_mouse_up', 'on_keyboard',
'on_key_down', 'on_key_up', 'on_textinput', 'on_dropfile',
'on_request_close', 'on_joy_axis', 'on_joy_hat', 'on_joy_ball',
'on_joy_button_down', "on_joy_button_up")
def __new__(cls, **kwargs):
if cls.__instance is None:
cls.__instance = EventDispatcher.__new__(cls)
return cls.__instance
def __init__(self, **kwargs):
force = kwargs.pop('force', False)
# don't init window 2 times,
# except if force is specified
if WindowBase.__instance is not None and not force:
return
self.initialized = False
self._is_desktop = Config.getboolean('kivy', 'desktop')
# create a trigger for update/create the window when one of window
# property changes
self.trigger_create_window = Clock.create_trigger(
self.create_window, -1)
# Create a trigger for updating the keyboard height
self.trigger_keyboard_height = Clock.create_trigger(
self._upd_kbd_height, .5)
# set the default window parameter according to the configuration
if 'borderless' not in kwargs:
kwargs['borderless'] = Config.getboolean('graphics', 'borderless')
if 'fullscreen' not in kwargs:
fullscreen = Config.get('graphics', 'fullscreen')
if fullscreen not in ('auto', 'fake'):
fullscreen = fullscreen.lower() in ('true', '1', 'yes', 'yup')
kwargs['fullscreen'] = fullscreen
if 'width' not in kwargs:
kwargs['width'] = Config.getint('graphics', 'width')
if 'height' not in kwargs:
kwargs['height'] = Config.getint('graphics', 'height')
if 'rotation' not in kwargs:
kwargs['rotation'] = Config.getint('graphics', 'rotation')
if 'position' not in kwargs:
kwargs['position'] = Config.getdefault('graphics', 'position',
'auto')
if 'top' in kwargs:
kwargs['position'] = 'custom'
kwargs['top'] = kwargs['top']
else:
kwargs['top'] = Config.getint('graphics', 'top')
if 'left' in kwargs:
kwargs['position'] = 'custom'
kwargs['left'] = kwargs['left']
else:
kwargs['left'] = Config.getint('graphics', 'left')
kwargs['_size'] = (kwargs.pop('width'), kwargs.pop('height'))
super(WindowBase, self).__init__(**kwargs)
# bind all the properties that need to recreate the window
self._bind_create_window()
self.bind(size=self.trigger_keyboard_height,
rotation=self.trigger_keyboard_height)
self.bind(softinput_mode=lambda *dt: self.update_viewport(),
keyboard_height=lambda *dt: self.update_viewport())
# init privates
self._system_keyboard = Keyboard(window=self)
self._keyboards = {'system': self._system_keyboard}
self._vkeyboard_cls = None
self.children = []
self.parent = self
# before creating the window
import kivy.core.gl # NOQA
# configure the window
self.create_window()
# attach modules + listener event
EventLoop.set_window(self)
Modules.register_window(self)
EventLoop.add_event_listener(self)
# manage keyboard(s)
self.configure_keyboards()
# assign the default context of the widget creation
if not hasattr(self, '_context'):
self._context = get_current_context()
# mark as initialized
self.initialized = True
def _bind_create_window(self):
for prop in (
'fullscreen', 'borderless', 'position', 'top',
'left', '_size', 'system_size'):
self.bind(**{prop: self.trigger_create_window})
def _unbind_create_window(self):
for prop in (
'fullscreen', 'borderless', 'position', 'top',
'left', '_size', 'system_size'):
self.unbind(**{prop: self.trigger_create_window})
def toggle_fullscreen(self):
'''Toggle between fullscreen and windowed mode.
.. deprecated:: 1.9.0
Use :attr:`fullscreen` instead.
'''
pass
def maximize(self):
'''Maximizes the window. This method should be used on desktop
platforms only.
.. versionadded:: 1.9.0
.. note::
This feature requires a SDL2 window provider and is currently only
supported on desktop platforms.
.. warning::
This code is still experimental, and its API may be subject to
change in a future version.
'''
Logger.warning('Window: maximize() is not implemented in the current '
'window provider.')
def minimize(self):
'''Minimizes the window. This method should be used on desktop
platforms only.
.. versionadded:: 1.9.0
.. note::
This feature requires a SDL2 window provider and is currently only
supported on desktop platforms.
.. warning::
This code is still experimental, and its API may be subject to
change in a future version.
'''
Logger.warning('Window: minimize() is not implemented in the current '
'window provider.')
def restore(self):
'''Restores the size and position of a maximized or minimized window.
This method should be used on desktop platforms only.
.. versionadded:: 1.9.0
.. note::
This feature requires a SDL2 window provider and is currently only
supported on desktop platforms.
.. warning::
This code is still experimental, and its API may be subject to
change in a future version.
'''
Logger.warning('Window: restore() is not implemented in the current '
'window provider.')
def hide(self):
'''Hides the window. This method should be used on desktop
platforms only.
.. versionadded:: 1.9.0
.. note::
This feature requires a SDL2 window provider and is currently only
supported on desktop platforms.
.. warning::
This code is still experimental, and its API may be subject to
change in a future version.
'''
Logger.warning('Window: hide() is not implemented in the current '
'window provider.')
def show(self):
'''Shows the window. This method should be used on desktop
platforms only.
.. versionadded:: 1.9.0
.. note::
This feature requires a SDL2 window provider and is currently only
supported on desktop platforms.
.. warning::
This code is still experimental, and its API may be subject to
change in a future version.
'''
Logger.warning('Window: show() is not implemented in the current '
'window provider.')
def close(self):
'''Close the window'''
pass
def create_window(self, *largs):
'''Will create the main window and configure it.
.. warning::
This method is called automatically at runtime. If you call it, it
will recreate a RenderContext and Canvas. This means you'll have a
new graphics tree, and the old one will be unusable.
This method exist to permit the creation of a new OpenGL context
AFTER closing the first one. (Like using runTouchApp() and
stopTouchApp()).
This method has only been tested in a unittest environment and
is not suitable for Applications.
Again, don't use this method unless you know exactly what you are
doing!
'''
# just to be sure, if the trigger is set, and if this method is
# manually called, unset the trigger
Clock.unschedule(self.create_window)
# ensure the window creation will not be called twice
if platform in ('android', 'ios'):
self._unbind_create_window()
if not self.initialized:
from kivy.core.gl import init_gl
init_gl()
# create the render context and canvas, only the first time.
from kivy.graphics import RenderContext, Canvas
self.render_context = RenderContext()
self.canvas = Canvas()
self.render_context.add(self.canvas)
else:
# if we get initialized more than once, then reload opengl state
# after the second time.
# XXX check how it's working on embed platform.
if platform == 'linux' or Window.__class__.__name__ == 'WindowSDL':
# on linux, it's safe for just sending a resize.
self.dispatch('on_resize', *self.system_size)
else:
# on other platform, window are recreated, we need to reload.
from kivy.graphics.context import get_context
get_context().reload()
Clock.schedule_once(lambda x: self.canvas.ask_update(), 0)
self.dispatch('on_resize', *self.system_size)
# ensure the gl viewport is correct
self.update_viewport()
def on_flip(self):
'''Flip between buffers (event)'''
self.flip()
def flip(self):
'''Flip between buffers'''
pass
def _update_childsize(self, instance, value):
self.update_childsize([instance])
def add_widget(self, widget, canvas=None):
'''Add a widget to a window'''
widget.parent = self
self.children.insert(0, widget)
canvas = self.canvas.before if canvas == 'before' else \
self.canvas.after if canvas == 'after' else self.canvas
canvas.add(widget.canvas)
self.update_childsize([widget])
widget.bind(
pos_hint=self._update_childsize,
size_hint=self._update_childsize,
size=self._update_childsize,
pos=self._update_childsize)
def remove_widget(self, widget):
'''Remove a widget from a window
'''
        if widget not in self.children:
return
self.children.remove(widget)
if widget.canvas in self.canvas.children:
self.canvas.remove(widget.canvas)
elif widget.canvas in self.canvas.after.children:
self.canvas.after.remove(widget.canvas)
elif widget.canvas in self.canvas.before.children:
self.canvas.before.remove(widget.canvas)
widget.parent = None
widget.unbind(
pos_hint=self._update_childsize,
size_hint=self._update_childsize,
size=self._update_childsize,
pos=self._update_childsize)
def clear(self):
'''Clear the window with the background color'''
# XXX FIXME use late binding
from kivy.graphics.opengl import glClearColor, glClear, \
GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT, GL_STENCIL_BUFFER_BIT
cc = self._clearcolor
if cc is not None:
glClearColor(*cc)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT |
GL_STENCIL_BUFFER_BIT)
def set_title(self, title):
'''Set the window title.
.. versionadded:: 1.0.5
'''
self.title = title
def set_icon(self, filename):
'''Set the icon of the window.
.. versionadded:: 1.0.5
'''
self.icon = filename
def to_widget(self, x, y, initial=True, relative=False):
return (x, y)
def to_window(self, x, y, initial=True, relative=False):
return (x, y)
def _apply_transform(self, m):
return m
def get_window_matrix(self, x=0, y=0):
m = Matrix()
m.translate(x, y, 0)
return m
def get_root_window(self):
return self
def get_parent_window(self):
return self
def get_parent_layout(self):
return None
def on_draw(self):
self.clear()
self.render_context.draw()
def on_motion(self, etype, me):
'''Event called when a Motion Event is received.
:Parameters:
`etype`: str
One of 'begin', 'update', 'end'
`me`: :class:`~kivy.input.motionevent.MotionEvent`
The Motion Event currently dispatched.
'''
if me.is_touch:
w, h = self.system_size
if platform == 'ios' or self._density != 1:
w, h = self.size
me.scale_for_screen(w, h, rotation=self._rotation,
smode=self.softinput_mode,
kheight=self.keyboard_height)
if etype == 'begin':
self.dispatch('on_touch_down', me)
elif etype == 'update':
self.dispatch('on_touch_move', me)
elif etype == 'end':
self.dispatch('on_touch_up', me)
FocusBehavior._handle_post_on_touch_up(me)
def on_touch_down(self, touch):
'''Event called when a touch down event is initiated.
.. versionchanged:: 1.9.0
The touch `pos` is now transformed to window coordinates before
this method is called. Before, the touch `pos` coordinate would be
`(0, 0)` when this method was called.
'''
for w in self.children[:]:
if w.dispatch('on_touch_down', touch):
return True
def on_touch_move(self, touch):
'''Event called when a touch event moves (changes location).
.. versionchanged:: 1.9.0
The touch `pos` is now transformed to window coordinates before
this method is called. Before, the touch `pos` coordinate would be
`(0, 0)` when this method was called.
'''
for w in self.children[:]:
if w.dispatch('on_touch_move', touch):
return True
def on_touch_up(self, touch):
'''Event called when a touch event is released (terminated).
.. versionchanged:: 1.9.0
The touch `pos` is now transformed to window coordinates before
this method is called. Before, the touch `pos` coordinate would be
`(0, 0)` when this method was called.
'''
for w in self.children[:]:
if w.dispatch('on_touch_up', touch):
return True
def on_resize(self, width, height):
'''Event called when the window is resized.'''
self.update_viewport()
def update_viewport(self):
from kivy.graphics.opengl import glViewport
from kivy.graphics.transformation import Matrix
from math import radians
w, h = self.system_size
if self._density != 1:
w, h = self.size
smode = self.softinput_mode
kheight = self.keyboard_height
w2, h2 = w / 2., h / 2.
r = radians(self.rotation)
x, y = 0, 0
_h = h
if smode:
y = kheight
if smode == 'scale':
_h -= kheight
# prepare the viewport
glViewport(x, y, w, _h)
# do projection matrix
projection_mat = Matrix()
projection_mat.view_clip(0.0, w, 0.0, h, -1.0, 1.0, 0)
self.render_context['projection_mat'] = projection_mat
# do modelview matrix
modelview_mat = Matrix().translate(w2, h2, 0)
modelview_mat = modelview_mat.multiply(Matrix().rotate(r, 0, 0, 1))
w, h = self.size
w2, h2 = w / 2., h / 2.
modelview_mat = modelview_mat.multiply(Matrix().translate(-w2, -h2, 0))
self.render_context['modelview_mat'] = modelview_mat
# redraw canvas
self.canvas.ask_update()
# and update childs
self.update_childsize()
def update_childsize(self, childs=None):
width, height = self.size
if childs is None:
childs = self.children
for w in childs:
shw, shh = w.size_hint
if shw and shh:
w.size = shw * width, shh * height
elif shw:
w.width = shw * width
elif shh:
w.height = shh * height
for key, value in w.pos_hint.items():
if key == 'x':
w.x = value * width
elif key == 'right':
w.right = value * width
elif key == 'y':
w.y = value * height
elif key == 'top':
w.top = value * height
elif key == 'center_x':
w.center_x = value * width
elif key == 'center_y':
w.center_y = value * height
def screenshot(self, name='screenshot{:04d}.png'):
        '''Save the currently displayed image to a file.
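
        A usage sketch (an incrementing number is inserted before the file
        extension automatically, e.g. ``my_screenshot0001.png``)::

            Window.screenshot(name='my_screenshot.png')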
'''
i = 0
path = None
if name != 'screenshot{:04d}.png':
_ext = name.split('.')[-1]
name = ''.join((name[:-(len(_ext) + 1)], '{:04d}.', _ext))
while True:
i += 1
path = join(getcwd(), name.format(i))
if not exists(path):
break
return path
def on_rotate(self, rotation):
'''Event called when the screen has been rotated.
'''
pass
def on_close(self, *largs):
'''Event called when the window is closed'''
Modules.unregister_window(self)
EventLoop.remove_event_listener(self)
def on_request_close(self, *largs, **kwargs):
'''Event called before we close the window. If a bound function returns
`True`, the window will not be closed. If the the event is triggered
because of the keyboard escape key, the keyword argument `source` is
dispatched along with a value of `keyboard` to the bound functions.
.. warning::
When the bound function returns True the window will not be closed,
so use with care because the user would not be able to close the
program, even if the red X is clicked.
'''
pass
def on_mouse_down(self, x, y, button, modifiers):
        '''Event called when a mouse button is pressed'''
pass
def on_mouse_move(self, x, y, modifiers):
'''Event called when the mouse is moved with buttons pressed'''
pass
def on_mouse_up(self, x, y, button, modifiers):
        '''Event called when a mouse button is released'''
pass
def on_joy_axis(self, stickid, axisid, value):
'''Event called when a joystick has a stick or other axis moved
.. versionadded:: 1.9.0'''
pass
def on_joy_hat(self, stickid, hatid, value):
'''Event called when a joystick has a hat/dpad moved
.. versionadded:: 1.9.0'''
pass
def on_joy_ball(self, stickid, ballid, value):
'''Event called when a joystick has a ball moved
.. versionadded:: 1.9.0'''
pass
def on_joy_button_down(self, stickid, buttonid):
'''Event called when a joystick has a button pressed
.. versionadded:: 1.9.0'''
pass
def on_joy_button_up(self, stickid, buttonid):
'''Event called when a joystick has a button released
.. versionadded:: 1.9.0'''
pass
def on_keyboard(self, key, scancode=None, codepoint=None,
modifier=None, **kwargs):
'''Event called when keyboard is used.
.. warning::
Some providers may omit `scancode`, `codepoint` and/or `modifier`.
'''
if 'unicode' in kwargs:
Logger.warning("The use of the unicode parameter is deprecated, "
"and will be removed in future versions. Use "
"codepoint instead, which has identical "
"semantics.")
# Quit if user presses ESC or the typical OSX shortcuts CMD+q or CMD+w
# TODO If just CMD+w is pressed, only the window should be closed.
is_osx = platform == 'darwin'
if WindowBase.on_keyboard.exit_on_escape:
if key == 27 or all([is_osx, key in [113, 119], modifier == 1024]):
if not self.dispatch('on_request_close', source='keyboard'):
stopTouchApp()
self.close()
return True
if Config:
on_keyboard.exit_on_escape = Config.getboolean('kivy', 'exit_on_escape')
def __exit(section, name, value):
WindowBase.__dict__['on_keyboard'].exit_on_escape = \
Config.getboolean('kivy', 'exit_on_escape')
Config.add_callback(__exit, 'kivy', 'exit_on_escape')
def on_key_down(self, key, scancode=None, codepoint=None,
modifier=None, **kwargs):
'''Event called when a key is down (same arguments as on_keyboard)'''
if 'unicode' in kwargs:
Logger.warning("The use of the unicode parameter is deprecated, "
"and will be removed in future versions. Use "
"codepoint instead, which has identical "
"semantics.")
def on_key_up(self, key, scancode=None, codepoint=None,
modifier=None, **kwargs):
'''Event called when a key is released (same arguments as on_keyboard)
'''
if 'unicode' in kwargs:
Logger.warning("The use of the unicode parameter is deprecated, "
"and will be removed in future versions. Use "
"codepoint instead, which has identical "
"semantics.")
def on_textinput(self, text):
        '''Event called when text is entered, i.e. alphanumeric or other
        non-control keys, or a set of keys. As it is not guaranteed whether we
        get one character or multiple ones, this event supports handling
        multiple characters.
.. versionadded:: 1.9.0
'''
pass
def on_dropfile(self, filename):
'''Event called when a file is dropped on the application.
.. warning::
            This event currently works with the sdl2 window provider, and
            with the pygame window provider on MacOSX with a patched version
            of pygame.
This event is left in place for further evolution
(ios, android etc.)
.. versionadded:: 1.2.0
'''
pass
@reify
def dpi(self):
'''Return the DPI of the screen. If the implementation doesn't support
any DPI lookup, it will just return 96.
.. warning::
This value is not cross-platform. Use
:attr:`kivy.base.EventLoop.dpi` instead.
'''
return 96.
def configure_keyboards(self):
# Configure how to provide keyboards (virtual or not)
# register system keyboard to listening keys from window
sk = self._system_keyboard
self.bind(
on_key_down=sk._on_window_key_down,
on_key_up=sk._on_window_key_up,
on_textinput=sk._on_window_textinput)
# use the device's real keyboard
self.use_syskeyboard = True
# use the device's real keyboard
self.allow_vkeyboard = False
# one single vkeyboard shared between all widgets
self.single_vkeyboard = True
# the single vkeyboard is always sitting at the same position
self.docked_vkeyboard = False
# now read the configuration
mode = Config.get('kivy', 'keyboard_mode')
if mode not in ('', 'system', 'dock', 'multi', 'systemanddock',
'systemandmulti'):
Logger.critical('Window: unknown keyboard mode %r' % mode)
# adapt mode according to the configuration
if mode == 'system':
self.use_syskeyboard = True
self.allow_vkeyboard = False
self.single_vkeyboard = True
self.docked_vkeyboard = False
elif mode == 'dock':
self.use_syskeyboard = False
self.allow_vkeyboard = True
self.single_vkeyboard = True
self.docked_vkeyboard = True
elif mode == 'multi':
self.use_syskeyboard = False
self.allow_vkeyboard = True
self.single_vkeyboard = False
self.docked_vkeyboard = False
elif mode == 'systemanddock':
self.use_syskeyboard = True
self.allow_vkeyboard = True
self.single_vkeyboard = True
self.docked_vkeyboard = True
elif mode == 'systemandmulti':
self.use_syskeyboard = True
self.allow_vkeyboard = True
self.single_vkeyboard = False
self.docked_vkeyboard = False
Logger.info(
'Window: virtual keyboard %sallowed, %s, %s' % (
'' if self.allow_vkeyboard else 'not ',
'single mode' if self.single_vkeyboard else 'multiuser mode',
'docked' if self.docked_vkeyboard else 'not docked'))
def set_vkeyboard_class(self, cls):
'''.. versionadded:: 1.0.8
Set the VKeyboard class to use. If set to None, it will use the
:class:`kivy.uix.vkeyboard.VKeyboard`.
'''
self._vkeyboard_cls = cls
def release_all_keyboards(self):
'''.. versionadded:: 1.0.8
This will ensure that no virtual keyboard / system keyboard is
requested. All instances will be closed.
'''
for key in list(self._keyboards.keys())[:]:
keyboard = self._keyboards[key]
if keyboard:
keyboard.release()
def request_keyboard(self, callback, target, input_type='text'):
'''.. versionadded:: 1.0.4
Internal widget method to request the keyboard. This method is rarely
required by the end-user as it is handled automatically by the
:class:`~kivy.uix.textinput.TextInput`. We expose it in case you want
to handle the keyboard manually for unique input scenarios.
A widget can request the keyboard, indicating a callback to call
when the keyboard is released (or taken by another widget).
:Parameters:
`callback`: func
Callback that will be called when the keyboard is
closed. This can be because somebody else requested the
keyboard or the user closed it.
`target`: Widget
Attach the keyboard to the specified `target`. This should be
the widget that requested the keyboard. Ensure you have a
different target attached to each keyboard if you're working in
a multi user mode.
.. versionadded:: 1.0.8
`input_type`: string
Choose the type of soft keyboard to request. Can be one of
'text', 'number', 'url', 'mail', 'datetime', 'tel', 'address'.
.. note::
`input_type` is currently only honored on mobile devices.
.. versionadded:: 1.8.0
:Return:
An instance of :class:`Keyboard` containing the callback, target,
and if the configuration allows it, a
:class:`~kivy.uix.vkeyboard.VKeyboard` instance attached as a
*.widget* property.
.. note::
The behavior of this function is heavily influenced by the current
`keyboard_mode`. Please see the Config's
:ref:`configuration tokens <configuration-tokens>` section for
more information.
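
        A minimal usage sketch (``self`` is assumed to be a widget that
        defines ``_keyboard_closed`` and ``_on_keyboard_down`` callbacks)::

            self._keyboard = Window.request_keyboard(
                self._keyboard_closed, self, 'text')
            self._keyboard.bind(on_key_down=self._on_keyboard_down)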
'''
# release any previous keyboard attached.
self.release_keyboard(target)
# if we can use virtual vkeyboard, activate it.
if self.allow_vkeyboard:
keyboard = None
# late import
global VKeyboard
if VKeyboard is None and self._vkeyboard_cls is None:
from kivy.uix.vkeyboard import VKeyboard
self._vkeyboard_cls = VKeyboard
# if the keyboard doesn't exist, create it.
key = 'single' if self.single_vkeyboard else target
if key not in self._keyboards:
vkeyboard = self._vkeyboard_cls()
keyboard = Keyboard(widget=vkeyboard, window=self)
vkeyboard.bind(
on_key_down=keyboard._on_vkeyboard_key_down,
on_key_up=keyboard._on_vkeyboard_key_up,
on_textinput=keyboard._on_vkeyboard_textinput)
self._keyboards[key] = keyboard
else:
keyboard = self._keyboards[key]
# configure vkeyboard
keyboard.target = keyboard.widget.target = target
keyboard.callback = keyboard.widget.callback = callback
# add to the window
self.add_widget(keyboard.widget)
# only after add, do dock mode
keyboard.widget.docked = self.docked_vkeyboard
keyboard.widget.setup_mode()
else:
# system keyboard, just register the callback.
keyboard = self._system_keyboard
keyboard.callback = callback
keyboard.target = target
# use system (hardware) keyboard according to flag
if self.allow_vkeyboard and self.use_syskeyboard:
self.unbind(
on_key_down=keyboard._on_window_key_down,
on_key_up=keyboard._on_window_key_up,
on_textinput=keyboard._on_window_textinput)
self.bind(
on_key_down=keyboard._on_window_key_down,
on_key_up=keyboard._on_window_key_up,
on_textinput=keyboard._on_window_textinput)
return keyboard
def release_keyboard(self, target=None):
'''.. versionadded:: 1.0.4
Internal method for the widget to release the real-keyboard. Check
:meth:`request_keyboard` to understand how it works.
'''
if self.allow_vkeyboard:
key = 'single' if self.single_vkeyboard else target
if key not in self._keyboards:
return
keyboard = self._keyboards[key]
callback = keyboard.callback
if callback:
keyboard.callback = None
callback()
keyboard.target = None
self.remove_widget(keyboard.widget)
if key != 'single' and key in self._keyboards:
del self._keyboards[key]
elif self._system_keyboard.callback:
# this way will prevent possible recursion.
callback = self._system_keyboard.callback
self._system_keyboard.callback = None
callback()
return True
#: Instance of a :class:`WindowBase` implementation
window_impl = []
if platform == 'linux':
window_impl += [('egl_rpi', 'window_egl_rpi', 'WindowEglRpi')]
if USE_SDL2:
window_impl += [('sdl2', 'window_sdl2', 'WindowSDL')]
else:
window_impl += [
('pygame', 'window_pygame', 'WindowPygame')]
if platform == 'linux':
window_impl += [('x11', 'window_x11', 'WindowX11')]
Window = core_select_lib('window', window_impl, True)
|
py | 1a2ecbcb6ca1ff23a1edf9dcb9c5598434fd0b11 | import pytest
from nmcli.data import Connection
from nmcli.dummy._connection import DummyConnectionControl
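# These tests exercise the DummyConnectionControl test double: mutating calls
# record their arguments, canned results are replayed, and the configured
# exception class is raised when `raise_error` is set.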
def test_call():
result_call = [Connection('a', 'b', 'ethernet', 'eth0')]
c = DummyConnectionControl(result_call)
assert c() == result_call
def test_call_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c()
def test_add():
c = DummyConnectionControl()
conn_type = 'ethernet'
options = {
'key': 'value'
}
ifname = 'eth0'
name = 'MyHome'
autoconnect = True
c.add(conn_type, options, ifname, name, autoconnect)
assert c.add_args[0] == (conn_type, options, ifname, name, autoconnect)
c.add(conn_type, options, ifname, name, False)
assert c.add_args[1] == (conn_type, options, ifname, name, False)
c.add(conn_type, options, ifname, name)
assert c.add_args[2] == (conn_type, options, ifname, name, None)
def test_add_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c.add('ethernet')
def test_modify():
c = DummyConnectionControl()
options = {
'key': 'value'
}
name = 'MyHome'
c.modify(name, options)
assert c.modify_args[0] == (name, options)
def test_modify_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c.modify('ethernet', {'key': 'value'})
def test_delete():
c = DummyConnectionControl()
name = 'MyHome'
c.delete(name)
assert c.delete_args[0] == name
def test_delete_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c.delete('ethernet')
def test_up():
c = DummyConnectionControl()
name = 'MyHome'
c.up(name)
assert c.up_args[0] == name
def test_up_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c.up('ethernet')
def test_down():
c = DummyConnectionControl()
name = 'MyHome'
c.down(name)
assert c.down_args[0] == name
def test_down_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c.down('ethernet')
def test_show():
result_show = {
'key': 'value'
}
c = DummyConnectionControl(result_show=result_show)
name = 'MyHome'
assert c.show(name) == result_show
def test_show_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c.show('MyHome')
def test_show_when_no_arguments_are_passed():
c = DummyConnectionControl()
with pytest.raises(ValueError):
c.show('MyHome')
def test_reload():
c = DummyConnectionControl()
c.reload()
assert c.called_reload == 1
def test_reload_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c.reload()
|
py | 1a2ecc01c02d5a7aaafdb9765bdc7895de56fd7b | # Copyright 2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Method test classes for djangolg."""
from __future__ import print_function
from __future__ import unicode_literals
from django.test import TestCase
from djangolg import dialects, methods
from djangolg.methods.base import BaseMethod
class MethodTestCase(TestCase):
"""Test djangolg methods."""
def test_available_methods(self):
"""Test available_methods helper."""
methods_map = methods.available_methods("map")
methods_list = methods.available_methods("list")
assert isinstance(methods_map, dict)
assert isinstance(methods_list, list)
try:
methods.available_methods("wrong")
except Exception as e:
assert isinstance(e, ValueError)
assert "{}".format(e) == "invalid output type: wrong"
def test_get_method(self):
"""Test get_method helper."""
for method_name in methods.available_methods("list"):
method = methods.get_method(name=method_name)
assert isinstance(method, BaseMethod)
try:
methods.get_method()
except Exception as e:
assert isinstance(e, methods.MethodNotFound)
try:
methods.get_method(name=dict())
except Exception as e:
assert isinstance(e, methods.LookingGlassMethodError)
def test_method_init_failure(self):
"""Test method initiation failure."""
try:
BaseMethod(dialect="string")
except Exception as e:
assert isinstance(e, TypeError)
def test_method_dialect_functions(self):
"""Test method dialect getter and setter and other methods."""
for method_name in methods.available_methods(output="list"):
method = methods.get_method(name=method_name)
assert method.dialect is None
try:
method.dialect = "wrong_type"
except Exception as e:
assert isinstance(e, TypeError)
for dialect_name in dialects.available_dialects(output="list"):
dialect = dialects.get_dialect(dialect_name)
method.dialect = dialect
assert method.dialect is dialect
if method.options:
for index, option in method.option_choices():
assert method.get_command(target=method.test_target,
option_index=index)
else:
assert method.get_command(target=method.test_target)
|
py | 1a2ecc4174f6c45060cfce4f1cd969cb4664de63 | import numpy as np
import tensorflow as tf
from numbers import Number
import gym
import time
from spinup.algos.tf1.sac1 import core
from spinup.algos.tf1.sac1.core import get_vars
from spinup.utils.logx import EpochLogger
from gym.spaces import Box, Discrete
from spinup.utils.frame_stack import FrameStack
import os
class ReplayBuffer:
"""
A simple FIFO experience replay buffer for SAC agents.
"""
def __init__(self, obs_dim, act_dim, size):
self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)
self.rews_buf = np.zeros(size, dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.ptr, self.size, self.max_size = 0, 0, size
def store(self, obs, act, rew, next_obs, done):
self.obs1_buf[self.ptr] = obs
self.obs2_buf[self.ptr] = next_obs
self.acts_buf[self.ptr] = act
self.rews_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr+1) % self.max_size
self.size = min(self.size+1, self.max_size)
def sample_batch(self, batch_size=32):
idxs = np.random.randint(0, self.size, size=batch_size)
return dict(obs1=self.obs1_buf[idxs],
obs2=self.obs2_buf[idxs],
acts=self.acts_buf[idxs],
rews=self.rews_buf[idxs],
done=self.done_buf[idxs])
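# Rough usage sketch for the buffer above (shapes are illustrative only):
#
#     buf = ReplayBuffer(obs_dim=8, act_dim=2, size=int(1e6))
#     buf.store(o, a, r, o2, d)        # called once per environment step
#     batch = buf.sample_batch(256)    # dict with keys obs1, obs2, acts, rews, done
#
# Once ptr wraps past max_size, the oldest transitions are overwritten (FIFO).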
"""
Soft Actor-Critic
(With slight variations that bring it closer to TD3)
"""
def sac1(args, env_fn, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=0,
steps_per_epoch=1000, epochs=100, replay_size=int(2e6), gamma=0.99, reward_scale=1.0,
polyak=0.995, lr=5e-4, alpha=0.2, batch_size=200, start_steps=10000,
max_ep_len_train=1000, max_ep_len_test=1000, logger_kwargs=dict(), save_freq=1):
"""
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: A function which takes in placeholder symbols
for state, ``x_ph``, and action, ``a_ph``, and returns the main
outputs from the agent's Tensorflow computation graph:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``mu`` (batch, act_dim) | Computes mean actions from policy
| given states.
``pi`` (batch, act_dim) | Samples actions from policy given
| states.
``logp_pi`` (batch,) | Gives log probability, according to
| the policy, of the action sampled by
| ``pi``. Critical: must be differentiable
| with respect to policy parameters all
| the way through action sampling.
``q1`` (batch,) | Gives one estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q2`` (batch,) | Gives another estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q1_pi`` (batch,) | Gives the composition of ``q1`` and
| ``pi`` for states in ``x_ph``:
| q1(x, pi(x)).
``q2_pi`` (batch,) | Gives the composition of ``q2`` and
| ``pi`` for states in ``x_ph``:
| q2(x, pi(x)).
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the actor_critic
function you provided to SAC.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
lr (float): Learning rate (used for policy/value/alpha learning).
alpha (float/'auto'): Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.) / 'auto': alpha is automated.
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
max_ep_len_train / max_ep_len_test (int): Maximum length of a trajectory /
episode / rollout during training and testing respectively.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
if not args.is_test:
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
tf.set_random_seed(seed)
np.random.seed(seed)
env, test_env = env_fn(3), env_fn(1)
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = env.action_space.high[0]
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space
# Inputs to computation graph
x_ph, a_ph, x2_ph, r_ph, d_ph = core.placeholders(obs_dim, act_dim, obs_dim, None, None)
# Main outputs from computation graph
with tf.variable_scope('main'):
mu, pi, logp_pi, logp_pi2, q1, q2, q1_pi, q2_pi = actor_critic(x_ph, x2_ph, a_ph, **ac_kwargs)
# Target value network
with tf.variable_scope('target'):
_, _, logp_pi_, _, _, _,q1_pi_, q2_pi_= actor_critic(x2_ph, x2_ph, a_ph, **ac_kwargs)
# Experience buffer
replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
# Count variables
var_counts = tuple(core.count_vars(scope) for scope in
['main/pi', 'main/q1', 'main/q2', 'main'])
print(('\nNumber of parameters: \t pi: %d, \t' + \
'q1: %d, \t q2: %d, \t total: %d\n')%var_counts)
######
if alpha == 'auto':
target_entropy = (-np.prod(env.action_space.shape))
log_alpha = tf.get_variable( 'log_alpha', dtype=tf.float32, initializer=0.0)
alpha = tf.exp(log_alpha)
alpha_loss = tf.reduce_mean(-log_alpha * tf.stop_gradient(logp_pi + target_entropy))
alpha_optimizer = tf.train.AdamOptimizer(learning_rate=lr*0.1, name='alpha_optimizer')
train_alpha_op = alpha_optimizer.minimize(loss=alpha_loss, var_list=[log_alpha])
######
# Min Double-Q:
min_q_pi = tf.minimum(q1_pi_, q2_pi_)
# Targets for Q and V regression
v_backup = tf.stop_gradient(min_q_pi - alpha * logp_pi2)
q_backup = r_ph + gamma*(1-d_ph)*v_backup
# Soft actor-critic losses
pi_loss = tf.reduce_mean(alpha * logp_pi - q1_pi)
q1_loss = 0.5 * tf.reduce_mean((q_backup - q1)**2)
q2_loss = 0.5 * tf.reduce_mean((q_backup - q2)**2)
value_loss = q1_loss + q2_loss
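# In equation form, for a sampled transition (s, a, r, s', d) the ops above implement:
#   target:  y = r + gamma * (1 - d) * ( min_i Q_targ_i(s', a') - alpha * log pi(a'|s') ),  a' ~ pi(.|s')
#   critics: L_Qi = 0.5 * E[ (Q_i(s, a) - y)^2 ]                 for i = 1, 2
#   actor:   L_pi = E[ alpha * log pi(a_pi|s) - Q_1(s, a_pi) ],   a_pi ~ pi(.|s)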
# Policy train op
# (has to be separate from value train op, because q1_pi appears in pi_loss)
pi_optimizer = tf.train.AdamOptimizer(learning_rate=lr)
train_pi_op = pi_optimizer.minimize(pi_loss, var_list=get_vars('main/pi'))
# Value train op
# (control dep of train_pi_op because sess.run otherwise evaluates in nondeterministic order)
value_optimizer = tf.train.AdamOptimizer(learning_rate=lr)
value_params = get_vars('main/q')
with tf.control_dependencies([train_pi_op]):
train_value_op = value_optimizer.minimize(value_loss, var_list=value_params)
# Polyak averaging for target variables
# (control flow because sess.run otherwise evaluates in nondeterministic order)
with tf.control_dependencies([train_value_op]):
target_update = tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
# All ops to call during one training step
if isinstance(alpha, Number):
step_ops = [pi_loss, q1_loss, q2_loss, q1, q2, logp_pi, tf.identity(alpha),
train_pi_op, train_value_op, target_update]
else:
step_ops = [pi_loss, q1_loss, q2_loss, q1, q2, logp_pi, alpha,
train_pi_op, train_value_op, target_update, train_alpha_op]
# Initializing targets to match main variables
target_init = tf.group([tf.assign(v_targ, v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(target_init)
############################## save and restore ############################
saver = tf.train.Saver()
checkpoint_path = logger_kwargs['output_dir'] + '/checkpoints'
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
if args.is_test or args.is_restore_train:
ckpt = tf.train.get_checkpoint_state(checkpoint_path)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("Model restored.")
def get_action(o, deterministic=False):
act_op = mu if deterministic else pi
return sess.run(act_op, feed_dict={x_ph: o.reshape(1,-1)})[0]
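# mu is the deterministic (mean) action used for evaluation/testing, while pi draws a
# stochastic sample from the policy distribution and is used during training rollouts.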
############################## test ############################
if args.is_test:
test_env = gym.make(args.env)
ave_ep_ret = 0
for j in range(10000):
o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0
while not d: # (d or (ep_len == 2000)):
o, r, d, _ = test_env.step(get_action(o, True))
ep_ret += r
ep_len += 1
if args.test_render:
test_env.render()
ave_ep_ret = (j*ave_ep_ret + ep_ret)/(j+1)
print('ep_len', ep_len, 'ep_ret:', ep_ret, 'ave_ep_ret:',ave_ep_ret,'({}/10000)'.format(j+1) )
return
############################## train ############################
def test_agent(n=25):
global sess, mu, pi, q1, q2, q1_pi, q2_pi
for j in range(n):
o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0
while not(d or (ep_len == max_ep_len_test)):
# Take deterministic actions at test time
o, r, d, _ = test_env.step(get_action(o, True))
ep_ret += r
ep_len += 1
# test_env.render()
logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
start_time = time.time()
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
total_steps = steps_per_epoch * epochs
ep_index = 0
test_ep_ret_best = test_ep_ret = -10000.0
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
"""
Until start_steps have elapsed, randomly sample actions
from a uniform distribution for better exploration. Afterwards,
use the learned policy.
"""
if t > start_steps:
a = get_action(o)
else:
a = env.action_space.sample()
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
# d = False if ep_len==max_ep_len_train else d
# Store experience to replay buffer
replay_buffer.store(o, a, r, o2, d)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
# End of episode. Training (ep_len times).
if d or (ep_len == max_ep_len_train):
ep_index += 1
print('episode: {}, ep_len: {}, reward: {}'.format(ep_index, ep_len, ep_ret/reward_scale))
"""
Perform all SAC updates at the end of the trajectory.
This is a slight difference from the SAC specified in the
original paper.
"""
for j in range(int(1.5*ep_len)):
batch = replay_buffer.sample_batch(batch_size)
feed_dict = {x_ph: batch['obs1'],
x2_ph: batch['obs2'],
a_ph: batch['acts'],
r_ph: batch['rews'],
d_ph: batch['done'],
}
# step_ops = [pi_loss, q1_loss, q2_loss, q1, q2, logp_pi, alpha, train_pi_op, train_value_op, target_update]
outs = sess.run(step_ops, feed_dict)
logger.store(LossPi=outs[0], LossQ1=outs[1], LossQ2=outs[2],
Q1Vals=outs[3], Q2Vals=outs[4],
LogPi=outs[5], Alpha=outs[6])
logger.store(EpRet=ep_ret/reward_scale, EpLen=ep_len)
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# End of epoch wrap-up
if t > 0 and t % steps_per_epoch == 0:
epoch = t // steps_per_epoch
test_agent(10)
# test_ep_ret = logger.get_stats('TestEpRet')[0]
# print('TestEpRet', test_ep_ret, 'Best:', test_ep_ret_best)
if logger.get_stats('TestEpRet')[0] >= 180:
print('Recalculating TestEpRet...')
test_agent(100)
test_ep_ret = logger.get_stats('TestEpRet')[0]
# logger.epoch_dict['TestEpRet'] = []
print('TestEpRet', test_ep_ret, 'Best:', test_ep_ret_best)
# logger.store(): store the data; logger.log_tabular(): log the data; logger.dump_tabular(): write the data
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('Num_Ep', ep_index)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('TestEpRet', with_min_and_max=False)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('TestEpLen', average_only=True)
logger.log_tabular('TotalEnvInteracts', t)
logger.log_tabular('Alpha',average_only=True)
logger.log_tabular('Q1Vals', with_min_and_max=True)
logger.log_tabular('Q2Vals', with_min_and_max=True)
# logger.log_tabular('VVals', with_min_and_max=True)
logger.log_tabular('LogPi', with_min_and_max=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ1', average_only=True)
logger.log_tabular('LossQ2', average_only=True)
# logger.log_tabular('LossV', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
# Save model
if ((epoch % save_freq == 0) or (epoch == epochs - 1)) and test_ep_ret > test_ep_ret_best:
save_path = saver.save(sess, checkpoint_path+'/model.ckpt', t)
print("Model saved in path: %s" % save_path)
test_ep_ret_best = test_ep_ret
if test_ep_ret >= 200:
print("Model saved in path: %s" % save_path)
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(ep_index, test_ep_ret))
exit()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='LunarLanderContinuous-v2') # 'Pendulum-v0'
parser.add_argument('--is_restore_train', type=bool, default=False)
parser.add_argument('--is_test', type=bool, default=False)
parser.add_argument('--test_render', type=bool, default=False)
parser.add_argument('--max_ep_len_test', type=int, default=2000) # 'BipedalWalkerHardcore-v2' max_ep_len is 2000
parser.add_argument('--max_ep_len_train', type=int, default=1000) # max_ep_len_train < 2000//3 # 'BipedalWalkerHardcore-v2' max_ep_len is 2000
parser.add_argument('--start_steps', type=int, default=100)
parser.add_argument('--hid', type=int, default=300)
parser.add_argument('--l', type=int, default=1)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--seed', '-s', type=int, default=np.random.randint(1, 1001)) # np.random.random_integers is deprecated/removed in recent NumPy
parser.add_argument('--epochs', type=int, default=10000)
parser.add_argument('--alpha', default='auto', help="alpha can be either 'auto' or a float (e.g. 0.2).")
parser.add_argument('--reward_scale', type=float, default=1.0)
parser.add_argument('--act_noise', type=float, default=0.3)
parser.add_argument('--obs_noise', type=float, default=0.0)
parser.add_argument('--exp_name', type=str, default='sac1_LunarLanderContinuous-v2_debug3')
parser.add_argument('--stack_frames', type=int, default=4)
args = parser.parse_args()
from spinup.utils.run_utils import setup_logger_kwargs
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
class Wrapper(object):
def __init__(self, env, action_repeat):
self._env = env
self.action_repeat = action_repeat
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
r = 0.0
for _ in range(self.action_repeat):
obs_, reward_, done_, info_ = self._env.step(action)
reward_ = reward_ if reward_ > -99.0 else 0.0
r = r + reward_
if done_:
return obs_, r, done_, info_
return obs_, r, done_, info_
# env = FrameStack(env, args.stack_frames)
env_lunar1 = gym.make(args.env)
env_lunar3 = Wrapper(gym.make(args.env),3)
sac1(args, lambda n : env_lunar3 if n==3 else env_lunar1, actor_critic=core.mlp_actor_critic,
ac_kwargs=dict(hidden_sizes=[200,150]), start_steps = args.start_steps,
gamma=args.gamma, seed=args.seed, epochs=args.epochs, alpha=args.alpha,
logger_kwargs=logger_kwargs, lr = args.lr, reward_scale=args.reward_scale,
max_ep_len_train = args.max_ep_len_train, max_ep_len_test=args.max_ep_len_test)
|
py | 1a2ecc6bad59824fa1f6ac55ea8fd73202781d3d | # -*- coding: utf-8 -*-
"""
hashing package.
"""
from pyrin.packaging.base import Package
class HashingPackage(Package):
"""
hashing package class.
"""
NAME = __name__
DEPENDS = []
COMPONENT_NAME = 'security.hashing.component'
|
py | 1a2ecd7107a85cf463bbc690de50a35bcbc58805 | """1248. Count Number of Nice Subarrays
Medium"""
class Solution(object):
def numberOfSubarrays(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
#########
m = [0]*50001
res = 0
curr = 0
m[0] = 1
for i in range(len(nums)):
curr += (nums[i]%2)
if curr >= k:
res += m[curr-k]
m[curr] += 1
return res
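# Worked example for the prefix-count approach above (illustration only):
#   nums = [1, 1, 2, 1, 1], k = 3
#   running odd-count `curr` after each element: 1, 2, 2, 3, 4
#   at curr = 3 we add m[0] = 1; at curr = 4 we add m[1] = 1  ->  res = 2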
#######
return self.atMost(nums, k)-self.atMost(nums, k-1)
def atMost(self, nums, k):
res = 0
count = 0
left = 0
right = 0
while right < len(nums):
n = nums[right]
count += n%2
while count >= k:
c = nums[left]
count -= c%2
left += 1
res += right - left + 1
right += 1
return res
#################
def atMost(k):
res = 0
left = 0
for right in range(len(nums)):
k -= nums[right]%2
while k < 0:
k += nums[left]%2
left += 1
res += right -left + 1
return res
return atMost(k) - atMost(k-1)
#
|
py | 1a2ecd80be15ea48380f7886225f68a675e972bf | import numpy as np
import pandas as pd
from bokeh.plotting import figure, output_file, show
# generate some synthetic time series for six different categories
cats = list("abcdef")
yy = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
yy[g == l] += i // 2
df = pd.DataFrame(dict(score=yy, group=g))
# find the quartiles and IQR for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
upper = q3 + 1.5*iqr
lower = q1 - 1.5*iqr
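# Tukey's rule, numerically: with q1 = 1.0 and q3 = 3.0, iqr = 2.0, so the fences are
# lower = -2.0 and upper = 6.0; points outside that range are treated as outliers below.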
# find the outliers for each category
def outliers(group):
cat = group.name
return group[(group.score > upper.loc[cat]['score']) | (group.score < lower.loc[cat]['score'])]['score']
out = groups.apply(outliers).dropna()
# prepare outlier data for plotting, we need coordinates for every outlier.
if not out.empty:
outx = []
outy = []
for keys in out.index:
outx.append(keys[0])
outy.append(out.loc[keys[0]].loc[keys[1]])
p = figure(tools="", background_fill_color="#efefef", x_range=cats, toolbar_location=None)
# if no outliers, shrink lengths of stems to be no longer than the minimums or maximums
qmin = groups.quantile(q=0.00)
qmax = groups.quantile(q=1.00)
upper.score = [min([x,y]) for (x,y) in zip(list(qmax.loc[:,'score']),upper.score)]
lower.score = [max([x,y]) for (x,y) in zip(list(qmin.loc[:,'score']),lower.score)]
# stems
p.segment(cats, upper.score, cats, q3.score, line_color="black")
p.segment(cats, lower.score, cats, q1.score, line_color="black")
# boxes
p.vbar(cats, 0.7, q2.score, q3.score, fill_color="#E08E79", line_color="black")
p.vbar(cats, 0.7, q1.score, q2.score, fill_color="#3B8686", line_color="black")
# whiskers (almost-0 height rects simpler than segments)
p.rect(cats, lower.score, 0.2, 0.01, line_color="black")
p.rect(cats, upper.score, 0.2, 0.01, line_color="black")
# outliers
if not out.empty:
p.circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = "white"
p.grid.grid_line_width = 2
p.xaxis.major_label_text_font_size="16px"
output_file("boxplot.html", title="boxplot.py example")
show(p)
|
py | 1a2ecd9f1d6d31168200762d078ed37afef0b3f4 | #!/usr/bin/env python
# coding=utf-8
"""
__created__ = '4/22/16'
__author__ = 'deling.ma'
"""
import multiprocessing
bind = '0.0.0.0:7777'
max_requests = 10000
keepalive = 5
proc_name = 'fitahol'
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gaiohttp'
loglevel = 'info'
errorlog = '-'
x_forwarded_for_header = 'X-FORWARDED-FOR'
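# Sizing note: workers follows the common (2 x CPU cores) + 1 heuristic above,
# e.g. a 4-core host ends up with 9 worker processes.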
|
py | 1a2ece63ba632d7fd8e3f9695efe9d0224cde988 | import numpy as np
import tensorflow as tf
import tensorflow.compat.v1.keras as keras
import pickle
import os
from math import ceil
from utils import preprocess_flags, save_kernel, save_kernel_partial
from utils import load_data,load_model,load_model_json,load_kernel
from utils import data_folder,kernel_folder,arch_folder
def main(_):
FLAGS = tf.compat.v1.app.flags.FLAGS.flag_values_dict()
FLAGS = preprocess_flags(FLAGS)
globals().update(FLAGS)
if init_dist != "gaussian":
raise NotImplementedError("Initialization distributions other than Gaussian are not implemented for computing kernels!")
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
print(rank)
if n_gpus>0:
os.environ["CUDA_VISIBLE_DEVICES"]=str((rank)%n_gpus)
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
set_session = keras.backend.set_session
config.log_device_placement = False # to log device placement (on which device the operation ran)
config.allow_soft_placement = True # so that it uses any other existing and supported devices, if the requested GPU:0 isn't found
sess = tf.compat.v1.Session(config=config)
set_session(sess) # set this TensorFlow session as the default session for Keras
train_images,flat_train_images,_,test_images,_ = load_data(FLAGS)
image_size = train_images.shape[1]
number_channels = train_images.shape[-1]
#print("image_size", image_size)
X = train_images
flat_X = flat_train_images
if compute_for_GP_train:
test_images = test_images[:1000]
data = test_images
tp_order = np.concatenate([[0,len(data.shape)-1], np.arange(1, len(data.shape)-1)])
print(data.shape,tp_order)
flat_data = np.transpose(data, tp_order) # NHWC -> NCHW # this is because the cnn GP kernels assume this
flat_test_images = np.array([test_image.flatten() for test_image in flat_data])
Xfull = np.concatenate([flat_train_images,flat_test_images])
flat_X = Xfull
X = np.concatenate([train_images,test_images])
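# Shape illustration for the transpose/flatten above (assuming, e.g., 32x32 RGB inputs):
# (N, 32, 32, 3) in NHWC becomes (N, 3, 32, 32) in NCHW, and each image is then
# flattened to a length-3072 row, since the GP kernel code expects channel-major vectors.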
print("compute kernel", network, dataset)
# COMPUTE KERNEL
if use_empirical_NTK:
from nngp_kernel.empirical_ntk import empirical_NTK
print(ceil(int(X.shape[0])*n_samples_repeats))
from tensorflow.keras.models import model_from_json
model = load_model(FLAGS)
K = empirical_NTK(model,X)#,sess=sess)
elif use_empirical_K:
from nngp_kernel.empirical_kernel import empirical_K
print("n_samples_repeats",n_samples_repeats)
print(ceil(int(X.shape[0])*n_samples_repeats))
arch_json_string = load_model_json(FLAGS)
K = empirical_K(arch_json_string,X,ceil(int(X.shape[0])*n_samples_repeats),sigmaw=sigmaw,sigmab=sigmab,n_gpus=n_gpus,empirical_kernel_batch_size=empirical_kernel_batch_size, sess=sess, truncated_init_dist=truncated_init_dist,data_parallelism=False,store_partial_kernel=store_partial_kernel,partial_kernel_n_proc=partial_kernel_n_proc,partial_kernel_index=partial_kernel_index)
if rank == 0:
if not (use_empirical_K or use_empirical_NTK):
if network=="cnn":
from nngp_kernel.cnn_kernel import kernel_matrix
K = kernel_matrix(flat_X,image_size=image_size,number_channels=number_channels,filter_sizes=filter_sizes,padding=padding,strides=strides,sigmaw=sigmaw,sigmab=sigmab,n_gpus=n_gpus)
elif network=="resnet":
from nngp_kernel.resnet_kernel import kernel_matrix
K = kernel_matrix(flat_X,depth=number_layers,image_size=image_size,number_channels=number_channels,n_blocks=3,sigmaw=sigmaw,sigmab=sigmab,n_gpus=n_gpus)
elif network == "fc":
from nngp_kernel.fc_kernel import kernel_matrix
K = kernel_matrix(flat_X,number_layers=number_layers,sigmaw=sigmaw,sigmab=sigmab,n_gpus=n_gpus)
print(K)
'''SAVE KERNEL'''
if store_partial_kernel:
save_kernel_partial(K,FLAGS,partial_kernel_index)
else:
save_kernel(K,FLAGS)
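# Note on the parallel layout above: when n_gpus > 0 every MPI rank pins itself to
# GPU (rank % n_gpus) for the empirical-kernel paths, while the analytic cnn/resnet/fc
# kernels are only computed and saved on rank 0.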
if __name__ == '__main__':
f = tf.compat.v1.app.flags
from utils import define_default_flags
define_default_flags(f)
f.DEFINE_boolean('compute_for_GP_train', False, "Whether to add a bit of test set to kernel, to be able to use it for GP training")
f.DEFINE_boolean('store_partial_kernel', False, "Whether to store the kernels partially on a file to free the processes")
f.DEFINE_integer('empirical_kernel_batch_size', 256, "batch size to use when computing the empirical kernel, larger models need smaller values, but smaller models can use larger values")
f.DEFINE_integer('partial_kernel_n_proc', 175, "number of processes over which we are parallelizing the when computing partial kernels and saving")
f.DEFINE_integer('partial_kernel_index', 0, "index of the process when using partial_kernels method")
tf.compat.v1.app.run()
|
py | 1a2eceda5f4aee7b68d75c4aa2f4346d9be6b522 | """
This namespace holds Inference decorators.
"""
from .inference import Inference
# --------------------------------------------------------------------------
def get(label):
"""
This is an inference decorator which can be used to decorate
a getter method.
.. code-block:: python
>>> import recollection
>>>
>>> class Foo(recollection.Inference):
...
... def __init__(self):
... super(Foo, self).__init__()
...
... # -- Demonstrate a private attribute with getter
... # -- and setter methods
... self._letter = 1
...
... # -- Declare that this is a memento getter
... @recollection.infer.get('foobar')
... def get_letter(self):
... return self._letter
:param label: You must specify a label to be used when storing
this variable. For the registration to be processed there
must be a correlating setter defined on the class with the same
label.
:type label: str
:return:
"""
def inner_decor(func):
def inner(*args, **kwargs):
return func(*args, **kwargs)
# -- Assign our attributes
inner.label = label
inner.is_memento_getter = True
inner.is_memento_setter = False
return inner
return inner_decor
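# The attributes attached to the wrapper above (label, is_memento_getter,
# is_memento_setter) are presumably what the Inference base class inspects at runtime
# to pair a getter with the setter that declares the same label.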
# --------------------------------------------------------------------------
def store(label, copy_value=True, serialise=False):
"""
This is an inference decorator which can be used to decorate
a getter method.
.. code-block:: python
>>> import recollection
>>>
>>> class Foo(recollection.Inference):
...
... def __init__(self):
... super(Foo, self).__init__()
...
... # -- Demonstrate a private attribute with getter
... # -- and setter methods
... self._letter = 1
...
... # -- Declare that this is a memento setter
... @recollection.infer.store('letter', serialise=True)
... def set_letter(self, value):
... self._letter = value
>>> foo = Foo()
:param label: You must specify a label to be used when storing
this variable. For the registration to be processed there
must be a correlating setter defined on the class with the same
label.
:type label: str
:param copy_value: Default is True, this defines whether the object
being stored will be copied or referenced. Typically this is left as
True; however, if you want a reference to the object to be stored in
history rather than copies of the object, you should set this to False.
:type copy_value: bool
:param serialise: If true this will perform a serialisation of the
memento object each time the setter is called. This expects a
serialiser to be registered. The default is False.
:type serialise: bool
:return:
"""
def inner_decor(func):
def inner(*args, **kwargs):
result = func(*args, **kwargs)
# -- Now store
if isinstance(args[0], Inference):
args[0].memento.store(serialise=serialise)
return result
# -- Assign our attributes
inner.label = label
inner.is_memento_getter = False
inner.is_memento_setter = True
inner.copy_value = copy_value
return inner
return inner_decor
|
py | 1a2ed04be8d0202c2b5d6919530f398ef3156a94 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_equal, assert_false
from django.contrib.auth.models import User
from django.urls import reverse
import desktop.conf as desktop_conf
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import add_to_group
from desktop.models import Document
from hadoop.pseudo_hdfs4 import get_db_prefix, is_live_cluster
from beeswax import data_export
from beeswax.design import hql_query
from beeswax.data_export import download
from beeswax.models import SavedQuery, QueryHistory
from beeswax.server import dbms
from beeswax.test_base import get_query_server_config, wait_for_query_to_finish, fetch_query_result_data
from beeswax.tests import _make_query
from impala import conf
from impala.dbms import ImpalaDbms
LOG = logging.getLogger(__name__)
class MockDbms:
def get_databases(self):
return ['db1', 'db2']
def get_tables(self, database):
return ['table1', 'table2']
class TestMockedImpala:
def setUp(self):
self.client = make_logged_in_client()
# Mock DB calls as we don't need the real ones
self.prev_dbms = dbms.get
dbms.get = lambda a, b: MockDbms()
def tearDown(self):
# Remove monkey patching
dbms.get = self.prev_dbms
def test_basic_flow(self):
response = self.client.get("/impala/")
assert_true(re.search('Impala', response.content), response.content)
assert_true('Query Editor' in response.content)
response = self.client.get("/impala/execute/")
assert_true('Query Editor' in response.content)
def test_saved_queries(self):
user = User.objects.get(username='test')
response = self.client.get("/impala/list_designs")
assert_equal(len(response.context[0]['page'].object_list), 0)
beewax_query = impala_query = None
try:
beewax_query = create_saved_query('beeswax', user)
response = self.client.get("/impala/list_designs")
assert_equal(len(response.context[0]['page'].object_list), 0)
impala_query = create_saved_query('impala', user)
response = self.client.get("/impala/list_designs")
assert_equal(len(response.context[0]['page'].object_list), 1)
# Test my query page
QueryHistory.objects.create(owner=user, design=impala_query, query='', last_state=QueryHistory.STATE.available.value)
resp = self.client.get('/impala/my_queries')
assert_equal(len(resp.context[0]['q_page'].object_list), 1)
assert_equal(resp.context[0]['h_page'].object_list[0].design.name, 'create_saved_query')
finally:
if beewax_query is not None:
beewax_query.delete()
if impala_query is not None:
impala_query.delete()
class TestImpalaIntegration:
@classmethod
def setup_class(cls):
cls.finish = []
if not is_live_cluster():
raise SkipTest
cls.client = make_logged_in_client()
cls.user = User.objects.get(username='test')
add_to_group('test')
cls.db = dbms.get(cls.user, get_query_server_config(name='impala'))
cls.DATABASE = get_db_prefix(name='impala')
queries = ["""
DROP TABLE IF EXISTS %(db)s.tweets;
""" % {'db': cls.DATABASE}, """
DROP DATABASE IF EXISTS %(db)s CASCADE;
""" % {'db': cls.DATABASE}, """
CREATE DATABASE %(db)s;
""" % {'db': cls.DATABASE}]
for query in queries:
resp = _make_query(cls.client, query, database='default', local=False, server_name='impala')
resp = wait_for_query_to_finish(cls.client, resp, max=180.0)
content = json.loads(resp.content)
assert_true(content['status'] == 0, resp.content)
queries = ["""
CREATE TABLE tweets (row_num INTEGER, id_str STRING, text STRING) STORED AS PARQUET;
""", """
INSERT INTO TABLE tweets VALUES (1, "531091827395682000", "My dad looks younger than costa");
""", """
INSERT INTO TABLE tweets VALUES (2, "531091827781550000", "There is a thin line between your partner being vengeful and you reaping the consequences of your bad actions towards your partner.");
""", """
INSERT INTO TABLE tweets VALUES (3, "531091827768979000", "@Mustang_Sally83 and they need to get into you :))))");
""", """
INSERT INTO TABLE tweets VALUES (4, "531091827114668000", "@RachelZJohnson thank you rach!xxx");
""", """
INSERT INTO TABLE tweets VALUES (5, "531091827949309000", "i think @WWERollins was robbed of the IC title match this week on RAW also i wonder if he will get a rematch i hope so @WWE");
"""]
for query in queries:
resp = _make_query(cls.client, query, database=cls.DATABASE, local=False, server_name='impala')
resp = wait_for_query_to_finish(cls.client, resp, max=180.0)
content = json.loads(resp.content)
assert_true(content['status'] == 0, resp.content)
@classmethod
def teardown_class(cls):
# We need to drop tables before dropping the database
queries = ["""
DROP TABLE IF EXISTS %(db)s.tweets;
""" % {'db': cls.DATABASE}, """
DROP DATABASE %(db)s CASCADE;
""" % {'db': cls.DATABASE}]
for query in queries:
resp = _make_query(cls.client, query, database='default', local=False, server_name='impala')
resp = wait_for_query_to_finish(cls.client, resp, max=180.0)
# Check the cleanup
databases = cls.db.get_databases()
assert_false(cls.DATABASE in databases)
assert_false('%(db)s_other' % {'db': cls.DATABASE} in databases)
for f in cls.finish:
f()
def test_basic_flow(self):
dbs = self.db.get_databases()
assert_true('_impala_builtins' in dbs, dbs)
assert_true(self.DATABASE in dbs, dbs)
tables = self.db.get_tables(database=self.DATABASE)
assert_true('tweets' in tables, tables)
QUERY = """
SELECT * FROM tweets ORDER BY row_num;
"""
response = _make_query(self.client, QUERY, database=self.DATABASE, local=False, server_name='impala')
content = json.loads(response.content)
query_history = QueryHistory.get(content['id'])
response = wait_for_query_to_finish(self.client, response, max=180.0)
results = []
# Check that we multiple fetches get all the result set
while len(results) < 5:
content = fetch_query_result_data(self.client, response, n=len(results), server_name='impala') # We get less than 5 results most of the time, so increase offset
results += content['results']
assert_equal([1, 2, 3, 4, 5], [col[0] for col in results])
# Check start over
results_start_over = []
while len(results_start_over) < 5:
content = fetch_query_result_data(self.client, response, n=len(results_start_over), server_name='impala')
results_start_over += content['results']
assert_equal(results_start_over, results)
# Check cancel query
resp = self.client.post(reverse('impala:api_cancel_query', kwargs={'query_history_id': query_history.id}))
content = json.loads(resp.content)
assert_equal(0, content['status'])
def test_data_download(self):
hql = 'SELECT * FROM tweets %(limit)s'
FETCH_SIZE = data_export.FETCH_SIZE
data_export.FETCH_SIZE = 2 # Decrease fetch size to validate last fetch logic
try:
query = hql_query(hql % {'limit': ''})
handle = self.db.execute_and_wait(query)
# Get the result in csv. Should have 5 + 1 header row.
csv_resp = download(handle, 'csv', self.db)
csv_content = ''.join(csv_resp.streaming_content)
assert_equal(len(csv_content.strip().split('\n')), 5 + 1)
query = hql_query(hql % {'limit': 'LIMIT 0'})
handle = self.db.execute_and_wait(query)
csv_resp = download(handle, 'csv', self.db)
csv_content = ''.join(csv_resp.streaming_content)
assert_equal(len(csv_content.strip().split('\n')), 1)
query = hql_query(hql % {'limit': 'LIMIT 1'})
handle = self.db.execute_and_wait(query)
csv_resp = download(handle, 'csv', self.db)
csv_content = ''.join(csv_resp.streaming_content)
assert_equal(len(csv_content.strip().split('\n')), 1 + 1)
query = hql_query(hql % {'limit': 'LIMIT 2'})
handle = self.db.execute_and_wait(query)
csv_resp = download(handle, 'csv', self.db)
csv_content = ''.join(csv_resp.streaming_content)
assert_equal(len(csv_content.strip().split('\n')), 1 + 2)
finally:
data_export.FETCH_SIZE = FETCH_SIZE
def test_explain(self):
QUERY = """
SELECT * FROM tweets ORDER BY row_num;
"""
response = _make_query(self.client, QUERY, database=self.DATABASE, local=False, server_name='impala', submission_type='Explain')
json_response = json.loads(response.content)
assert_true('MERGING-EXCHANGE' in json_response['explanation'], json_response)
assert_true('SCAN HDFS' in json_response['explanation'], json_response)
def test_get_table_sample(self):
client = make_logged_in_client()
resp = client.get(reverse('impala:get_sample_data', kwargs={'database': self.DATABASE, 'table': 'tweets'}))
data = json.loads(resp.content)
assert_equal(0, data['status'], data)
assert_equal([u'row_num', u'id_str', u'text'], data['headers'], data)
assert_true(len(data['rows']), data)
def test_get_session(self):
session = None
try:
# Create open session
session = self.db.open_session(self.user)
resp = self.client.get(reverse("impala:api_get_session"))
data = json.loads(resp.content)
assert_true('properties' in data)
assert_true(data['properties'].get('http_addr'))
assert_true('session' in data, data)
assert_true('id' in data['session'], data['session'])
finally:
if session is not None:
try:
self.db.close_session(session)
except Exception:
pass
def test_get_settings(self):
resp = self.client.get(reverse("impala:get_settings"))
json_resp = json.loads(resp.content)
assert_equal(0, json_resp['status'])
assert_true('QUERY_TIMEOUT_S' in json_resp['settings'])
def test_invalidate_tables(self):
# Helper function to get Impala and Beeswax (HMS) tables
def get_impala_beeswax_tables():
impala_resp = self.client.get(reverse('impala:api_autocomplete_tables', kwargs={'database': self.DATABASE}))
impala_tables_meta = json.loads(impala_resp.content)['tables_meta']
impala_tables = [table['name'] for table in impala_tables_meta]
beeswax_resp = self.client.get(reverse('beeswax:api_autocomplete_tables', kwargs={'database': self.DATABASE}))
beeswax_tables_meta = json.loads(beeswax_resp.content)['tables_meta']
beeswax_tables = [table['name'] for table in beeswax_tables_meta]
return impala_tables, beeswax_tables
impala_tables, beeswax_tables = get_impala_beeswax_tables()
assert_equal(impala_tables, beeswax_tables,
"\ntest_invalidate_tables: `%s`\nImpala Tables: %s\nBeeswax Tables: %s" % (self.DATABASE, ','.join(impala_tables), ','.join(beeswax_tables)))
hql = """
CREATE TABLE new_table (a INT);
"""
resp = _make_query(self.client, hql, wait=True, local=False, max=180.0, database=self.DATABASE)
impala_tables, beeswax_tables = get_impala_beeswax_tables()
# New table is not found by Impala
assert_true('new_table' in beeswax_tables, beeswax_tables)
assert_false('new_table' in impala_tables, impala_tables)
resp = self.client.post(reverse('impala:invalidate'), {'database': self.DATABASE})
impala_tables, beeswax_tables = get_impala_beeswax_tables()
# Invalidate picks up new table
assert_equal(impala_tables, beeswax_tables,
"\ntest_invalidate_tables: `%s`\nImpala Tables: %s\nBeeswax Tables: %s" % (self.DATABASE, ','.join(impala_tables), ','.join(beeswax_tables)))
def test_refresh_table(self):
# Helper function to get Impala and Beeswax (HMS) columns
def get_impala_beeswax_columns():
impala_resp = self.client.get(reverse('impala:api_autocomplete_columns', kwargs={'database': self.DATABASE, 'table': 'tweets'}))
impala_columns = json.loads(impala_resp.content)['columns']
beeswax_resp = self.client.get(reverse('beeswax:api_autocomplete_columns', kwargs={'database': self.DATABASE, 'table': 'tweets'}))
beeswax_columns = json.loads(beeswax_resp.content)['columns']
return impala_columns, beeswax_columns
impala_columns, beeswax_columns = get_impala_beeswax_columns()
assert_equal(impala_columns, beeswax_columns,
"\ntest_refresh_table: `%s`.`%s`\nImpala Columns: %s\nBeeswax Columns: %s" % (self.DATABASE, 'tweets', ','.join(impala_columns), ','.join(beeswax_columns)))
hql = """
ALTER TABLE tweets ADD COLUMNS (new_column INT);
"""
resp = _make_query(self.client, hql, wait=True, local=False, max=180.0, database=self.DATABASE)
impala_columns, beeswax_columns = get_impala_beeswax_columns()
# New column is not found by Impala
assert_true('new_column' in beeswax_columns, beeswax_columns)
assert_false('new_column' in impala_columns, impala_columns)
resp = self.client.post(reverse('impala:refresh_table', kwargs={'database': self.DATABASE, 'table': 'tweets'}))
impala_columns, beeswax_columns = get_impala_beeswax_columns()
# Invalidate picks up new column
assert_equal(impala_columns, beeswax_columns,
"\ntest_refresh_table: `%s`.`%s`\nImpala Columns: %s\nBeeswax Columns: %s" % (self.DATABASE, 'tweets', ','.join(impala_columns), ','.join(beeswax_columns)))
def test_get_exec_summary(self):
query = """
SELECT COUNT(1) FROM tweets;
"""
response = _make_query(self.client, query, database=self.DATABASE, local=False, server_name='impala')
content = json.loads(response.content)
query_history = QueryHistory.get(content['id'])
wait_for_query_to_finish(self.client, response, max=180.0)
resp = self.client.post(reverse('impala:get_exec_summary', kwargs={'query_history_id': query_history.id}))
data = json.loads(resp.content)
assert_equal(0, data['status'], data)
assert_true('nodes' in data['summary'], data)
assert_true(len(data['summary']['nodes']) > 0, data['summary']['nodes'])
# Attempt to call get_exec_summary on a closed query
resp = self.client.post(reverse('impala:get_exec_summary', kwargs={'query_history_id': query_history.id}))
data = json.loads(resp.content)
assert_equal(0, data['status'], data)
assert_true('nodes' in data['summary'], data)
assert_true(len(data['summary']['nodes']) > 0, data['summary']['nodes'])
def test_get_runtime_profile(self):
query = """
SELECT COUNT(1) FROM tweets;
"""
response = _make_query(self.client, query, database=self.DATABASE, local=False, server_name='impala')
content = json.loads(response.content)
query_history = QueryHistory.get(content['id'])
wait_for_query_to_finish(self.client, response, max=180.0)
resp = self.client.post(reverse('impala:get_runtime_profile', kwargs={'query_history_id': query_history.id}))
data = json.loads(resp.content)
assert_equal(0, data['status'], data)
assert_true('Execution Profile' in data['profile'], data)
# Could be refactored with SavedQuery.create_empty()
def create_saved_query(app_name, owner):
query_type = SavedQuery.TYPES_MAPPING[app_name]
design = SavedQuery(owner=owner, type=query_type)
design.name = 'create_saved_query'
design.desc = ''
design.data = hql_query('show $tables', database='db1').dumps()
design.is_auto = False
design.save()
Document.objects.link(design, owner=design.owner, extra=design.type, name=design.name, description=design.desc)
return design
def test_ssl_cacerts():
for desktop_kwargs, conf_kwargs, expected in [
({'present': False}, {'present': False}, ''),
({'present': False}, {'data': 'local-cacerts.pem'}, 'local-cacerts.pem'),
({'data': 'global-cacerts.pem'}, {'present': False}, 'global-cacerts.pem'),
({'data': 'global-cacerts.pem'}, {'data': 'local-cacerts.pem'}, 'local-cacerts.pem'),
]:
resets = [
desktop_conf.SSL_CACERTS.set_for_testing(**desktop_kwargs),
conf.SSL.CACERTS.set_for_testing(**conf_kwargs),
]
try:
assert_equal(conf.SSL.CACERTS.get(), expected,
'desktop:%s conf:%s expected:%s got:%s' % (desktop_kwargs, conf_kwargs, expected, conf.SSL.CACERTS.get()))
finally:
for reset in resets:
reset()
def test_ssl_validate():
for desktop_kwargs, conf_kwargs, expected in [
({'present': False}, {'present': False}, True),
({'present': False}, {'data': False}, False),
({'present': False}, {'data': True}, True),
({'data': False}, {'present': False}, False),
({'data': False}, {'data': False}, False),
({'data': False}, {'data': True}, True),
({'data': True}, {'present': False}, True),
({'data': True}, {'data': False}, False),
({'data': True}, {'data': True}, True),
]:
resets = [
desktop_conf.SSL_VALIDATE.set_for_testing(**desktop_kwargs),
conf.SSL.VALIDATE.set_for_testing(**conf_kwargs),
]
try:
assert_equal(conf.SSL.VALIDATE.get(), expected,
'desktop:%s conf:%s expected:%s got:%s' % (desktop_kwargs, conf_kwargs, expected, conf.SSL.VALIDATE.get()))
finally:
for reset in resets:
reset()
class TestImpalaDbms():
def test_get_impala_nested_select(self):
assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'id', None), ('id', '`default`.`customers`'))
assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'email_preferences', 'categories/promos/'),
('email_preferences.categories.promos', '`default`.`customers`'))
assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'addresses', 'key'),
('key', '`default`.`customers`.`addresses`'))
assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'addresses', 'value/street_1/'),
('street_1', '`default`.`customers`.`addresses`'))
assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'orders', 'item/order_date'),
('order_date', '`default`.`customers`.`orders`'))
assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'orders', 'item/items/item/product_id'),
('product_id', '`default`.`customers`.`orders`.`items`'))
|
py | 1a2ed0f63f2195f08946f3130775ae4a141631b0 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import math
from functools import reduce # needed by concat() under Python 3, where reduce is no longer a builtin
def concat(list):
return reduce(lambda l, c: l+c, list, [])
def makedata(topic_count, word_count, paper_count, common_words, number_topics_paper=1, word_count_min=1, word_count_max=20, common_word_count_min=10, common_word_count_max=100):
# generate topic specific words
# res :: [[[(string, string, string, string)]]]
with open("lda_big.csv", "w", 10**9) as f:
for paper in range(paper_count):
for topicval in [random.randint(1, topic_count) for _ in range(number_topics_paper)]:
for word in range(word_count):
f.write(','.join(("paper-"+str(paper),"word-"+str(word)+str(topicval), str(random.randint(word_count_min,word_count_max)), str(topicval), "\n")))
# generate general words
# res2 :: [[(string, string, string, string)]]
for paper in range(paper_count):
for word in range(common_words):
f.write(','.join(("paper-"+str(paper),"word-"+str(word), str(int(math.ceil(random.uniform(common_word_count_min, common_word_count_max)))), "-1", "\n")))
if __name__ == '__main__':
makedata(10000, 1000, 20000, 100000)
|
py | 1a2ed1b0af58417fbaec840751451fe72805fac0 | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
from .misc import *
|
py | 1a2ed1d24a934ad713b86596821a24ca5ddd831a | #!/usr/bin/env python3
##############################################################################
# EVOLIFE http://evolife.telecom-paris.fr Jean-Louis Dessalles #
# Telecom Paris 2021 www.dessalles.fr #
# -------------------------------------------------------------------------- #
# License: Creative Commons BY-NC-SA #
##############################################################################
##############################################################################
# Draw curves offline using matplotlib #
##############################################################################
""" Draw curves offline.
Takes a csv file as input and draws curves.
Creates image file.
"""
import sys
import os
import re
import glob
import numpy as np
import matplotlib
matplotlib.use('Agg') # to use offline
import matplotlib.pyplot as plt
import logging # for tracing
# modifying print priority of console handler
logging.basicConfig(level='WARNING')
sys.path.append('..')
sys.path.append('../..')
sys.path.append('../../..')
sys.path.append('../../../..')
import Evolife.Scenarii.Parameters as EP
try: import TableCsv as CSV
except ImportError: import Evolife.Tools.TableCsv as CSV
def figsave(FileName):
if os.path.exists(FileName): os.remove(FileName)
plt.savefig(FileName)
print("%s created" % FileName)
def str2nb(x):
try: return int(x)
except ValueError: return float(x)
"""
plt.plot(*zip(*Maxima), c='k', linewidth=1, marker='o')
plt.clf()
plt.scatter(alphaValues, [p[1] for p in Prices], color=colours, s=44)
plt.plot(alphaValues, [p[1] for p in Prices], 'r', label='Signal prices')
plt.scatter(alphaValues, [thetaU(a, UC) for a in alphaValues], color=colours, s=44)
"""
class Plot:
def __init__(self, ExpeFile, FieldDraw=True, ConstantConfigFileName=None):
self.ExpeFile = os.path.splitext(ExpeFile)[0]
if self.ExpeFile.endswith('_res'):
self.ExpeFile = self.ExpeFile[:-4]
SkipFile = True # not a data file
OutputFile = self.ExpeFile + '.png'
if not os.path.exists(OutputFile):
self.Dirname, self.ExpeName = os.path.split(self.ExpeFile)
PlotFile = self.ExpeFile + '.csv'
self.ConfigFileName = self.ExpeFile + '_res.csv'
self.Cfg = self.RetrieveConfig(self.ConfigFileName) # retrieve actual parameters from _res file
self.RelevantParam = self.RelevantConfig(self.ExpeName, ConstantConfigFileName) # display parameters
# drawing curves
plt.figure(1, figsize=(6 + 6 * FieldDraw, 4))
if FieldDraw: plt.subplot(1,2,1)
ymax = self.Draw_Curve(PlotFile)
if self.RelevantParam: plt.title(' '.join(sorted(['%s = %s' % (P, self.RelevantParam[P]) for P in self.RelevantParam])))
if FieldDraw:
# drawing field
plt.subplot(1,2,2)
# self.Draw_Field(self.ExpeFile + '_dmp.csv', ymax=ymax)
self.Draw_Field(self.ExpeFile + '_dmp.csv', ymax=100)
plt.title(self.ExpeFile)
self.save(OutputFile)
else: print('%s already exists' % OutputFile)
def Draw_Curve(self, CurveFileName):
# colours = ['#000000', '#00BF00', '#78FF78', '#BF0000', '#FF7878', '#0000BF', '#7878FF']
colours = ['#00BF00', '#78FF78', '#BF0000', '#FF7878', '#0000BF', '#7878FF']
# Retrieving coordinates
PlotOrders = CSV.load(CurveFileName, sniff=True) # loading csv file
# Retrieving legend
try: Legend = next(PlotOrders) # reading first line with curve names
except StopIteration: sys.exit(0)
# Retrieving data
Data = list(zip(*PlotOrders))
Data = list(map(lambda L: list(map(str2nb, L)), Data))
# Data = list(map(lambda L: list(map(str2nb, L)), [*PlotOrders]))
for Col in range(1,len(Data)):
plt.plot(Data[0], Data[Col], linewidth=2, color=colours[Col-1], label=Legend[Col])
x1,x2,y1,y2 = plt.axis()
plt.axis((x1, x2, 0, y2+0.05))
# plt.ylim(top=100)
plt.xlabel('year')
# plt.ylabel('price or sales')
# plt.legend(bbox_to_anchor=(0.1, 1))
plt.legend(loc='upper right')
return plt.ylim()[1] # max coordinate
@classmethod
def RetrieveConfig(self, ConfigFile):
" Retrieves parameters from _res file "
if os.path.exists(ConfigFile):
CfgLines = open(ConfigFile).readlines()
# reading parameters
Sep = max([';', '\t', ','], key=lambda x: CfgLines[0].count(x))
if len(CfgLines) > 1:
Parameters = dict(zip(*map(lambda x: x.strip().split(Sep), CfgLines[:2])))
return EP.Parameters(ParamDict=Parameters)
return None
def RelevantConfig(self, ExpeName, ConstantParameterFile):
" Try to find relevant parameters "
Irrelevant = ['BatchMode', 'DisplayPeriod', 'TimeLimit', 'DumpStart']
if self.Cfg is None or not ConstantParameterFile:
print('ConfigFile not found')
return None
RelevantParameters = {}
CP = EP.Parameters(ConstantParameterFile)
# determining relevant parameters
for p in CP:
if p in Irrelevant: continue
if p in self.Cfg and CP[p] != self.Cfg[p]:
# print(p, RelevantParameters[p], self.Cfg[p])
RelevantParameters[p] = self.Cfg[p]
# CP.addParameter(p, self.Cfg[p])
RelevantParameters = EP.Parameters(ParamDict=RelevantParameters)
print(RelevantParameters)
return RelevantParameters
def Draw_Field(self, DumpFile, ymax=None):
if not os.path.exists(DumpFile): return None
Lines = open(DumpFile).readlines()
# reading recorded positions
FieldPlot = None
if len(Lines) > 1:
FieldPlot = Lines[1].strip().split(';')[1:]
NbP = len(FieldPlot)
plt.scatter(list(range(NbP)), list(map(float, FieldPlot)), s=11)
# print(FieldPlot)
if ymax is not None:
plt.ylim(top=ymax)
plt.xlabel('quality')
plt.ylabel('signal')
return FieldPlot
def save(self, OutputFile): figsave(OutputFile)
def Parse(Args):
Files = []
ConstantConfigFileName = None
if len(Args) < 2:
# find last file
CsvFiles = glob.glob('___Results/*.csv')
if CsvFiles:
CsvFiles.sort(key=lambda x: os.stat(x).st_mtime)
Files = [CsvFiles[-1]]
elif len(Args) > 3:
print('''Usage: %s <curve file name> [<constant config file name>]''' % os.path.basename(Args[0]))
else:
Files = glob.glob(Args[1])
ConstantConfigFileName = Args[2] if (len(Args) == 3) else None
for Argfile in Files:
yield (Argfile, ConstantConfigFileName)
if __name__ == "__main__":
for (Argfile, ConstantConfigFileName) in Parse(sys.argv):
if Argfile:
print(Argfile)
plot = Plot(Argfile, FieldDraw=True, ConstantConfigFileName=ConstantConfigFileName)
# print()
__author__ = 'Dessalles'
|
py | 1a2ed1edc9f9195a4dab60e84c698275f4b1aa32 | from .settings import *
DATABASES = {
'default': {
'ENGINE': 'tenant_schemas.postgresql_backend',
'NAME': os.environ.get('PG_NAME', 'dts_test_project'),
'USER': os.environ.get('PG_USER'),
'PASSWORD': os.environ.get('PG_PASSWORD'),
'HOST': os.environ.get('PG_HOST'),
'PORT': int(os.environ.get('PG_PORT')) if os.environ.get('PG_PORT') else None,
},
'db1': {
'ENGINE': 'tenant_schemas.postgresql_backend',
'NAME': os.environ.get('PG_NAME', 'dts_test_project1'),
'USER': os.environ.get('PG_USER'),
'PASSWORD': os.environ.get('PG_PASSWORD'),
'HOST': os.environ.get('PG_HOST'),
'PORT': int(os.environ.get('PG_PORT')) if os.environ.get('PG_PORT') else None,
},
'db2': {
'ENGINE': 'tenant_schemas.postgresql_backend',
'NAME': os.environ.get('PG_NAME', 'dts_test_project2'),
'USER': os.environ.get('PG_USER'),
'PASSWORD': os.environ.get('PG_PASSWORD'),
'HOST': os.environ.get('PG_HOST'),
'PORT': int(os.environ.get('PG_PORT')) if os.environ.get('PG_PORT') else None,
},
} |
py | 1a2ed1f5b3c5dda671b90424d004600ad3dfc622 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for bucketing data into groups.
The classes and functions in this module are used to queue up data into
buckets conditional on side information (e.g. sequence length).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import input as input_py
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
_as_original_type = input_py._as_original_type
_as_tensor_list = input_py._as_tensor_list
_restore_sparse_tensors = input_py._restore_sparse_tensors
_dtypes = input_py._dtypes
_store_sparse_tensors = input_py._store_sparse_tensors
_shapes = input_py._shapes
_which_queue = input_py._which_queue
# pylint: enable=protected-access
def _validate_bucket(tensor_list):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError("Expected at least one tensor in bucket().")
return tensor_list
def bucket(tensors,
which_bucket,
batch_size,
num_buckets,
num_threads=1,
capacity=32,
shapes=None,
dynamic_pad=False,
allow_smaller_final_batch=False,
keep_input=None,
shared_name=None,
name=None):
"""Lazy bucketing of input tensors according to `which_bucket`.
The argument `tensors` can be a list or a dictionary of tensors.
The value returned by the function will be of the same type
as `tensors`.
The tensors entering this function are put into the bucket given by
`which_bucket`. Each bucket has its own queue. When a bucket contains
`batch_size` elements, this minibatch is pushed onto a top queue. The
  tensors returned from this function are the result of dequeueing the
next minibatch from this top queue.
This function is implemented using several queues. A `QueueRunner` for the
queues is added to the current `Graph`'s `QUEUE_RUNNER` collection.
  As the returned tensors are the result of a dequeue operation, evaluating
them will throw a `tf.errors.OutOfRangeError` when the input queue is
exhausted. If these tensors are feeding another input queue, its queue runner
will catch this exception, however, if they are used in your main thread
you are responsible for catching this yourself.
*N.B.:* If `dynamic_pad` is `False`, you must ensure that either
(i) the `shapes` argument is passed, or (ii) all of the tensors in
`tensors` must have fully-defined shapes. `ValueError` will be
raised if neither of these conditions holds.
If `dynamic_pad` is `True`, it is sufficient that the *rank* of the
tensors is known, but individual dimensions may have shape `None`.
In this case, for each enqueue the dimensions with value `None`
may have a variable length; upon dequeue, the output tensors will be padded
on the right to the maximum shape of the tensors in the current minibatch.
For numbers, this padding takes value 0. For strings, this padding is
the empty string. See `PaddingFIFOQueue` for more info.
If `allow_smaller_final_batch` is `True`, a smaller batch value than
`batch_size` is returned when the queues are closed and there are not enough
elements to fill the batch, otherwise the pending elements are discarded.
In addition, all output tensors' static shapes, as accessed via the
`get_shape()` method will have a 0th `Dimension` value of `None`, and
operations that depend on fixed batch_size would fail.
Args:
tensors: The list or dictionary of tensors, representing a single element,
to bucket. Nested lists are not supported.
which_bucket: An `int32` scalar Tensor taking a value in `[0, num_buckets)`.
batch_size: The new batch size pulled from the queue (all queues will have
the same size). If a list is passed in then each bucket will have a
different batch_size.
(python int, int32 scalar or iterable of integers of length num_buckets).
num_buckets: A python integer, the number of buckets.
num_threads: An integer. The number of threads enqueuing `tensors`.
capacity: An integer. The maximum number of minibatches in the top queue,
and also the maximum number of elements within each bucket.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batches to be smaller if there are insufficient items left in the queues.
keep_input: (Optional). A `bool` scalar Tensor. If provided, this tensor
controls whether the input is added to the queue or not. If it evaluates
`True`, then `tensors` are added to the bucket; otherwise they are
dropped. This tensor essentially acts as a filtering mechanism.
The default behavior is to assume `keep_input=True`.
shared_name: (Optional). If set, the queues will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A tuple `(bucket, outputs)` where `bucket` is
    an `int32` scalar tensor and `outputs` is a list or
dictionary of batched outputs corresponding to elements of `tensors`.
Every step will receive a new bucket of outputs.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors` or if batch_size is a sequence
      but its length != num_buckets.
"""
batch_size_per_bucket = False
if isinstance(batch_size, (list, tuple)):
batch_size_per_bucket = True
if len(batch_size) != num_buckets:
raise ValueError(
"If batch_size is a list it must have num_buckets elements")
else:
batch_size = [batch_size] * num_buckets
tensor_list = _as_tensor_list(tensors)
with ops.name_scope(name, "bucket", tensor_list) as name:
tensor_list = _validate_bucket(tensor_list)
(tensor_list, sparse_info) = _store_sparse_tensors(
tensor_list, enqueue_many=False, keep_input=constant_op.constant(True))
# Round-trip batch_size to a tensor, and possibly back
for i, bucket_batch_size in enumerate(batch_size):
bucket_batch_size = ops.convert_to_tensor(
bucket_batch_size, dtype=dtypes.int32, name="batch_size")
static_batch_size = tensor_util.constant_value(bucket_batch_size)
batch_size[i] = (static_batch_size if static_batch_size is not None else
bucket_batch_size)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many=False)
which_bucket = ops.convert_to_tensor(
which_bucket, dtype=dtypes.int32, name="which_bucket")
queue_creator = _which_queue(dynamic_pad)
bucket_queues = []
for i in range(num_buckets):
shared_name_i = ("%s_%d" % (shared_name, i) if shared_name is not None
else None)
bucket_queues.append(
queue_creator(
capacity=capacity,
dtypes=types,
shapes=shapes,
shared_name=shared_name_i,
name="bucket_queue_%d" % i))
maybe_static_batch_size = (
None if (allow_smaller_final_batch or batch_size_per_bucket)
else static_batch_size)
bucket_shapes = [
tensor_shape.vector(maybe_static_batch_size).concatenate(s)
for s in bucket_queues[0].shapes
]
# top_queue is a PaddingFIFOQueue even if the bucket queues are regular FIFO
# queues because if we use allow_smaller_final_batch, shapes will
# contain Nones in their first entry; as a result, a regular
# FIFOQueue would die when being passed shapes that are not fully defined.
top_queue = data_flow_ops.PaddingFIFOQueue(
capacity=capacity,
dtypes=[dtypes.int32] + types,
shapes=[tensor_shape.scalar()] + bucket_shapes,
shared_name=shared_name,
name="top_queue")
def enqueue_which():
def enqueue_single(i):
return bucket_queues[i].enqueue(tensor_list)
enqueues = [
control_flow_ops.cond(
math_ops.equal(which_bucket, i),
functools.partial(enqueue_single, i), control_flow_ops.no_op)
for i in range(num_buckets)
]
return control_flow_ops.group(*enqueues, name="group_enqueues")
if keep_input is not None:
# TODO(ebrevdo): Expand keep_input param to core training
# methods, and pipe through to _store_sparse_tensors; so
# that expensive serialization is guarded by keep_input.
maybe_enqueue = control_flow_ops.cond(keep_input, enqueue_which,
control_flow_ops.no_op)
else:
maybe_enqueue = enqueue_which()
bucket_enqueue_ops = [maybe_enqueue] * num_threads
if allow_smaller_final_batch:
which_dequeue = lambda q: q.dequeue_up_to
else:
which_dequeue = lambda q: q.dequeue_many
enqueues_to_top = [
top_queue.enqueue(
[constant_op.constant(i)] + which_dequeue(q)(
bs, name="read_bucket_%d" % i),
name="enqueue_from_bucket_%d" % i)
for i, (q, bs) in enumerate(zip(bucket_queues, batch_size))
]
for i, q in enumerate(bucket_queues):
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
q, [enqueues_to_top[i]],
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError)))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
top_queue,
bucket_enqueue_ops,
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError)))
for q in bucket_queues:
summary.scalar("bucket/%s/size" % q.name,
math_ops.cast(top_queue.size(), dtypes.float32))
summary.scalar("bucket/%s/fraction_of_%d_full" % (top_queue.name, capacity),
math_ops.cast(top_queue.size(), dtypes.float32) *
(1. / capacity))
dequeued = top_queue.dequeue(name="dequeue_top")
which_bucket_dequeued = dequeued[0]
dequeued = dequeued[1:]
dequeued = _restore_sparse_tensors(dequeued, sparse_info)
return (which_bucket_dequeued, _as_original_type(tensors, dequeued))
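# A minimal usage sketch (not part of the original module): routing a dict of
# fixed-shape feature tensors into one of three buckets using a precomputed
# integer key. The feature shape, bucket key and batch size are illustrative
# assumptions, not values mandated by the API.
def _example_bucket():
  features = {"x": array_ops.placeholder(dtypes.float32, shape=[4])}
  which = constant_op.constant(1, dtype=dtypes.int32)  # e.g. derived per example
  bucket_idx, batched = bucket(
      tensors=features,
      which_bucket=which,
      batch_size=16,
      num_buckets=3)
  return bucket_idx, batched["x"]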
def bucket_by_sequence_length(input_length,
tensors,
batch_size,
bucket_boundaries,
num_threads=1,
capacity=32,
shapes=None,
dynamic_pad=False,
allow_smaller_final_batch=False,
keep_input=None,
shared_name=None,
name=None):
"""Lazy bucketing of inputs according to their length.
This method calls `tf.contrib.training.bucket` under the hood, after first
subdividing the bucket boundaries into separate buckets and identifying which
bucket the given `input_length` belongs to. See the documentation for
`which_bucket` for details of the other arguments.
Args:
input_length: `int32` scalar `Tensor`, the sequence length of tensors.
tensors: The list or dictionary of tensors, representing a single element,
to bucket. Nested lists are not supported.
batch_size: The new batch size pulled from the queue (all queues will have
the same size). If a list is passed in then each bucket will have a
different batch_size.
(python int, int32 scalar or iterable of integers of length num_buckets).
bucket_boundaries: int list, increasing non-negative numbers.
The edges of the buckets to use when bucketing tensors. Two extra buckets
are created, one for `input_length < bucket_boundaries[0]` and
one for `input_length >= bucket_boundaries[-1]`.
num_threads: An integer. The number of threads enqueuing `tensors`.
capacity: An integer. The maximum number of minibatches in the top queue,
and also the maximum number of elements within each bucket.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batches to be smaller if there are insufficient items left in the queues.
keep_input: (Optional). A `bool` scalar Tensor. If provided, this tensor
controls whether the input is added to the queue or not. If it evaluates
`True`, then `tensors` are added to the bucket; otherwise they are
dropped. This tensor essentially acts as a filtering mechanism.
The default behavior is to assume `keep_input=True`.
shared_name: (Optional). If set, the queues will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A tuple `(sequence_length, outputs)` where `sequence_length` is
a 1-D `Tensor` of size `batch_size` and `outputs` is a list or dictionary
of batched, bucketed, outputs corresponding to elements of `tensors`.
Raises:
TypeError: if `bucket_boundaries` is not a list of python integers.
ValueError: if `bucket_boundaries` is empty or contains non-increasing
      values or if batch_size is a list and its length doesn't equal the number
of buckets.
"""
tensor_list = _as_tensor_list(tensors)
if not isinstance(bucket_boundaries, (list, tuple)):
raise TypeError(
"bucket_boundaries must be a list or tuple, but received: %s" %
bucket_boundaries)
if not bucket_boundaries:
raise ValueError("bucket_boundaries must not be empty")
for (s, e) in zip(bucket_boundaries[:-1], bucket_boundaries[1:]):
if not isinstance(s, int) or not isinstance(e, int):
raise TypeError("bucket boundaries must be integers, but saw: %s and %s" %
(s, e))
if s >= e:
raise ValueError(
"Buckets must contain sequential increasing lengths, but saw: "
"%d before %d" % (s, e))
with ops.name_scope(name, "bucket_by_sequence_length",
[input_length] + tensor_list) as name:
input_length = ops.convert_to_tensor(
input_length, dtype=dtypes.int32, name="input_length")
# Bucketing conditions are:
# l < b[0]
# b[0] <= l < b[1]
# b[1] <= l < b[2]
# ...
# b[N-2] <= l < b[N-1]
# b[N-1] <= l
# Equivalent to:
# [-inf, b[0], b[1], ..., b[N-1]] <= l < [b[0], b[1], ..., b[N-1], inf]
buckets_min = [np.iinfo(np.int32).min] + list(bucket_boundaries)
buckets_max = list(bucket_boundaries) + [np.iinfo(np.int32).max]
conditions_c = math_ops.logical_and(
math_ops.less_equal(buckets_min, input_length),
math_ops.less(input_length, buckets_max))
which_bucket = math_ops.reduce_min(array_ops.where(conditions_c))
which_bucket = math_ops.to_int32(which_bucket)
if shapes is not None:
shapes = [tensor_shape.scalar()] + shapes
_, dequeued = bucket(
tensors=[input_length] + tensor_list,
which_bucket=which_bucket,
batch_size=batch_size,
num_buckets=len(bucket_boundaries) + 1,
num_threads=num_threads,
capacity=capacity,
shapes=shapes,
dynamic_pad=dynamic_pad,
allow_smaller_final_batch=allow_smaller_final_batch,
keep_input=keep_input,
shared_name=shared_name)
return (dequeued[0], _as_original_type(tensors, dequeued[1:]))
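# A minimal usage sketch (not part of the original module): batching
# variable-length integer sequences, padded per length bucket. The placeholder,
# batch size and boundary values are illustrative assumptions.
def _example_bucket_by_sequence_length():
  sequence = array_ops.placeholder(dtypes.int32, shape=[None])  # one example
  seq_len = array_ops.shape(sequence)[0]
  lengths, [padded] = bucket_by_sequence_length(
      input_length=seq_len,
      tensors=[sequence],
      batch_size=32,
      bucket_boundaries=[10, 20, 40],  # 4 buckets: <10, [10,20), [20,40), >=40
      dynamic_pad=True)
  return lengths, padded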
__all__ = ["bucket", "bucket_by_sequence_length"]
|
py | 1a2ed23e5ff49379e6935d99a53b1dd765e63b5c | import hashlib
import json
import logging
from pathlib import Path
from typing import List
import ckanapi
import pandas as pd
from airflow.models.baseoperator import BaseOperator
from airflow.utils.decorators import apply_defaults
class BackupDatastoreResourceOperator(BaseOperator):
"""
    Reads datastore resource, creates backup files for fields (json) and records (parquet).
    Args:
- address: CKAN instance URL
- apikey: CKAN API key
- resource_task_id: task_id that returns resource object (ie. GetOrCreateResourcePackage)
- dir_task_id: task_id that returns backup directory
Returns dictionary containing:
- fields: json file path containing fields for datastore resource
- data: parquet file path containing fields for datastore resource
- columns: number of columns in datastore resource
- rows: number of rows in datastore_resource
- resource_id: datastore resource ID
"""
@apply_defaults
def __init__(
self,
address: str,
apikey: str,
resource_task_id: str,
dir_task_id: str,
sort_columns: List[str] = [],
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dir_task_id = dir_task_id
self.resource_task_id = resource_task_id
self.sort_columns = sort_columns
self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)
def _checksum_datastore_response(self, datastore_response):
data = pd.DataFrame(datastore_response["records"])
if "_id" in data.columns.values:
data = data.drop("_id", axis=1)
if len(self.sort_columns) > 0:
data = data.sort_values(by=self.sort_columns)
data_hash = hashlib.md5()
data_hash.update(data.to_csv(index=False).encode("utf-8"))
return data_hash.hexdigest()
def _build_dataframe(self, records):
data = pd.DataFrame(records)
if "_id" in data.columns.values:
data = data.drop("_id", axis=1)
return data
def _save_fields_json(self, datastore_response, checksum, backups_dir):
fields_file_path = backups_dir / f"fields.{checksum}.json"
if not fields_file_path.exists():
fields = [f for f in datastore_response["fields"] if f["id"] != "_id"]
with open(fields_file_path, "w") as f:
json.dump(fields, f)
return fields_file_path
def _save_data_parquet(self, datastore_response, checksum, backups_dir, data):
data_file_path = backups_dir / f"data.{checksum}.parquet"
if not data_file_path.exists():
data.to_parquet(path=data_file_path, engine="fastparquet", compression=None)
return data_file_path
def execute(self, context):
# get a resource and backup directory via xcom
ti = context["ti"]
resource = ti.xcom_pull(task_ids=self.resource_task_id)
backups_dir = Path(ti.xcom_pull(task_ids=self.dir_task_id))
# get number of records for this datastore resource
record_count = self.ckan.action.datastore_search(id=resource["id"], limit=0)[
"total"
]
# get data from datastore resource
datastore_response = self.ckan.action.datastore_search(
id=resource["id"], limit=record_count
)
# turn data into dataframe
data = self._build_dataframe(datastore_response["records"])
checksum = self._checksum_datastore_response(datastore_response)
# return filepath for fields json, data parquet, row/col counts, checksum, and resource_id
result = {
"fields_file_path": self._save_fields_json(
datastore_response, checksum, backups_dir
),
"data_file_path": self._save_data_parquet(
datastore_response, checksum, backups_dir, data
),
"records": data.shape[0],
"columns": data.shape[1],
"resource_id": datastore_response["resource_id"],
"checksum": checksum,
}
logging.info(f"Returning: {result}")
return result
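# Usage sketch (not part of the original module): one way this operator might be
# wired into a DAG. The upstream task ids and the address/apikey sources are
# illustrative assumptions.
def _example_backup_task(dag, address, apikey):
    return BackupDatastoreResourceOperator(
        task_id="backup_datastore_resource",
        address=address,
        apikey=apikey,
        resource_task_id="get_or_create_resource",
        dir_task_id="create_backups_dir",
        dag=dag,
    )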
class DeleteDatastoreResourceOperator(BaseOperator):
"""
Deletes a datastore resource
Inputs:
- address: CKAN instance URL
- apikey: CKAN API key
- resource_id: CKAN resource id to be deleted
    Resource id can be given with an actual value, or with a reference to a task_id and task_key that returns the value
Note: Deleting the entire resource also deletes the data dictionary (i.e. schema, field definitions and types).
To keep the existing schema, delete the datastore resource records instead by using the DeleteDatastoreResourceRecordsOperator - this keeps the schema.
"""
@apply_defaults
def __init__(
self,
address: str,
apikey: str,
resource_id: str = None,
resource_id_task_id: str = None,
resource_id_task_key: str = None,
**kwargs,
) -> None:
# init ckan client and resource_id to be truncated
super().__init__(**kwargs)
self.resource_id, self.resource_id_task_id, self.resource_id_task_key = resource_id, resource_id_task_id, resource_id_task_key
self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)
def execute(self, context):
# get task instance from context
ti = context['ti']
# get resource id from task, if task info provided in input
if self.resource_id_task_id and self.resource_id_task_key:
self.resource_id = ti.xcom_pull(task_ids=self.resource_id_task_id)[self.resource_id_task_key]
self.resource = ti.xcom_pull(task_ids=self.resource_id_task_id)
logging.info(self.resource)
logging.info("Pulled {} from {} via xcom".format(self.resource_id, self.resource_id_task_id) )
assert self.resource_id, "Resource ID is empty! This operator needs a way to get the resource ID in order to delete the right datastore resource!"
# Delete the resource
try:
self.ckan.action.datastore_delete(id=self.resource_id, force=True)
logging.info("Deleted " + self.resource_id)
except Exception as e:
logging.error("Error while trying to delete resource: " + e)
class DeleteDatastoreResourceRecordsOperator(BaseOperator):
"""
    Deletes datastore resource records.
    Args:
- address: CKAN instance URL
- apikey: CKAN API key
- backup_task_id: task_id that returns backup file information (BackupDatastoreResourceOperator)
"""
@apply_defaults
def __init__(
self, address: str, apikey: str, backup_task_id: str, **kwargs,
) -> None:
super().__init__(**kwargs)
self.backup_task_id = backup_task_id
self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)
def execute(self, context):
backups_info = context["ti"].xcom_pull(task_ids=self.backup_task_id)
self.ckan.action.datastore_delete(id=backups_info["resource_id"], force=True)
with open(Path(backups_info["fields_file_path"]), "r") as f:
fields = json.load(f)
self.ckan.action.datastore_create(id=backups_info["resource_id"], fields=fields, force=True)
record_count = self.ckan.action.datastore_search(
id=backups_info["resource_id"], limit=0
)["total"]
assert record_count == 0, f"Resource not empty after cleanup: {record_count}"
class InsertDatastoreResourceRecordsOperator(BaseOperator):
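    """
    Inserts records into a CKAN datastore resource.
    Args:
    - address: CKAN instance URL
    - apikey: CKAN API key
    - resource_task_id: task_id that returns the target resource object
    - parquet_filepath_task_id: (Optional) task_id that returns the path of a parquet file holding the records to insert
    - fields_json_path_task_id: (Optional) task_id that returns the path of a fields json file used to (re)create the datastore schema
    - chunk_size: number of records sent per datastore_create call
    """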
@apply_defaults
def __init__(
self,
address: str,
apikey: str,
resource_task_id: str,
parquet_filepath_task_id: str = None,
fields_json_path_task_id: str = None,
chunk_size: int = 20000,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.parquet_filepath_task_id = parquet_filepath_task_id
self.resource_task_id = resource_task_id
self.chunk_size = chunk_size
self.fields_json_path_task_id = fields_json_path_task_id
self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)
def _create_empty_resource_with_fields(self, fields_path, resource_id):
with open(fields_path, "r") as f:
fields = json.load(f)
self.ckan.action.datastore_create(id=resource_id, fields=fields, force=True)
def execute(self, context):
ti = context["ti"]
resource = ti.xcom_pull(task_ids=self.resource_task_id)
if self.fields_json_path_task_id is not None:
fields_path = Path(ti.xcom_pull(task_ids=self.fields_json_path_task_id))
self._create_empty_resource_with_fields(fields_path, resource["id"])
if self.parquet_filepath_task_id is not None:
path = Path(ti.xcom_pull(task_ids=self.parquet_filepath_task_id))
data = pd.read_parquet(path)
records = data.to_dict(orient="records")
chunks = [
records[i : i + self.chunk_size]
for i in range(0, len(records), self.chunk_size)
]
for chunk in chunks:
clean_records = []
logging.info(f"Removing NaNs and inserting {len(records)} records")
for r in chunk:
record = {}
for key, value in r.items():
                        if value == value:  # keep only non-NaN values (NaN != NaN)
record[key] = value
clean_records.append(record)
self.ckan.action.datastore_create(
id=resource["id"], records=clean_records, force=True
)
logging.info(f"Records inserted: {data.shape[0]}")
return data.shape[0]
class RestoreDatastoreResourceBackupOperator(BaseOperator):
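    """
    Restores a datastore resource from the backup files produced by BackupDatastoreResourceOperator.
    Args:
    - address: CKAN instance URL
    - apikey: CKAN API key
    - backup_task_id: task_id that returns backup file information (BackupDatastoreResourceOperator)
    """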
@apply_defaults
def __init__(
self, address: str, apikey: str, backup_task_id: str, **kwargs,
) -> None:
super().__init__(**kwargs)
self.backup_task_id = backup_task_id
self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)
def execute(self, context):
backups_info = context["ti"].xcom_pull(task_ids=self.backup_task_id)
assert backups_info is not None, "No backup information"
resource_id = backups_info["resource_id"]
with open(Path(backups_info["fields_file_path"]), "r") as f:
fields = json.load(f)
data = pd.read_parquet(Path(backups_info["data_file_path"]))
records = data.to_dict(orient="records")
try:
self.ckan.action.datastore_delete(id=resource_id)
except Exception as e:
logging.error(e)
result = self.ckan.action.datastore_create(
id=resource_id, fields=fields, records=records
)
logging.info(f"Result: {result}")
return result
class InsertDatastoreResourceRecordsFromJSONOperator(BaseOperator):
'''
    Reads a JSON file and writes the output into a CKAN datastore resource.
JSON must be a list of dicts, with each dict being a record, like the following:
[
{ "column1": "string", "column2": 100, "column3": true},
{ "column1": "some other string", "column2": 34, "column3": false}
]
The fields must match the CKAN standard, like the following:
[
{
"id": "column1",
"type": "text" ,
"info": {
"notes": "Description of the field goes here. Info key is optional."
}
},
{
"id": "column2",
"type": "int"
},
{
"id": "column3",
"type": "bool"
}
]
Expects as inputs:
- address - url of target ckan
- apikey - key needed to make authorized ckan calls
- resource_id - id of the resource that will receive this data
- data_path - location of the json data file
- fields_path - location of the data's fields, already in a CKAN-friendly format
All of the above, except the address and apikey, can be given with an actual value, or with a reference to a task_id and task_key that returns the value
'''
@apply_defaults
def __init__(
self,
address: str,
apikey: str,
resource_id: str = None,
resource_id_task_id: str = None,
resource_id_task_key: str = None,
data_path: str = None,
data_path_task_id: str = None,
data_path_task_key: str = None,
fields_path: str = None,
fields_path_task_id: str = None,
fields_path_task_key: str = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.resource_id, self.resource_id_task_id, self.resource_id_task_key = resource_id, resource_id_task_id, resource_id_task_key
self.data_path, self.data_path_task_id, self.data_path_task_key = data_path, data_path_task_id, data_path_task_key
self.fields_path, self.fields_path_task_id, self.fields_path_task_key = fields_path, fields_path_task_id, fields_path_task_key
self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)
def execute(self, context):
# init task instance from context
ti = context['ti']
# assign important vars if provided from other tasks
if self.resource_id_task_id and self.resource_id_task_key:
self.resource_id = ti.xcom_pull(task_ids=self.resource_id_task_id)[self.resource_id_task_key]
if self.data_path_task_id and self.data_path_task_key:
self.data_path = ti.xcom_pull(task_ids=self.data_path_task_id)[self.data_path_task_key]
if self.fields_path_task_id and self.fields_path_task_key:
self.fields_path = ti.xcom_pull(task_ids=self.fields_path_task_id)[self.fields_path_task_key]
# get fields from file
with open(self.fields_path, "r") as f:
fields = json.load(f)
logging.info("Loaded the following fields from {}: {}".format( self.fields_path, fields ))
# populate that resource w data from the path provided
assert self.data_path, "Data path, or the filepath to the data to be inserted, must be provided!"
with open(self.data_path) as f:
data = json.load(f)
logging.info("Data parsed from JSON file")
logging.info("Fields from fields file: " + str(fields))
logging.info("Fields from data file: " + str(data[0].keys()))
self.ckan.action.datastore_create(id=self.resource_id, fields=fields, records=data)
logging.info("Resource created and populated from input fields and data")
return {"resource_id": self.resource_id, "data_inserted": len(data)}
|
py | 1a2ed2604bd803aec2ca1994ff502f014e0ffc81 | import os
import torch
from utils.runs import Run
from utils.utils import print_message, save_checkpoint
from parameters import SAVED_CHECKPOINTS
def print_progress(scores):
positive_avg, negative_avg = round(scores[:, 0].mean().item(), 2), round(scores[:, 1].mean().item(), 2)
print("#>>> ", positive_avg, negative_avg, '\t\t|\t\t', positive_avg - negative_avg)
def manage_checkpoints(args, colbert, optimizer, batch_idx):
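    # Saves a rolling "colbert.dnn" checkpoint every 2000 batches and a named
    # "colbert-<batch_idx>.dnn" snapshot at the batch indices listed in
    # SAVED_CHECKPOINTS, all under <Run.path>/checkpoints.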
arguments = args.input_arguments.__dict__
path = os.path.join(Run.path, 'checkpoints')
if not os.path.exists(path):
os.mkdir(path)
if batch_idx % 2000 == 0:
name = os.path.join(path, "colbert.dnn")
save_checkpoint(name, 0, batch_idx, colbert, optimizer, arguments)
if batch_idx in SAVED_CHECKPOINTS:
name = os.path.join(path, "colbert-{}.dnn".format(batch_idx))
save_checkpoint(name, 0, batch_idx, colbert, optimizer, arguments)
|
py | 1a2ed37fea8e4429bfb60b85d8d46565ef016023 | from dataclasses import dataclass
from bindings.gmd.operation_method_property_type import OperationMethodPropertyType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class OperationMethodRef(OperationMethodPropertyType):
class Meta:
name = "operationMethodRef"
namespace = "http://www.opengis.net/gml"
|
py | 1a2ed38f590f29ace6484c9226f88b9f4ac5951b | import os
import numpy as np
import tables
import os
from .normalize import normalize_data_storage, reslice_image_set
def create_data_file(out_file, n_channels, n_samples, n_truth_labels, image_shape):
""" Initializes the hdf5 file and gives pointers for its three arrays
"""
try:
os.makedirs(os.path.dirname(out_file))
    except OSError:
        # directory may already exist (or out_file has no directory component)
        pass
hdf5_file = tables.open_file(out_file, mode='w')
filters = tables.Filters(complevel=5, complib='blosc')
data_shape = tuple([0, n_channels] + list(image_shape))
truth_shape = tuple([0, n_truth_labels] + list(image_shape))
data_storage = hdf5_file.create_earray(hdf5_file.root, 'data', tables.Float32Atom(), shape=data_shape,
filters=filters, expectedrows=n_samples)
truth_storage = hdf5_file.create_earray(hdf5_file.root, 'truth', tables.UInt8Atom(), shape=truth_shape,
filters=filters, expectedrows=n_samples)
affine_storage = hdf5_file.create_earray(hdf5_file.root, 'affine', tables.Float32Atom(), shape=(0, 4, 4),
filters=filters, expectedrows=n_samples)
return hdf5_file, data_storage, truth_storage, affine_storage
def write_image_data_to_file(image_files, data_storage, truth_storage, image_shape, n_channels, affine_storage,
truth_dtype=np.uint8, crop=True):
for set_of_files in image_files: # set_of_files is both the volume file and the label file
images = reslice_image_set(set_of_files, image_shape, label_indices=len(set_of_files) - 1,
crop=crop) # both volume and label is resliced
subject_data = [image.get_data() for image in images]
add_data_to_storage(data_storage, truth_storage, affine_storage, subject_data, images[0].affine, n_channels,
truth_dtype)
return data_storage, truth_storage
def add_data_to_storage(data_storage, truth_storage, affine_storage, subject_data, affine, n_channels, truth_dtype):
data_storage.append(np.asarray(subject_data[:n_channels])[
np.newaxis]) # Anything but the last element of subject_data must be volume data
# split_truth_into_binary_labels(subject_data[n_channels])
# what_to_append = split_truth_into_binary_labels(subject_data[n_channels], truth_dtype, truth_labels)[np.newaxis]
# truth_storage.append(what_to_append)
truth_storage.append(np.asarray(subject_data[n_channels], dtype=truth_dtype)[np.newaxis][
np.newaxis]) # the last element of subject_data must be the labels
affine_storage.append(np.asarray(affine)[np.newaxis])
def write_data_to_file(training_data_files, out_file, image_shape, truth_dtype=np.uint8, subject_ids=None,
normalize=True, crop=True):
"""
Takes in a set of training images and writes those images to an hdf5 file.
:param training_data_files: List of tuples containing the training data files. The modalities should be listed in
the same order in each tuple. The last item in each tuple must be the labeled image.
Example: [('sub1-T1.nii.gz', 'sub1-T2.nii.gz', 'sub1-truth.nii.gz'),
('sub2-T1.nii.gz', 'sub2-T2.nii.gz', 'sub2-truth.nii.gz')]
:param out_file: Where the hdf5 file will be written to.
:param image_shape: Shape of the images that will be saved to the hdf5 file.
:param truth_dtype: Default is 8-bit unsigned integer.
:return: Location of the hdf5 file with the image data written to it.
"""
n_samples = len(training_data_files)
n_channels = len(training_data_files[0]) - 1
n_truth_labels = 1
try:
hdf5_file, data_storage, truth_storage, affine_storage = create_data_file(out_file,
n_channels=n_channels,
n_samples=n_samples,
n_truth_labels=n_truth_labels,
image_shape=image_shape)
except Exception as e:
# If something goes wrong, delete the incomplete data file
os.remove(out_file)
raise e
write_image_data_to_file(training_data_files, data_storage, truth_storage, image_shape, truth_dtype=truth_dtype,
n_channels=n_channels, affine_storage=affine_storage, crop=crop)
if subject_ids:
hdf5_file.create_earray(hdf5_file.root, 'subject_ids', obj=subject_ids)
if normalize:
normalize_data_storage(data_storage)
hdf5_file.close()
return out_file
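# Minimal usage sketch (not part of the original module): the file names and
# image shape below are illustrative assumptions.
def _example_write_data_file():
    training_files = [("sub1-T1.nii.gz", "sub1-T2.nii.gz", "sub1-truth.nii.gz"),
                      ("sub2-T1.nii.gz", "sub2-T2.nii.gz", "sub2-truth.nii.gz")]
    return write_data_to_file(training_files, "training_data.h5", image_shape=(144, 144, 144))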
def open_data_file(filename, readwrite="r"):
return tables.open_file(filename, readwrite)
|
py | 1a2ed3af00dd0272577cc60cc2b72f17f5efad1f |
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from parler.models import TranslatableModel, TranslatedFields
class TimeStampedModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
@python_2_unicode_compatible
class AuthenticationSource(TimeStampedModel, TranslatableModel):
"""MPASS authentication sources."""
auth_id = models.CharField(max_length=128)
icon_url = models.CharField(max_length=2048, blank=True, null=True)
tags = models.ManyToManyField('AuthenticationTag', blank=True)
translations = TranslatedFields(
title=models.CharField(max_length=2048)
)
@property
def shib_auth_selection_parameter(self):
return 'authnContextClassRef=urn:mpass.id:authnsource:%s' % self.auth_id
def __str__(self):
return self.auth_id
@python_2_unicode_compatible
class AuthenticationTag(TimeStampedModel, TranslatableModel):
"""MPASS authentication tags used for grouping AuthenticationSources."""
tag_id = models.CharField(max_length=128)
translations = TranslatedFields(
title=models.CharField(max_length=2048)
)
@property
def shib_auth_selection_parameter(self):
        return 'authnContextClassRef=urn:mpass.id:authntag:%s' % self.tag_id
def __str__(self):
return self.tag_id
@python_2_unicode_compatible
class Service(TimeStampedModel, TranslatableModel):
"""MPASS Service."""
service_id = models.CharField(max_length=128)
icon_url = models.CharField(max_length=2048, blank=True, null=True)
service_url = models.CharField(max_length=2048, blank=True, null=True)
sso_url = models.CharField(max_length=2048, blank=True, null=True)
translations = TranslatedFields(
description=models.CharField(max_length=2048),
title=models.CharField(max_length=2048)
)
def __str__(self):
return self.service_id
# vim: tabstop=2 expandtab shiftwidth=2 softtabstop=2
|
py | 1a2ed3bf39547120ff86d95576269db1453f8e77 | # Generated by Django 2.1.3 on 2018-11-23 05:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('multiplicity', '0008_datasettypestructure_icon'),
]
operations = [
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('image', models.ImageField(blank=True, null=True, upload_to='photos')),
('author', models.CharField(max_length=255)),
('source_url', models.CharField(blank=True, max_length=255, null=True)),
('description', models.TextField(blank=True, null=True)),
('deleted', models.BooleanField(db_index=True, default=False)),
('space', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='multiplicity.ReferenceSpace')),
('uploaded_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created_at', '-updated_at'],
'abstract': False,
},
),
]
|
py | 1a2ed4161d2439401a69c931198a039dbc93f74f | import os
import time
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from config import get_cfg
# models
from models.volume_rendering import VolumeRenderer
from models.anim_nerf import AnimNeRF
from models.body_model_params import BodyModelParams
# losses
# datasets
from datasets import dataset_dict
# optimizer, scheduler, visualization
from utils import *
from utils.util import load_pickle_file
# pytorch-lightning
from torchmetrics.functional import psnr, ssim
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import LightningDataModule, LightningModule, Trainer
from pytorch_lightning.loggers import TensorBoardLogger
class AnimNeRFData(LightningDataModule):
def __init__(self, hparams):
super(AnimNeRFData, self).__init__()
# self.hparams = hparams
self.save_hyperparameters(hparams)
def setup(self, stage=None):
dataset = dataset_dict[self.hparams.dataset_name]
if self.hparams.deformation_dim + self.hparams.apperance_dim > 0 or self.hparams.optim_body_params:
frame_ids_index = {}
for i, frame_id in enumerate(self.hparams.frame_IDs):
frame_ids_index[frame_id] = i
else:
frame_ids_index = None
kwargs = {'root_dir': self.hparams.root_dir,
'img_wh': tuple(self.hparams.img_wh),
'frame_start_ID': self.hparams.train.frame_start_ID,
'frame_end_ID': self.hparams.train.frame_end_ID,
'frame_skip': self.hparams.train.frame_skip,
'subsampletype': self.hparams.train.subsampletype,
'subsamplesize': self.hparams.train.subsamplesize,
'model_type': self.hparams.model_type,
'cam_IDs': self.hparams.train.cam_IDs
}
self.train_dataset = dataset(mode='train', frame_ids_index=frame_ids_index, **kwargs)
kwargs = {'root_dir': self.hparams.root_dir,
'img_wh': tuple(self.hparams.img_wh),
'frame_start_ID': self.hparams.val.frame_start_ID,
'frame_end_ID': self.hparams.val.frame_end_ID,
'frame_skip': self.hparams.val.frame_skip,
'model_type': self.hparams.model_type,
'cam_IDs': self.hparams.val.cam_IDs
}
self.val_dataset = dataset(mode='val', frame_ids_index=frame_ids_index, **kwargs)
kwargs = {'root_dir': self.hparams.root_dir,
'img_wh': tuple(self.hparams.img_wh),
'frame_start_ID': self.hparams.test.frame_start_ID,
'frame_end_ID': self.hparams.test.frame_end_ID,
'frame_skip': self.hparams.test.frame_skip,
'model_type': self.hparams.model_type,
'cam_IDs': self.hparams.test.cam_IDs
}
self.test_dataset = dataset(mode='val', frame_ids_index=frame_ids_index, **kwargs)
def train_dataloader(self):
return DataLoader(self.train_dataset,
shuffle=True,
num_workers=self.hparams.train.num_workers,
batch_size=self.hparams.train.batch_size,
pin_memory=False)
def val_dataloader(self):
return DataLoader(self.val_dataset,
shuffle=False,
num_workers=self.hparams.val.num_workers,
batch_size=self.hparams.val.batch_size, # validate one image (H*W rays) at a time
pin_memory=False)
def test_dataloader(self):
return DataLoader(self.test_dataset,
shuffle=False,
num_workers=self.hparams.test.num_workers,
batch_size=self.hparams.test.batch_size, # validate one image (H*W rays) at a time
pin_memory=False)
class AnimNeRFSystem(LightningModule):
def __init__(self, hparams):
super(AnimNeRFSystem, self).__init__()
# self.hparams = hparams
self.save_hyperparameters(hparams)
self.anim_nerf = AnimNeRF(
model_path=self.hparams.model_path,
model_type=self.hparams.model_type,
gender=self.hparams.gender,
freqs_xyz=self.hparams.freqs_xyz,
freqs_dir=self.hparams.freqs_dir,
use_view=self.hparams.use_view,
k_neigh=self.hparams.k_neigh,
use_knn=self.hparams.use_knn,
use_unpose=self.hparams.use_unpose,
unpose_view=self.hparams.unpose_view,
use_deformation=self.hparams.use_deformation,
pose_dim=self.hparams.pose_dim,
deformation_dim=self.hparams.deformation_dim,
apperance_dim=self.hparams.apperance_dim,
use_fine=self.hparams.n_importance>0 or self.hparams.n_depth>0,
share_fine=self.hparams.share_fine,
dis_threshold=self.hparams.dis_threshold,
query_inside=self.hparams.query_inside,
)
self.models = [self.anim_nerf]
if self.hparams.deformation_dim > 0 or self.hparams.apperance_dim > 0:
self.hparams.latent_dim = self.hparams.deformation_dim + self.hparams.apperance_dim
self.latent_codes = nn.Embedding(self.hparams.num_frames, self.hparams.latent_dim)
self.latent_codes.weight.data.normal_(0, 0.1)
self.models += [self.latent_codes]
self.body_model_params = BodyModelParams(self.hparams.num_frames, model_type=self.hparams.model_type)
self.load_body_model_params()
if self.hparams.optim_body_params:
optim_params = self.body_model_params.param_names
for param_name in optim_params:
self.body_model_params.set_requires_grad(param_name, requires_grad=True)
self.models += [self.body_model_params]
self.volume_renderer = VolumeRenderer(n_coarse=self.hparams.n_samples, n_fine=self.hparams.n_importance, n_fine_depth=self.hparams.n_depth, share_fine=self.hparams.share_fine, white_bkgd=self.hparams.white_bkgd)
def load_body_model_params(self):
body_model_params = {param_name: [] for param_name in self.body_model_params.param_names}
body_model_params_dir = os.path.join(self.hparams.root_dir, '{}s'.format(self.hparams.model_type))
for frame_id in self.hparams.frame_IDs:
params = load_pickle_file(os.path.join(body_model_params_dir, "{:0>6}.pkl".format(frame_id)))
for param_name in body_model_params.keys():
body_model_params[param_name].append(torch.from_numpy(params[param_name]).float().unsqueeze(0))
for param_name in body_model_params.keys():
body_model_params[param_name] = torch.cat(body_model_params[param_name], dim=0)
self.body_model_params.init_parameters(param_name, body_model_params[param_name], requires_grad=False)
@torch.no_grad()
def decode_batch(self, batch):
frame_id = batch['frame_id']
cam_id = batch['cam_id']
frame_idx = batch['frame_idx']
rays = batch['rays'] # (bs, n_rays, 8)
rgbs = batch['rgbs'] # (bs, n_rays, 3)
alphas = batch['alphas'] # (bs, n_rays, 1)
body_model_params = {
'betas': batch['betas'],
'global_orient': batch['global_orient'],
'body_pose': batch['body_pose'],
'transl': batch['transl']
}
body_model_params_template = {
'betas': batch['betas_template'],
'global_orient': batch['global_orient_template'],
'body_pose': batch['body_pose_template'],
'transl': batch['transl_template']
}
fg_points = batch['fg_points'] # (bs, num_points, 3)
bg_points = batch['bg_points'] # (bs, num_points, 3)
return frame_id, cam_id, frame_idx, rays, rgbs, alphas, body_model_params, body_model_params_template, fg_points, bg_points
def forward(self, rays, body_model_params, body_model_params_template, latent_code=None, perturb=1.0):
bs, n_rays = rays.shape[:2]
results = defaultdict(list)
chunk = self.hparams.chunk
self.anim_nerf.set_body_model(body_model_params, body_model_params_template)
rays = self.anim_nerf.convert_to_body_model_space(rays)
self.anim_nerf.clac_ober2cano_transform()
if latent_code is not None:
self.anim_nerf.set_latent_code(latent_code)
for i in range(0, n_rays, chunk):
rays_chunk = rays[:, i:i+chunk, :]
rendered_ray_chunks = self.volume_renderer(self.anim_nerf, rays_chunk, perturb=perturb)
for k, v in rendered_ray_chunks.items():
results[k] += [v]
for k, v in results.items():
results[k] = torch.cat(v, 1)
return results
def configure_optimizers(self):
parameters = [ {'params': self.anim_nerf.parameters(), 'lr': self.hparams.train.lr}]
if self.hparams.deformation_dim > 0 or self.hparams.apperance_dim > 0:
parameters.append({'params': self.latent_codes.parameters(), 'lr': self.hparams.train.lr})
if self.hparams.optim_body_params:
parameters.append({'params': self.body_model_params.parameters(), 'lr': self.hparams.train.lr*0.5})
self.optimizer = get_optimizer(self.hparams.train, parameters)
self.scheduler = get_scheduler(self.hparams.train, self.optimizer)
return [self.optimizer], [self.scheduler]
def compute_loss(self, rgbs, alphas, results, frame_idx=None, latent_code=None, fg_points=None, bg_points=None):
loss = 0
loss_details = {}
# rgb
loss_rgb = F.mse_loss(results['rgbs'], rgbs, reduction='mean')
loss += loss_rgb
loss_details['loss_rgb'] = loss_rgb
if self.hparams.n_importance > 0 and not self.hparams.share_fine:
loss_rgb_fine = F.mse_loss(results['rgbs_fine'], rgbs, reduction='mean')
loss += loss_rgb_fine
loss_details['loss_rgb_fine'] = loss_rgb_fine
# alphas
loss_alphas = F.l1_loss(results['alphas'], alphas)
loss += self.hparams.train.lambda_alphas * loss_alphas
loss_details['loss_alphas'] = loss_alphas
if self.hparams.n_importance > 0 and not self.hparams.share_fine:
loss_alphas_fine = F.l1_loss(results['alphas_fine'], alphas)
loss += self.hparams.train.lambda_alphas * loss_alphas_fine
loss_details['loss_alphas_fine'] = loss_alphas_fine
# if latent_code is not None:
# loss_latent = torch.mean(torch.pow(latent_code, 2))
# loss += self.hparams.lambda_latent * loss_latent
# loss_details['loss_latent'] = loss_latent
# frame_idx_ = torch.clamp(frame_idx+1, 0, self.hparams.num_frames)
# latent_code_ = self.latent_codes(frame_idx_)
# loss_latent_smooth = F.mse_loss(latent_code, latent_code_)
# loss += self.hparams.lambda_latent_smooth * loss_latent_smooth
# loss_details['loss_latent_smooth'] = loss_latent_smooth
if self.hparams.use_unpose and fg_points is not None:
fg_points_sigma = self.anim_nerf.query_canonical_space(fg_points, use_fine=False, only_sigma=True)
loss_foreground = torch.mean(torch.exp(-2.0/self.hparams.n_samples * torch.relu(fg_points_sigma)))
loss += self.hparams.train.lambda_foreground * loss_foreground
loss_details['loss_foreground'] = loss_foreground
if self.hparams.n_importance > 0 and not self.hparams.share_fine:
fg_points_sigma_fine = self.anim_nerf.query_canonical_space(fg_points, use_fine=True, only_sigma=True)
loss_foreground_fine = torch.mean(torch.exp(-2.0/self.hparams.n_samples * torch.relu(fg_points_sigma_fine)))
loss += self.hparams.train.lambda_foreground * loss_foreground_fine
loss_details['loss_foreground_fine'] = loss_foreground_fine
if self.hparams.use_unpose and bg_points is not None:
bg_points_sigma = self.anim_nerf.query_canonical_space(bg_points, use_fine=False, only_sigma=True)
loss_background = torch.mean(1 - torch.exp(-2.0/self.hparams.n_samples * torch.relu(bg_points_sigma)))
loss += self.hparams.train.lambda_background * loss_background
loss_details['loss_background'] = loss_background
if self.hparams.n_importance > 0 and not self.hparams.share_fine:
bg_points_sigma_fine = self.anim_nerf.query_canonical_space(bg_points, use_fine=True, only_sigma=True)
loss_background_fine = torch.mean(1 - torch.exp(-2.0/self.hparams.n_samples * torch.relu(bg_points_sigma_fine)))
loss += self.hparams.train.lambda_background * loss_background_fine
loss_details['loss_background_fine'] = loss_background_fine
# normal
points = self.anim_nerf.verts_template.detach()
points += torch.randn_like(points) * self.hparams.dis_threshold * 0.5
points_neighbs = points + torch.randn_like(points) * self.hparams.train.epsilon
points_normal = self.anim_nerf.query_canonical_space(points, use_fine=False, only_normal=True)
points_neighbs_normal = self.anim_nerf.query_canonical_space(points_neighbs, use_fine=False, only_normal=True)
points_normal = points_normal / (torch.norm(points_normal, p=2, dim=-1, keepdim=True) + 1e-5)
points_neighbs_normal = points_neighbs_normal / (torch.norm(points_neighbs_normal, p=2, dim=-1, keepdim=True) + 1e-5)
loss_normals = F.mse_loss(points_normal, points_neighbs_normal)
# loss_normals = torch.mean((torch.norm(points_normal, p=2, dim=-1) - 1)**2)
loss += self.hparams.train.lambda_normals * loss_normals
loss_details['loss_normals'] = loss_normals
if self.hparams.n_importance > 0 and not self.hparams.share_fine:
points_normal_fine = self.anim_nerf.query_canonical_space(points, use_fine=True, only_normal=True)
points_neighbs_normal_fine = self.anim_nerf.query_canonical_space(points_neighbs, use_fine=True, only_normal=True)
points_normal_fine = points_normal_fine / (torch.norm(points_normal_fine, p=2, dim=-1, keepdim=True) + 1e-5)
points_neighbs_normal_fine = points_neighbs_normal_fine / (torch.norm(points_neighbs_normal_fine, p=2, dim=-1, keepdim=True) + 1e-5)
loss_normals_fine = F.mse_loss(points_normal_fine, points_neighbs_normal_fine)
# loss_normals_fine = torch.mean((torch.norm(points_normal_fine, p=2, dim=-1) - 1)**2)
loss += self.hparams.train.lambda_normals * loss_normals_fine
loss_details['loss_normals_fine'] = loss_normals_fine
# if body_model_params is not None:
# loss_pose = F.mse_loss(results['joints'].clone(), self.anim_nerf.model(**body_model_params)['joints'].clone())
# loss += self.hparams.lambda_pose * loss_pose
# loss_details['loss_pose'] = loss_pose
# frame_id_ = torch.clamp(frame_id+1, 0, self.body_model_params.num_frame-1)
# body_model_params_ref_ = self.body_model_params(frame_id_)
# loss_pose_smooth = F.mse_loss(self.anim_nerf.joints, self.anim_nerf.model(**body_model_params_ref_)['joints'])
# loss += self.hparams.lambda_pose_smooth * loss_pose_smooth
# loss_details['loss_pose_smooth'] = loss_pose_smooth
return loss, loss_details
def training_step(self, batch, batch_nb):
frame_id, cam_id, frame_idx, rays, rgbs, alphas, body_model_params, body_model_params_template, fg_points, bg_points = self.decode_batch(batch)
if self.hparams.latent_dim > 0:
latent_code = self.latent_codes(frame_idx)
else:
latent_code = None
if self.hparams.optim_body_params:
body_model_params = self.body_model_params(frame_idx)
results = self(rays, body_model_params, body_model_params_template, latent_code=latent_code)
loss, loss_details = self.compute_loss(rgbs, alphas, results, frame_idx=frame_idx, fg_points=fg_points, bg_points=bg_points)
self.log('train/loss', loss, on_step=True, on_epoch=False, prog_bar=True, logger=True)
for loss_name in loss_details.keys():
self.log('train/{}'.format(loss_name), loss_details[loss_name], on_step=True, on_epoch=False, prog_bar=True, logger=True)
with torch.no_grad():
if 'rgbs_fine' in results:
train_psnr = psnr(results['rgbs_fine'], rgbs)
else:
train_psnr = psnr(results['rgbs'], rgbs)
self.log('train/psnr', train_psnr, on_step=True, on_epoch=False, prog_bar=True, logger=True)
lr = get_learning_rate(self.optimizer)
self.log('lr', lr, on_step=False, on_epoch=True, prog_bar=False, logger=True)
return loss
def validation_step(self, batch, batch_nb):
frame_id, cam_id, frame_idx, rays, rgbs, alphas, body_model_params, body_model_params_template, fg_points, bg_points = self.decode_batch(batch)
if self.hparams.latent_dim > 0:
if frame_idx != -1:
latent_code = self.latent_codes(frame_idx)
else:
latent_code = self.latent_codes(torch.zeros_like(frame_idx))
else:
latent_code = None
if self.hparams.optim_body_params and frame_idx != -1:
body_model_params = self.body_model_params(frame_idx)
# else:
# body_model_params['betas'] = self.body_model_params.betas(torch.zeros_like(frame_idx))
results = self(rays, body_model_params, body_model_params_template, latent_code=latent_code)
loss, _ = self.compute_loss(rgbs, alphas, results)
self.log('val/loss', loss, on_step=False, on_epoch=True, prog_bar=True, logger=True)
if 'rgbs_fine' in results:
val_psnr = psnr(results['rgbs_fine'], rgbs)
else:
val_psnr = psnr(results['rgbs'], rgbs)
self.log('val/psnr', val_psnr, on_step=False, on_epoch=True, prog_bar=True, logger=True)
W, H = self.hparams.img_wh
def visualize(frame_id, cam_id, rgbs_gt, rgbs, depths, W, H):
img = rgbs.cpu().view(H, W, 3).permute(2, 0, 1) # (3, H, W)
img_gt = rgbs_gt.cpu().view(H, W, 3).permute(2, 0, 1) # (3, H, W)
depth = visualize_depth(depths.cpu().view(H, W))
stack = torch.stack([img_gt, img, depth]) # (3, 3, H, W)
self.logger.experiment.add_images('val/GT_pred_depth_cam{:0>3d}_{:0>6d}'.format(cam_id, frame_id), stack, self.global_step)
if batch_nb % self.hparams.val.vis_freq == 0:
if 'rgbs_fine' in results:
visualize(frame_id.item(), cam_id.item(), rgbs, results['rgbs_fine'], results['depths_fine'], W, H)
else:
visualize(frame_id.item(), cam_id.item(), rgbs, results['rgbs'], results['depths'], W, H)
return loss
def test_step(self, batch, batch_nb):
frame_id, cam_id, frame_idx, rays, rgbs, alphas, body_model_params, body_model_params_template, fg_points, bg_points = self.decode_batch(batch)
if self.hparams.latent_dim > 0:
if frame_idx != -1:
latent_code = self.latent_codes(frame_idx)
else:
latent_code = self.latent_codes(torch.zeros_like(frame_idx))
else:
latent_code = None
if self.hparams.optim_body_params and frame_idx != -1:
body_model_params = self.body_model_params(frame_idx)
# else:
# body_model_params['betas'] = self.body_model_params.betas(torch.zeros_like(frame_idx))
results = self(rays, body_model_params, body_model_params_template, latent_code=latent_code, perturb=0.0)
loss, _ = self.compute_loss(rgbs, alphas, results)
self.log('test/loss', loss, on_step=False, on_epoch=True, prog_bar=True, logger=False)
if 'rgbs_fine' in results:
test_psnr = psnr(results['rgbs_fine'], rgbs)
else:
test_psnr = psnr(results['rgbs'], rgbs)
self.log('test/psnr', test_psnr, on_step=False, on_epoch=True, prog_bar=True, logger=False)
W, H = self.hparams.img_wh
def visualize(frame_id, cam_id, rgbs_gt, rgbs, depths, W, H):
img = rgbs.cpu().view(H, W, 3).permute(2, 0, 1) # (3, H, W)
img_gt = rgbs_gt.cpu().view(H, W, 3).permute(2, 0, 1) # (3, H, W)
depth = visualize_depth(depths.cpu().view(H, W))
stack = torch.stack([img_gt, img, depth]) # (3, 3, H, W)
os.makedirs(os.path.join(self.hparams.outputs_dir, self.hparams.exp_name, 'cam{:0>3d}'.format(cam_id)), exist_ok=True)
save_image(stack, '{}/{}/cam{:0>3d}/{:0>6d}.png'.format(self.hparams.outputs_dir, self.hparams.exp_name, cam_id, frame_id))
#self.logger.experiment.add_images('test/GT_pred_depth_{}'.format(nb), stack, self.global_step)
if batch_nb % self.hparams.test.vis_freq == 0:
if 'rgbs_fine' in results:
visualize(frame_id.item(), cam_id.item(), rgbs, results['rgbs_fine'], results['depths_fine'], W, H)
else:
visualize(frame_id.item(), cam_id.item(), rgbs, results['rgbs'], results['depths'], W, H)
return loss
if __name__ == '__main__':
# torch.autograd.set_detect_anomaly(True)
train_start_time = time.time()
cfg = get_cfg()
data = AnimNeRFData(cfg)
system = AnimNeRFSystem(cfg)
print(system)
if cfg.train.ckpt_path is not None:
for model_name in cfg.train.model_names_to_load:
load_ckpt(getattr(system, model_name), cfg.train.ckpt_path, model_name)
for param in getattr(system, model_name).parameters():
param.requires_grad = cfg.train.pretrained_model_requires_grad
checkpoint_callback = ModelCheckpoint(dirpath=f'{cfg.checkpoints_dir}/{cfg.exp_name}',
filename='{epoch:d}',
monitor='train/psnr',
mode='max',
save_top_k=cfg.train.save_top_k,
save_last=cfg.train.save_last)
logger = TensorBoardLogger(
save_dir=cfg.logs_dir,
name=cfg.exp_name,
)
trainer = Trainer(max_epochs=cfg.train.max_epochs,
callbacks=[checkpoint_callback],
logger=logger,
gpus=cfg.num_gpus,
strategy=cfg.train.strategy,
num_sanity_val_steps=1,
benchmark=True,
profiler="simple")
trainer.fit(system, data, ckpt_path=cfg.train.ckpt_path if cfg.train.resume else None)
trainer.test(datamodule=data)
train_end_message = 'End of training \t Time Taken: %.3f hours' % ((time.time() - train_start_time)/3600.0)
print(train_end_message) |
py | 1a2ed4403a4bf9c2636129169b534c8c00be1050 | from mmdet.models.builder import DETECTORS
from .single_stage_text_detector import SingleStageTextDetector
from .text_detector_mixin import TextDetectorMixin
@DETECTORS.register_module()
class FCENet(TextDetectorMixin, SingleStageTextDetector):
"""The class for implementing FCENet text detector
FCENet(CVPR2021): Fourier Contour Embedding for Arbitrary-shaped Text
Detection
[https://arxiv.org/abs/2104.10442]
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
show_score=False,
init_cfg=None):
SingleStageTextDetector.__init__(self, backbone, neck, bbox_head,
train_cfg, test_cfg, pretrained,
init_cfg)
TextDetectorMixin.__init__(self, show_score)
def simple_test(self, img, img_metas, rescale=False):
x = self.extract_feat(img)
outs = self.bbox_head(x)
boundaries = self.bbox_head.get_boundary(outs, img_metas, rescale)
return [boundaries]
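# Hedged usage sketch (not part of this module). The import path, config file
# name and checkpoint path below are assumptions based on typical mmocr usage,
# so the sketch is kept as comments rather than executable code:
#
#   from mmocr.apis import init_detector, model_inference
#   model = init_detector('configs/textdet/fcenet/fcenet_r50_fpn_1500e_icdar2015.py',
#                         'fcenet.pth', device='cuda:0')
#   result = model_inference(model, 'demo_text_det.jpg')
#   # `result` holds the polygon boundaries returned by simple_test() above.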
|
py | 1a2ed47bfc16e006146e2f741497c36729e0cf0b | from collections.abc import MutableSet, Sequence
from itertools import tee
class OrderedSet(MutableSet, Sequence):
def __init__(self, iterable=None):
if iterable is None:
iterable = []
it1, it2 = tee(iterable)
self._set = set(it1)
self._list = []
for i in it2:
if i not in self._list:
self._list.append(i)
def add(self, value):
if value not in self._set:
self._set.add(value)
self._list.append(value)
def discard(self, value):
if value in self._set:
self._set.discard(value)
self._list.remove(value)
def __contains__(self, value):
return value in self._set
def __len__(self):
return len(self._set)
def __iter__(self):
return iter(self._list)
def __getitem__(self, slice_):
return self._list[slice_]
    def index(self, value, start=0, stop=None):
        # list.index already supports start/stop and returns the absolute
        # index; slicing first would return an index relative to the slice.
        if stop is None:
            stop = len(self._list)
        return self._list.index(value, start, stop)
def __repr__(self):
return 'OrderedSet(' + ','.join(repr(i) for i in self) + ')'
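# Minimal usage sketch (illustrative, not part of the original module):
# OrderedSet keeps first-insertion order while providing set-style membership.
if __name__ == '__main__':
    s = OrderedSet([3, 1, 2, 1, 3])
    assert list(s) == [3, 1, 2]        # duplicates dropped, order preserved
    s.add(1)                           # already present, no effect
    s.discard(2)
    assert list(s) == [3, 1]
    assert s.index(1) == 1             # Sequence-style indexing
    print(s)                           # OrderedSet(3,1)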
|
py | 1a2ed50591b8fa7750fb49f6df4e5567ff91879b | #!/usr/bin/env python3
# Copyright (c) 2004-present Facebook All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from typing import List, Optional
from pysymphony import SymphonyClient
from ..common.constant import SUPERUSER_ROLE, USER_ROLE
from ..common.data_enum import Entity
from ..exceptions import EntityNotFoundError
from ..graphql.enum.user_role import UserRole
from ..graphql.enum.user_status import UserStatus
from ..graphql.fragment.user import UserFragment
from ..graphql.input.edit_user import EditUserInput
from ..graphql.mutation.edit_user import EditUserMutation
from ..graphql.query.user import UserQuery
from ..graphql.query.users import UsersQuery
def get_user(client: SymphonyClient, email: str) -> UserFragment:
"""Returns `pyinventory.graphql.fragment.user.UserFragment` object by its email
Args:
email (str): the email address the user registered with
Returns:
`pyinventory.graphql.fragment.user.UserFragment` object
Raises:
`pyinventory.exceptions.EntityNotFoundError`: the user was not found
FailedOperationException: internal inventory error
Example:
```
user = client.get_user(email="[email protected]")
```
"""
user = UserQuery.execute(client, email)
if user is None:
raise EntityNotFoundError(entity=Entity.User, entity_name=email)
return UserFragment(
id=user.id,
authID=user.authID,
email=user.email,
status=user.status,
role=user.role,
)
def add_user(client: SymphonyClient, email: str, password: str) -> UserFragment:
"""Adds new user to inventory with its email and password
Args:
email (str): the email address of the user
password (str): password the user would connect with
Returns:
`pyinventory.graphql.fragment.user.UserFragment` object
Raises:
`pyinventory.exceptions.EntityNotFoundError`: the user was not created properly
FailedOperationException: internal inventory error
AssertionError: The user was not created for some known reason
HTTPError: Error with connection
Example:
```
user = client.add_user(email="[email protected]", password="P0ssW!rd0f43")
```
"""
resp = client.post(
"/user/async/",
{"email": email, "password": password, "role": USER_ROLE, "networkIDs": []},
)
if not resp.ok:
error_message = resp.json().get("error", None)
if error_message is not None:
raise AssertionError(error_message)
        # A bare ``raise`` has no active exception to re-raise here; surface
        # the HTTP failure instead (assuming ``resp`` is a requests Response,
        # which the documented HTTPError behaviour suggests).
        resp.raise_for_status()
return get_user(client=client, email=email)
def edit_user(
client: SymphonyClient,
user: UserFragment,
new_password: Optional[str] = None,
new_role: Optional[UserRole] = None,
) -> None:
"""Edit user password and role
Args:
user ( `pyinventory.graphql.fragment.user.UserFragment` ): user to edit
new_password (Optional[str]): new password the user would connect with
new_role ( `pyinventory.graphql.enum.user_role.UserRole` ): user new role
Raises:
FailedOperationException: internal inventory error
AssertionError: The user was not edited for some known reason
HTTPError: Error with connection
Example:
```
user = client.add_user(email="[email protected]", password="P0ssW!rd0f43")
client.edit_user(user=user, new_password="New_Password4Ever", new_role=UserRole.ADMIN)
```
"""
params = {}
if new_password is not None:
params.update({"password": new_password})
if new_role is not None:
params.update(
{"role": USER_ROLE if new_role == UserRole.USER else SUPERUSER_ROLE}
)
resp = client.put(f"/user/set/{user.email}", params)
if not resp.ok:
error_message = resp.json().get("error", None)
if error_message is not None:
raise AssertionError(error_message)
        # A bare ``raise`` has no active exception to re-raise here; surface
        # the HTTP failure instead (assuming ``resp`` is a requests Response,
        # which the documented HTTPError behaviour suggests).
        resp.raise_for_status()
if new_role is not None:
EditUserMutation.execute(client, input=EditUserInput(id=user.id, role=new_role))
def deactivate_user(client: SymphonyClient, user: UserFragment) -> None:
"""Deactivate the user which would prevent the user from login in to symphony
Users in symphony are never deleted. Only de-activated.
Args:
user ( `pyinventory.graphql.fragment.user.UserFragment` ): user to deactivate
Raises:
FailedOperationException: internal inventory error
Example:
```
user = client.get_user(email="[email protected]")
client.deactivate_user(user=user)
```
"""
EditUserMutation.execute(
client, input=EditUserInput(id=user.id, status=UserStatus.DEACTIVATED)
)
def activate_user(client: SymphonyClient, user: UserFragment) -> None:
"""Activate the user which would allow the user to login again to symphony
Args:
user ( `pyinventory.graphql.fragment.user.UserFragment` ): user to activate
Raises:
FailedOperationException: internal inventory error
Example:
```
user = client.get_user(email="[email protected]")
client.activate_user(user=user)
```
"""
EditUserMutation.execute(
client, input=EditUserInput(id=user.id, status=UserStatus.ACTIVE)
)
def get_users(client: SymphonyClient) -> List[UserFragment]:
"""Get the list of users in the system (both active and deactivate)
Returns:
List[ `pyinventory.graphql.fragment.user.UserFragment` ]
Raises:
FailedOperationException: internal inventory error
Example:
```
users = client.get_users()
for user in users:
print(user.email)
```
"""
result = UsersQuery.execute(client)
if result is None:
return []
users = []
for edge in result.edges:
node = edge.node
if node is not None:
users.append(
UserFragment(
id=node.id,
authID=node.authID,
email=node.email,
status=node.status,
role=node.role,
)
)
return users
def get_active_users(client: SymphonyClient) -> List[UserFragment]:
"""Get the list of the active users in the system
Returns:
List[ `pyinventory.graphql.fragment.user.UserFragment` ]
Raises:
FailedOperationException: internal inventory error
Example:
```
users = client.get_active_users()
for user in users:
print(user.email)
```
"""
users = get_users(client=client)
return [user for user in users if user.status == UserStatus.ACTIVE]
|
py | 1a2ed521273fc18d627e33dab6c4d4e5e9b8bead | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import string
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
else:
string_types = basestring,
integer_types = (int, long)
SEP = "____"
KLS_NAME_CHARSET = set(string.ascii_letters + string.digits)
VAR_NAME_CHARSET = set(string.ascii_lowercase + string.digits + "_")
VAR_FORBIDDEN_CHARSET = set(
r"""~`!@#$%^&*()-+={}[]|\:;"'<,>.?/""" + string.ascii_uppercase)
INDEX_KEY_FORBIDDEN_CHARSET = set(r"""~`!@#$%^&*()-+={}[]|\:;"'<,>.?/""")
WHITE_SPACE = set(string.whitespace)
def is_valid_class_name(name):
"""Check if it is a valid variable name.
A valid variable name has to:
- start wither upper case
- only alpha digits
"""
try:
assert name[0].isupper()
assert len(set(name).difference(KLS_NAME_CHARSET)) == 0
return True
except:
return False
def is_valid_variable_name(name):
"""Check if it is a valid variable name.
A valid variable name has to:
- start wither lower case
- reserved SEPTERATOR is not in it.
"""
try:
assert name[0].islower()
assert SEP not in name
assert len(set(name).difference(VAR_NAME_CHARSET)) == 0
return True
except:
return False
def is_valid_surfix(name):
"""Surfix is the attribute name used for index.
**中文文档**
此方法暂时没用。
"""
try:
assert SEP not in name
assert len(VAR_FORBIDDEN_CHARSET.intersection(name)) == 0
return True
except:
return False
def to_variable_name(cls_name):
"""Convert class name to variable name format. usually use "_" to connect
each word.
**中文文档**
将类名转化为其实例的变量名。
"""
assert is_valid_class_name(cls_name)
words = list()
chunks = list()
for char in cls_name:
if char.isupper():
words.append("".join(chunks))
chunks = ["_", char.lower()]
else:
chunks.append(char)
words.append("".join(chunks))
return "".join(words)[1:]
def to_index_key(value):
"""Convert a value to it's index key in string.
Only alpha and digits and underscore is allowed. Whitespace delimiter will
replaced with underscore.
`` *David# #John* `` -> ``David_John``
"""
if isinstance(value, integer_types):
key = str(value)
elif isinstance(value, string_types):
l = list()
for c in value:
if c not in INDEX_KEY_FORBIDDEN_CHARSET:
if c in WHITE_SPACE:
l.append(" ")
else:
l.append(c)
words = [word for word in "".join(
l).strip().split(" ") if word.strip()]
key = "_".join(words)
elif isinstance(value, float):
key = str(value).replace(".", "d")
else:
raise TypeError("%r is not an indexable value.")
return key
def test_is_valid_class_name():
for name in ["User", "MyClass", "TestCase"]:
assert is_valid_class_name(name) is True
for name in ["user", "My_Class", "testCase"]:
assert is_valid_class_name(name) is False
def test_is_valid_variable_name():
for name in ["name", "my_class", "num1"]:
assert is_valid_variable_name(name) is True
for name in ["Name", "myClass", "1a"]:
assert is_valid_variable_name(name) is False
def test_is_valid_surfix():
assert is_valid_surfix("大卫") is True
def test_to_variable_name():
assert to_variable_name("User") == "user"
assert to_variable_name("MyClass") == "my_class"
def test_to_index_key():
assert to_index_key(1) == "1"
assert to_index_key("David John") == "David_John"
assert to_index_key(" *David+ +John* ") == "David_John"
assert to_index_key("中文") == "中文"
assert to_index_key(" 英 文 ") == "英_文"
assert to_index_key(3.14) == "3d14"
if __name__ == "__main__":
test_is_valid_class_name()
test_is_valid_variable_name()
test_is_valid_surfix()
test_to_variable_name()
test_to_index_key()
|
py | 1a2ed5304b47b5ddd430dfd7759ebbcc9a4f02b4 | from typing import List
import dash_html_components as html
from .. import WebvizPluginABC
class ExampleTour(WebvizPluginABC):
@property
def tour_steps(self) -> List[dict]:
return [
{"id": self.uuid("blue_text"), "content": "This is the first step"},
{"id": self.uuid("red_text"), "content": "This is the second step"},
]
@property
def layout(self) -> html.Div:
return html.Div(
children=[
html.Span(
"Here is some blue text to explain... ",
id=self.uuid("blue_text"),
style={"color": "blue"},
),
html.Span(
" ...and here is some red text that also needs an explanation.",
id=self.uuid("red_text"),
style={"color": "red"},
),
]
)
|
py | 1a2ed6b5572c5252fd3e6bd67d2091d5e8e96cb4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#-----------Welcome to DeAdSeC Python Codex----------#
#-------Made By DeAdSeC-------#
#---Version 1.0.0---#
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple
C = '\033[36m' # cyan
GR = '\033[37m' # gray
D = '\033[2m' # dims current color. {W} resets.
Plus = f'{W}{D}[{W}{G}+{W}{D}]{W}' #[+]
Danger = f'{O}[{R}!{O}]{W}' #[!]
WTF = f'{W}[{C}?{W}]' #[?]
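# Small demonstration of the status prefixes defined above (an assumed usage
# pattern, added for illustration only):
if __name__ == '__main__':
    print(f'{Plus} Module loaded successfully{W}')
    print(f'{Danger} Something went wrong{W}')
    print(f'{WTF} Waiting for user input{W}')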
|
py | 1a2ed73c6938b74922ea5d4c0a29b3adcb86a5a0 | import torch
def config_device():
# determine if gpu is to be used
return torch.device("cuda" if torch.cuda.is_available() else "cpu")
def update_rule(fast_lr):
def update_q(old, new):
return old + fast_lr * (new - old)
return update_q |
py | 1a2ed8fd542eee35350c522db44f819721ca587c | import numpy as np
from scipy.optimize import minimize
from scipy.optimize import Bounds
__all__ = [
"MAD",
"SemiDeviation",
"VaR_Hist",
"CVaR_Hist",
"WR",
"LPM",
"Entropic_RM",
"EVaR_Hist",
"MDD_Abs",
"ADD_Abs",
"DaR_Abs",
"CDaR_Abs",
"EDaR_Abs",
"UCI_Abs",
"MDD_Rel",
"ADD_Rel",
"DaR_Rel",
"CDaR_Rel",
"EDaR_Rel",
"UCI_Rel",
"Sharpe_Risk",
"Sharpe",
"Risk_Contribution",
]
def MAD(X):
r"""
Calculate the Mean Absolute Deviation (MAD) of a returns series.
.. math::
\text{MAD}(X) = \frac{1}{T}\sum_{t=1}^{T}
| X_{t} - \mathbb{E}(X_{t}) |
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
Returns
-------
value : float
MAD of a returns series.
Raises
------
ValueError
When the value cannot be calculated.
Examples
--------
    A small hand-checkable example: for the returns below the mean is 0.005,
    so the absolute deviations are 0.015, 0.015, 0.025 and 0.025, whose mean
    (the MAD) is 0.02.

    >>> X = [0.02, -0.01, 0.03, -0.02]
    >>> round(MAD(X), 6)
    0.02
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
value = np.mean(np.absolute(a - np.mean(a, axis=0)), axis=0)
value = np.array(value).item()
return value
def SemiDeviation(X):
r"""
Calculate the Semi Deviation of a returns series.
.. math::
\text{SemiDev}(X) = \left [ \frac{1}{T-1}\sum_{t=1}^{T}
(X_{t} - \mathbb{E}(X_{t}))^2 \right ]^{1/2}
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
Semi Deviation of a returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
mu = np.mean(a, axis=0)
value = mu - a
n = value.shape[0]
value = np.sum(np.power(value[np.where(value >= 0)], 2)) / (n - 1)
value = np.power(value, 0.5).item()
return value
def VaR_Hist(X, alpha=0.05):
r"""
Calculate the Value at Risk (VaR) of a returns series.
.. math::
\text{VaR}_{\alpha}(X) = -\inf_{t \in (0,T)} \left \{ X_{t} \in
\mathbb{R}: F_{X}(X_{t})>\alpha \right \}
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
alpha : float, optional
Significance level of VaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
VaR of a returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
sorted_a = np.sort(a, axis=0)
index = int(np.ceil(alpha * len(sorted_a)) - 1)
value = -sorted_a[index]
value = np.array(value).item()
return value
def CVaR_Hist(X, alpha=0.05):
r"""
Calculate the Conditional Value at Risk (CVaR) of a returns series.
.. math::
\text{CVaR}_{\alpha}(X) = \text{VaR}_{\alpha}(X) +
\frac{1}{\alpha T} \sum_{t=1}^{T} \max(-X_{t} -
\text{VaR}_{\alpha}(X), 0)
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
alpha : float, optional
Significance level of CVaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
CVaR of a returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
sorted_a = np.sort(a, axis=0)
index = int(np.ceil(alpha * len(sorted_a)) - 1)
sum_var = 0
for i in range(0, index + 1):
sum_var = sum_var + sorted_a[i] - sorted_a[index]
value = -sorted_a[index] - sum_var / (alpha * len(sorted_a))
value = np.array(value).item()
return value
def WR(X):
r"""
Calculate the Worst Realization (WR) or Worst Scenario of a returns series.
.. math::
\text{WR}(X) = \max(-X)
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
WR of a returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
sorted_a = np.sort(a, axis=0)
value = -sorted_a[0]
value = np.array(value).item()
return value
def LPM(X, MAR=0, p=1):
r"""
Calculate the First or Second Lower Partial Moment of a returns series.
.. math::
\text{LPM}(X, \text{MAR}, 1) &= \frac{1}{T}\sum_{t=1}^{T}
\max(\text{MAR} - X_{t}, 0) \\
\text{LPM}(X, \text{MAR}, 2) &= \left [ \frac{1}{T-1}\sum_{t=1}^{T}
\max(\text{MAR} - X_{t}, 0)^{2} \right ]^{\frac{1}{2}} \\
Where:
:math:`\text{MAR}` is the minimum acceptable return.
:math:`p` is the order of the :math:`\text{LPM}`.
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
MAR : float, optional
Minimum acceptable return. The default is 0.
    p : int, optional
        Order of the :math:`\text{LPM}`, can be 1 or 2. The default is 1.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
p-th Lower Partial Moment of a returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
if p not in [1, 2]:
raise ValueError("p can only be 1 or 2")
value = MAR - a
if p == 2:
n = value.shape[0] - 1
else:
n = value.shape[0]
value = np.sum(np.power(value[np.where(value >= 0)], p)) / n
value = np.power(value, 1 / p).item()
return value
def Entropic_RM(X, z=1, alpha=0.05):
r"""
Calculate the Entropic Risk Measure (ERM) of a returns series.
.. math::
\text{ERM}_{\alpha}(X) = z\ln \left (\frac{M_X(z^{-1})}{\alpha} \right )
Where:
:math:`M_X(z)` is the moment generating function of X.
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
    z : float, optional
Risk aversion parameter, must be greater than zero. The default is 1.
alpha : float, optional
Significance level of EVaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
ERM of a returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
value = np.mean(np.exp(-1 / z * a), axis=0)
value = z * (np.log(value) + np.log(1 / alpha))
value = np.array(value).item()
return value
def _Entropic_RM(z, X, alpha=0.05):
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
value = np.mean(np.exp(-1 / z * a), axis=0)
value = z * (np.log(value) + np.log(1 / alpha))
value = np.array(value).item()
return value
def EVaR_Hist(X, alpha=0.05):
r"""
Calculate the Entropic Value at Risk (EVaR) of a returns series.
.. math::
\text{EVaR}_{\alpha}(X) = \inf_{z>0} \left \{ z
\ln \left (\frac{M_X(z^{-1})}{\alpha} \right ) \right \}
Where:
:math:`M_X(t)` is the moment generating function of X.
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
alpha : float, optional
Significance level of EVaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
(value, z) : tuple
EVaR of a returns series and value of z that minimize EVaR.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
bnd = Bounds([1e-12], [np.inf])
result = minimize(
_Entropic_RM, [1], args=(X, alpha), method="SLSQP", bounds=bnd, tol=1e-12
)
t = result.x
t = t.item()
value = _Entropic_RM(t, X, alpha)
return (value, t)
def MDD_Abs(X):
r"""
Calculate the Maximum Drawdown (MDD) of a returns series
    using uncompounded cumulative returns.
.. math::
\text{MDD}(X) = \max_{j \in (0,T)} \left [\max_{t \in (0,j)}
\left ( \sum_{i=0}^{t}X_{i} \right ) - \sum_{i=0}^{j}X_{i} \right ]
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
        MDD of an uncompounded cumulative returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
prices = np.insert(np.array(a), 0, 1, axis=0)
NAV = np.cumsum(np.array(prices), axis=0)
value = 0
peak = -99999
for i in NAV:
if i > peak:
peak = i
DD = peak - i
if DD > value:
value = DD
value = np.array(value).item()
return value
def ADD_Abs(X):
r"""
Calculate the Average Drawdown (ADD) of a returns series
    using uncompounded cumulative returns.
.. math::
\text{ADD}(X) = \frac{1}{T}\sum_{j=0}^{T}\left [ \max_{t \in (0,j)}
\left ( \sum_{i=0}^{t}X_{i} \right ) - \sum_{i=0}^{j}X_{i} \right ]
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
        ADD of an uncompounded cumulative returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
prices = np.insert(np.array(a), 0, 1, axis=0)
NAV = np.cumsum(np.array(prices), axis=0)
value = 0
peak = -99999
n = 0
for i in NAV:
if i > peak:
peak = i
DD = peak - i
if DD > 0:
value += DD
n += 1
if n == 0:
value = 0
else:
value = value / (n - 1)
value = np.array(value).item()
return value
def DaR_Abs(X, alpha=0.05):
r"""
Calculate the Drawdown at Risk (DaR) of a returns series
    using uncompounded cumulative returns.
.. math::
\text{DaR}_{\alpha}(X) & = \max_{j \in (0,T)} \left \{ \text{DD}(X,j)
\in \mathbb{R}: F_{\text{DD}} \left ( \text{DD}(X,j) \right )< 1-\alpha
\right \} \\
\text{DD}(X,j) & = \max_{t \in (0,j)} \left ( \sum_{i=0}^{t}X_{i}
\right )- \sum_{i=0}^{j}X_{i}
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size..
alpha : float, optional
Significance level of DaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
        DaR of an uncompounded cumulative returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
prices = np.insert(np.array(a), 0, 1, axis=0)
NAV = np.cumsum(np.array(prices), axis=0)
DD = []
peak = -99999
for i in NAV:
if i > peak:
peak = i
DD.append(-(peak - i))
del DD[0]
sorted_DD = np.sort(np.array(DD), axis=0)
index = int(np.ceil(alpha * len(sorted_DD)) - 1)
value = -sorted_DD[index]
value = np.array(value).item()
return value
def CDaR_Abs(X, alpha=0.05):
r"""
Calculate the Conditional Drawdown at Risk (CDaR) of a returns series
    using uncompounded cumulative returns.
.. math::
\text{CDaR}_{\alpha}(X) = \text{DaR}_{\alpha}(X) + \frac{1}{\alpha T}
\sum_{j=0}^{T} \max \left [ \max_{t \in (0,j)}
\left ( \sum_{i=0}^{t}X_{i} \right ) - \sum_{i=0}^{j}X_{i}
- \text{DaR}_{\alpha}(X), 0 \right ]
Where:
    :math:`\text{DaR}_{\alpha}` is the Drawdown at Risk of an uncompounded
    cumulative return series :math:`X`.
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size..
alpha : float, optional
Significance level of CDaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
        CDaR of an uncompounded cumulative returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
prices = np.insert(np.array(a), 0, 1, axis=0)
NAV = np.cumsum(np.array(prices), axis=0)
DD = []
peak = -99999
for i in NAV:
if i > peak:
peak = i
DD.append(-(peak - i))
del DD[0]
sorted_DD = np.sort(np.array(DD), axis=0)
index = int(np.ceil(alpha * len(sorted_DD)) - 1)
sum_var = 0
for i in range(0, index + 1):
sum_var = sum_var + sorted_DD[i] - sorted_DD[index]
value = -sorted_DD[index] - sum_var / (alpha * len(sorted_DD))
value = np.array(value).item()
return value
def EDaR_Abs(X, alpha=0.05):
r"""
Calculate the Entropic Drawdown at Risk (EDaR) of a returns series
    using uncompounded cumulative returns.
.. math::
\text{EDaR}_{\alpha}(X) & = \inf_{z>0} \left \{ z
\ln \left (\frac{M_{\text{DD}(X)}(z^{-1})}{\alpha} \right ) \right \} \\
\text{DD}(X,j) & = \max_{t \in (0,j)} \left ( \sum_{i=0}^{t}X_{i}
\right )- \sum_{i=0}^{j}X_{i} \\
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size..
alpha : float, optional
Significance level of EDaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
(value, z) : tuple
        EDaR of an uncompounded cumulative returns series
and value of z that minimize EDaR.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
prices = np.insert(np.array(a), 0, 1, axis=0)
NAV = np.cumsum(np.array(prices), axis=0)
DD = []
peak = -99999
for i in NAV:
if i > peak:
peak = i
DD.append(-(peak - i))
del DD[0]
(value, t) = EVaR_Hist(np.array(DD), alpha=alpha)
return (value, t)
def UCI_Abs(X):
r"""
Calculate the Ulcer Index (UCI) of a returns series
    using uncompounded cumulative returns.
.. math::
\text{UCI}(X) =\sqrt{\frac{1}{T}\sum_{j=0}^{T} \left [ \max_{t \in
(0,j)} \left ( \sum_{i=0}^{t}X_{i} \right ) - \sum_{i=0}^{j}X_{i}
\right ] ^2}
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
        Ulcer Index of an uncompounded cumulative returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
prices = np.insert(np.array(a), 0, 1, axis=0)
NAV = np.cumsum(np.array(prices), axis=0)
value = 0
peak = -99999
n = 0
for i in NAV:
if i > peak:
peak = i
DD = peak - i
if DD > 0:
value += DD ** 2
n += 1
if n == 0:
value = 0
else:
value = np.sqrt(value / (n - 1))
value = np.array(value).item()
return value
def MDD_Rel(X):
r"""
Calculate the Maximum Drawdown (MDD) of a returns series
    using compounded cumulative returns.
.. math::
\text{MDD}(X) = \max_{j \in (0,T)}\left[\max_{t \in (0,j)}
\left ( \prod_{i=0}^{t}(1+X_{i}) \right ) - \prod_{i=0}^{j}(1+X_{i})
\right]
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
        MDD of a compounded cumulative returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
prices = 1 + np.insert(np.array(a), 0, 0, axis=0)
NAV = np.cumprod(prices, axis=0)
value = 0
peak = -99999
for i in NAV:
if i > peak:
peak = i
DD = (peak - i) / peak
if DD > value:
value = DD
value = np.array(value).item()
return value
def ADD_Rel(X):
r"""
Calculate the Average Drawdown (ADD) of a returns series
    using compounded cumulative returns.
.. math::
\text{ADD}(X) = \frac{1}{T}\sum_{j=0}^{T} \left [ \max_{t \in (0,j)}
\left ( \prod_{i=0}^{t}(1+X_{i}) \right )- \prod_{i=0}^{j}(1+X_{i})
\right ]
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
        ADD of a compounded cumulative returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
prices = 1 + np.insert(np.array(a), 0, 0, axis=0)
NAV = np.cumprod(prices, axis=0)
value = 0
peak = -99999
n = 0
for i in NAV:
if i > peak:
peak = i
DD = (peak - i) / peak
if DD > 0:
value += DD
n += 1
if n == 0:
value = 0
else:
value = value / (n - 1)
value = np.array(value).item()
return value
def DaR_Rel(X, alpha=0.05):
r"""
Calculate the Drawdown at Risk (DaR) of a returns series
    using compounded cumulative returns.
.. math::
\text{DaR}_{\alpha}(X) & = \max_{j \in (0,T)} \left \{ \text{DD}(X,j)
\in \mathbb{R}: F_{\text{DD}} \left ( \text{DD}(X,j) \right )< 1 - \alpha
\right \} \\
\text{DD}(X,j) & = \max_{t \in (0,j)} \left ( \prod_{i=0}^{t}(1+X_{i})
\right )- \prod_{i=0}^{j}(1+X_{i})
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size..
alpha : float, optional
Significance level of DaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
        DaR of a compounded cumulative returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("X must have Tx1 size")
prices = 1 + np.insert(np.array(a), 0, 0, axis=0)
NAV = np.cumprod(prices, axis=0)
DD = []
peak = -99999
for i in NAV:
if i > peak:
peak = i
DD.append(-(peak - i) / peak)
del DD[0]
sorted_DD = np.sort(np.array(DD), axis=0)
index = int(np.ceil(alpha * len(sorted_DD)) - 1)
value = -sorted_DD[index]
value = np.array(value).item()
return value
def CDaR_Rel(X, alpha=0.05):
r"""
Calculate the Conditional Drawdown at Risk (CDaR) of a returns series
    using compounded cumulative returns.
.. math::
\text{CDaR}_{\alpha}(X) = \text{DaR}_{\alpha}(X) + \frac{1}{\alpha T}
\sum_{i=0}^{T} \max \left [ \max_{t \in (0,T)}
\left ( \prod_{i=0}^{t}(1+X_{i}) \right )- \prod_{i=0}^{j}(1+X_{i})
- \text{DaR}_{\alpha}(X), 0 \right ]
Where:
    :math:`\text{DaR}_{\alpha}` is the Drawdown at Risk of a compounded
    cumulative return series :math:`X`.
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size..
alpha : float, optional
Significance level of CDaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
        CDaR of a compounded cumulative returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("X must have Tx1 size")
prices = 1 + np.insert(np.array(a), 0, 0, axis=0)
NAV = np.cumprod(prices, axis=0)
DD = []
peak = -99999
for i in NAV:
if i > peak:
peak = i
DD.append(-(peak - i) / peak)
del DD[0]
sorted_DD = np.sort(np.array(DD), axis=0)
index = int(np.ceil(alpha * len(sorted_DD)) - 1)
sum_var = 0
for i in range(0, index + 1):
sum_var = sum_var + sorted_DD[i] - sorted_DD[index]
value = -sorted_DD[index] - sum_var / (alpha * len(sorted_DD))
value = np.array(value).item()
return value
def EDaR_Rel(X, alpha=0.05):
r"""
Calculate the Entropic Drawdown at Risk (EDaR) of a returns series
    using compounded cumulative returns.
.. math::
\text{EDaR}_{\alpha}(X) & = \inf_{z>0} \left \{ z
\ln \left (\frac{M_{\text{DD}(X)}(z^{-1})}{\alpha} \right ) \right \} \\
\text{DD}(X,j) & = \max_{t \in (0,j)} \left ( \prod_{i=0}^{t}(1+X_{i})
\right )- \prod_{i=0}^{j}(1+X_{i})
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size..
alpha : float, optional
Significance level of EDaR. The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
(value, z) : tuple
        EDaR of a compounded cumulative returns series
and value of z that minimize EDaR.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("X must have Tx1 size")
prices = 1 + np.insert(np.array(a), 0, 0, axis=0)
NAV = np.cumprod(prices, axis=0)
DD = []
peak = -99999
for i in NAV:
if i > peak:
peak = i
DD.append(-(peak - i) / peak)
del DD[0]
(value, t) = EVaR_Hist(np.array(DD), alpha=alpha)
return (value, t)
def UCI_Rel(X):
r"""
Calculate the Ulcer Index (UCI) of a returns series
    using compounded cumulative returns.
.. math::
\text{UCI}(X) =\sqrt{\frac{1}{T}\sum_{j=0}^{T} \left [ \max_{t \in
(0,j)} \left ( \prod_{i=0}^{t}(1+X_{i}) \right )- \prod_{i=0}^{j}
(1+X_{i}) \right ] ^2}
Parameters
----------
X : 1d-array
Returns series, must have Tx1 size.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
        Ulcer Index of a compounded cumulative returns series.
"""
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
prices = 1 + np.insert(np.array(a), 0, 0, axis=0)
NAV = np.cumprod(prices, axis=0)
value = 0
peak = -99999
n = 0
for i in NAV:
if i > peak:
peak = i
DD = (peak - i) / peak
if DD > 0:
value += DD ** 2
n += 1
if n == 0:
value = 0
else:
value = np.sqrt(value / (n - 1))
value = np.array(value).item()
return value
###############################################################################
# Risk Adjusted Return Ratios
###############################################################################
def Sharpe_Risk(w, cov=None, returns=None, rm="MV", rf=0, alpha=0.05):
r"""
Calculate the risk measure available on the Sharpe function.
Parameters
----------
w : DataFrame or 1d-array of shape (n_assets, 1)
Weights matrix, where n_assets is the number of assets.
cov : DataFrame or nd-array of shape (n_features, n_features)
Covariance matrix, where n_features is the number of features.
returns : DataFrame or nd-array of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
rm : str, optional
Risk measure used in the denominator of the ratio. The default is
        'MV'. Possible values are:
- 'MV': Standard Deviation.
- 'MAD': Mean Absolute Deviation.
- 'MSV': Semi Standard Deviation.
- 'FLPM': First Lower Partial Moment (Omega Ratio).
- 'SLPM': Second Lower Partial Moment (Sortino Ratio).
- 'VaR': Value at Risk.
- 'CVaR': Conditional Value at Risk.
- 'EVaR': Entropic Value at Risk.
- 'WR': Worst Realization (Minimax)
- 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
- 'ADD': Average Drawdown of uncompounded cumulative returns.
- 'DaR': Drawdown at Risk of uncompounded cumulative returns.
- 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
- 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
- 'UCI': Ulcer Index of uncompounded cumulative returns.
- 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
- 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
- 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
- 'UCI_Rel': Ulcer Index of compounded cumulative returns.
rf : float, optional
Risk free rate. The default is 0.
alpha : float, optional
Significance level of VaR, CVaR, EDaR, DaR, CDaR and EDaR.
The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
Risk measure of the portfolio.
"""
w_ = np.array(w, ndmin=2)
if w_.shape[0] == 1 and w_.shape[1] > 1:
w_ = w_.T
if w_.shape[0] > 1 and w_.shape[1] > 1:
raise ValueError("weights must have n_assets x 1 size")
if cov is not None:
cov_ = np.array(cov, ndmin=2)
if returns is not None:
returns_ = np.array(returns, ndmin=2)
a = returns_ @ w_
if rm == "MV":
risk = w_.T @ cov_ @ w_
risk = np.sqrt(risk.item())
elif rm == "MAD":
risk = MAD(a)
elif rm == "MSV":
risk = SemiDeviation(a)
elif rm == "FLPM":
risk = LPM(a, MAR=rf, p=1)
elif rm == "SLPM":
risk = LPM(a, MAR=rf, p=2)
elif rm == "VaR":
risk = VaR_Hist(a, alpha=alpha)
elif rm == "CVaR":
risk = CVaR_Hist(a, alpha=alpha)
elif rm == "EVaR":
risk = EVaR_Hist(a, alpha=alpha)[0]
elif rm == "WR":
risk = WR(a)
elif rm == "MDD":
risk = MDD_Abs(a)
elif rm == "ADD":
risk = ADD_Abs(a)
elif rm == "DaR":
risk = DaR_Abs(a, alpha=alpha)
elif rm == "CDaR":
risk = CDaR_Abs(a, alpha=alpha)
elif rm == "EDaR":
risk = EDaR_Abs(a, alpha=alpha)[0]
elif rm == "UCI":
risk = UCI_Abs(a)
elif rm == "MDD_Rel":
risk = MDD_Rel(a)
elif rm == "ADD_Rel":
risk = ADD_Rel(a)
elif rm == "DaR_Rel":
risk = DaR_Rel(a, alpha=alpha)
elif rm == "CDaR_Rel":
risk = CDaR_Rel(a, alpha=alpha)
elif rm == "EDaR_Rel":
risk = EDaR_Rel(a, alpha=alpha)[0]
elif rm == "UCI_Rel":
risk = UCI_Rel(a)
value = risk
return value
def Sharpe(w, mu, cov=None, returns=None, rm="MV", rf=0, alpha=0.05):
r"""
Calculate the Risk Adjusted Return Ratio from a portfolio returns series.
.. math::
\text{Sharpe}(X) = \frac{\mathbb{E}(X) -
r_{f}}{\phi(X)}
Where:
:math:`X` is the vector of portfolio returns.
:math:`r_{f}` is the risk free rate, when the risk measure is
:math:`\text{LPM}` uses instead of :math:`r_{f}` the :math:`\text{MAR}`.
    :math:`\phi(X)` is a convex risk measure. The risk measures available are:
Parameters
----------
w : DataFrame or 1d-array of shape (n_assets, 1)
Weights matrix, where n_assets is the number of assets.
mu : DataFrame or nd-array of shape (1, n_assets)
Vector of expected returns, where n_assets is the number of assets.
cov : DataFrame or nd-array of shape (n_features, n_features)
Covariance matrix, where n_features is the number of features.
returns : DataFrame or nd-array of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
rm : str, optional
Risk measure used in the denominator of the ratio. The default is
        'MV'. Possible values are:
- 'MV': Standard Deviation.
- 'MAD': Mean Absolute Deviation.
- 'MSV': Semi Standard Deviation.
- 'FLPM': First Lower Partial Moment (Omega Ratio).
- 'SLPM': Second Lower Partial Moment (Sortino Ratio).
- 'VaR': Value at Risk.
- 'CVaR': Conditional Value at Risk.
- 'EVaR': Entropic Value at Risk.
- 'WR': Worst Realization (Minimax)
- 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
- 'ADD': Average Drawdown of uncompounded cumulative returns.
- 'DaR': Drawdown at Risk of uncompounded cumulative returns.
- 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
- 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
- 'UCI': Ulcer Index of uncompounded cumulative returns.
- 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
- 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
- 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
- 'UCI_Rel': Ulcer Index of compounded cumulative returns.
rf : float, optional
Risk free rate. The default is 0.
alpha : float, optional
Significance level of VaR, CVaR, EDaR, DaR, CDaR and EDaR.
The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
Risk adjusted return ratio of :math:`X`.
"""
w_ = np.array(w, ndmin=2)
if w_.shape[0] == 1 and w_.shape[1] > 1:
w_ = w_.T
if w_.shape[0] > 1 and w_.shape[1] > 1:
raise ValueError("weights must have n_assets x 1 size")
if cov is None and rm == "MV":
raise ValueError("covariance matrix is necessary to calculate the sharpe ratio")
elif returns is None and rm != "MV":
raise ValueError(
"returns scenarios are necessary to calculate the sharpe ratio"
)
mu_ = np.array(mu, ndmin=2)
if cov is not None:
cov_ = np.array(cov, ndmin=2)
if returns is not None:
returns_ = np.array(returns, ndmin=2)
ret = mu_ @ w_
ret = ret.item()
risk = Sharpe_Risk(w, cov=cov_, returns=returns_, rm=rm, rf=rf, alpha=alpha)
value = (ret - rf) / risk
return value
###############################################################################
# Risk Contribution Vectors
###############################################################################
def Risk_Contribution(w, cov=None, returns=None, rm="MV", rf=0, alpha=0.05):
r"""
Calculate the risk contribution for each asset based on the risk measure
selected.
Parameters
----------
w : DataFrame or 1d-array of shape (n_assets, 1)
Weights matrix, where n_assets is the number of assets.
cov : DataFrame or nd-array of shape (n_features, n_features)
Covariance matrix, where n_features is the number of features.
returns : DataFrame or nd-array of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
rm : str, optional
Risk measure used in the denominator of the ratio. The default is
        'MV'. Possible values are:
- 'MV': Standard Deviation.
- 'MAD': Mean Absolute Deviation.
- 'MSV': Semi Standard Deviation.
- 'FLPM': First Lower Partial Moment (Omega Ratio).
- 'SLPM': Second Lower Partial Moment (Sortino Ratio).
- 'VaR': Value at Risk.
- 'CVaR': Conditional Value at Risk.
- 'EVaR': Entropic Value at Risk.
- 'WR': Worst Realization (Minimax)
- 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).
- 'ADD': Average Drawdown of uncompounded cumulative returns.
- 'DaR': Drawdown at Risk of uncompounded cumulative returns.
- 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.
- 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.
- 'UCI': Ulcer Index of uncompounded cumulative returns.
- 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).
        - 'ADD_Rel': Average Drawdown of compounded cumulative returns.
        - 'DaR_Rel': Drawdown at Risk of compounded cumulative returns.
- 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.
- 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.
- 'UCI_Rel': Ulcer Index of compounded cumulative returns.
rf : float, optional
Risk free rate. The default is 0.
alpha : float, optional
Significance level of VaR, CVaR, EDaR, DaR, CDaR and EDaR.
The default is 0.05.
Raises
------
ValueError
When the value cannot be calculated.
Returns
-------
value : float
Risk measure of the portfolio.
"""
w_ = np.array(w, ndmin=2)
if w_.shape[0] == 1 and w_.shape[1] > 1:
w_ = w_.T
if w_.shape[0] > 1 and w_.shape[1] > 1:
raise ValueError("weights must have n_assets x 1 size")
if cov is not None:
cov_ = np.array(cov, ndmin=2)
if returns is not None:
returns_ = np.array(returns, ndmin=2)
RC = []
d_i = 0.0000001
for i in range(0, w_.shape[0]):
delta = np.zeros((w_.shape[0], 1))
delta[i, 0] = d_i
w_1 = w_ + delta
w_2 = w_ - delta
a_1 = returns_ @ w_1
a_2 = returns_ @ w_2
if rm == "MV":
risk_1 = w_1.T @ cov_ @ w_1
risk_1 = np.sqrt(risk_1.item())
risk_2 = w_2.T @ cov_ @ w_2
risk_2 = np.sqrt(risk_2.item())
elif rm == "MAD":
risk_1 = MAD(a_1)
risk_2 = MAD(a_2)
elif rm == "MSV":
risk_1 = SemiDeviation(a_1)
risk_2 = SemiDeviation(a_2)
elif rm == "FLPM":
risk_1 = LPM(a_1, MAR=rf, p=1)
risk_2 = LPM(a_2, MAR=rf, p=1)
elif rm == "SLPM":
risk_1 = LPM(a_1, MAR=rf, p=2)
risk_2 = LPM(a_2, MAR=rf, p=2)
elif rm == "VaR":
risk_1 = VaR_Hist(a_1, alpha=alpha)
risk_2 = VaR_Hist(a_2, alpha=alpha)
elif rm == "CVaR":
risk_1 = CVaR_Hist(a_1, alpha=alpha)
risk_2 = CVaR_Hist(a_2, alpha=alpha)
elif rm == "EVaR":
risk_1 = EVaR_Hist(a_1, alpha=alpha)[0]
risk_2 = EVaR_Hist(a_2, alpha=alpha)[0]
elif rm == "WR":
risk_1 = WR(a_1)
risk_2 = WR(a_2)
elif rm == "MDD":
risk_1 = MDD_Abs(a_1)
risk_2 = MDD_Abs(a_2)
elif rm == "ADD":
risk_1 = ADD_Abs(a_1)
risk_2 = ADD_Abs(a_2)
elif rm == "DaR":
risk_1 = DaR_Abs(a_1, alpha=alpha)
risk_2 = DaR_Abs(a_2, alpha=alpha)
elif rm == "CDaR":
risk_1 = CDaR_Abs(a_1, alpha=alpha)
risk_2 = CDaR_Abs(a_2, alpha=alpha)
elif rm == "EDaR":
risk_1 = EDaR_Abs(a_1, alpha=alpha)[0]
risk_2 = EDaR_Abs(a_2, alpha=alpha)[0]
elif rm == "UCI":
risk_1 = UCI_Abs(a_1)
risk_2 = UCI_Abs(a_2)
elif rm == "MDD_Rel":
risk_1 = MDD_Rel(a_1)
risk_2 = MDD_Rel(a_2)
elif rm == "ADD_Rel":
risk_1 = ADD_Rel(a_1)
risk_2 = ADD_Rel(a_2)
elif rm == "DaR_Rel":
risk_1 = DaR_Rel(a_1, alpha=alpha)
risk_2 = DaR_Rel(a_2, alpha=alpha)
elif rm == "CDaR_Rel":
risk_1 = CDaR_Rel(a_1, alpha=alpha)
risk_2 = CDaR_Rel(a_2, alpha=alpha)
elif rm == "EDaR_Rel":
risk_1 = EDaR_Rel(a_1, alpha=alpha)[0]
risk_2 = EDaR_Rel(a_2, alpha=alpha)[0]
elif rm == "UCI_Rel":
risk_1 = UCI_Rel(a_1)
risk_2 = UCI_Rel(a_2)
RC_i = (risk_1 - risk_2) / (2 * d_i) * w_[i, 0]
RC.append(RC_i)
RC = np.array(RC, ndmin=1)
return RC
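###############################################################################
# Example Usage (illustrative sketch, not part of the original module)
###############################################################################
# The demo below uses a small synthetic return matrix; the seed, weights and
# chosen risk measures are assumptions made only to exercise the functions
# defined above.
if __name__ == "__main__":
    np.random.seed(0)
    returns = np.random.normal(0.001, 0.01, size=(250, 3))  # T x n_assets
    w = np.array([[0.5], [0.3], [0.2]])                      # n_assets x 1
    mu = returns.mean(axis=0).reshape(1, -1)
    cov = np.cov(returns, rowvar=False)
    port = returns @ w                                       # portfolio returns
    print("MAD    :", MAD(port))
    print("CVaR   :", CVaR_Hist(port, alpha=0.05))
    print("EVaR   :", EVaR_Hist(port, alpha=0.05)[0])
    print("MDD    :", MDD_Abs(port))
    print("Sharpe :", Sharpe(w, mu, cov=cov, returns=returns, rm="CVaR"))
    print("RC (MV):", Risk_Contribution(w, cov=cov, returns=returns, rm="MV"))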
|
py | 1a2ed91e678357e837cc805b58d9cba55a2abf29 | #!/usr/bin/env python
try:
from setuptools import setup
requires = {
'install_requires': ['django >= 4.0'],
}
except ImportError:
from distutils.core import setup
requires = {}
from os.path import abspath, dirname, join
with open(join(dirname(abspath(__file__)), 'src', 'rfdoc', 'version.py')) as f:
exec(f.read())
# Maximum width in Windows installer seems to be 70 characters -------|
DESCRIPTION = """
RFDoc is a web application for storing and searching Robot Framework
test library and resource file documentations.
Required packages:
django >= 4.0
"""[1:-1]
CLASSIFIERS = """
Development Status :: 5 - Production/Stable
License :: OSI Approved :: Apache Software License
Operating System :: OS Independent
Programming Language :: Python
Topic :: Software Development :: Testing
"""[1:-1]
setup(
name = 'robotframework-rfdoc',
version = VERSION,
description = 'Web-based Robot Framework library documentation server',
long_description = DESCRIPTION,
author = 'Robot Framework Developers',
author_email = '[email protected]',
url = 'http://code.google.com/p/rfdoc/',
license = 'Apache License 2.0',
keywords = 'robotframework testing testautomation documentation',
platforms = 'any',
classifiers = CLASSIFIERS.splitlines(),
package_dir = {'rfdoc': 'src/rfdoc'},
packages = ['rfdoc', 'rfdoc.rfdocapp', 'rfdoc.rfdocapp.views',
'rfdoc.rfdocapp.templatetags', 'rfdoc.rfdocapp.utils'],
package_data = {'rfdoc': ['*.tmpl', 'rfdocapp/templates/*.html',
'rfdocapp/static/*.css',
'rfdocapp/static/*.js']},
**requires
)
|
py | 1a2ed92f3415b527cd451de99a65af6710e9de8e | text = """
//------------------------------------------------------------------------------
// Explicit instantiation.
//------------------------------------------------------------------------------
#include "Geometry/Dimension.hh"
#include "IncrementFieldList.cc"
namespace Spheral {
template class IncrementFieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::Scalar>;
template class IncrementFieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::Vector>;
template class IncrementFieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::Vector3d>;
template class IncrementFieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::Tensor>;
template class IncrementFieldList<Dim< %(ndim)s >, Dim< %(ndim)s >::SymTensor>;
}
"""
|
py | 1a2ed9391a43aef205a579841ea907a66b02a0bb | from pelican import signals
from pelican.generators import ArticlesGenerator, PagesGenerator
# Make sure than when a title breaks, there will never be
# a single word "alone" on its line
# Does not work if the last "word" of the title is an emoji
# in the form of an image (like Twemoji)
# Title has to be more than four words
# in order to be considered
SMART_BREAK_MIN_LEN = 4
def smart_break(document):
# Get the number of words
splited = document.title.split(' ')
length = len(splited)
if length > SMART_BREAK_MIN_LEN:
# Join the last two elements with a non-breaking space
        end = '\u00a0'.join(splited[length - 2:])  # U+00A0 non-breaking space
# Get the start of the title back
start = ' '.join(splited[:length-2])
# Glue the title back together
final = f'{start} {end}'
# Write to a custom property
# Writing the title directly leads to not being
# interpreted at various places
document.smart_title = final
def run(generators):
for g in generators:
if isinstance(g, ArticlesGenerator):
for a in g.articles:
smart_break(a)
if isinstance(g, PagesGenerator):
for p in g.pages:
smart_break(p)
def register():
signals.all_generators_finalized.connect(run)
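# Quick self-check of the title-splitting logic (illustrative only; uses a tiny
# stand-in object rather than a real Pelican article or page):
if __name__ == '__main__':
    class _FakeDoc:
        title = 'A fairly long example title for testing'
    doc = _FakeDoc()
    smart_break(doc)
    # The last two words are now glued together with a non-breaking space.
    print(repr(doc.smart_title))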
|
py | 1a2eda00fae388833afb0376528a47736a300622 | from werkzeug.local import LocalStack, LocalProxy
def _find_bot():
from .wx import get_bot
top = _wx_ctx_stack.top
if top is None:
top = get_bot()
_wx_ctx_stack.push(top)
return top
_wx_ctx_stack = LocalStack()
current_bot = LocalProxy(_find_bot)
|
py | 1a2edb2174b9c85e952fd7d274a2017cc64b1a9d | _base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline)) |
py | 1a2edbb39b797f4afb516ce7671752d1cdfac609 | import smbus2 as smbus
import ctypes
class I2cdev:
def __init__(self, a_bus=1):
self.bus = smbus.SMBus(a_bus)
# I2Cdev::I2Cdev() { }
# void I2Cdev::initialize() {
# bcm2835_init();
# bcm2835_i2c_set_baudrate( i2c_baudrate );
# }
# /** Enable or disable I2C,
# * @param isEnabled true = enable, false = disable
# */
# void I2Cdev::enable(bool isEnabled) {
# if ( set_I2C_pins ){
# if (isEnabled)
# bcm2835_i2c_end();
# else
# bcm2835_i2c_begin() ;
# }
# }
# /** Read a single bit from an 8-bit device register.
# * @param devAddr I2C slave device address
# * @param regAddr Register regAddr to read from
# * @param bitNum Bit position to read (0-7)
# * @param data Container for single bit value
# * @return Status of read operation (true = success)
# */
# int8_t - uint8_t devAddr, uint8_t regAddr, uint8_t bitNum, uint8_t *data
def readBit(self, devAddr, regAddr, bitNum):
data = self.bus.read_byte_data(devAddr, regAddr)
return data & (1 << bitNum)
# /** Read multiple bits from an 8-bit device register.
# * @param devAddr I2C slave device address
# * @param regAddr Register regAddr to read from
# * @param bitStart First bit position to read (0-7)
# * @param length Number of bits to read (not more than 8)
# * @param data Container for right-aligned value (i.e. '101' read from any bitStart position will equal 0x05)
# * @return Status of read operation (true = success)
# */
# int8_t
# def readBits(uint8_t devAddr, uint8_t regAddr, uint8_t bitStart, uint8_t length, uint8_t *data):
# # // 01101001 read byte
# # // 76543210 bit numbers
# # // xxx args: bitStart=4, length=3
# # // 010 masked
# # // -> 010 shifted
# bcm2835_i2c_setSlaveAddress(devAddr);
# sendBuf[0] = regAddr;
# uint8_t response = bcm2835_i2c_write_read_rs(sendBuf, 1, recvBuf, 1);
# uint8_t b = (uint8_t) recvBuf[0];
# if (response == BCM2835_I2C_REASON_OK) {
# uint8_t mask = ((1 << length) - 1) << (bitStart - length + 1);
# b &= mask;
# b >>= (bitStart - length + 1);
# *data = b;
# }
# return response == BCM2835_I2C_REASON_OK;
def readBits(self, devAddr, a_reg_add, a_bit_start, a_length):
byte = self.bus.read_byte_data(devAddr, a_reg_add)
mask = ((1 << a_length) - 1) << (a_bit_start - a_length + 1)
byte &= mask
byte >>= a_bit_start - a_length + 1
return byte
# /** Read single byte from an 8-bit device register.
# * @param devAddr I2C slave device address
# * @param regAddr Register regAddr to read from
# * @param data Container for byte value read from device
# * @return Status of read operation (true = success)
# */
# int8_t - uint8_t devAddr, uint8_t regAddr, uint8_t *data
def readByte(self, devAddr, regAddr):
return self.bus.read_byte_data(devAddr, regAddr)
# /** Read multiple bytes from an 8-bit device register.
# * @param devAddr I2C slave device address
# * @param regAddr First register regAddr to read from
# * @param length Number of bytes to read
# * @param data Buffer to store read data in
# * @return I2C_TransferReturn_TypeDef http://downloads.energymicro.com/documentation/doxygen/group__I2C.html
# */
# int8_t - uint8_t devAddr, uint8_t regAddr, uint8_t length, uint8_t *data
# def readBytes(devAddr, regAddr, length):
# bcm2835_i2c_setSlaveAddress(devAddr);
# sendBuf[0] = regAddr;
# uint8_t response = bcm2835_i2c_write_read_rs(sendBuf, 1, recvBuf, length);
# int i ;
# for (i = 0; i < length ; i++) {
# data[i] = (uint8_t) recvBuf[i];
# }
# return response == BCM2835_I2C_REASON_OK;
def readBytes(self, devAddr, a_address, a_length):
# if a_length > len(a_data_list):
# print('read_bytes, length of passed list too short')
# return a_data_list
# Attempt to use the built in read bytes function in the adafruit lib
# a_data_list = self.__bus.read_i2c_block_data(self.__dev_id, a_address,
# a_length)
# Attempt to bypass adafruit lib
#a_data_list = self.__mpu.bus.read_i2c_block_data(0x68, a_address, a_length)
#print('data' + str(a_data_list))
a_data_list = list()
for x in range(0, a_length):
# print("x:{}".format(x))
a_data_list.append(self.bus.read_byte_data(devAddr, a_address + x))
return a_data_list
# /** Read single word from a 16-bit device register.
# * @param devAddr I2C slave device address
# * @param regAddr Register regAddr to read from
# * @param data Container for word value read from device
# * @return Status of read operation (true = success)
# */
# int8_t - uint8_t devAddr, uint8_t regAddr, uint16_t *data
# def readWord(self, devAddr, regAddr):
# return self.bus.read_word_data(devAddr, regAddr)
# /** Read multiple words from a 16-bit device register.
# * @param devAddr I2C slave device address
# * @param regAddr First register regAddr to read from
# * @param length Number of words to read
# * @param data Buffer to store read data in
# * @return Number of words read (-1 indicates failure)
# */
# int8_t
# def readWords(uint8_t devAddr, uint8_t regAddr, uint8_t length, uint16_t *data):
# bcm2835_i2c_setSlaveAddress(devAddr);
# sendBuf[0] = regAddr;
# uint8_t response = bcm2835_i2c_write_read_rs(sendBuf, 1, recvBuf, length*2 );
# uint8_t i;
# for (i = 0; i < length; i++) {
# data[i] = (recvBuf[i*2] << 8) | recvBuf[i*2+1] ;
# }
# return response == BCM2835_I2C_REASON_OK ;
# /** write a single bit in an 8-bit device register.
# * @param devAddr I2C slave device address
# * @param regAddr Register regAddr to write to
# * @param bitNum Bit position to write (0-7)
# * @param value New bit value to write
# * @return Status of operation (true = success)
# */
# bool - uint8_t devAddr, uint8_t regAddr, uint8_t bitNum, uint8_t data
def writeBit(self, devAddr, regAddr, bitNum, data):
prev_data = self.bus.read_byte_data(devAddr, regAddr)
next_data = 0
if data != 0:
next_data = (prev_data | (1 << bitNum))
else:
next_data = (prev_data & ~(1 << bitNum))
self.bus.write_byte_data(devAddr, regAddr, next_data)
# self.bus.write_byte_data(devAddr, regAddr, ctypes.c_int8(next_data).value)
# def write_bit(self, devAddr, a_reg_add, a_bit_num, a_bit):
# byte = self.bus.read_byte_data(self.__dev_id, a_reg_add)
# if a_bit:
# byte |= 1 << a_bit_num
# else:
# byte &= ~(1 << a_bit_num)
# self.bus.write_byte_data(
# self.__dev_id, a_reg_add, ctypes.c_int8(byte).value)
# /** Write multiple bits in an 8-bit device register.
# * @param devAddr I2C slave device address
# * @param regAddr Register regAddr to write to
# * @param bitStart First bit position to write (0-7)
# * @param length Number of bits to write (not more than 8)
# * @param data Right-aligned value to write
# * @return Status of operation (true = success)
# */
# bool
# def writeBits(uint8_t devAddr, uint8_t regAddr, uint8_t bitStart, uint8_t length, uint8_t data):
# # // 010 value to write
# # // 76543210 bit numbers
# # // xxx args: bitStart=4, length=3
# # // 00011100 mask byte
# # // 10101111 original value (sample)
# # // 10100011 original & ~mask
# # // 10101011 masked | value
# bcm2835_i2c_setSlaveAddress(devAddr);
# # //first reading registery value
# sendBuf[0] = regAddr;
# uint8_t response = bcm2835_i2c_write_read_rs(sendBuf, 1, recvBuf, 1 );
# if ( response == BCM2835_I2C_REASON_OK ) {
# uint8_t b = recvBuf[0];
# uint8_t mask = ((1 << length) - 1) << (bitStart - length + 1);
# data <<= (bitStart - length + 1); // shift data into correct position
# data &= mask; // zero all non-important bits in data
# b &= ~(mask); // zero all important bits in existing byte
# b |= data; // combine data with existing byte
# sendBuf[1] = b ;
# response = bcm2835_i2c_write(sendBuf, 2);
# }
# return response == BCM2835_I2C_REASON_OK;
def writeBits(self, devAddr, a_reg_add, a_bit_start, a_length, a_data):
byte = self.bus.read_byte_data(devAddr, a_reg_add)
mask = ((1 << a_length) - 1) << (a_bit_start - a_length + 1)
# Get data in position and zero all non-important bits in data
a_data <<= a_bit_start - a_length + 1
a_data &= mask
# Clear all important bits in read byte and combine with data
byte &= ~mask
byte = byte | a_data
# Write the data to the I2C device
# self.__bus.write_byte_data(self.__dev_id, a_reg_add, ctypes.c_int8(byte).value)
self.bus.write_byte_data(devAddr, a_reg_add, byte)
# /** Write single byte to an 8-bit device register.
# * @param devAddr I2C slave device address
# * @param regAddr Register address to write to
# * @param data New byte value to write
# * @return Status of operation (true = success)
# */
# bool - uint8_t devAddr, uint8_t regAddr, uint8_t data
def writeByte(self, devAddr, regAddr, data):
self.bus.write_byte_data(devAddr, regAddr, data)
# bool
# def writeBytes(uint8_t devAddr, uint8_t regAddr, uint8_t length, uint8_t *data):
# bcm2835_i2c_setSlaveAddress(devAddr);
# sendBuf[0] = regAddr;
# uint8_t i;
# for (i = 0; i < length; i++) {
# sendBuf[i+1] = data[i] ;
# }
# uint8_t response = bcm2835_i2c_write(sendBuf, 1+length);
# return response == BCM2835_I2C_REASON_OK ;
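    # A minimal sketch of writeBytes (an assumption, not part of the original
    # port): smbus's write_i2c_block_data sends the register address followed
    # by the payload, matching the commented bcm2835 code above. SMBus block
    # writes are limited to 32 data bytes.
    def writeBytes(self, devAddr, regAddr, data):
        self.bus.write_i2c_block_data(devAddr, regAddr, list(data))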
# bool - uint8_t devAddr, uint8_t regAddr, uint16_t data
def writeWord(self, devAddr, regAddr, data):
self.bus.write_word_data(devAddr, regAddr, data)
# bool
# def writeWords(uint8_t devAddr, uint8_t regAddr, uint8_t length, uint16_t *data):
# bcm2835_i2c_setSlaveAddress(devAddr);
# sendBuf[0] = regAddr;
# uint8_t i;
# for (i = 0; i < length; i++) {
# sendBuf[1+2*i] = (uint8_t) (data[i] >> 8); //MSByte
# sendBuf[2+2*i] = (uint8_t) (data[i] >> 0); //LSByte
# }
# uint8_t response = bcm2835_i2c_write(sendBuf, 1+2*length);
# return response == BCM2835_I2C_REASON_OK ;
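    # A minimal sketch of writeWords (an assumption, not part of the original
    # port): split each 16-bit word MSB-first, as the commented bcm2835 code
    # above does, then send the whole buffer in one block write.
    def writeWords(self, devAddr, regAddr, data):
        buf = []
        for word in data:
            buf.append((word >> 8) & 0xFF)  # MSB first
            buf.append(word & 0xFF)         # then LSB
        self.bus.write_i2c_block_data(devAddr, regAddr, buf)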
|
py | 1a2edc555fdf1d04a6698fb3713254ffa32e7c81 | def time_converter(string):
    # Convert a 24-hour "HH:MM" string to 12-hour time with an a.m./p.m. suffix.
    hora, minutos = string.split(':')
    hora = int(hora)
    if hora == 0:
        return '12:{} a.m.'.format(minutos)
    elif hora < 12:
        return '{}:{} a.m.'.format(hora, minutos)
    elif hora == 12:
        return '12:{} p.m.'.format(minutos)
    else:
        return '{}:{} p.m.'.format(hora - 12, minutos)
print(time_converter('12:30'))
print(time_converter('09:00'))
print(time_converter('23:15'))
print(time_converter('00:30'))
print(time_converter('00:00')) |
py | 1a2edc63eae0a1495b0654d3138a54b6bd8f5a57 | #!/usr/bin/env python3
import os
os.environ['NOCRASH'] = '1'
import unittest
import matplotlib
matplotlib.use('svg')
from selfdrive.config import Conversions as CV
from selfdrive.car.honda.values import CruiseButtons as CB
from selfdrive.test.longitudinal_maneuvers.maneuver import Maneuver
from selfdrive.manager.process_config import managed_processes
from common.file_helpers import mkdirs_exists_ok
from common.params import Params
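# Predicates evaluated against each maneuver's simulated log.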
def check_no_collision(log):
return min(log['d_rel']) > 0
def check_fcw(log):
return any(log['fcw'])
def check_engaged(log):
return log['controls_state_msgs'][-1][-1].active
maneuvers = [
Maneuver(
'while cruising at 40 mph, change cruise speed to 50mph',
duration=30.,
initial_speed=40. * CV.MPH_TO_MS,
cruise_button_presses=[(CB.DECEL_SET, 2.), (0, 2.3),
(CB.RES_ACCEL, 10.), (0, 10.1),
(CB.RES_ACCEL, 10.2), (0, 10.3)],
checks=[check_engaged],
),
Maneuver(
'while cruising at 60 mph, change cruise speed to 50mph',
duration=30.,
initial_speed=60. * CV.MPH_TO_MS,
cruise_button_presses=[(CB.DECEL_SET, 2.), (0, 2.3),
(CB.DECEL_SET, 10.), (0, 10.1),
(CB.DECEL_SET, 10.2), (0, 10.3)],
checks=[check_engaged],
),
Maneuver(
'while cruising at 20mph, uphill grade of 10%',
duration=25.,
initial_speed=20. * CV.MPH_TO_MS,
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
grade_values=[0., 0., .1],
grade_breakpoints=[0., 10., 11.],
checks=[check_engaged],
),
Maneuver(
'while cruising at 20mph, downhill grade of -10%',
duration=25.,
initial_speed=20. * CV.MPH_TO_MS,
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
grade_values=[0., 0., -.1],
grade_breakpoints=[0., 10., 11.],
checks=[check_engaged],
),
Maneuver(
'approaching a 40mph car while cruising at 60mph from 100m away',
duration=30.,
initial_speed=60. * CV.MPH_TO_MS,
lead_relevancy=True,
initial_distance_lead=100.,
speed_lead_values=[40. * CV.MPH_TO_MS, 40. * CV.MPH_TO_MS],
speed_lead_breakpoints=[0., 100.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
'approaching a 0mph car while cruising at 40mph from 150m away',
duration=30.,
initial_speed=40. * CV.MPH_TO_MS,
lead_relevancy=True,
initial_distance_lead=150.,
speed_lead_values=[0. * CV.MPH_TO_MS, 0. * CV.MPH_TO_MS],
speed_lead_breakpoints=[0., 100.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
'steady state following a car at 20m/s, then lead decel to 0mph at 1m/s^2',
duration=50.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 20., 0.],
speed_lead_breakpoints=[0., 15., 35.0],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
'steady state following a car at 20m/s, then lead decel to 0mph at 2m/s^2',
duration=50.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 20., 0.],
speed_lead_breakpoints=[0., 15., 25.0],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
'steady state following a car at 20m/s, then lead decel to 0mph at 3m/s^2',
duration=50.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 20., 0.],
speed_lead_breakpoints=[0., 15., 21.66],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
checks=[check_engaged, check_fcw],
),
Maneuver(
'steady state following a car at 20m/s, then lead decel to 0mph at 5m/s^2',
duration=40.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 20., 0.],
speed_lead_breakpoints=[0., 15., 19.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
checks=[check_engaged, check_fcw],
),
Maneuver(
'starting at 0mph, approaching a stopped car 100m away',
duration=30.,
initial_speed=0.,
lead_relevancy=True,
initial_distance_lead=100.,
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"following a car at 60mph, lead accel and decel at 0.5m/s^2 every 2s",
duration=25.,
initial_speed=30.,
lead_relevancy=True,
initial_distance_lead=49.,
speed_lead_values=[30., 30., 29., 31., 29., 31., 29.],
speed_lead_breakpoints=[0., 6., 8., 12., 16., 20., 24.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"following a car at 10mph, stop and go at 1m/s2 lead dece1 and accel",
duration=70.,
initial_speed=10.,
lead_relevancy=True,
initial_distance_lead=20.,
speed_lead_values=[10., 0., 0., 10., 0., 10.],
speed_lead_breakpoints=[10., 20., 30., 40., 50., 60.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"green light: stopped behind lead car, lead car accelerates at 1.5 m/s",
duration=30.,
initial_speed=0.,
lead_relevancy=True,
initial_distance_lead=4.,
speed_lead_values=[0, 0, 45],
speed_lead_breakpoints=[0, 10., 40.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9),
(CB.RES_ACCEL, 2.0), (0.0, 2.1),
(CB.RES_ACCEL, 2.2), (0.0, 2.3)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"stop and go with 1m/s2 lead decel and accel, with full stops",
duration=70.,
initial_speed=0.,
lead_relevancy=True,
initial_distance_lead=20.,
speed_lead_values=[10., 0., 0., 10., 0., 0.],
speed_lead_breakpoints=[10., 20., 30., 40., 50., 60.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"stop and go with 1.5m/s2 lead accel and 3.3m/s^2 lead decel, with full stops",
duration=45.,
initial_speed=0.,
lead_relevancy=True,
initial_distance_lead=20.,
speed_lead_values=[10., 0., 0., 10., 0., 0.],
speed_lead_breakpoints=[10., 13., 26., 33., 36., 45.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"accelerate from 20 while lead vehicle decelerates from 40 to 20 at 1m/s2",
duration=30.,
initial_speed=10.,
lead_relevancy=True,
initial_distance_lead=10.,
speed_lead_values=[20., 10.],
speed_lead_breakpoints=[1., 11.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9),
(CB.RES_ACCEL, 2.0), (0.0, 2.1),
(CB.RES_ACCEL, 2.2), (0.0, 2.3)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"accelerate from 20 while lead vehicle decelerates from 40 to 0 at 2m/s2",
duration=30.,
initial_speed=10.,
lead_relevancy=True,
initial_distance_lead=10.,
speed_lead_values=[20., 0.],
speed_lead_breakpoints=[1., 11.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9),
(CB.RES_ACCEL, 2.0), (0.0, 2.1),
(CB.RES_ACCEL, 2.2), (0.0, 2.3)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"fcw: traveling at 30 m/s and approaching lead traveling at 20m/s",
duration=15.,
initial_speed=30.,
lead_relevancy=True,
initial_distance_lead=100.,
speed_lead_values=[20.],
speed_lead_breakpoints=[1.],
cruise_button_presses=[],
checks=[check_fcw],
),
Maneuver(
"fcw: traveling at 20 m/s following a lead that decels from 20m/s to 0 at 1m/s2",
duration=18.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 0.],
speed_lead_breakpoints=[3., 23.],
cruise_button_presses=[],
checks=[check_fcw],
),
Maneuver(
"fcw: traveling at 20 m/s following a lead that decels from 20m/s to 0 at 3m/s2",
duration=13.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 0.],
speed_lead_breakpoints=[3., 9.6],
cruise_button_presses=[],
checks=[check_fcw],
),
Maneuver(
"fcw: traveling at 20 m/s following a lead that decels from 20m/s to 0 at 5m/s2",
duration=8.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 0.],
speed_lead_breakpoints=[3., 7.],
cruise_button_presses=[],
checks=[check_fcw],
)
]
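# Write a static out/longitudinal/index.html that embeds the per-maneuver SVG
# plots produced by the test runs (skipped if the file already exists).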
def setup_output():
output_dir = os.path.join(os.getcwd(), 'out/longitudinal')
if not os.path.exists(os.path.join(output_dir, "index.html")):
# write test output header
css_style = """
.maneuver_title {
font-size: 24px;
text-align: center;
}
.maneuver_graph {
width: 100%;
}
"""
view_html = "<html><head><style>%s</style></head><body><table>" % (css_style,)
for i, man in enumerate(maneuvers):
view_html += "<tr><td class='maneuver_title' colspan=5><div>%s</div></td></tr><tr>" % (man.title,)
for c in ['distance.svg', 'speeds.svg', 'acceleration.svg', 'pedals.svg', 'pid.svg']:
view_html += "<td><img class='maneuver_graph' src='%s'/></td>" % (os.path.join("maneuver" + str(i + 1).zfill(2), c), )
view_html += "</tr>"
mkdirs_exists_ok(output_dir)
with open(os.path.join(output_dir, "index.html"), "w") as f:
f.write(view_html)
class LongitudinalControl(unittest.TestCase):
@classmethod
def setUpClass(cls):
os.environ['SIMULATION'] = "1"
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['NO_CAN_TIMEOUT'] = "1"
setup_output()
params = Params()
params.clear_all()
params.put_bool("Passive", bool(os.getenv("PASSIVE")))
params.put_bool("OpenpilotEnabledToggle", True)
params.put_bool("CommunityFeaturesToggle", True)
# hack
def test_longitudinal_setup(self):
pass
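# Generate one unittest method per maneuver: each one starts the
# radard/controlsd/plannerd daemons, runs the simulated maneuver, writes its
# plots, and retries up to three times before asserting the checks passed.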
def run_maneuver_worker(k):
man = maneuvers[k]
output_dir = os.path.join(os.getcwd(), 'out/longitudinal')
def run(self):
print(man.title)
valid = False
for _ in range(3):
managed_processes['radard'].start()
managed_processes['controlsd'].start()
managed_processes['plannerd'].start()
plot, valid = man.evaluate()
plot.write_plot(output_dir, "maneuver" + str(k + 1).zfill(2))
managed_processes['radard'].stop()
managed_processes['controlsd'].stop()
managed_processes['plannerd'].stop()
if valid:
break
self.assertTrue(valid)
return run
for k in range(len(maneuvers)):
setattr(LongitudinalControl, "test_longitudinal_maneuvers_%d" % (k + 1), run_maneuver_worker(k))
if __name__ == "__main__":
unittest.main(failfast=True)
|